mirror of
https://github.com/dragonflydb/dragonfly.git
synced 2025-05-10 18:05:44 +02:00
fix: Do not bump elements during RDB load (#4507)
fix: Do not bump elements during RDB load (#4507). The issue: before this PR, when loading an RDB, we modified fetched_items_ as part of the loading process. This has little effect, unless the next issued command calls FLUSHALL / FLUSHDB (which could happen in DFLY LOAD, REPLICAOF, or just by calling FLUSHALL directly). In such a case, a CHECK() fails. The fix: while load is not run as a command (in a transaction), it still uses APIs that assume they are called in the context of a command. As such, it indirectly used DbSlice::FindInternal(), which bumps elements when called. This PR adds another sub-mode to DbSlice, named load_in_progress_. When true, we treat DbSlice as if it is not in cache mode, ignoring cache_mode_. Additionally, this PR renames caching_mode_ to cache_mode_, as we generally use the term "cache mode" rather than "caching mode", including in the --cache_mode flag. Fixes #4497
This commit is contained in:
parent
bafb427a09
commit
3fdd9877f4
4 changed files with 39 additions and 10 deletions
|
@ -270,11 +270,12 @@ SliceEvents& SliceEvents::operator+=(const SliceEvents& o) {
|
||||||
|
|
||||||
#undef ADD
|
#undef ADD
|
||||||
|
|
||||||
DbSlice::DbSlice(uint32_t index, bool caching_mode, EngineShard* owner)
|
DbSlice::DbSlice(uint32_t index, bool cache_mode, EngineShard* owner)
|
||||||
: shard_id_(index),
|
: shard_id_(index),
|
||||||
caching_mode_(caching_mode),
|
cache_mode_(cache_mode),
|
||||||
owner_(owner),
|
owner_(owner),
|
||||||
client_tracking_map_(owner->memory_resource()) {
|
client_tracking_map_(owner->memory_resource()) {
|
||||||
|
load_in_progress_ = false;
|
||||||
db_arr_.emplace_back();
|
db_arr_.emplace_back();
|
||||||
CreateDb(0);
|
CreateDb(0);
|
||||||
expire_base_[0] = expire_base_[1] = 0;
|
expire_base_[0] = expire_base_[1] = 0;
|
||||||
|
@ -471,7 +472,7 @@ OpResult<DbSlice::PrimeItAndExp> DbSlice::FindInternal(const Context& cntx, std:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (caching_mode_ && IsValid(res.it)) {
|
if (IsCacheMode() && IsValid(res.it)) {
|
||||||
if (!change_cb_.empty()) {
|
if (!change_cb_.empty()) {
|
||||||
FetchedItemsRestorer fetched_restorer(&fetched_items_);
|
FetchedItemsRestorer fetched_restorer(&fetched_items_);
|
||||||
auto bump_cb = [&](PrimeTable::bucket_iterator bit) {
|
auto bump_cb = [&](PrimeTable::bucket_iterator bit) {
|
||||||
|
@ -600,14 +601,14 @@ OpResult<DbSlice::AddOrFindResult> DbSlice::AddOrFindInternal(const Context& cnt
|
||||||
!owner_->IsReplica() && !(ServerState::tlocal()->gstate() == GlobalState::LOADING);
|
!owner_->IsReplica() && !(ServerState::tlocal()->gstate() == GlobalState::LOADING);
|
||||||
|
|
||||||
// If we are over limit in non-cache scenario, just be conservative and throw.
|
// If we are over limit in non-cache scenario, just be conservative and throw.
|
||||||
if (apply_memory_limit && !caching_mode_ && memory_budget_ + memory_offset < 0) {
|
if (apply_memory_limit && !IsCacheMode() && memory_budget_ + memory_offset < 0) {
|
||||||
LOG_EVERY_T(WARNING, 1) << "AddOrFind: over limit, budget: " << memory_budget_
|
LOG_EVERY_T(WARNING, 1) << "AddOrFind: over limit, budget: " << memory_budget_
|
||||||
<< " reclaimed: " << reclaimed << " offset: " << memory_offset;
|
<< " reclaimed: " << reclaimed << " offset: " << memory_offset;
|
||||||
events_.insertion_rejections++;
|
events_.insertion_rejections++;
|
||||||
return OpStatus::OUT_OF_MEMORY;
|
return OpStatus::OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
PrimeEvictionPolicy evp{cntx, (bool(caching_mode_) && !owner_->IsReplica()),
|
PrimeEvictionPolicy evp{cntx, (IsCacheMode() && !owner_->IsReplica()),
|
||||||
memory_offset, ssize_t(soft_budget_limit_),
|
memory_offset, ssize_t(soft_budget_limit_),
|
||||||
this, apply_memory_limit};
|
this, apply_memory_limit};
|
||||||
|
|
||||||
|
@ -1272,7 +1273,7 @@ pair<uint64_t, size_t> DbSlice::FreeMemWithEvictionStep(DbIndex db_ind, size_t s
|
||||||
return {0, evicted_bytes};
|
return {0, evicted_bytes};
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((!caching_mode_) || !expire_allowed_)
|
if ((!IsCacheMode()) || !expire_allowed_)
|
||||||
return {0, 0};
|
return {0, 0};
|
||||||
|
|
||||||
auto max_eviction_per_hb = GetFlag(FLAGS_max_eviction_per_heartbeat);
|
auto max_eviction_per_hb = GetFlag(FLAGS_max_eviction_per_heartbeat);
|
||||||
|
|
|
@ -229,7 +229,7 @@ class DbSlice {
|
||||||
int32_t expire_options = 0; // ExpireFlags
|
int32_t expire_options = 0; // ExpireFlags
|
||||||
};
|
};
|
||||||
|
|
||||||
DbSlice(uint32_t index, bool caching_mode, EngineShard* owner);
|
DbSlice(uint32_t index, bool cache_mode, EngineShard* owner);
|
||||||
~DbSlice();
|
~DbSlice();
|
||||||
|
|
||||||
// Activates `db_ind` database if it does not exist (see ActivateDb below).
|
// Activates `db_ind` database if it does not exist (see ActivateDb below).
|
||||||
|
@ -468,7 +468,16 @@ class DbSlice {
|
||||||
}
|
}
|
||||||
|
|
||||||
void TEST_EnableCacheMode() {
|
void TEST_EnableCacheMode() {
|
||||||
caching_mode_ = 1;
|
cache_mode_ = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool IsCacheMode() const {
|
||||||
|
// During loading time we never bump elements.
|
||||||
|
return cache_mode_ && !load_in_progress_;
|
||||||
|
}
|
||||||
|
|
||||||
|
void SetLoadInProgress(bool in_progress) {
|
||||||
|
load_in_progress_ = in_progress;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test hook to inspect last locked keys.
|
// Test hook to inspect last locked keys.
|
||||||
|
@ -575,7 +584,8 @@ class DbSlice {
|
||||||
mutable LocalBlockingCounter block_counter_;
|
mutable LocalBlockingCounter block_counter_;
|
||||||
|
|
||||||
ShardId shard_id_;
|
ShardId shard_id_;
|
||||||
uint8_t caching_mode_ : 1;
|
uint8_t cache_mode_ : 1;
|
||||||
|
uint8_t load_in_progress_ : 1;
|
||||||
|
|
||||||
EngineShard* owner_;
|
EngineShard* owner_;
|
||||||
|
|
||||||
|
|
|
@ -2034,6 +2034,9 @@ error_code RdbLoader::Load(io::Source* src) {
|
||||||
|
|
||||||
auto cleanup = absl::Cleanup([&] { FinishLoad(start, &keys_loaded); });
|
auto cleanup = absl::Cleanup([&] { FinishLoad(start, &keys_loaded); });
|
||||||
|
|
||||||
|
shard_set->AwaitRunningOnShardQueue([](EngineShard* es) {
|
||||||
|
namespaces->GetDefaultNamespace().GetCurrentDbSlice().SetLoadInProgress(true);
|
||||||
|
});
|
||||||
while (!stop_early_.load(memory_order_relaxed)) {
|
while (!stop_early_.load(memory_order_relaxed)) {
|
||||||
if (pause_) {
|
if (pause_) {
|
||||||
ThisFiber::SleepFor(100ms);
|
ThisFiber::SleepFor(100ms);
|
||||||
|
@ -2223,7 +2226,10 @@ void RdbLoader::FinishLoad(absl::Time start_time, size_t* keys_loaded) {
|
||||||
FlushShardAsync(i);
|
FlushShardAsync(i);
|
||||||
|
|
||||||
// Send sentinel callbacks to ensure that all previous messages have been processed.
|
// Send sentinel callbacks to ensure that all previous messages have been processed.
|
||||||
shard_set->Add(i, [bc]() mutable { bc->Dec(); });
|
shard_set->Add(i, [bc]() mutable {
|
||||||
|
namespaces->GetDefaultNamespace().GetCurrentDbSlice().SetLoadInProgress(false);
|
||||||
|
bc->Dec();
|
||||||
|
});
|
||||||
}
|
}
|
||||||
bc->Wait(); // wait for sentinels to report.
|
bc->Wait(); // wait for sentinels to report.
|
||||||
|
|
||||||
|
|
|
@ -722,4 +722,16 @@ TEST_F(RdbTest, SnapshotTooBig) {
|
||||||
ASSERT_THAT(resp, ErrArg("Out of memory"));
|
ASSERT_THAT(resp, ErrArg("Out of memory"));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TEST_F(RdbTest, HugeKeyIssue4497) {
|
||||||
|
SetTestFlag("cache_mode", "true");
|
||||||
|
ResetService();
|
||||||
|
|
||||||
|
EXPECT_EQ(Run({"flushall"}), "OK");
|
||||||
|
EXPECT_EQ(Run({"debug", "populate", "1", "k", "1000", "rand", "type", "set", "elements", "5000"}),
|
||||||
|
"OK");
|
||||||
|
EXPECT_EQ(Run({"save", "rdb", "hugekey.rdb"}), "OK");
|
||||||
|
EXPECT_EQ(Run({"dfly", "load", "hugekey.rdb"}), "OK");
|
||||||
|
EXPECT_EQ(Run({"flushall"}), "OK");
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace dfly
|
} // namespace dfly
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue