dragonfly/src/server/table.cc
Shahar Mike 54c9633cb8
feat(dbslice): Add self-laundering iterator in DbSlice (#2815)
A self-laundering iterator will enable us to, eventually, yield from fibers while holding an iterator. For example:

```cpp
auto it1 = db_slice.Find(...);
Yield();  // Until now - this could have invalidated `it1`
auto it2 = db_slice.Find(...);
```

Why is this a good idea? Because it will enable yielding inside PreUpdate() which will allow breaking down of writing huge entries in small quantities to disk/network, eliminating the need to allocate huge chunks of memory just for serialization.

It will likely unlock future developments as well, since yielding can be useful in other contexts.
2024-04-09 12:00:52 +03:00

125 lines
3.4 KiB
C++

// Copyright 2022, DragonflyDB authors. All rights reserved.
// See LICENSE for licensing terms.
//
#include "server/table.h"
#include "base/flags.h"
#include "base/logging.h"
#include "server/cluster/cluster_config.h"
#include "server/server_state.h"
ABSL_FLAG(bool, enable_top_keys_tracking, false,
"Enables / disables tracking of hot keys debugging feature");
namespace dfly {
// Accumulates field `x` from the right-hand operand `o`; used by the
// operator+= overloads below to keep the field lists short.
#define ADD(x) (x) += o.x
// Initial log2 of the segment count for the prime dashtable (see DbTable ctor).
// It should be const, but we override this variable in our tests so that they run faster.
unsigned kInitSegmentLog = 3;
// Adds `delta` (may be negative) to the aggregate object-memory counter and to
// the per-type bucket for `type`. Unknown types are logged once and ignored.
void DbTableStats::AddTypeMemoryUsage(unsigned type, int64_t delta) {
  const bool known_type = type < memory_usage_by_type.size();
  if (!known_type) {
    // Log only once per process to avoid flooding; crash in debug builds.
    LOG_FIRST_N(WARNING, 1) << "Encountered unknown type when aggregating per-type memory: "
                            << type;
    DCHECK(false) << "Unsupported type " << type;
    return;
  }
  // Keep the aggregate counter and the per-type breakdown in sync.
  obj_memory_usage += delta;
  memory_usage_by_type[type] += delta;
}
// Field-wise accumulation of another DbTableStats into this one.
DbTableStats& DbTableStats::operator+=(const DbTableStats& o) {
  // Layout guard: breaks the build if fields are added/removed without
  // updating the accumulation below.
  constexpr size_t kDbSz = sizeof(DbTableStats) - sizeof(memory_usage_by_type);
  static_assert(kDbSz == 48);

  inline_keys += o.inline_keys;
  obj_memory_usage += o.obj_memory_usage;
  listpack_blob_cnt += o.listpack_blob_cnt;
  listpack_bytes += o.listpack_bytes;
  tiered_entries += o.tiered_entries;
  tiered_size += o.tiered_size;

  // Element-wise accumulation of the per-type memory breakdown.
  for (size_t i = 0; i < o.memory_usage_by_type.size(); ++i) {
    memory_usage_by_type[i] += o.memory_usage_by_type[i];
  }
  return *this;
}
// Field-wise accumulation of another SlotStats into this one.
SlotStats& SlotStats::operator+=(const SlotStats& o) {
  // Layout guard: breaks the build if fields change without updating this list.
  static_assert(sizeof(SlotStats) == 32);

  key_count += o.key_count;
  total_reads += o.total_reads;
  total_writes += o.total_writes;
  memory_bytes += o.memory_bytes;
  return *this;
}
// Returns the number of keys that currently have an intent-lock entry.
size_t LockTable::Size() const {
  return locks_.size();
}
// Looks up the intent lock for `key`; returns nullopt when no lock is held.
// `key` must already be in normalized lock-key form.
std::optional<const IntentLock> LockTable::Find(std::string_view key) const {
  DCHECK_EQ(KeyLockArgs::GetLockKey(key), key);

  const auto it = locks_.find(Key::FromView(key));
  if (it == locks_.end())
    return std::nullopt;
  return it->second;
}
// Acquires an intent lock on `key` in `mode`, creating the entry on first use.
// Returns the value of IntentLock::Acquire — presumably whether the lock was
// granted immediately; confirm against IntentLock's contract.
// `key` must already be in normalized lock-key form.
bool LockTable::Acquire(std::string_view key, IntentLock::Mode mode) {
  DCHECK_EQ(KeyLockArgs::GetLockKey(key), key);
  auto [it, inserted] = locks_.try_emplace(Key::FromView(key));
  if (!inserted)  // If more than one transaction refers to a key
    // The entry was first created from a non-owning view. Once a second
    // acquisition shows the key is shared, make the stored key self-contained
    // so it cannot dangle when the original view's backing storage goes away.
    // NOTE(review): the const_cast mutates a map key in place — this looks
    // safe only if MakeOwned preserves hash/equality; confirm.
    const_cast<Key&>(it->first).MakeOwned();  // we must fall back to using a self-contained string
  return it->second.Acquire(mode);
}
// Releases one `mode` reference on `key`'s intent lock; the entry is erased
// once the lock has no holders left. The entry must exist (CHECK-enforced).
// `key` must already be in normalized lock-key form.
void LockTable::Release(std::string_view key, IntentLock::Mode mode) {
  DCHECK_EQ(KeyLockArgs::GetLockKey(key), key);

  const auto it = locks_.find(Key::FromView(key));
  CHECK(it != locks_.end()) << key;

  auto& lock = it->second;
  lock.Release(mode);
  // Drop the table entry once nobody holds the lock anymore.
  if (lock.IsFree()) {
    locks_.erase(it);
  }
}
// Constructs the table for logical database `db_index`, allocating all
// dashtables from the supplied memory resource `mr`.
DbTable::DbTable(PMR_NS::memory_resource* mr, DbIndex db_index)
    : prime(kInitSegmentLog, detail::PrimeTablePolicy{}, mr),
      expire(0, detail::ExpireTablePolicy{}, mr),
      // NOTE(review): mcflag reuses ExpireTablePolicy — looks intentional
      // (same key/value shape?) but confirm it should not have its own policy.
      mcflag(0, detail::ExpireTablePolicy{}, mr),
      top_keys({.enabled = absl::GetFlag(FLAGS_enable_top_keys_tracking)}),
      index(db_index) {
  if (ClusterConfig::IsEnabled()) {
    // One stats entry per cluster slot id, 0..kMaxSlotNum inclusive.
    slots_stats.resize(ClusterConfig::kMaxSlotNum + 1);
  }
  // Record the creating thread so the destructor can verify the table is
  // destroyed on the same thread (see ~DbTable).
  thread_index = ServerState::tlocal()->thread_index();
}
// Debug-asserts that destruction happens on the same thread that constructed
// this table; a mismatch would indicate a cross-thread ownership bug.
DbTable::~DbTable() {
  DCHECK_EQ(thread_index, ServerState::tlocal()->thread_index());
}
// Removes all entries from every dashtable and resets per-table statistics.
void DbTable::Clear() {
  // A stray discarded-value call `prime.size();` used to sit here; it had no
  // effect and was removed.
  prime.Clear();
  expire.Clear();
  mcflag.Clear();
  // Reset all accumulated stats to their defaults.
  stats = DbTableStats{};
}
// "Launders" a possibly-invalidated iterator: if `it` still points at a live
// entry for `key`, it is returned as-is; otherwise the key is looked up again.
PrimeIterator DbTable::Launder(PrimeIterator it, std::string_view key) {
  // Fast path: the slot is still occupied by the same key.
  if (it.IsOccupied() && it->first == key)
    return it;
  // Slow path: the entry moved or was erased — re-resolve via the table.
  return prime.Find(key);
}
} // namespace dfly