chore: remove duplicate code from dash and simplify (#2765)

* rename all Policy members for consistency
* remove duplicate code
Kostas Kyrimis, 2024-03-29 11:14:58 +02:00, committed by GitHub
parent c8724adddf
commit 370f334baf
4 changed files with 132 additions and 162 deletions
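The "remove duplicate code" half of this commit collapses the const and non-const overloads of Bucket::ForEachSlot into a single ForEachSlotImpl whose This parameter deduces the pointer's constness. A minimal self-contained sketch of that pattern (toy type and slot mask, not the real dash_internal.h code; the real helper is a non-static const member rather than a static one):

#include <cstdint>
#include <iostream>
#include <utility>

class ToyBucket {
 public:
  // Shared body: `This` deduces as ToyBucket* or const ToyBucket*, so the
  // callback receives a pointer with the caller's constness.
  template <typename This, typename Cb> static void ForEachSlotImpl(This obj, Cb&& cb) {
    uint32_t mask = obj->busy_mask_;
    for (unsigned j = 0; j < 4; ++j) {
      if (mask & 1)
        cb(obj, j);
      mask >>= 1;
    }
  }

  template <typename Cb> void ForEachSlot(Cb&& cb) const {
    ForEachSlotImpl(this, std::forward<Cb>(cb));  // const overload
  }

  template <typename Cb> void ForEachSlot(Cb&& cb) {
    ForEachSlotImpl(this, std::forward<Cb>(cb));  // mutable overload
  }

 private:
  uint32_t busy_mask_ = 0b1011;  // slots 0, 1 and 3 are busy
};

int main() {
  const ToyBucket bucket{};
  bucket.ForEachSlot([](const ToyBucket*, unsigned slot) {
    std::cout << "busy slot " << slot << '\n';  // prints 0, 1, 3
  });
  return 0;
}

Both overloads now share one loop, so any fix to the iteration logic lands in exactly one place.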


@@ -34,15 +34,8 @@ class DashTable : public detail::DashTableBase {
   DashTable(const DashTable&) = delete;
   DashTable& operator=(const DashTable&) = delete;
 
-  struct SegmentPolicy {
-    static constexpr unsigned NUM_SLOTS = Policy::kSlotNum;
-    static constexpr unsigned BUCKET_CNT = Policy::kBucketNum;
-    static constexpr unsigned STASH_BUCKET_NUM = Policy::kStashBucketNum;
-    static constexpr bool USE_VERSION = Policy::kUseVersion;
-  };
-
   using Base = detail::DashTableBase;
-  using SegmentType = detail::Segment<_Key, _Value, SegmentPolicy>;
+  using SegmentType = detail::Segment<_Key, _Value, Policy>;
   using SegmentIterator = typename SegmentType::Iterator;
 
  public:
@@ -50,17 +43,12 @@ class DashTable : public detail::DashTableBase {
   using Value_t = _Value;
   using Segment_t = SegmentType;
 
-  //! Number of "official" buckets that are used to position a key. In other words, does not include
-  //! stash buckets.
-  static constexpr unsigned kLogicalBucketNum = Policy::kBucketNum;
-
   //! Total number of buckets in a segment (including stash).
-  static constexpr unsigned kPhysicalBucketNum = SegmentType::kTotalBuckets;
-  static constexpr unsigned kBucketWidth = Policy::kSlotNum;
   static constexpr double kTaxAmount = SegmentType::kTaxSize;
   static constexpr size_t kSegBytes = sizeof(SegmentType);
   static constexpr size_t kSegCapacity = SegmentType::capacity();
-  static constexpr bool kUseVersion = Policy::kUseVersion;
+  static constexpr size_t kSlotNum = SegmentType::kSlotNum;
+  static constexpr size_t kBucketNum = SegmentType::kBucketNum;
 
   // if IsSingleBucket is true - iterates only over a single bucket.
   template <bool IsConst, bool IsSingleBucket = false> class Iterator;
@@ -556,11 +544,11 @@ void DashTable<_Key, _Value, Policy>::CVCUponInsert(uint64_t ver_threshold, cons
     return;
   }
 
-  static_assert(kPhysicalBucketNum < 0xFF, "");
+  static_assert(SegmentType::kTotalBuckets < 0xFF, "");
 
   // Segment is full, we need to return the whole segment, because it can be split
   // and its entries can be reshuffled into different buckets.
-  for (uint8_t i = 0; i < kPhysicalBucketNum; ++i) {
+  for (uint8_t i = 0; i < SegmentType::kTotalBuckets; ++i) {
     if (target->GetVersion(i) < ver_threshold && !target->GetBucket(i).IsEmpty()) {
       cb(bucket_iterator{this, seg_id, i});
     }
@@ -646,8 +634,8 @@ bool DashTable<_Key, _Value, Policy>::ShiftRight(bucket_iterator it) {
   typename Segment_t::Hash_t hash_val = 0;
   auto& bucket = seg->GetBucket(it.bucket_id_);
 
-  if (bucket.GetBusy() & (1 << (kBucketWidth - 1))) {
-    it.slot_id_ = kBucketWidth - 1;
+  if (bucket.GetBusy() & (1 << (kSlotNum - 1))) {
+    it.slot_id_ = kSlotNum - 1;
     hash_val = DoHash(it->first);
     policy_.DestroyKey(it->first);
     policy_.DestroyValue(it->second);
@@ -800,7 +788,7 @@ auto DashTable<_Key, _Value, Policy>::InsertInternal(U&& key, V&& value, Evictio
 
   for (unsigned i = 0; i < Policy::kStashBucketNum; ++i) {
     hotspot.probes.by_type.stash_buckets[i] =
-        bucket_iterator{this, target_seg_id, uint8_t(kLogicalBucketNum + i), 0};
+        bucket_iterator{this, target_seg_id, uint8_t(Policy::kBucketNum + i), 0};
   }
 
   hotspot.num_buckets = HotspotBuckets::kNumBuckets;
@@ -910,7 +898,7 @@ auto DashTable<_Key, _Value, Policy>::TraverseBySegmentOrder(Cursor curs, Cb&& c
     s->TraverseBucket(bid, std::move(dt_cb));
     ++bid;
 
-    if (bid == kPhysicalBucketNum) {
+    if (bid == SegmentType::kTotalBuckets) {
       sid = NextSeg(sid);
       bid = 0;
       if (sid >= segment_.size()) {
@@ -924,7 +912,7 @@ auto DashTable<_Key, _Value, Policy>::TraverseBySegmentOrder(Cursor curs, Cb&& c
 
 template <typename _Key, typename _Value, typename Policy>
 auto DashTable<_Key, _Value, Policy>::GetRandomCursor(absl::BitGen* bitgen) -> Cursor {
   uint32_t sid = absl::Uniform<uint32_t>(*bitgen, 0, segment_.size());
-  uint8_t bid = absl::Uniform<uint8_t>(*bitgen, 0, kLogicalBucketNum);
+  uint8_t bid = absl::Uniform<uint8_t>(*bitgen, 0, Policy::kBucketNum);
 
   return Cursor{global_depth_, sid, bid};
 }
@@ -932,7 +920,7 @@ auto DashTable<_Key, _Value, Policy>::GetRandomCursor(absl::BitGen* bitgen) -> C
 template <typename _Key, typename _Value, typename Policy>
 template <typename Cb>
 auto DashTable<_Key, _Value, Policy>::Traverse(Cursor curs, Cb&& cb) -> Cursor {
-  if (curs.bucket_id() >= kLogicalBucketNum)  // sanity.
+  if (curs.bucket_id() >= Policy::kBucketNum)  // sanity.
     return 0;
 
   uint32_t sid = curs.segment_id(global_depth_);
@@ -955,7 +943,7 @@ auto DashTable<_Key, _Value, Policy>::Traverse(Cursor curs, Cb&& cb) -> Cursor {
       sid = 0;
       ++bid;
 
-      if (bid >= kLogicalBucketNum)
+      if (bid >= Policy::kBucketNum)
         return 0;  // "End of traversal" cursor.
     }
   } while (!fetched);

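The diff above removes the name-translation shim: DashTable used to repackage the user policy's members under SCREAMING_CASE names before handing them to detail::Segment, and now forwards Policy unchanged while mirroring the segment's constants. A self-contained sketch of that wiring (toy Segment and a hypothetical MyPolicy; only the member names come from the diff):

#include <cstddef>

// Toy stand-in for detail::Segment; only the member names are taken from
// the diff above.
namespace detail {
template <typename Key, typename Value, typename Policy> class Segment {
 public:
  static constexpr unsigned kSlotNum = Policy::kSlotNum;
  static constexpr unsigned kBucketNum = Policy::kBucketNum;
  static constexpr unsigned kTotalBuckets = Policy::kBucketNum + Policy::kStashBucketNum;
};
}  // namespace detail

template <typename Key, typename Value, typename Policy> class DashTable {
 public:
  // Before the change a nested SegmentPolicy re-exported Policy::kSlotNum as
  // NUM_SLOTS and so on; with matching names, Policy is forwarded as-is.
  using SegmentType = detail::Segment<Key, Value, Policy>;

  static constexpr size_t kSlotNum = SegmentType::kSlotNum;
  static constexpr size_t kBucketNum = SegmentType::kBucketNum;
};

// Hypothetical user policy, mirroring DefaultSegmentPolicy's shape.
struct MyPolicy {
  static constexpr unsigned kSlotNum = 12;
  static constexpr unsigned kBucketNum = 64;
  static constexpr unsigned kStashBucketNum = 2;
  static constexpr bool kUseVersion = true;
};

static_assert(DashTable<int, int, MyPolicy>::kSlotNum == 12, "");
static_assert(DashTable<int, int, MyPolicy>::kBucketNum == 64, "");

int main() {
}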

@@ -289,36 +289,38 @@ static_assert(alignof(VersionedBB<14, 4>) == 1, "");
 static_assert(sizeof(VersionedBB<12, 4>) == 12 * 2 + 8, "");
 static_assert(sizeof(VersionedBB<14, 4>) <= 14 * 2 + 8, "");
 
-// Segment - static-hashtable of size NUM_SLOTS*(BUCKET_CNT + STASH_BUCKET_NUM).
+// Segment - static-hashtable of size kSlotNum*(kBucketNum + kStashBucketNum).
 struct DefaultSegmentPolicy {
-  static constexpr unsigned NUM_SLOTS = 12;
-  static constexpr unsigned BUCKET_CNT = 64;
-  static constexpr unsigned STASH_BUCKET_NUM = 2;
-  static constexpr bool USE_VERSION = true;
+  static constexpr unsigned kSlotNum = 12;
+  static constexpr unsigned kBucketNum = 64;
+  static constexpr unsigned kStashBucketNum = 2;
+  static constexpr bool kUseVersion = true;
 };
 
 template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy> class Segment {
-  static constexpr unsigned BUCKET_CNT = Policy::BUCKET_CNT;
-  static constexpr unsigned STASH_BUCKET_NUM = Policy::STASH_BUCKET_NUM;
-  static constexpr unsigned NUM_SLOTS = Policy::NUM_SLOTS;
-  static constexpr bool USE_VERSION = Policy::USE_VERSION;
+ public:
+  static constexpr unsigned kSlotNum = Policy::kSlotNum;
+  static constexpr unsigned kBucketNum = Policy::kBucketNum;
+  static constexpr unsigned kStashBucketNum = Policy::kStashBucketNum;
+  static constexpr bool kUseVersion = Policy::kUseVersion;
 
-  static_assert(BUCKET_CNT + STASH_BUCKET_NUM < 255);
+ private:
+  static_assert(kBucketNum + kStashBucketNum < 255);
 
   static constexpr unsigned kFingerBits = 8;
 
   using BucketType =
-      std::conditional_t<USE_VERSION, VersionedBB<NUM_SLOTS, 4>, BucketBase<NUM_SLOTS, 4>>;
+      std::conditional_t<kUseVersion, VersionedBB<kSlotNum, 4>, BucketBase<kSlotNum, 4>>;
 
   struct Bucket : public BucketType {
     using BucketType::kNanSlot;
     using typename BucketType::SlotId;
 
-    _Key key[NUM_SLOTS];
-    _Value value[NUM_SLOTS];
+    _Key key[kSlotNum];
+    _Value value[kSlotNum];
 
     template <typename U, typename V>
     void Insert(uint8_t slot, U&& u, V&& v, uint8_t meta_hash, bool probe) {
-      assert(slot < NUM_SLOTS);
+      assert(slot < kSlotNum);
 
       key[slot] = std::forward<U>(u);
       value[slot] = std::forward<V>(v);
@@ -337,14 +339,13 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
       std::swap(value[slot_a], value[slot_b]);
     }
 
-    // calls for each busy slot: cb(iterator, probe)
-    template <typename Cb> void ForEachSlot(Cb&& cb) const {
+    template <typename This, typename Cb> void ForEachSlotImpl(This obj, Cb&& cb) const {
       uint32_t mask = this->GetBusy();
       uint32_t probe_mask = this->GetProbe(true);
 
-      for (unsigned j = 0; j < NUM_SLOTS; ++j) {
+      for (unsigned j = 0; j < kSlotNum; ++j) {
         if (mask & 1) {
-          cb(this, j, probe_mask & 1);
+          cb(obj, j, probe_mask & 1);
         }
         mask >>= 1;
         probe_mask >>= 1;
@@ -352,17 +353,13 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
     }
 
     // calls for each busy slot: cb(iterator, probe)
-    template <typename Cb> void ForEachSlot(Cb&& cb) {
-      uint32_t mask = this->GetBusy();
-      uint32_t probe_mask = this->GetProbe(true);
-
-      for (unsigned j = 0; j < NUM_SLOTS; ++j) {
-        if (mask & 1) {
-          cb(this, j, probe_mask & 1);
-        }
-        mask >>= 1;
-        probe_mask >>= 1;
-      }
+    template <typename Cb> void ForEachSlot(Cb&& cb) const {
+      ForEachSlotImpl(this, std::forward<Cb&&>(cb));
+    }
+
+    // calls for each busy slot: cb(iterator, probe)
+    template <typename Cb> void ForEachSlot(Cb&& cb) {
+      ForEachSlotImpl(this, std::forward<Cb&&>(cb));
     }
 
   };  // class Bucket
@@ -392,12 +389,9 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
   };
 
   /* number of normal buckets in one segment*/
-  static constexpr uint8_t kRegularBucketCnt = BUCKET_CNT;
-  static constexpr uint8_t kTotalBuckets = kRegularBucketCnt + STASH_BUCKET_NUM;
-  static constexpr uint8_t kStashBucketCnt = STASH_BUCKET_NUM;
+  static constexpr uint8_t kTotalBuckets = kBucketNum + kStashBucketNum;
 
   static constexpr size_t kFpMask = (1 << kFingerBits) - 1;
-  static constexpr size_t kNumSlots = NUM_SLOTS;
 
   using Value_t = _Value;
   using Key_t = _Key;
@@ -439,13 +433,11 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
     local_depth_ = depth;
   }
 
-  template <bool UV = Policy::USE_VERSION>
-  std::enable_if_t<UV, uint64_t> GetVersion(uint8_t bid) const {
+  template <bool UV = kUseVersion> std::enable_if_t<UV, uint64_t> GetVersion(uint8_t bid) const {
     return bucket_[bid].GetVersion();
   }
 
-  template <bool UV = Policy::USE_VERSION>
-  std::enable_if_t<UV> SetVersion(uint8_t bid, uint64_t v) {
+  template <bool UV = kUseVersion> std::enable_if_t<UV> SetVersion(uint8_t bid, uint64_t v) {
     return bucket_[bid].SetVersion(v);
   }
 
@@ -466,7 +458,7 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
   // Used in test.
   unsigned NumProbingBuckets() const {
     unsigned res = 0;
-    for (unsigned i = 0; i < kRegularBucketCnt; ++i) {
+    for (unsigned i = 0; i < kBucketNum; ++i) {
      res += (bucket_[i].GetProbe(true) != 0);
     }
     return res;
@@ -526,13 +518,13 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
   // Returns UINT16_MAX if segment is full. Otherwise, returns number of touched bucket ids (1 or 2)
   // if the insertion would happen. The ids are put into bid array that should have at least 2
   // spaces.
-  template <bool UV = Policy::USE_VERSION>
+  template <bool UV = kUseVersion>
   std::enable_if_t<UV, unsigned> CVCOnInsert(uint64_t ver_threshold, Hash_t key_hash,
                                              uint8_t bid[2]) const;
 
   // Returns bucket ids whose versions will change as a result of bumping up the item
   // Can return upto 3 buckets.
-  template <bool UV = Policy::USE_VERSION>
+  template <bool UV = kUseVersion>
   std::enable_if_t<UV, unsigned> CVCOnBump(uint64_t ver_threshold, unsigned bid, unsigned slot,
                                            Hash_t hash, uint8_t result_bid[3]) const;
@@ -542,10 +534,10 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
   // Shifts all slots in the bucket right.
   // Returns true if the last slot was busy and the entry has been deleted.
   bool ShiftRight(unsigned bid, Hash_t right_hashval) {
-    if (bid >= kRegularBucketCnt) {  // Stash
-      constexpr auto kLastSlotMask = 1u << (kNumSlots - 1);
+    if (bid >= kBucketNum) {  // Stash
+      constexpr auto kLastSlotMask = 1u << (kSlotNum - 1);
       if (bucket_[bid].GetBusy() & kLastSlotMask)
-        RemoveStashReference(bid - kRegularBucketCnt, right_hashval);
+        RemoveStashReference(bid - kBucketNum, right_hashval);
     }
 
     return bucket_[bid].ShiftRight();
@@ -565,15 +557,15 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
   static_assert(sizeof(Iterator) == 2);
 
   static unsigned BucketIndex(Hash_t hash) {
-    return (hash >> kFingerBits) % kRegularBucketCnt;
+    return (hash >> kFingerBits) % kBucketNum;
   }
 
   static uint8_t NextBid(uint8_t bid) {
-    return bid < kRegularBucketCnt - 1 ? bid + 1 : 0;
+    return bid < kBucketNum - 1 ? bid + 1 : 0;
   }
 
   static uint8_t PrevBid(uint8_t bid) {
-    return bid ? bid - 1 : kRegularBucketCnt - 1;
+    return bid ? bid - 1 : kBucketNum - 1;
   }
 
   // if own_items is true it means we try to move owned item to probing bucket.
@@ -591,7 +583,7 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
   int TryInsertToBucket(unsigned bidx, U&& key, V&& value, uint8_t meta_hash, bool probe) {
     auto& b = bucket_[bidx];
     auto slot = b.FindEmptySlot();
-    assert(slot < int(kNumSlots));
+    assert(slot < int(kSlotNum));
     if (slot < 0) {
       return -1;
     }
@@ -609,7 +601,7 @@ template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy
 
  public:
   static constexpr size_t kBucketSz = sizeof(Bucket);
-  static constexpr size_t kMaxSize = kTotalBuckets * kNumSlots;
+  static constexpr size_t kMaxSize = kTotalBuckets * kSlotNum;
   static constexpr double kTaxSize =
       (double(sizeof(Segment)) / kMaxSize) - sizeof(Key_t) - sizeof(Value_t);
@@ -1030,7 +1022,7 @@ auto Segment<Key, Value, Policy>::Bucket::FindByFp(uint8_t fp_hash, bool probe,
 
   unsigned delta = __builtin_ctz(mask);
   mask >>= delta;
-  for (unsigned i = delta; i < NUM_SLOTS; ++i) {
+  for (unsigned i = delta; i < kSlotNum; ++i) {
     if ((mask & 1) && pred(key[i], k)) {
       return i;
     }
@@ -1043,7 +1035,7 @@ auto Segment<Key, Value, Policy>::Bucket::FindByFp(uint8_t fp_hash, bool probe,
 template <typename Key, typename Value, typename Policy>
 bool Segment<Key, Value, Policy>::Bucket::ShiftRight() {
   bool res = BucketType::ShiftRight();
-  for (int i = NUM_SLOTS - 1; i > 0; i--) {
+  for (int i = kSlotNum - 1; i > 0; i--) {
     std::swap(key[i], key[i - 1]);
     std::swap(value[i], value[i - 1]);
   }
@@ -1066,7 +1058,7 @@ auto Segment<Key, Value, Policy>::TryMoveFromStash(unsigned stash_id, unsigned s
                                                    Hash_t key_hash) -> Iterator {
   uint8_t bid = BucketIndex(key_hash);
   uint8_t hash_fp = key_hash & kFpMask;
-  uint8_t stash_bid = kRegularBucketCnt + stash_id;
+  uint8_t stash_bid = kBucketNum + stash_id;
   auto& key = Key(stash_bid, stash_slot_id);
   auto& value = Value(stash_bid, stash_slot_id);
@@ -1079,7 +1071,7 @@ auto Segment<Key, Value, Policy>::TryMoveFromStash(unsigned stash_id, unsigned s
   }
 
   if (reg_slot >= 0) {
-    if constexpr (USE_VERSION) {
+    if constexpr (kUseVersion) {
       // We maintain the invariant for the physical bucket by updating the version when
       // the entries move between buckets.
       uint64_t ver = bucket_[stash_bid].GetVersion();
@@ -1140,9 +1132,9 @@ auto Segment<Key, Value, Policy>::FindIt(U&& key, Hash_t key_hash, Pred&& cf) co
   }
 
   auto stash_cb = [&](unsigned overflow_index, unsigned pos) -> SlotId {
-    assert(pos < STASH_BUCKET_NUM);
+    assert(pos < kStashBucketNum);
 
-    pos += kRegularBucketCnt;
+    pos += kBucketNum;
     const Bucket& bucket = bucket_[pos];
     return bucket.FindByFp(fp_hash, false, key, cf);
   };
@@ -1152,10 +1144,10 @@ auto Segment<Key, Value, Policy>::FindIt(U&& key, Hash_t key_hash, Pred&& cf) co
     stats.stash_overflow_probes++;
 #endif
 
-    for (unsigned i = 0; i < STASH_BUCKET_NUM; ++i) {
+    for (unsigned i = 0; i < kStashBucketNum; ++i) {
       auto sid = stash_cb(0, i);
       if (sid != BucketType::kNanSlot) {
-        return Iterator{uint8_t(kRegularBucketCnt + i), sid};
+        return Iterator{uint8_t(kBucketNum + i), sid};
       }
     }
 
@@ -1169,12 +1161,12 @@ auto Segment<Key, Value, Policy>::FindIt(U&& key, Hash_t key_hash, Pred&& cf) co
 
   auto stash_res = target.IterateStash(fp_hash, false, stash_cb);
   if (stash_res.second != BucketType::kNanSlot) {
-    return Iterator{uint8_t(kRegularBucketCnt + stash_res.first), stash_res.second};
+    return Iterator{uint8_t(kBucketNum + stash_res.first), stash_res.second};
   }
 
   stash_res = probe.IterateStash(fp_hash, true, stash_cb);
   if (stash_res.second != BucketType::kNanSlot) {
-    return Iterator{uint8_t(kRegularBucketCnt + stash_res.first), stash_res.second};
+    return Iterator{uint8_t(kBucketNum + stash_res.first), stash_res.second};
   }
 
   return Iterator{};
 }
@@ -1199,8 +1191,8 @@ void Segment<Key, Value, Policy>::Delete(const Iterator& it, Hash_t key_hash) {
   auto& b = bucket_[it.index];
 
-  if (it.index >= kRegularBucketCnt) {
-    RemoveStashReference(it.index - kRegularBucketCnt, key_hash);
+  if (it.index >= kBucketNum) {
+    RemoveStashReference(it.index - kBucketNum, key_hash);
   }
 
   b.Delete(it.slot);
@@ -1219,7 +1211,7 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
   // do_versioning();
   auto is_mine = [this](Hash_t hash) { return (hash >> (64 - local_depth_) & 1) == 0; };
 
-  for (unsigned i = 0; i < kRegularBucketCnt; ++i) {
+  for (unsigned i = 0; i < kBucketNum; ++i) {
     uint32_t invalid_mask = 0;
 
     auto cb = [&](auto* bucket, unsigned slot, bool probe) {
@@ -1257,7 +1249,7 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
       assert(it.found());
       (void)it;
 
-      if constexpr (USE_VERSION) {
+      if constexpr (kUseVersion) {
        // Maintaining consistent versioning.
        uint64_t ver = bucket->GetVersion();
        dest_right->bucket_[it.index].UpdateVersion(ver);
@@ -1268,9 +1260,9 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
     bucket_[i].ClearSlots(invalid_mask);
   }
 
-  for (unsigned i = 0; i < STASH_BUCKET_NUM; ++i) {
+  for (unsigned i = 0; i < kStashBucketNum; ++i) {
     uint32_t invalid_mask = 0;
-    unsigned bid = kRegularBucketCnt + i;
+    unsigned bid = kBucketNum + i;
     Bucket& stash = bucket_[bid];
 
     auto cb = [&](auto* bucket, unsigned slot, bool probe) {
@@ -1293,7 +1285,7 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
       (void)it;
       assert(it.index != kNanBid);
 
-      if constexpr (USE_VERSION) {
+      if constexpr (kUseVersion) {
        // Update the version in the destination bucket.
        uint64_t ver = bucket->GetVersion();
        dest_right->bucket_[it.index].UpdateVersion(ver);
@@ -1327,7 +1319,7 @@ void Segment<Key, Value, Policy>::MoveFrom(HFunc&& hfunc, Segment* src) {
       return;
     }
 
-    if constexpr (USE_VERSION) {
+    if constexpr (kUseVersion) {
      // Update the version in the destination bucket.
      this->bucket_[it.index].UpdateVersion(bucket->GetVersion());
    }
@@ -1355,7 +1347,7 @@ int Segment<Key, Value, Policy>::MoveToOther(bool own_items, unsigned from_bid,
     return -1;
 
   // We never decrease the version of the entry.
-  if constexpr (USE_VERSION) {
+  if constexpr (kUseVersion) {
    auto& dst = bucket_[to_bid];
    dst.UpdateVersion(src.GetVersion());
  }
@@ -1425,13 +1417,13 @@ auto Segment<Key, Value, Policy>::InsertUniq(U&& key, V&& value, Hash_t key_hash
   }
 
   // we balance stash fill rate by starting from y % STASH_BUCKET_NUM.
-  for (unsigned i = 0; i < STASH_BUCKET_NUM; ++i) {
-    unsigned stash_pos = (bid + i) % STASH_BUCKET_NUM;
-    int stash_slot = TryInsertToBucket(kRegularBucketCnt + stash_pos, std::forward<U>(key),
+  for (unsigned i = 0; i < kStashBucketNum; ++i) {
+    unsigned stash_pos = (bid + i) % kStashBucketNum;
+    int stash_slot = TryInsertToBucket(kBucketNum + stash_pos, std::forward<U>(key),
                                        std::forward<V>(value), meta_hash, false);
     if (stash_slot >= 0) {
       target.SetStashPtr(stash_pos, meta_hash, &neighbor);
-      return Iterator{uint8_t(kRegularBucketCnt + stash_pos), uint8_t(stash_slot)};
+      return Iterator{uint8_t(kBucketNum + stash_pos), uint8_t(stash_slot)};
     }
   }
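The InsertUniq hunk above keeps the stash balancing described in its comment: probing starts at an insert-dependent offset and wraps modulo the stash count, so successive inserts spread across the stash buckets. A toy illustration of the wrap-around order (hypothetical bucket ids, standalone code):

#include <iostream>

int main() {
  constexpr unsigned kStashBucketNum = 2;
  for (unsigned bid : {17u, 42u}) {  // hypothetical home-bucket ids
    std::cout << "bid " << bid << " probes stash positions:";
    for (unsigned i = 0; i < kStashBucketNum; ++i)
      std::cout << ' ' << (bid + i) % kStashBucketNum;  // wrap-around order
    std::cout << '\n';
  }
  return 0;
}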
@@ -1462,33 +1454,31 @@ std::enable_if_t<UV, unsigned> Segment<Key, Value, Policy>::CVCOnInsert(uint64_t
 
   // both nid and bid are full.
   const uint8_t after_next = NextBid(nid);
-  if (CheckIfMovesToOther(true, nid, after_next)) {
+  auto do_fun = [this, ver_threshold, &cnt, &bid_res, after_next](auto bid, auto nid) {
     // We could tighten the checks here and below because
     // if nid is less than ver_threshold, than after_next won't be affected and won't cross
     // ver_threshold as well.
-    if (bucket_[nid].GetVersion() < ver_threshold)
+    if (bucket_[bid].GetVersion() < ver_threshold)
+      bid_res[cnt++] = bid;
+
+    if (!bucket_[nid].IsEmpty() && bucket_[nid].GetVersion() < ver_threshold)
       bid_res[cnt++] = nid;
+  };
 
-    if (!bucket_[after_next].IsEmpty() && bucket_[after_next].GetVersion() < ver_threshold)
-      bid_res[cnt++] = after_next;
-
+  if (CheckIfMovesToOther(true, nid, after_next)) {
+    do_fun(nid, after_next);
     return cnt;
   }
 
   const uint8_t prev_bid = PrevBid(bid);
   if (CheckIfMovesToOther(false, bid, prev_bid)) {
-    if (bucket_[bid].GetVersion() < ver_threshold)
-      bid_res[cnt++] = bid;
-
-    if (!bucket_[prev_bid].IsEmpty() && bucket_[prev_bid].GetVersion() < ver_threshold)
-      bid_res[cnt++] = prev_bid;
-
+    do_fun(bid, prev_bid);
     return cnt;
   }
 
   // Important to repeat exactly the insertion logic of InsertUnique.
-  for (unsigned i = 0; i < STASH_BUCKET_NUM; ++i) {
-    unsigned stash_bid = kRegularBucketCnt + ((bid + i) % STASH_BUCKET_NUM);
+  for (unsigned i = 0; i < kStashBucketNum; ++i) {
+    unsigned stash_bid = kBucketNum + ((bid + i) % kStashBucketNum);
     const Bucket& stash = bucket_[stash_bid];
     if (!stash.IsFull()) {
       if (!stash.IsEmpty() && stash.GetVersion() < ver_threshold)
@@ -1507,7 +1497,7 @@ std::enable_if_t<UV, unsigned> Segment<Key, Value, Policy>::CVCOnBump(uint64_t v
                                                                       unsigned bid, unsigned slot,
                                                                       Hash_t hash,
                                                                       uint8_t result_bid[3]) const {
-  if (bid < kRegularBucketCnt) {
+  if (bid < kBucketNum) {
    // Right now we do not migrate entries from nid to bid, only from stash to normal buckets.
    // The reason for this is that CVCBumpUp implementation swaps the slots of the same bucket
    // so there is no further action needed.
@@ -1541,34 +1531,27 @@ std::enable_if_t<UV, unsigned> Segment<Key, Value, Policy>::CVCOnBump(uint64_t v
   const uint8_t probing_bid = NextBid(target_bid);
   const auto& probing = bucket_[probing_bid];
 
-  unsigned stash_pos = bid - kRegularBucketCnt;
+  unsigned stash_pos = bid - kBucketNum;
   uint8_t fp_hash = hash & kFpMask;
 
   auto find_stash = [&](unsigned, unsigned pos) {
     return stash_pos == pos ? slot : BucketType::kNanSlot;
   };
 
-  if (target.GetVersion() < ver_threshold) {
+  auto do_fun = [&result, &result_bid, fp_hash, find_stash](auto& target, auto target_bid,
+                                                            bool probe) {
     if (target.HasStashOverflow()) {
       result_bid[result++] = target_bid;
     } else {
-      SlotId slot_id = target.IterateStash(fp_hash, false, find_stash).second;
+      SlotId slot_id = target.IterateStash(fp_hash, probe, find_stash).second;
       if (slot_id != BucketType::kNanSlot) {
         result_bid[result++] = target_bid;
       }
     }
-  }
+  };
 
-  if (probing.GetVersion() < ver_threshold) {
-    if (probing.HasStashOverflow()) {
-      result_bid[result++] = probing_bid;
-    } else {
-      SlotId slot_id = probing.IterateStash(fp_hash, true, find_stash).second;
-      if (slot_id != BucketType::kNanSlot) {
-        result_bid[result++] = probing_bid;
-      }
-    }
-  }
+  do_fun(target, target_bid, false);
+  do_fun(probing, probing_bid, true);
 
   return result;
 }
@@ -1585,7 +1568,7 @@ void Segment<Key, Value, Policy>::TraverseBucket(uint8_t bid, Cb&& cb) {
 template <typename Key, typename Value, typename Policy>
 template <typename Cb, typename HashFn>
 bool Segment<Key, Value, Policy>::TraverseLogicalBucket(uint8_t bid, HashFn&& hfun, Cb&& cb) const {
-  assert(bid < kRegularBucketCnt);
+  assert(bid < kBucketNum);
 
   const Bucket& b = bucket_[bid];
   bool found = false;
@@ -1615,7 +1598,7 @@ bool Segment<Key, Value, Policy>::TraverseLogicalBucket(uint8_t bid, HashFn&& hf
   // Finally go over stash buckets and find those entries that belong to b.
   if (b.HasStash()) {
     // do not bother with overflow fps. Just go over all the stash buckets.
-    for (uint8_t j = kRegularBucketCnt; j < kTotalBuckets; ++j) {
+    for (uint8_t j = kBucketNum; j < kTotalBuckets; ++j) {
       const auto& stashb = bucket_[j];
       stashb.ForEachSlot([&](auto* bucket, SlotId slot, bool probe) {
         if (BucketIndex(hfun(bucket->key[slot])) == bid) {
@@ -1671,7 +1654,7 @@ auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_ha
   if (!bp.CanBump(from.key[slot])) {
     return Iterator{bid, slot};
   }
-  if (bid < kRegularBucketCnt) {
+  if (bid < kBucketNum) {
     // non stash case.
     if (slot > 0 && bp.CanBump(from.key[slot - 1])) {
       from.Swap(slot - 1, slot);
@@ -1683,7 +1666,7 @@ auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_ha
 
   // stash bucket
   // We swap the item with the item in the "normal" bucket in the last slot.
-  unsigned stash_pos = bid - kRegularBucketCnt;
+  unsigned stash_pos = bid - kBucketNum;
 
   // If we have an empty space for some reason just unload the stash entry.
   if (Iterator it = TryMoveFromStash(stash_pos, slot, key_hash); it.found()) {
@@ -1700,11 +1683,10 @@ auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_ha
 
   // bucket_offs - 0 if exact bucket, 1 if neighbour
   unsigned bucket_offs = target.UnsetStashPtr(fp_hash, stash_pos, &next);
-  uint8_t swap_bid = (target_bid + bucket_offs) % kRegularBucketCnt;
-  // TODO exit early when target_bid == swap_bid
+  uint8_t swap_bid = (target_bid + bucket_offs) % kBucketNum;
   auto& swapb = bucket_[swap_bid];
 
-  constexpr unsigned kLastSlot = kNumSlots - 1;
+  constexpr unsigned kLastSlot = kSlotNum - 1;
   assert(swapb.GetBusy() & (1 << kLastSlot));
 
   // Don't move sticky items back to the stash because they're not evictable
@@ -1730,7 +1712,7 @@ auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_ha
   swapb.SetHash(kLastSlot, fp_hash, bucket_offs == 1);
 
   // update versions.
-  if constexpr (USE_VERSION) {
+  if constexpr (kUseVersion) {
     uint64_t from_ver = from.GetVersion();
     uint64_t swap_ver = swapb.GetVersion();
     if (from_ver < swap_ver) {
@@ -1759,8 +1741,8 @@ template <typename HFunc>
 unsigned Segment<Key, Value, Policy>::UnloadStash(HFunc&& hfunc) {
   unsigned moved = 0;
 
-  for (unsigned i = 0; i < STASH_BUCKET_NUM; ++i) {
-    unsigned bid = kRegularBucketCnt + i;
+  for (unsigned i = 0; i < kStashBucketNum; ++i) {
+    unsigned bid = kBucketNum + i;
     Bucket& stash = bucket_[bid];
     uint32_t invalid_mask = 0;

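The CVCOnInsert and CVCOnBump hunks above are the same refactor applied twice: two near-identical branches fold into one generic lambda (do_fun) whose auto parameters carry the ids that used to be hard-coded in each copy. A standalone sketch of the shape of that change (toy versions and threshold, not the real Segment logic):

#include <cstdint>
#include <iostream>

int main() {
  // Toy stand-ins for the per-bucket versions that CVCOnInsert inspects.
  uint64_t version[4] = {1, 7, 3, 9};
  const uint64_t ver_threshold = 5;

  uint8_t bid_res[4];
  unsigned cnt = 0;

  // One body instead of two copy-pasted branches: the bucket ids that used
  // to be hard-coded in each branch arrive as `auto` parameters.
  auto do_fun = [&](auto bid, auto nid) {
    if (version[bid] < ver_threshold)
      bid_res[cnt++] = bid;
    if (version[nid] < ver_threshold)
      bid_res[cnt++] = nid;
  };

  do_fun(0u, 1u);  // was: inline branch #1
  do_fun(2u, 3u);  // was: inline branch #2

  std::cout << "buckets below threshold: " << cnt << '\n';  // prints 2
  return 0;
}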

@@ -163,7 +163,7 @@ set<Segment::Key_t> DashTest::FillSegment(unsigned bid) {
   std::equal_to<Segment::Key_t> eq;
   for (Segment::Key_t key = 0; key < 1000000u; ++key) {
     uint64_t hash = dt_.DoHash(key);
-    unsigned bi = (hash >> 8) % Segment::kRegularBucketCnt;
+    unsigned bi = (hash >> 8) % Segment::kBucketNum;
     if (bi != bid)
       continue;
     uint8_t fp = hash & 0xFF;
@@ -219,7 +219,7 @@ TEST_F(DashTest, Basic) {
 
   auto hfun = &UInt64Policy::HashFn;
 
-  auto cursor = segment_.TraverseLogicalBucket((hash >> 8) % Segment::kRegularBucketCnt, hfun, cb);
+  auto cursor = segment_.TraverseLogicalBucket((hash >> 8) % Segment::kBucketNum, hfun, cb);
   ASSERT_EQ(1, has_called);
   ASSERT_EQ(0, segment_.TraverseLogicalBucket(cursor, hfun, cb));
   ASSERT_EQ(1, has_called);
@@ -237,11 +237,11 @@ TEST_F(DashTest, Segment) {
   set<Segment::Key_t> keys = FillSegment(0);
 
   EXPECT_TRUE(segment_.GetBucket(0).IsFull() && segment_.GetBucket(1).IsFull());
-  for (size_t i = 2; i < Segment::kRegularBucketCnt; ++i) {
+  for (size_t i = 2; i < Segment::kBucketNum; ++i) {
     EXPECT_EQ(0, segment_.GetBucket(i).Size());
   }
-  EXPECT_EQ(4 * Segment::kNumSlots, keys.size());
-  EXPECT_EQ(4 * Segment::kNumSlots, segment_.SlowSize());
+  EXPECT_EQ(4 * Segment::kSlotNum, keys.size());
+  EXPECT_EQ(4 * Segment::kSlotNum, segment_.SlowSize());
 
   auto hfun = &UInt64Policy::HashFn;
   unsigned has_called = 0;
@@ -254,12 +254,12 @@ TEST_F(DashTest, Segment) {
   segment_.TraverseAll(cb);
   ASSERT_EQ(keys.size(), has_called);
 
-  ASSERT_TRUE(segment_.GetBucket(Segment::kRegularBucketCnt).IsFull());
-  std::array<uint64_t, Segment::kNumSlots * 2> arr;
+  ASSERT_TRUE(segment_.GetBucket(Segment::kBucketNum).IsFull());
+  std::array<uint64_t, Segment::kSlotNum * 2> arr;
   uint64_t* next = arr.begin();
-  for (unsigned i = Segment::kRegularBucketCnt; i < Segment::kRegularBucketCnt + 2; ++i) {
+  for (unsigned i = Segment::kBucketNum; i < Segment::kBucketNum + 2; ++i) {
     const auto* k = &segment_.Key(i, 0);
-    next = std::copy(k, k + Segment::kNumSlots, next);
+    next = std::copy(k, k + Segment::kSlotNum, next);
   }
   std::equal_to<Segment::Key_t> eq;
   for (auto k : arr) {
@@ -268,7 +268,7 @@ TEST_F(DashTest, Segment) {
     ASSERT_TRUE(it.found());
     segment_.Delete(it, hash);
   }
 
-  EXPECT_EQ(2 * Segment::kNumSlots, segment_.SlowSize());
+  EXPECT_EQ(2 * Segment::kSlotNum, segment_.SlowSize());
   ASSERT_FALSE(Contains(arr.front()));
 }
@@ -331,7 +331,7 @@ TEST_F(DashTest, Split) {
   ASSERT_EQ(segment_.SlowSize(), sum[0]);
   EXPECT_EQ(s2.SlowSize(), sum[1]);
   EXPECT_EQ(keys.size(), sum[0] + sum[1]);
-  EXPECT_EQ(4 * Segment::kNumSlots, keys.size());
+  EXPECT_EQ(4 * Segment::kSlotNum, keys.size());
 }
 
 TEST_F(DashTest, Merge) {
@@ -346,9 +346,9 @@ TEST_F(DashTest, Merge) {
 TEST_F(DashTest, BumpUp) {
   set<Segment::Key_t> keys = FillSegment(0);
 
-  constexpr unsigned kFirstStashId = Segment::kRegularBucketCnt;
-  constexpr unsigned kSecondStashId = Segment::kRegularBucketCnt + 1;
-  constexpr unsigned kNumSlots = Segment::kNumSlots;
+  constexpr unsigned kFirstStashId = Segment::kBucketNum;
+  constexpr unsigned kSecondStashId = Segment::kBucketNum + 1;
+  constexpr unsigned kSlotNum = Segment::kSlotNum;
 
   EXPECT_TRUE(segment_.GetBucket(0).IsFull());
   EXPECT_TRUE(segment_.GetBucket(1).IsFull());
@@ -394,7 +394,7 @@ TEST_F(DashTest, BumpUp) {
   EXPECT_EQ(touched_bid[1], 0);
 
   segment_.BumpUp(kSecondStashId, 9, hash, RelaxedBumpPolicy{});
-  ASSERT_TRUE(key == segment_.Key(0, kNumSlots - 1) || key == segment_.Key(1, kNumSlots - 1));
+  ASSERT_TRUE(key == segment_.Key(0, kSlotNum - 1) || key == segment_.Key(1, kSlotNum - 1));
   EXPECT_TRUE(segment_.GetBucket(kSecondStashId).IsFull());
   EXPECT_TRUE(Contains(key));
   EXPECT_TRUE(segment_.Key(kSecondStashId, 9));
@@ -408,7 +408,7 @@ TEST_F(DashTest, BumpPolicy) {
   };
 
   set<Segment::Key_t> keys = FillSegment(0);
-  constexpr unsigned kFirstStashId = Segment::kRegularBucketCnt;
+  constexpr unsigned kFirstStashId = Segment::kBucketNum;
 
   EXPECT_TRUE(segment_.GetBucket(0).IsFull());
   EXPECT_TRUE(segment_.GetBucket(1).IsFull());
@@ -651,7 +651,7 @@ TEST_F(DashTest, Eviction) {
     last_slot = bit.slot_id();
     ++bit;
   }
-  ASSERT_LT(last_slot, Dash64::kBucketWidth);
+  ASSERT_LT(last_slot, Dash64::kSlotNum);
 
   bit = dt_.begin();
   dt_.ShiftRight(bit);
@@ -673,7 +673,7 @@ TEST_F(DashTest, Eviction) {
   // Now the bucket is full.
   keys.clear();
 
-  uint64_t last_key = dt_.GetSegment(0)->Key(0, Dash64::kBucketWidth - 1);
+  uint64_t last_key = dt_.GetSegment(0)->Key(0, Dash64::kSlotNum - 1);
   for (Dash64::bucket_iterator bit = dt_.begin(); !bit.is_done(); ++bit) {
     keys.insert(bit->first);
   }
@@ -927,14 +927,14 @@ struct SimpleEvictPolicy {
   // returns number of items evicted from the table.
   // 0 means - nothing has been evicted.
   unsigned Evict(const U64Dash::HotspotBuckets& hotb, U64Dash* me) {
-    constexpr unsigned kRegularBucketCnt = U64Dash::HotspotBuckets::kNumBuckets;
+    constexpr unsigned kBucketNum = U64Dash::HotspotBuckets::kNumBuckets;
 
-    uint32_t bid = hotb.key_hash % kRegularBucketCnt;
-    unsigned slot_index = (hotb.key_hash >> 32) % U64Dash::kBucketWidth;
+    uint32_t bid = hotb.key_hash % kBucketNum;
+    unsigned slot_index = (hotb.key_hash >> 32) % U64Dash::kSlotNum;
 
-    for (unsigned i = 0; i < kRegularBucketCnt; ++i) {
-      auto it = hotb.at((bid + i) % kRegularBucketCnt);
+    for (unsigned i = 0; i < kBucketNum; ++i) {
+      auto it = hotb.at((bid + i) % kBucketNum);
       it += slot_index;
 
       if (it.is_done())
@@ -973,7 +973,7 @@ struct ShiftRightPolicy {
     unsigned stash_pos = hotb.key_hash % kNumStashBuckets;
     auto stash_it = hotb.probes.by_type.stash_buckets[stash_pos];
-    stash_it += (U64Dash::kBucketWidth - 1);  // go to the last slot.
+    stash_it += (U64Dash::kSlotNum - 1);  // go to the last slot.
 
     uint64_t k = stash_it->first;
     DVLOG(1) << "Deleting key " << k << " from " << stash_it.bucket_id() << "/"

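The expectations in these tests follow directly from the constants in the Segment diff: kMaxSize = kTotalBuckets * kSlotNum. A worked example with DefaultSegmentPolicy's values (numbers taken from the diff, standalone code):

#include <iostream>

// DefaultSegmentPolicy values from the diff above.
constexpr unsigned kSlotNum = 12;
constexpr unsigned kBucketNum = 64;
constexpr unsigned kStashBucketNum = 2;

// Composed exactly as in the Segment diff.
constexpr unsigned kTotalBuckets = kBucketNum + kStashBucketNum;  // 66
constexpr unsigned kMaxSize = kTotalBuckets * kSlotNum;           // 792

int main() {
  std::cout << kTotalBuckets << " buckets x " << kSlotNum
            << " slots = " << kMaxSize << " entries per segment\n";
  return 0;
}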

@@ -182,7 +182,7 @@ unsigned PrimeEvictionPolicy::Evict(const PrimeTable::HotspotBuckets& eb, PrimeT
   // choose "randomly" a stash bucket to evict an item.
   auto bucket_it = eb.probes.by_type.stash_buckets[eb.key_hash % kNumStashBuckets];
   auto last_slot_it = bucket_it;
-  last_slot_it += (PrimeTable::kBucketWidth - 1);
+  last_slot_it += (PrimeTable::kSlotNum - 1);
   if (!last_slot_it.is_done()) {
     // don't evict sticky items
     if (last_slot_it->first.IsSticky()) {
@@ -1261,7 +1261,7 @@ void DbSlice::FreeMemWithEvictionStep(DbIndex db_ind, size_t increase_goal_bytes
   auto& db_table = db_arr_[db_ind];
   int32_t num_segments = db_table->prime.GetSegmentCount();
   int32_t num_buckets = PrimeTable::Segment_t::kTotalBuckets;
-  int32_t num_slots = PrimeTable::Segment_t::kNumSlots;
+  int32_t num_slots = PrimeTable::Segment_t::kSlotNum;
 
   size_t used_memory_after;
   size_t evicted = 0;
@@ -1349,8 +1349,8 @@ size_t DbSlice::EvictObjects(size_t memory_to_free, PrimeIterator it, DbTable* t
   PrimeTable::Segment_t* segment = table->prime.GetSegment(it.segment_id());
   DCHECK(segment);
 
-  constexpr unsigned kNumStashBuckets = PrimeTable::Segment_t::kStashBucketCnt;
-  constexpr unsigned kNumRegularBuckets = PrimeTable::Segment_t::kRegularBucketCnt;
+  constexpr unsigned kNumStashBuckets = PrimeTable::Segment_t::kStashBucketNum;
+  constexpr unsigned kNumRegularBuckets = PrimeTable::Segment_t::kBucketNum;
 
   PrimeTable::bucket_iterator it2(it);
   unsigned evicted = 0;
@@ -1370,7 +1370,7 @@ size_t DbSlice::EvictObjects(size_t memory_to_free, PrimeIterator it, DbTable* t
     if (bucket.IsEmpty())
       continue;
 
-    for (int slot_id = PrimeTable::Segment_t::kNumSlots - 1; slot_id >= 0; --slot_id) {
+    for (int slot_id = PrimeTable::Segment_t::kSlotNum - 1; slot_id >= 0; --slot_id) {
      if (!bucket.IsBusy(slot_id))
        continue;
@@ -1394,7 +1394,7 @@ size_t DbSlice::EvictObjects(size_t memory_to_free, PrimeIterator it, DbTable* t
   }
 
   // Try normal buckets now. We iterate from largest slot to smallest across the whole segment.
-  for (int slot_id = PrimeTable::Segment_t::kNumSlots - 1; !evict_succeeded && slot_id >= 0;
+  for (int slot_id = PrimeTable::Segment_t::kSlotNum - 1; !evict_succeeded && slot_id >= 0;
        --slot_id) {
     for (unsigned i = 0; i < kNumRegularBuckets; ++i) {
       unsigned bid = (it.bucket_id() + i) % kNumRegularBuckets;
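The final hunks keep eviction scanning from the highest slot downward across the regular buckets, wrapping from the iterator's bucket. A minimal sketch of that traversal order (toy sizes; start_bid stands in for it.bucket_id()):

#include <iostream>

int main() {
  constexpr unsigned kNumRegularBuckets = 4;
  constexpr int kSlotNum = 3;
  const unsigned start_bid = 2;  // stands in for it.bucket_id()

  // Highest slot first: BumpUp (in the Segment diff above) swaps hot entries
  // toward slot 0, so scanning from kSlotNum - 1 downward probes colder
  // slots before hotter ones.
  for (int slot_id = kSlotNum - 1; slot_id >= 0; --slot_id) {
    for (unsigned i = 0; i < kNumRegularBuckets; ++i) {
      unsigned bid = (start_bid + i) % kNumRegularBuckets;  // wrap-around
      std::cout << "probe bucket " << bid << " slot " << slot_id << '\n';
    }
  }
  return 0;
}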