From 370f334baf1526635a86c6adb271ce199945cf64 Mon Sep 17 00:00:00 2001
From: Kostas Kyrimis
Date: Fri, 29 Mar 2024 11:14:58 +0200
Subject: [PATCH] chore: remove duplicate code from dash and simplify (#2765)

* rename all Policy members for consistency

* remove duplicate code
---
 src/core/dash.h          |  36 +++----
 src/core/dash_internal.h | 198 ++++++++++++++++++---------------------
 src/core/dash_test.cc    |  48 +++++-----
 src/server/db_slice.cc   |  12 +--
 4 files changed, 132 insertions(+), 162 deletions(-)

diff --git a/src/core/dash.h b/src/core/dash.h
index 4328b5b4f..7dd94873e 100644
--- a/src/core/dash.h
+++ b/src/core/dash.h
@@ -34,15 +34,8 @@ class DashTable : public detail::DashTableBase {
   DashTable(const DashTable&) = delete;
   DashTable& operator=(const DashTable&) = delete;
 
-  struct SegmentPolicy {
-    static constexpr unsigned NUM_SLOTS = Policy::kSlotNum;
-    static constexpr unsigned BUCKET_CNT = Policy::kBucketNum;
-    static constexpr unsigned STASH_BUCKET_NUM = Policy::kStashBucketNum;
-    static constexpr bool USE_VERSION = Policy::kUseVersion;
-  };
-
   using Base = detail::DashTableBase;
-  using SegmentType = detail::Segment<_Key, _Value, SegmentPolicy>;
+  using SegmentType = detail::Segment<_Key, _Value, Policy>;
   using SegmentIterator = typename SegmentType::Iterator;
 
  public:
@@ -50,17 +43,12 @@ class DashTable : public detail::DashTableBase {
   using Value_t = _Value;
   using Segment_t = SegmentType;
 
-  //! Number of "official" buckets that are used to position a key. In other words, does not
-  //! include stash buckets.
-  static constexpr unsigned kLogicalBucketNum = Policy::kBucketNum;
-
-  //! Total number of buckets in a segment (including stash).
-  static constexpr unsigned kPhysicalBucketNum = SegmentType::kTotalBuckets;
-
-  static constexpr unsigned kBucketWidth = Policy::kSlotNum;
   static constexpr double kTaxAmount = SegmentType::kTaxSize;
   static constexpr size_t kSegBytes = sizeof(SegmentType);
   static constexpr size_t kSegCapacity = SegmentType::capacity();
-  static constexpr bool kUseVersion = Policy::kUseVersion;
+  static constexpr size_t kSlotNum = SegmentType::kSlotNum;
+  static constexpr size_t kBucketNum = SegmentType::kBucketNum;
 
   // if IsSingleBucket is true - iterates only over a single bucket.
   template <bool IsSingleBucket> class Iterator;
@@ -556,11 +544,11 @@ void DashTable<_Key, _Value, Policy>::CVCUponInsert(uint64_t ver_threshold, const
     return;
   }
 
-  static_assert(kPhysicalBucketNum < 0xFF, "");
+  static_assert(SegmentType::kTotalBuckets < 0xFF, "");
 
   // Segment is full, we need to return the whole segment, because it can be split
   // and its entries can be reshuffled into different buckets.
-  for (uint8_t i = 0; i < kPhysicalBucketNum; ++i) {
+  for (uint8_t i = 0; i < SegmentType::kTotalBuckets; ++i) {
     if (target->GetVersion(i) < ver_threshold && !target->GetBucket(i).IsEmpty()) {
       cb(bucket_iterator{this, seg_id, i});
     }
@@ -646,8 +634,8 @@ bool DashTable<_Key, _Value, Policy>::ShiftRight(bucket_iterator it) {
   typename Segment_t::Hash_t hash_val = 0;
   auto& bucket = seg->GetBucket(it.bucket_id_);
 
-  if (bucket.GetBusy() & (1 << (kBucketWidth - 1))) {
-    it.slot_id_ = kBucketWidth - 1;
+  if (bucket.GetBusy() & (1 << (kSlotNum - 1))) {
+    it.slot_id_ = kSlotNum - 1;
     hash_val = DoHash(it->first);
     policy_.DestroyKey(it->first);
     policy_.DestroyValue(it->second);
@@ -800,7 +788,7 @@ auto DashTable<_Key, _Value, Policy>::InsertInternal(U&& key, V&& value, EvictionPolicy
 
     for (unsigned i = 0; i < Policy::kStashBucketNum; ++i) {
       hotspot.probes.by_type.stash_buckets[i] =
-          bucket_iterator{this, target_seg_id, uint8_t(kLogicalBucketNum + i), 0};
+          bucket_iterator{this, target_seg_id, uint8_t(Policy::kBucketNum + i), 0};
     }
     hotspot.num_buckets = HotspotBuckets::kNumBuckets;
@@ -910,7 +898,7 @@ auto DashTable<_Key, _Value, Policy>::TraverseBySegmentOrder(Cursor curs, Cb&& cb)
     s->TraverseBucket(bid, std::move(dt_cb));
     ++bid;
 
-    if (bid == kPhysicalBucketNum) {
+    if (bid == SegmentType::kTotalBuckets) {
       sid = NextSeg(sid);
       bid = 0;
       if (sid >= segment_.size()) {
@@ -924,7 +912,7 @@ auto DashTable<_Key, _Value, Policy>::TraverseBySegmentOrder(Cursor curs, Cb&& cb)
 template <typename _Key, typename _Value, typename Policy>
 auto DashTable<_Key, _Value, Policy>::GetRandomCursor(absl::BitGen* bitgen) -> Cursor {
   uint32_t sid = absl::Uniform<uint32_t>(*bitgen, 0, segment_.size());
-  uint8_t bid = absl::Uniform<uint8_t>(*bitgen, 0, kLogicalBucketNum);
+  uint8_t bid = absl::Uniform<uint8_t>(*bitgen, 0, Policy::kBucketNum);
   return Cursor{global_depth_, sid, bid};
 }
 
@@ -932,7 +920,7 @@ auto DashTable<_Key, _Value, Policy>::GetRandomCursor(absl::BitGen* bitgen) -> Cursor {
 template <typename _Key, typename _Value, typename Policy>
 template <typename Cb>
 auto DashTable<_Key, _Value, Policy>::Traverse(Cursor curs, Cb&& cb) -> Cursor {
-  if (curs.bucket_id() >= kLogicalBucketNum)  // sanity.
+  if (curs.bucket_id() >= Policy::kBucketNum)  // sanity.
     return 0;
 
   uint32_t sid = curs.segment_id(global_depth_);
@@ -955,7 +943,7 @@ auto DashTable<_Key, _Value, Policy>::Traverse(Cursor curs, Cb&& cb) -> Cursor {
       sid = 0;
       ++bid;
 
-      if (bid >= kLogicalBucketNum)
+      if (bid >= Policy::kBucketNum)
         return 0;  // "End of traversal" cursor.
     }
   } while (!fetched);

diff --git a/src/core/dash_internal.h b/src/core/dash_internal.h
index a4072f957..ec877abba 100644
--- a/src/core/dash_internal.h
+++ b/src/core/dash_internal.h
@@ -289,36 +289,38 @@ static_assert(alignof(VersionedBB<14, 4>) == 1, "");
 static_assert(sizeof(VersionedBB<12, 4>) == 12 * 2 + 8, "");
 static_assert(sizeof(VersionedBB<14, 4>) <= 14 * 2 + 8, "");
 
-// Segment - static-hashtable of size NUM_SLOTS*(BUCKET_CNT + STASH_BUCKET_NUM).
+// Segment - static-hashtable of size kSlotNum*(kBucketNum + kStashBucketNum).
 struct DefaultSegmentPolicy {
-  static constexpr unsigned NUM_SLOTS = 12;
-  static constexpr unsigned BUCKET_CNT = 64;
-  static constexpr unsigned STASH_BUCKET_NUM = 2;
-  static constexpr bool USE_VERSION = true;
+  static constexpr unsigned kSlotNum = 12;
+  static constexpr unsigned kBucketNum = 64;
+  static constexpr unsigned kStashBucketNum = 2;
+  static constexpr bool kUseVersion = true;
 };
 
 template <typename _Key, typename _Value, typename Policy = DefaultSegmentPolicy> class Segment {
-  static constexpr unsigned BUCKET_CNT = Policy::BUCKET_CNT;
-  static constexpr unsigned STASH_BUCKET_NUM = Policy::STASH_BUCKET_NUM;
-  static constexpr unsigned NUM_SLOTS = Policy::NUM_SLOTS;
-  static constexpr bool USE_VERSION = Policy::USE_VERSION;
+ public:
+  static constexpr unsigned kSlotNum = Policy::kSlotNum;
+  static constexpr unsigned kBucketNum = Policy::kBucketNum;
+  static constexpr unsigned kStashBucketNum = Policy::kStashBucketNum;
+  static constexpr bool kUseVersion = Policy::kUseVersion;
 
-  static_assert(BUCKET_CNT + STASH_BUCKET_NUM < 255);
+ private:
+  static_assert(kBucketNum + kStashBucketNum < 255);
   static constexpr unsigned kFingerBits = 8;
 
   using BucketType =
-      std::conditional_t<USE_VERSION, VersionedBB<NUM_SLOTS, 4>, BucketBase<NUM_SLOTS, 4>>;
+      std::conditional_t<kUseVersion, VersionedBB<kSlotNum, 4>, BucketBase<kSlotNum, 4>>;
 
   struct Bucket : public BucketType {
     using BucketType::kNanSlot;
     using typename BucketType::SlotId;
 
-    _Key key[NUM_SLOTS];
-    _Value value[NUM_SLOTS];
+    _Key key[kSlotNum];
+    _Value value[kSlotNum];
 
     template <typename U, typename V>
     void Insert(uint8_t slot, U&& u, V&& v, uint8_t meta_hash, bool probe) {
-      assert(slot < NUM_SLOTS);
+      assert(slot < kSlotNum);
 
       key[slot] = std::forward<U>(u);
       value[slot] = std::forward<V>(v);
@@ -337,14 +339,13 @@
 
-    // calls for each busy slot: cb(iterator, probe)
-    template <typename Cb> void ForEachSlot(Cb&& cb) const {
+    template <typename This, typename Cb> void ForEachSlotImpl(This obj, Cb&& cb) const {
       uint32_t mask = this->GetBusy();
       uint32_t probe_mask = this->GetProbe(true);
 
-      for (unsigned j = 0; j < NUM_SLOTS; ++j) {
+      for (unsigned j = 0; j < kSlotNum; ++j) {
         if (mask & 1) {
-          cb(this, j, probe_mask & 1);
+          cb(obj, j, probe_mask & 1);
         }
         mask >>= 1;
         probe_mask >>= 1;
@@ -352,17 +353,13 @@
 
-    // calls for each busy slot: cb(iterator, probe)
-    template <typename Cb> void ForEachSlot(Cb&& cb) {
-      uint32_t mask = this->GetBusy();
-      uint32_t probe_mask = this->GetProbe(true);
+    template <typename Cb> void ForEachSlot(Cb&& cb) const {
+      ForEachSlotImpl(this, std::forward<Cb>(cb));
+    }
 
-      for (unsigned j = 0; j < NUM_SLOTS; ++j) {
-        if (mask & 1) {
-          cb(this, j, probe_mask & 1);
-        }
-        mask >>= 1;
-        probe_mask >>= 1;
-      }
+    // calls for each busy slot: cb(iterator, probe)
+    template <typename Cb> void ForEachSlot(Cb&& cb) {
+      ForEachSlotImpl(this, std::forward<Cb>(cb));
+    }
   };  // class Bucket
@@ -392,12 +389,9 @@
 
-  template <bool UV = USE_VERSION>
-  std::enable_if_t<UV, uint64_t> GetVersion(uint8_t bid) const {
+  template <bool UV = kUseVersion> std::enable_if_t<UV, uint64_t> GetVersion(uint8_t bid) const {
     return bucket_[bid].GetVersion();
   }
 
-  template <bool UV = USE_VERSION>
-  std::enable_if_t<UV, void> SetVersion(uint8_t bid, uint64_t v) {
+  template <bool UV = kUseVersion> std::enable_if_t<UV, void> SetVersion(uint8_t bid, uint64_t v) {
     return bucket_[bid].SetVersion(v);
   }
@@ -466,7 +458,7 @@
-  template <bool UV = USE_VERSION>
+  template <bool UV = kUseVersion>
   std::enable_if_t<UV, unsigned> CVCOnInsert(uint64_t ver_threshold, Hash_t key_hash,
                                              uint8_t bid[2]) const;
 
   // Returns bucket ids whose versions will change as a result of bumping up the item
   // Can return up to 3 buckets.
-  template <bool UV = USE_VERSION>
+  template <bool UV = kUseVersion>
   std::enable_if_t<UV, unsigned> CVCOnBump(uint64_t ver_threshold, unsigned bid, unsigned slot,
                                            Hash_t hash, uint8_t result_bid[3]) const;
@@ -542,10 +534,10 @@
   bool ShiftRight(uint8_t bid, Hash_t right_hashval) {
-    if (bid >= kRegularBucketCnt) {  // Stash
-      constexpr auto kLastSlotMask = 1u << (kNumSlots - 1);
+    if (bid >= kBucketNum) {  // Stash
+      constexpr auto kLastSlotMask = 1u << (kSlotNum - 1);
       if (bucket_[bid].GetBusy() & kLastSlotMask)
-        RemoveStashReference(bid - kRegularBucketCnt, right_hashval);
+        RemoveStashReference(bid - kBucketNum, right_hashval);
     }
 
     return bucket_[bid].ShiftRight();
@@ -565,15 +557,15 @@
   static unsigned BucketIndex(Hash_t hash) {
-    return (hash >> kFingerBits) % kRegularBucketCnt;
+    return (hash >> kFingerBits) % kBucketNum;
   }
 
   static uint8_t NextBid(uint8_t bid) {
-    return bid < kRegularBucketCnt - 1 ? bid + 1 : 0;
+    return bid < kBucketNum - 1 ? bid + 1 : 0;
   }
 
   static uint8_t PrevBid(uint8_t bid) {
-    return bid ? bid - 1 : kRegularBucketCnt - 1;
+    return bid ? bid - 1 : kBucketNum - 1;
   }
 
   // if own_items is true it means we try to move owned item to probing bucket.
@@ -591,7 +583,7 @@
     unsigned delta = __builtin_ctz(mask);
     mask >>= delta;
-    for (unsigned i = delta; i < NUM_SLOTS; ++i) {
+    for (unsigned i = delta; i < kSlotNum; ++i) {
       if ((mask & 1) && pred(key[i], k)) {
         return i;
       }
@@ -1043,7 +1035,7 @@ auto Segment<Key, Value, Policy>::Bucket::FindByFp(uint8_t fp_hash, bool probe,
 template <typename Key, typename Value, typename Policy>
 bool Segment<Key, Value, Policy>::Bucket::ShiftRight() {
   bool res = BucketType::ShiftRight();
-  for (int i = NUM_SLOTS - 1; i > 0; i--) {
+  for (int i = kSlotNum - 1; i > 0; i--) {
     std::swap(key[i], key[i - 1]);
     std::swap(value[i], value[i - 1]);
   }
@@ -1066,7 +1058,7 @@ auto Segment<Key, Value, Policy>::TryMoveFromStash(unsigned stash_id, unsigned stash_slot_id,
                                                    Hash_t key_hash) -> Iterator {
   uint8_t bid = BucketIndex(key_hash);
   uint8_t hash_fp = key_hash & kFpMask;
-  uint8_t stash_bid = kRegularBucketCnt + stash_id;
+  uint8_t stash_bid = kBucketNum + stash_id;
   auto& key = Key(stash_bid, stash_slot_id);
   auto& value = Value(stash_bid, stash_slot_id);
@@ -1079,7 +1071,7 @@ auto Segment<Key, Value, Policy>::TryMoveFromStash(unsigned stash_id, unsigned stash_slot_id,
   }
 
   if (reg_slot >= 0) {
-    if constexpr (USE_VERSION) {
+    if constexpr (kUseVersion) {
       // We maintain the invariant for the physical bucket by updating the version when
      // the entries move between buckets.
       uint64_t ver = bucket_[stash_bid].GetVersion();
@@ -1140,9 +1132,9 @@ auto Segment<Key, Value, Policy>::FindIt(U&& key, Hash_t key_hash, Pred&& cf) const
   }
 
   auto stash_cb = [&](unsigned overflow_index, unsigned pos) -> SlotId {
-    assert(pos < STASH_BUCKET_NUM);
+    assert(pos < kStashBucketNum);
 
-    pos += kRegularBucketCnt;
+    pos += kBucketNum;
     const Bucket& bucket = bucket_[pos];
     return bucket.FindByFp(fp_hash, false, key, cf);
   };
@@ -1152,10 +1144,10 @@ auto Segment<Key, Value, Policy>::FindIt(U&& key, Hash_t key_hash, Pred&& cf) const
     stats.stash_overflow_probes++;
 #endif
 
-    for (unsigned i = 0; i < STASH_BUCKET_NUM; ++i) {
+    for (unsigned i = 0; i < kStashBucketNum; ++i) {
       auto sid = stash_cb(0, i);
       if (sid != BucketType::kNanSlot) {
-        return Iterator{uint8_t(kRegularBucketCnt + i), sid};
+        return Iterator{uint8_t(kBucketNum + i), sid};
      }
     }
@@ -1169,12 +1161,12 @@ auto Segment<Key, Value, Policy>::FindIt(U&& key, Hash_t key_hash, Pred&& cf) const
   auto stash_res = target.IterateStash(fp_hash, false, stash_cb);
   if (stash_res.second != BucketType::kNanSlot) {
-    return Iterator{uint8_t(kRegularBucketCnt + stash_res.first), stash_res.second};
+    return Iterator{uint8_t(kBucketNum + stash_res.first), stash_res.second};
   }
 
   stash_res = probe.IterateStash(fp_hash, true, stash_cb);
   if (stash_res.second != BucketType::kNanSlot) {
-    return Iterator{uint8_t(kRegularBucketCnt + stash_res.first), stash_res.second};
+    return Iterator{uint8_t(kBucketNum + stash_res.first), stash_res.second};
   }
   return Iterator{};
 }
@@ -1199,8 +1191,8 @@ void Segment<Key, Value, Policy>::Delete(const Iterator& it, Hash_t key_hash) {
   auto& b = bucket_[it.index];
 
-  if (it.index >= kRegularBucketCnt) {
-    RemoveStashReference(it.index - kRegularBucketCnt, key_hash);
+  if (it.index >= kBucketNum) {
+    RemoveStashReference(it.index - kBucketNum, key_hash);
   }
 
   b.Delete(it.slot);
@@ -1219,7 +1211,7 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
   // do_versioning();
   auto is_mine = [this](Hash_t hash) { return (hash >> (64 - local_depth_) & 1) == 0; };
 
-  for (unsigned i = 0; i < kRegularBucketCnt; ++i) {
+  for (unsigned i = 0; i < kBucketNum; ++i) {
     uint32_t invalid_mask = 0;
 
     auto cb = [&](auto* bucket, unsigned slot, bool probe) {
@@ -1257,7 +1249,7 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
       assert(it.found());
       (void)it;
 
-      if constexpr (USE_VERSION) {
+      if constexpr (kUseVersion) {
         // Maintaining consistent versioning.
         uint64_t ver = bucket->GetVersion();
         dest_right->bucket_[it.index].UpdateVersion(ver);
@@ -1268,9 +1260,9 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
     bucket_[i].ClearSlots(invalid_mask);
   }
 
-  for (unsigned i = 0; i < STASH_BUCKET_NUM; ++i) {
+  for (unsigned i = 0; i < kStashBucketNum; ++i) {
     uint32_t invalid_mask = 0;
-    unsigned bid = kRegularBucketCnt + i;
+    unsigned bid = kBucketNum + i;
     Bucket& stash = bucket_[bid];
 
     auto cb = [&](auto* bucket, unsigned slot, bool probe) {
@@ -1293,7 +1285,7 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
       (void)it;
       assert(it.index != kNanBid);
 
-      if constexpr (USE_VERSION) {
+      if constexpr (kUseVersion) {
         // Update the version in the destination bucket.
         uint64_t ver = bucket->GetVersion();
         dest_right->bucket_[it.index].UpdateVersion(ver);
@@ -1327,7 +1319,7 @@ void Segment<Key, Value, Policy>::MoveFrom(HFunc&& hfunc, Segment* src) {
       return;
     }
 
-    if constexpr (USE_VERSION) {
+    if constexpr (kUseVersion) {
       // Update the version in the destination bucket.
       this->bucket_[it.index].UpdateVersion(bucket->GetVersion());
     }
@@ -1355,7 +1347,7 @@ int Segment<Key, Value, Policy>::MoveToOther(bool own_items, unsigned from_bid,
     return -1;
 
   // We never decrease the version of the entry.
-  if constexpr (USE_VERSION) {
+  if constexpr (kUseVersion) {
     auto& dst = bucket_[to_bid];
     dst.UpdateVersion(src.GetVersion());
   }
@@ -1425,13 +1417,13 @@ auto Segment<Key, Value, Policy>::InsertUniq(U&& key, V&& value, Hash_t key_hash,
   }
 
   // we balance stash fill rate by starting from y % kStashBucketNum.
-  for (unsigned i = 0; i < STASH_BUCKET_NUM; ++i) {
-    unsigned stash_pos = (bid + i) % STASH_BUCKET_NUM;
-    int stash_slot = TryInsertToBucket(kRegularBucketCnt + stash_pos, std::forward<U>(key),
+  for (unsigned i = 0; i < kStashBucketNum; ++i) {
+    unsigned stash_pos = (bid + i) % kStashBucketNum;
+    int stash_slot = TryInsertToBucket(kBucketNum + stash_pos, std::forward<U>(key),
                                        std::forward<V>(value), meta_hash, false);
     if (stash_slot >= 0) {
       target.SetStashPtr(stash_pos, meta_hash, &neighbor);
-      return Iterator{uint8_t(kRegularBucketCnt + stash_pos), uint8_t(stash_slot)};
+      return Iterator{uint8_t(kBucketNum + stash_pos), uint8_t(stash_slot)};
     }
   }
@@ -1462,33 +1454,31 @@ std::enable_if_t<UV, unsigned> Segment<Key, Value, Policy>::CVCOnInsert(uint64_t ver_threshold,
   // both nid and bid are full.
   const uint8_t after_next = NextBid(nid);
 
-  if (CheckIfMovesToOther(true, nid, after_next)) {
+  auto do_fun = [this, ver_threshold, &cnt, &bid_res, after_next](auto bid, auto nid) {
     // We could tighten the checks here and below because
-    // if nid is less than ver_threshold, then after_next won't be affected and won't cross
+    // if nid is less than ver_threshold, then after_next won't be affected and won't cross
     // ver_threshold as well.
-    if (bucket_[nid].GetVersion() < ver_threshold)
+    if (bucket_[bid].GetVersion() < ver_threshold)
+      bid_res[cnt++] = bid;
+
+    if (!bucket_[nid].IsEmpty() && bucket_[nid].GetVersion() < ver_threshold)
       bid_res[cnt++] = nid;
+  };
 
-    if (!bucket_[after_next].IsEmpty() && bucket_[after_next].GetVersion() < ver_threshold)
-      bid_res[cnt++] = after_next;
-
+  if (CheckIfMovesToOther(true, nid, after_next)) {
+    do_fun(nid, after_next);
     return cnt;
   }
 
   const uint8_t prev_bid = PrevBid(bid);
   if (CheckIfMovesToOther(false, bid, prev_bid)) {
-    if (bucket_[bid].GetVersion() < ver_threshold)
-      bid_res[cnt++] = bid;
-
-    if (!bucket_[prev_bid].IsEmpty() && bucket_[prev_bid].GetVersion() < ver_threshold)
-      bid_res[cnt++] = prev_bid;
-
+    do_fun(bid, prev_bid);
     return cnt;
   }
 
   // Important to repeat exactly the insertion logic of InsertUnique.
-  for (unsigned i = 0; i < STASH_BUCKET_NUM; ++i) {
-    unsigned stash_bid = kRegularBucketCnt + ((bid + i) % STASH_BUCKET_NUM);
+  for (unsigned i = 0; i < kStashBucketNum; ++i) {
+    unsigned stash_bid = kBucketNum + ((bid + i) % kStashBucketNum);
     const Bucket& stash = bucket_[stash_bid];
     if (!stash.IsFull()) {
       if (!stash.IsEmpty() && stash.GetVersion() < ver_threshold)
         bid_res[cnt++] = stash_bid;
@@ -1507,7 +1497,7 @@ std::enable_if_t<UV, unsigned> Segment<Key, Value, Policy>::CVCOnBump(uint64_t ver_threshold,
                                                                       unsigned bid, unsigned slot,
                                                                       Hash_t hash,
                                                                       uint8_t result_bid[3]) const {
-  if (bid < kRegularBucketCnt) {
+  if (bid < kBucketNum) {
     // Right now we do not migrate entries from nid to bid, only from stash to normal buckets.
     // The reason for this is that CVCBumpUp implementation swaps the slots of the same bucket
     // so there is no further action needed.
@@ -1541,34 +1531,27 @@ std::enable_if_t<UV, unsigned> Segment<Key, Value, Policy>::CVCOnBump(uint64_t ver_threshold,
   const uint8_t probing_bid = NextBid(target_bid);
   const auto& probing = bucket_[probing_bid];
 
-  unsigned stash_pos = bid - kRegularBucketCnt;
+  unsigned stash_pos = bid - kBucketNum;
   uint8_t fp_hash = hash & kFpMask;
 
   auto find_stash = [&](unsigned, unsigned pos) {
     return stash_pos == pos ? slot : BucketType::kNanSlot;
   };
 
-  if (target.GetVersion() < ver_threshold) {
+  auto do_fun = [&result, &result_bid, fp_hash, find_stash](auto& target, auto target_bid,
+                                                            bool probe) {
     if (target.HasStashOverflow()) {
       result_bid[result++] = target_bid;
     } else {
-      SlotId slot_id = target.IterateStash(fp_hash, false, find_stash).second;
+      SlotId slot_id = target.IterateStash(fp_hash, probe, find_stash).second;
       if (slot_id != BucketType::kNanSlot) {
         result_bid[result++] = target_bid;
       }
     }
-  }
+  };
 
-  if (probing.GetVersion() < ver_threshold) {
-    if (probing.HasStashOverflow()) {
-      result_bid[result++] = probing_bid;
-    } else {
-      SlotId slot_id = probing.IterateStash(fp_hash, true, find_stash).second;
-      if (slot_id != BucketType::kNanSlot) {
-        result_bid[result++] = probing_bid;
-      }
-    }
-  }
+  do_fun(target, target_bid, false);
+  do_fun(probing, probing_bid, true);
 
   return result;
 }
@@ -1585,7 +1568,7 @@ void Segment<Key, Value, Policy>::TraverseBucket(uint8_t bid, Cb&& cb) {
 template <typename Key, typename Value, typename Policy>
 template <typename HashFn, typename Cb>
 bool Segment<Key, Value, Policy>::TraverseLogicalBucket(uint8_t bid, HashFn&& hfun, Cb&& cb) const {
-  assert(bid < kRegularBucketCnt);
+  assert(bid < kBucketNum);
 
   const Bucket& b = bucket_[bid];
   bool found = false;
@@ -1615,7 +1598,7 @@ bool Segment<Key, Value, Policy>::TraverseLogicalBucket(uint8_t bid, HashFn&& hfun, Cb&& cb) const {
   // Finally go over stash buckets and find those entries that belong to b.
   if (b.HasStash()) {
     // do not bother with overflow fps. Just go over all the stash buckets.
-    for (uint8_t j = kRegularBucketCnt; j < kTotalBuckets; ++j) {
+    for (uint8_t j = kBucketNum; j < kTotalBuckets; ++j) {
       const auto& stashb = bucket_[j];
       stashb.ForEachSlot([&](auto* bucket, SlotId slot, bool probe) {
         if (BucketIndex(hfun(bucket->key[slot])) == bid) {
@@ -1671,7 +1654,7 @@ auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_hash,
   if (!bp.CanBump(from.key[slot])) {
     return Iterator{bid, slot};
   }
-  if (bid < kRegularBucketCnt) {
+  if (bid < kBucketNum) {
     // non stash case.
     if (slot > 0 && bp.CanBump(from.key[slot - 1])) {
       from.Swap(slot - 1, slot);
@@ -1683,7 +1666,7 @@ auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_hash,
 
   // stash bucket
   // We swap the item with the item in the "normal" bucket in the last slot.
-  unsigned stash_pos = bid - kRegularBucketCnt;
+  unsigned stash_pos = bid - kBucketNum;
 
   // If we have an empty space for some reason just unload the stash entry.
   if (Iterator it = TryMoveFromStash(stash_pos, slot, key_hash); it.found()) {
@@ -1700,11 +1683,10 @@ auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_hash,
   // bucket_offs - 0 if exact bucket, 1 if neighbour
   unsigned bucket_offs = target.UnsetStashPtr(fp_hash, stash_pos, &next);
-  uint8_t swap_bid = (target_bid + bucket_offs) % kRegularBucketCnt;
-  // TODO exit early when target_bid == swap_bid
+  uint8_t swap_bid = (target_bid + bucket_offs) % kBucketNum;
   auto& swapb = bucket_[swap_bid];
 
-  constexpr unsigned kLastSlot = kNumSlots - 1;
+  constexpr unsigned kLastSlot = kSlotNum - 1;
   assert(swapb.GetBusy() & (1 << kLastSlot));
 
   // Don't move sticky items back to the stash because they're not evictable
@@ -1730,7 +1712,7 @@ auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_hash,
   swapb.SetHash(kLastSlot, fp_hash, bucket_offs == 1);
 
   // update versions.
-  if constexpr (USE_VERSION) {
+  if constexpr (kUseVersion) {
     uint64_t from_ver = from.GetVersion();
     uint64_t swap_ver = swapb.GetVersion();
     if (from_ver < swap_ver) {
@@ -1759,8 +1741,8 @@ template <typename Key, typename Value, typename Policy>
 template <typename HFunc>
 unsigned Segment<Key, Value, Policy>::UnloadStash(HFunc&& hfunc) {
   unsigned moved = 0;
 
-  for (unsigned i = 0; i < STASH_BUCKET_NUM; ++i) {
-    unsigned bid = kRegularBucketCnt + i;
+  for (unsigned i = 0; i < kStashBucketNum; ++i) {
+    unsigned bid = kBucketNum + i;
     Bucket& stash = bucket_[bid];
     uint32_t invalid_mask = 0;

diff --git a/src/core/dash_test.cc b/src/core/dash_test.cc
index ebff3677d..843dda525 100644
--- a/src/core/dash_test.cc
+++ b/src/core/dash_test.cc
@@ -163,7 +163,7 @@ set<Segment::Key_t> DashTest::FillSegment(unsigned bid) {
   std::equal_to<Segment::Key_t> eq;
   for (Segment::Key_t key = 0; key < 1000000u; ++key) {
     uint64_t hash = dt_.DoHash(key);
-    unsigned bi = (hash >> 8) % Segment::kRegularBucketCnt;
+    unsigned bi = (hash >> 8) % Segment::kBucketNum;
     if (bi != bid)
       continue;
     uint8_t fp = hash & 0xFF;
@@ -219,7 +219,7 @@ TEST_F(DashTest, Basic) {
 
   auto hfun = &UInt64Policy::HashFn;
 
-  auto cursor = segment_.TraverseLogicalBucket((hash >> 8) % Segment::kRegularBucketCnt, hfun, cb);
+  auto cursor = segment_.TraverseLogicalBucket((hash >> 8) % Segment::kBucketNum, hfun, cb);
   ASSERT_EQ(1, has_called);
   ASSERT_EQ(0, segment_.TraverseLogicalBucket(cursor, hfun, cb));
   ASSERT_EQ(1, has_called);
@@ -237,11 +237,11 @@ TEST_F(DashTest, Segment) {
   set<Segment::Key_t> keys = FillSegment(0);
 
   EXPECT_TRUE(segment_.GetBucket(0).IsFull() && segment_.GetBucket(1).IsFull());
-  for (size_t i = 2; i < Segment::kRegularBucketCnt; ++i) {
+  for (size_t i = 2; i < Segment::kBucketNum; ++i) {
     EXPECT_EQ(0, segment_.GetBucket(i).Size());
   }
-  EXPECT_EQ(4 * Segment::kNumSlots, keys.size());
-  EXPECT_EQ(4 * Segment::kNumSlots, segment_.SlowSize());
+  EXPECT_EQ(4 * Segment::kSlotNum, keys.size());
+  EXPECT_EQ(4 * Segment::kSlotNum, segment_.SlowSize());
 
   auto hfun = &UInt64Policy::HashFn;
   unsigned has_called = 0;
@@ -254,12 +254,12 @@ TEST_F(DashTest, Segment) {
   segment_.TraverseAll(cb);
   ASSERT_EQ(keys.size(), has_called);
 
-  ASSERT_TRUE(segment_.GetBucket(Segment::kRegularBucketCnt).IsFull());
-  std::array<uint64_t, 2 * Segment::kNumSlots> arr;
+  ASSERT_TRUE(segment_.GetBucket(Segment::kBucketNum).IsFull());
+  std::array<uint64_t, 2 * Segment::kSlotNum> arr;
   uint64_t* next = arr.begin();
-  for (unsigned i = Segment::kRegularBucketCnt; i < Segment::kRegularBucketCnt + 2; ++i) {
+  for (unsigned i = Segment::kBucketNum; i < Segment::kBucketNum + 2; ++i) {
     const auto* k = &segment_.Key(i, 0);
-    next = std::copy(k, k + Segment::kNumSlots, next);
+    next = std::copy(k, k + Segment::kSlotNum, next);
   }
   std::equal_to<uint64_t> eq;
   for (auto k : arr) {
     uint64_t hash = dt_.DoHash(k);
     auto it = segment_.FindIt(k, hash, eq);
     ASSERT_TRUE(it.found());
     segment_.Delete(it, hash);
   }
 
-  EXPECT_EQ(2 * Segment::kNumSlots, segment_.SlowSize());
+  EXPECT_EQ(2 * Segment::kSlotNum, segment_.SlowSize());
   ASSERT_FALSE(Contains(arr.front()));
 }
@@ -331,7 +331,7 @@ TEST_F(DashTest, Split) {
   ASSERT_EQ(segment_.SlowSize(), sum[0]);
   EXPECT_EQ(s2.SlowSize(), sum[1]);
   EXPECT_EQ(keys.size(), sum[0] + sum[1]);
-  EXPECT_EQ(4 * Segment::kNumSlots, keys.size());
+  EXPECT_EQ(4 * Segment::kSlotNum, keys.size());
 }
 
 TEST_F(DashTest, Merge) {
@@ -346,9 +346,9 @@ TEST_F(DashTest, BumpUp) {
   set<Segment::Key_t> keys = FillSegment(0);
 
-  constexpr unsigned kFirstStashId = Segment::kRegularBucketCnt;
-  constexpr unsigned kSecondStashId = Segment::kRegularBucketCnt + 1;
-  constexpr unsigned kNumSlots = Segment::kNumSlots;
+  constexpr unsigned kFirstStashId = Segment::kBucketNum;
+  constexpr unsigned kSecondStashId = Segment::kBucketNum + 1;
+  constexpr unsigned kSlotNum = Segment::kSlotNum;
 
   EXPECT_TRUE(segment_.GetBucket(0).IsFull());
   EXPECT_TRUE(segment_.GetBucket(1).IsFull());
@@ -394,7 +394,7 @@ TEST_F(DashTest, BumpUp) {
   EXPECT_EQ(touched_bid[1], 0);
 
   segment_.BumpUp(kSecondStashId, 9, hash, RelaxedBumpPolicy{});
-  ASSERT_TRUE(key == segment_.Key(0, kNumSlots - 1) || key == segment_.Key(1, kNumSlots - 1));
+  ASSERT_TRUE(key == segment_.Key(0, kSlotNum - 1) || key == segment_.Key(1, kSlotNum - 1));
   EXPECT_TRUE(segment_.GetBucket(kSecondStashId).IsFull());
   EXPECT_TRUE(Contains(key));
   EXPECT_TRUE(segment_.Key(kSecondStashId, 9));
@@ -408,7 +408,7 @@ TEST_F(DashTest, BumpPolicy) {
   };
 
   set<Segment::Key_t> keys = FillSegment(0);
-  constexpr unsigned kFirstStashId = Segment::kRegularBucketCnt;
+  constexpr unsigned kFirstStashId = Segment::kBucketNum;
 
   EXPECT_TRUE(segment_.GetBucket(0).IsFull());
   EXPECT_TRUE(segment_.GetBucket(1).IsFull());
@@ -651,7 +651,7 @@ TEST_F(DashTest, Eviction) {
     last_slot = bit.slot_id();
     ++bit;
   }
-  ASSERT_LT(last_slot, Dash64::kBucketWidth);
+  ASSERT_LT(last_slot, Dash64::kSlotNum);
 
   bit = dt_.begin();
   dt_.ShiftRight(bit);
@@ -673,7 +673,7 @@ TEST_F(DashTest, Eviction) {
 
   // Now the bucket is full.
   keys.clear();
-  uint64_t last_key = dt_.GetSegment(0)->Key(0, Dash64::kBucketWidth - 1);
+  uint64_t last_key = dt_.GetSegment(0)->Key(0, Dash64::kSlotNum - 1);
   for (Dash64::bucket_iterator bit = dt_.begin(); !bit.is_done(); ++bit) {
     keys.insert(bit->first);
   }
@@ -927,14 +927,14 @@ struct SimpleEvictPolicy {
   // returns number of items evicted from the table.
   // 0 means - nothing has been evicted.
   unsigned Evict(const U64Dash::HotspotBuckets& hotb, U64Dash* me) {
-    constexpr unsigned kRegularBucketCnt = U64Dash::HotspotBuckets::kNumBuckets;
+    constexpr unsigned kBucketNum = U64Dash::HotspotBuckets::kNumBuckets;
 
-    uint32_t bid = hotb.key_hash % kRegularBucketCnt;
+    uint32_t bid = hotb.key_hash % kBucketNum;
 
-    unsigned slot_index = (hotb.key_hash >> 32) % U64Dash::kBucketWidth;
+    unsigned slot_index = (hotb.key_hash >> 32) % U64Dash::kSlotNum;
 
-    for (unsigned i = 0; i < kRegularBucketCnt; ++i) {
-      auto it = hotb.at((bid + i) % kRegularBucketCnt);
+    for (unsigned i = 0; i < kBucketNum; ++i) {
+      auto it = hotb.at((bid + i) % kBucketNum);
       it += slot_index;
 
       if (it.is_done())
        continue;
@@ -973,7 +973,7 @@ struct ShiftRightPolicy {
     unsigned stash_pos = hotb.key_hash % kNumStashBuckets;
 
     auto stash_it = hotb.probes.by_type.stash_buckets[stash_pos];
-    stash_it += (U64Dash::kBucketWidth - 1);  // go to the last slot.
+    stash_it += (U64Dash::kSlotNum - 1);  // go to the last slot.
     uint64_t k = stash_it->first;
     DVLOG(1) << "Deleting key " << k << " from " << stash_it.bucket_id() << "/"
diff --git a/src/server/db_slice.cc b/src/server/db_slice.cc
index 43bb0d7ee..0461c30f9 100644
--- a/src/server/db_slice.cc
+++ b/src/server/db_slice.cc
@@ -182,7 +182,7 @@ unsigned PrimeEvictionPolicy::Evict(const PrimeTable::HotspotBuckets& eb, PrimeTable* me) {
   // choose "randomly" a stash bucket to evict an item.
   auto bucket_it = eb.probes.by_type.stash_buckets[eb.key_hash % kNumStashBuckets];
   auto last_slot_it = bucket_it;
-  last_slot_it += (PrimeTable::kBucketWidth - 1);
+  last_slot_it += (PrimeTable::kSlotNum - 1);
   if (!last_slot_it.is_done()) {
     // don't evict sticky items
     if (last_slot_it->first.IsSticky()) {
@@ -1261,7 +1261,7 @@ void DbSlice::FreeMemWithEvictionStep(DbIndex db_ind, size_t increase_goal_bytes) {
   auto& db_table = db_arr_[db_ind];
   int32_t num_segments = db_table->prime.GetSegmentCount();
   int32_t num_buckets = PrimeTable::Segment_t::kTotalBuckets;
-  int32_t num_slots = PrimeTable::Segment_t::kNumSlots;
+  int32_t num_slots = PrimeTable::Segment_t::kSlotNum;
 
   size_t used_memory_after;
   size_t evicted = 0;
@@ -1349,8 +1349,8 @@ size_t DbSlice::EvictObjects(size_t memory_to_free, PrimeIterator it, DbTable* table) {
   PrimeTable::Segment_t* segment = table->prime.GetSegment(it.segment_id());
   DCHECK(segment);
 
-  constexpr unsigned kNumStashBuckets = PrimeTable::Segment_t::kStashBucketCnt;
-  constexpr unsigned kNumRegularBuckets = PrimeTable::Segment_t::kRegularBucketCnt;
+  constexpr unsigned kNumStashBuckets = PrimeTable::Segment_t::kStashBucketNum;
+  constexpr unsigned kNumRegularBuckets = PrimeTable::Segment_t::kBucketNum;
 
   PrimeTable::bucket_iterator it2(it);
   unsigned evicted = 0;
@@ -1370,7 +1370,7 @@ size_t DbSlice::EvictObjects(size_t memory_to_free, PrimeIterator it, DbTable* table) {
     if (bucket.IsEmpty())
       continue;
 
-    for (int slot_id = PrimeTable::Segment_t::kNumSlots - 1; slot_id >= 0; --slot_id) {
+    for (int slot_id = PrimeTable::Segment_t::kSlotNum - 1; slot_id >= 0; --slot_id) {
      if (!bucket.IsBusy(slot_id))
        continue;
@@ -1394,7 +1394,7 @@ size_t DbSlice::EvictObjects(size_t memory_to_free, PrimeIterator it, DbTable* table) {
   }
 
   // Try normal buckets now. We iterate from largest slot to smallest across the whole segment.
-  for (int slot_id = PrimeTable::Segment_t::kNumSlots - 1; !evict_succeeded && slot_id >= 0;
+  for (int slot_id = PrimeTable::Segment_t::kSlotNum - 1; !evict_succeeded && slot_id >= 0;
        --slot_id) {
     for (unsigned i = 0; i < kNumRegularBuckets; ++i) {
       unsigned bid = (it.bucket_id() + i) % kNumRegularBuckets;
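
The renamed constants define the shape of a segment: per the comment in dash_internal.h, a segment stores kSlotNum * (kBucketNum + kStashBucketNum) entries. A minimal sketch of a custom policy under the new naming follows. MySegmentPolicy is a hypothetical name for illustration; a real DashTable policy additionally supplies hooks such as HashFn and DestroyKey/DestroyValue that appear elsewhere in this patch.

// Hypothetical policy mirroring DefaultSegmentPolicy above (illustration only).
struct MySegmentPolicy {
  static constexpr unsigned kSlotNum = 14;        // slots per bucket
  static constexpr unsigned kBucketNum = 64;      // regular buckets per segment
  static constexpr unsigned kStashBucketNum = 4;  // stash buckets per segment
  static constexpr bool kUseVersion = false;      // no per-bucket versioning
};

// Segment capacity = kSlotNum * (kBucketNum + kStashBucketNum).
static_assert(MySegmentPolicy::kSlotNum *
                  (MySegmentPolicy::kBucketNum + MySegmentPolicy::kStashBucketNum) ==
              14 * 68);

// With the SegmentPolicy adapter deleted from dash.h, such a struct is now
// forwarded to the segment verbatim, e.g.:
//   using MySegment = detail::Segment<uint64_t, uint64_t, MySegmentPolicy>;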
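
The ForEachSlot change is an instance of a general C++ technique for collapsing a const/non-const overload pair: both overloads forward to one shared body whose `This` parameter deduces to either a const or a mutable pointer, so the loop exists once. Below is a self-contained sketch with illustrative names (not Dragonfly code); the shared body is static here, whereas the patch keeps it a const member template, but the deduction trick is the same.

#include <cstdint>
#include <iostream>
#include <utility>

struct Bucket {
  static constexpr unsigned kSlotNum = 4;
  uint32_t busy_mask = 0;  // bit j set => slot j occupied
  int key[kSlotNum] = {};

  // Shared body: `This` deduces to `const Bucket*` or `Bucket*`, so the
  // callback sees exactly the constness of the calling overload.
  template <typename This, typename Cb> static void ForEachSlotImpl(This self, Cb&& cb) {
    uint32_t mask = self->busy_mask;
    for (unsigned j = 0; j < kSlotNum; ++j) {
      if (mask & 1)
        cb(self, j);
      mask >>= 1;
    }
  }

  template <typename Cb> void ForEachSlot(Cb&& cb) const {
    ForEachSlotImpl(this, std::forward<Cb>(cb));
  }

  template <typename Cb> void ForEachSlot(Cb&& cb) {
    ForEachSlotImpl(this, std::forward<Cb>(cb));
  }
};

int main() {
  Bucket b;
  b.busy_mask = 0b0101;  // slots 0 and 2 busy
  b.key[0] = 10;
  b.key[2] = 30;

  // Mutable traversal: callback receives Bucket* and may write.
  b.ForEachSlot([](Bucket* bucket, unsigned slot) { bucket->key[slot] *= 2; });

  // Const traversal: callback receives const Bucket*.
  const Bucket& cref = b;
  cref.ForEachSlot(
      [](const Bucket* bucket, unsigned slot) { std::cout << bucket->key[slot] << '\n'; });
}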
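
CVCOnInsert and CVCOnBump use the other dedup tool applied in this patch: two near-identical branches are hoisted into a local lambda parameterized by whatever differed between them. A reduced, stand-alone sketch under assumed types (versions[] and empty[] stand in for the bucket_ state; all names here are illustrative, not Dragonfly code):

#include <cstdint>

// Records up to two bucket ids whose version is below the threshold, the way
// do_fun in CVCOnInsert folds the nid/after_next and bid/prev_bid branches.
unsigned CollectTouched(const uint64_t versions[], const bool empty[],
                        uint64_t ver_threshold, uint8_t bid, uint8_t nid,
                        uint8_t bid_res[2]) {
  unsigned cnt = 0;
  auto do_fun = [&](uint8_t b, uint8_t n) {
    if (versions[b] < ver_threshold)
      bid_res[cnt++] = b;
    if (!empty[n] && versions[n] < ver_threshold)
      bid_res[cnt++] = n;
  };
  do_fun(bid, nid);  // one call per branch replaces the copy-pasted if-chains
  return cnt;
}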