chore: introduce a secondary TaskQueue for shards (#3508)

Also allow the TaskQueue to support multiple consumer fibers.

Signed-off-by: Roman Gershman <roman@dragonflydb.io>
Author: Roman Gershman, 2024-08-14 10:53:29 +03:00 (committed by GitHub)
parent 5cfe4154cc
commit fa0913e662
7 changed files with 62 additions and 27 deletions

helio (submodule, 2 changed lines)

@@ -1 +1 @@
-Subproject commit fb46b481e3eb82ecbc7bbf1d22b2fda7f5fac409
+Subproject commit f9e28c79d3f9234ab0f094a7101cad8b5847c184

src/core/task_queue.cc

@@ -4,8 +4,35 @@
 #include "core/task_queue.h"
 
+#include <absl/strings/str_cat.h>
+
+#include "base/logging.h"
+
+using namespace std;
+
 namespace dfly {
 
 __thread unsigned TaskQueue::blocked_submitters_ = 0;
 
+TaskQueue::TaskQueue(unsigned queue_size, unsigned start_size, unsigned pool_max_size)
+    : queue_(queue_size), consumer_fibers_(start_size), pool_max_size_(pool_max_size) {
+  CHECK_GT(start_size, 0u);
+  CHECK_LE(start_size, pool_max_size);
+}
+
+void TaskQueue::Start(std::string_view base_name) {
+  for (size_t i = 0; i < consumer_fibers_.size(); ++i) {
+    auto& fb = consumer_fibers_[i];
+    CHECK(!fb.IsJoinable());
+    string name = absl::StrCat(base_name, "/", i);
+    fb = util::fb2::Fiber(name, [this] { queue_.Run(); });
+  }
+}
+
+void TaskQueue::Shutdown() {
+  queue_.Shutdown();
+  for (auto& fb : consumer_fibers_)
+    fb.JoinIfNeeded();
+}
+
 } // namespace dfly
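Note on semantics: Start() above fans several consumer fibers out over a single util::fb2::FiberQueue, each blocking inside queue_.Run() until Shutdown() is called. With more than one consumer, two tasks from the same queue can be in flight on different fibers at once, so the strict task-by-task serialization of the single-fiber setup only holds for queues started with one consumer. A minimal sketch of the same pattern outside the TaskQueue wrapper (the header path and task body are assumptions, not taken from this commit):

    // Sketch: N fibers draining one helio FiberQueue, mirroring Start()/Shutdown() above.
    #include <vector>
    #include "util/fibers/fiberqueue.h"  // assumed location of util::fb2::FiberQueue

    void RunPoolSketch() {
      util::fb2::FiberQueue queue(128);
      std::vector<util::fb2::Fiber> fibers(2);
      for (size_t i = 0; i < fibers.size(); ++i) {
        // Every consumer runs the same dequeue loop; tasks go to whichever
        // fiber is free, so they may interleave at fiber preemption points.
        fibers[i] = util::fb2::Fiber("consumer", [&queue] { queue.Run(); });
      }
      queue.Add([] { /* enqueued task */ });
      queue.Shutdown();  // tells Run() to drain remaining tasks and return
      for (auto& fb : fibers)
        fb.JoinIfNeeded();
    }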

src/core/task_queue.h

@@ -15,8 +15,8 @@ namespace dfly {
  */
 class TaskQueue {
  public:
-  explicit TaskQueue(unsigned queue_size = 128) : queue_(queue_size) {
-  }
+  // TODO: add a mechanism to moderate pool size. Currently it is static, fixed at pool_start_size.
+  TaskQueue(unsigned queue_size, unsigned pool_start_size, unsigned pool_max_size);
 
   template <typename F> bool TryAdd(F&& f) {
     return queue_.TryAdd(std::forward<F>(f));
@@ -51,18 +51,13 @@ class TaskQueue {
    * @brief Start running consumer loop in the caller thread by spawning fibers.
    *        Returns immediately.
    */
-  void Start(std::string_view base_name) {
-    consumer_fiber_ = util::fb2::Fiber(base_name, [this] { queue_.Run(); });
-  }
+  void Start(std::string_view base_name);
 
   /**
    * @brief Notifies Run() function to empty the queue and to exit and waits for the consumer
    *        fiber to finish.
    */
-  void Shutdown() {
-    queue_.Shutdown();
-    consumer_fiber_.JoinIfNeeded();
-  }
+  void Shutdown();
 
   static unsigned blocked_submitters() {
     return blocked_submitters_;
@@ -70,7 +65,9 @@ class TaskQueue {
  private:
   util::fb2::FiberQueue queue_;
-  util::fb2::Fiber consumer_fiber_;
+  std::vector<util::fb2::Fiber> consumer_fibers_;
+  unsigned pool_max_size_;
 
   static __thread unsigned blocked_submitters_;
 };
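The reworked declaration changes the construction contract: callers now size the queue and the consumer pool explicitly, and there is no longer a defaulted constructor. A usage sketch (the sizes and pool name are arbitrary placeholders):

    // Sketch: driving the new TaskQueue API end to end.
    dfly::TaskQueue tq(/*queue_size=*/128, /*pool_start_size=*/2, /*pool_max_size=*/2);
    tq.Start("my_pool");  // spawns consumer fibers named "my_pool/0" and "my_pool/1"
    tq.Add([] { /* runs on one of the consumer fibers */ });
    if (!tq.TryAdd([] { /* non-blocking enqueue */ })) {
      // TryAdd returns false instead of blocking when the queue is full.
    }
    tq.Shutdown();  // drains pending tasks and joins all consumer fibers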

src/server/engine_shard.cc

@@ -371,7 +371,8 @@ uint32_t EngineShard::DefragTask() {
 }
 
 EngineShard::EngineShard(util::ProactorBase* pb, mi_heap_t* heap)
-    : queue_(kQueueLen),
+    : queue_(kQueueLen, 1, 1),
+      queue2_(kQueueLen / 2, 2, 2),
       txq_([](const Transaction* t) { return t->txid(); }),
       mi_resource_(heap),
       shard_id_(pb->GetPoolIndex()) {
@@ -379,6 +380,7 @@ EngineShard::EngineShard(util::ProactorBase* pb, mi_heap_t* heap)
   defrag_task_ = pb->AddOnIdleTask([this]() { return DefragTask(); });
 
   queue_.Start(absl::StrCat("shard_queue_", shard_id()));
+  queue2_.Start(absl::StrCat("l2_queue_", shard_id()));
 }
 
 EngineShard::~EngineShard() {
@@ -389,7 +391,7 @@ void EngineShard::Shutdown() {
   DVLOG(1) << "EngineShard::Shutdown";
 
   queue_.Shutdown();
+  queue2_.Shutdown();
 
   DCHECK(!fiber_periodic_.IsJoinable());
 
   ProactorBase::me()->RemoveOnIdleTask(defrag_task_);

src/server/engine_shard.h

@@ -68,6 +68,10 @@ class EngineShard {
     return &queue_;
   }
 
+  TaskQueue* GetSecondaryQueue() {
+    return &queue2_;
+  }
+
   // Processes TxQueue, blocked transactions or any other execution state related to that
   // shard. Tries executing the passed transaction if possible (does not guarantee though).
   void PollExecution(const char* context, Transaction* trans);
@@ -223,7 +227,7 @@ class EngineShard {
   // return true if we did not complete the shard scan
   bool DoDefrag();
 
-  TaskQueue queue_;
+  TaskQueue queue_, queue2_;
 
   TxQueue txq_;
   MiMemoryResource mi_resource_;
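With the accessor in place, shard-local code can push work that should not occupy the primary shard queue onto the secondary one. A sketch of a possible call site (the callback body is illustrative, not from this commit):

    // Sketch: offloading background work to the shard's secondary (L2) queue.
    EngineShard* shard = EngineShard::tlocal();
    shard->GetSecondaryQueue()->Add([] {
      // Runs on one of the two "l2_queue_<sid>" consumer fibers started in the
      // constructor above, without serializing against the primary shard_queue_.
    });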

src/server/engine_shard_set.cc

@@ -103,11 +103,11 @@ EngineShardSet* shard_set = nullptr;
 
 void EngineShardSet::Init(uint32_t sz, std::function<void()> shard_handler) {
   CHECK_EQ(0u, size());
-  shard_queue_.resize(sz);
+  shards_.reset(new EngineShard*[sz]);
+  size_ = sz;
 
   size_t max_shard_file_size = GetTieredFileLimit(sz);
-  pp_->AwaitFiberOnAll([&](uint32_t index, ProactorBase* pb) {
-    if (index < shard_queue_.size()) {
+  pp_->AwaitFiberOnAll([this](uint32_t index, ProactorBase* pb) {
+    if (index < size_) {
       InitThreadLocal(pb);
     }
   });
@@ -115,7 +115,7 @@ void EngineShardSet::Init(uint32_t sz, std::function<void()> shard_handler) {
   namespaces.Init();
 
   pp_->AwaitFiberOnAll([&](uint32_t index, ProactorBase* pb) {
-    if (index < shard_queue_.size()) {
+    if (index < size_) {
       auto* shard = EngineShard::tlocal();
       shard->InitTieredStorage(pb, max_shard_file_size);
@@ -144,7 +144,7 @@ void EngineShardSet::Shutdown() {
 void EngineShardSet::InitThreadLocal(ProactorBase* pb) {
   EngineShard::InitThreadLocal(pb);
   EngineShard* es = EngineShard::tlocal();
-  shard_queue_[es->shard_id()] = es->GetFiberQueue();
+  shards_[es->shard_id()] = es;
 }
 
 void EngineShardSet::TEST_EnableCacheMode() {

src/server/engine_shard_set.h

@@ -45,7 +45,7 @@ class EngineShardSet {
   }
 
   uint32_t size() const {
-    return uint32_t(shard_queue_.size());
+    return size_;
   }
 
   util::ProactorPool* pool() {
@@ -63,13 +63,17 @@ class EngineShardSet {
 
   // Uses a shard queue to dispatch. Callback runs in a dedicated fiber.
   template <typename F> auto Await(ShardId sid, F&& f) {
-    return shard_queue_[sid]->Await(std::forward<F>(f));
+    return shards_[sid]->GetFiberQueue()->Await(std::forward<F>(f));
   }
 
   // Uses a shard queue to dispatch. Callback runs in a dedicated fiber.
   template <typename F> auto Add(ShardId sid, F&& f) {
-    assert(sid < shard_queue_.size());
-    return shard_queue_[sid]->Add(std::forward<F>(f));
+    assert(sid < size_);
+    return shards_[sid]->GetFiberQueue()->Add(std::forward<F>(f));
+  }
+
+  template <typename F> auto AddL2(ShardId sid, F&& f) {
+    return shards_[sid]->GetSecondaryQueue()->Add(std::forward<F>(f));
   }
 
   // Runs a brief function on all shards. Waits for it to complete.
@@ -94,8 +98,8 @@ class EngineShardSet {
   // The functions running inside the shard queue run atomically (sequentially)
   // with respect each other on the same shard.
   template <typename U> void AwaitRunningOnShardQueue(U&& func) {
-    util::fb2::BlockingCounter bc(shard_queue_.size());
-    for (size_t i = 0; i < shard_queue_.size(); ++i) {
+    util::fb2::BlockingCounter bc(size_);
+    for (size_t i = 0; i < size_; ++i) {
       Add(i, [&func, bc]() mutable {
         func(EngineShard::tlocal());
         bc->Dec();
@@ -112,7 +116,8 @@ class EngineShardSet {
   void InitThreadLocal(util::ProactorBase* pb);
 
   util::ProactorPool* pp_;
-  std::vector<TaskQueue*> shard_queue_;
+  std::unique_ptr<EngineShard*[]> shards_;
+  uint32_t size_ = 0;
 };
 
 template <typename U, typename P>
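From outside a shard, the same secondary queue is reachable through the new AddL2() helper. A usage sketch against the global shard_set (the shard id and callback are placeholders):

    // Sketch: dispatching a callback to shard 0's secondary queue.
    shard_set->AddL2(0, [] {
      EngineShard* shard = EngineShard::tlocal();  // executes on shard 0's thread
      // ... low-priority work that must not block the primary dispatch queue ...
    });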