Mirror of https://github.com/dragonflydb/dragonfly.git (synced 2025-05-10 18:05:44 +02:00)

commit 1a5eacca87
parent 84aa237ba7

chore: Pull helio with new future (#2944)

18 changed files with 67 additions and 66 deletions
helio (submodule, 1 addition, 1 deletion):

@@ -1 +1 @@
-Subproject commit 499a3f5736935ea11a0c531bb10c60dc6c101657
+Subproject commit 8095758cd5ed11bd40b76bbf20c852bde6180c95
@@ -44,7 +44,7 @@ class BlockingControllerTest : public Test {
 void BlockingControllerTest::SetUp() {
   pp_.reset(fb2::Pool::Epoll(kNumThreads));
   pp_->Run();
-  pp_->Await([](unsigned index, ProactorBase* p) {
+  pp_->AwaitBrief([](unsigned index, ProactorBase* p) {
     ServerState::Init(index, kNumThreads, nullptr);
     if (facade::tl_facade_stats == nullptr) {
       facade::tl_facade_stats = new facade::FacadeStats;
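Note: the recurring change in this commit is Await -> AwaitBrief for pool-wide
callbacks that never suspend; the callback now uniformly takes the thread index
and the proactor pointer. A minimal sketch of the new call shape (the `pool`
variable is illustrative; dragonfly reaches it via pp_ or shard_set->pool()):

    // Runs the lambda on every proactor thread; blocks until all finish.
    // The callback must stay brief: no locks, I/O, or fiber waits inside.
    pool.AwaitBrief([](unsigned thread_index, util::ProactorBase* pb) {
      // thread-local work only, e.g. initializing a thread-local stat
    });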
@@ -218,7 +218,7 @@ void ChannelStoreUpdater::Apply() {
   // structs. This means that any point on the other thread is safe to update the channel store.
   // Regardless of whether we need to replace, we dispatch to make sure all
   // queued SubscribeMaps in the freelist are no longer in use.
-  shard_set->pool()->Await([](unsigned idx, util::ProactorBase*) {
+  shard_set->pool()->AwaitBrief([](unsigned idx, util::ProactorBase*) {
     ServerState::tlocal()->UpdateChannelStore(
         ChannelStore::control_block.most_recent.load(memory_order_relaxed));
   });
@@ -658,7 +658,7 @@ static string_view StateToStr(MigrationState state) {
 static uint64_t GetKeyCount(const SlotRanges& slots) {
   atomic_uint64_t keys = 0;

-  shard_set->pool()->Await([&](auto*) {
+  shard_set->pool()->AwaitFiberOnAll([&](auto*) {
     EngineShard* shard = EngineShard::tlocal();
     if (shard == nullptr)
       return;
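Note: unlike the other call sites, GetKeyCount touches shard-local structures
that may block, so it moves to AwaitFiberOnAll rather than AwaitBrief. A hedged
sketch of the distinction (the counting body is illustrative):

    std::atomic_uint64_t keys{0};
    shard_set->pool()->AwaitFiberOnAll([&](auto*) {
      // runs in a fiber per thread, so preemption is allowed here
      EngineShard* shard = EngineShard::tlocal();
      if (shard == nullptr)
        return;
      // ... accumulate this shard's key count into `keys` ...
    });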
@@ -61,7 +61,7 @@ thread_local std::optional<LockTagOptions> locktag_lock_options;
 void TEST_InvalidateLockTagOptions() {
   locktag_lock_options = nullopt;  // For test main thread
   CHECK(shard_set != nullptr);
-  shard_set->pool()->Await(
+  shard_set->pool()->AwaitBrief(
       [](ShardId shard, ProactorBase* proactor) { locktag_lock_options = nullopt; });
 }

@@ -114,8 +114,8 @@ void ConnectionContext::ChangeMonitor(bool start) {
     my_monitors.Remove(conn());
   }
   // Tell other threads that about the change in the number of connection that we monitor
-  shard_set->pool()->Await(
-      [start](auto*) { ServerState::tlocal()->Monitors().NotifyChangeCount(start); });
+  shard_set->pool()->AwaitBrief(
+      [start](unsigned, auto*) { ServerState::tlocal()->Monitors().NotifyChangeCount(start); });
   EnableMonitoring(start);
 }

@@ -560,9 +560,8 @@ void DebugCmd::Load(string_view filename) {
     path = dir_path;
   }

-  auto fut_ec = sf_.Load(path.generic_string());
-  if (fut_ec.valid()) {
-    GenericError ec = fut_ec.get();
+  if (auto fut_ec = sf_.Load(path.generic_string()); fut_ec) {
+    GenericError ec = fut_ec->Get();
     if (ec) {
       string msg = ec.Format();
       LOG(WARNING) << "Could not load file " << msg;
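Note: ServerFamily::Load now returns std::optional<fb2::Future<GenericError>>
instead of a possibly-invalid future, so callers test the optional and call
Get() through it. The new caller idiom, condensed from the hunk above:

    // An empty optional means no load was started.
    if (auto fut_ec = sf_.Load(path.generic_string()); fut_ec) {
      GenericError ec = fut_ec->Get();  // Get() replaces std::future::get()
      if (ec)
        LOG(WARNING) << "Could not load file " << ec.Format();
    }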
@@ -400,7 +400,7 @@ void DflyCmd::TakeOver(CmdArgList args, ConnectionContext* cntx) {
   // We need to await for all dispatches to finish: Otherwise a transaction might be scheduled
   // after this function exits but before the actual shutdown.
   facade::DispatchTracker tracker{sf_->GetNonPriviligedListeners(), cntx->conn()};
-  shard_set->pool()->Await([&](unsigned index, auto* pb) {
+  shard_set->pool()->AwaitBrief([&](unsigned index, auto* pb) {
     sf_->CancelBlockingOnThread();
     tracker.TrackOnThread();
   });
@@ -855,7 +855,7 @@ void Service::Init(util::AcceptServer* acceptor, std::vector<facade::Listener*>
   }

   // Must initialize before the shard_set because EngineShard::Init references ServerState.
-  pp_.Await([&](uint32_t index, ProactorBase* pb) {
+  pp_.AwaitBrief([&](uint32_t index, ProactorBase* pb) {
     tl_facade_stats = new FacadeStats;
     ServerState::Init(index, shard_num, &user_registry_);
   });
@@ -873,7 +873,7 @@ void Service::Init(util::AcceptServer* acceptor, std::vector<facade::Listener*>
   server_family_.Init(acceptor, std::move(listeners));

   ChannelStore* cs = new ChannelStore{};
-  pp_.Await(
+  pp_.AwaitBrief(
       [cs](uint32_t index, ProactorBase* pb) { ServerState::tlocal()->UpdateChannelStore(cs); });
 }

@@ -138,7 +138,7 @@ void MemoryCmd::Run(CmdArgList args) {
   }

   if (sub_cmd == "DECOMMIT") {
-    shard_set->pool()->Await([](auto* pb) {
+    shard_set->pool()->AwaitBrief([](unsigned, auto* pb) {
       ServerState::tlocal()->DecommitMemory(ServerState::kDataHeap | ServerState::kBackingHeap |
                                             ServerState::kGlibcmalloc);
     });
@@ -196,7 +196,7 @@ ConnectionMemoryUsage GetConnectionMemoryUsage(ServerFamily* server) {
     });
   }

-  shard_set->pool()->Await([&](unsigned index, auto*) {
+  shard_set->pool()->AwaitBrief([&](unsigned index, auto*) {
     mems[index].pipelined_bytes += tl_facade_stats->conn_stats.pipeline_cmd_cache_bytes;
     mems[index].pipelined_bytes += tl_facade_stats->conn_stats.dispatch_queue_bytes;
   });
@@ -372,7 +372,7 @@ void MemoryCmd::Track(CmdArgList args) {
   }

   atomic_bool error{false};
-  shard_set->pool()->Await([&](unsigned index, auto*) {
+  shard_set->pool()->AwaitBrief([&](unsigned index, auto*) {
     if (!AllocationTracker::Get().Add(tracking_info)) {
       error.store(true);
     }
@@ -392,7 +392,7 @@ void MemoryCmd::Track(CmdArgList args) {
   }

   atomic_bool error{false};
-  shard_set->pool()->Await([&, lo = lower_bound, hi = upper_bound](unsigned index, auto*) {
+  shard_set->pool()->AwaitBrief([&, lo = lower_bound, hi = upper_bound](unsigned index, auto*) {
     if (!AllocationTracker::Get().Remove(lo, hi)) {
       error.store(true);
     }
@@ -406,7 +406,7 @@ void MemoryCmd::Track(CmdArgList args) {
   }

   if (sub_cmd == "CLEAR") {
-    shard_set->pool()->Await([&](unsigned index, auto*) { AllocationTracker::Get().Clear(); });
+    shard_set->pool()->AwaitBrief([&](unsigned index, auto*) { AllocationTracker::Get().Clear(); });
     return cntx_->SendOk();
   }

@@ -433,7 +433,7 @@ void MemoryCmd::Track(CmdArgList args) {
   }

   atomic_bool found{false};
-  shard_set->pool()->Await([&](unsigned index, auto*) {
+  shard_set->pool()->AwaitBrief([&](unsigned index, auto*) {
     if (mi_heap_check_owned(mi_heap_get_backing(), (void*)ptr)) {
       found.store(true);
     }
@@ -336,7 +336,7 @@ vector<pair<string, ScriptMgr::ScriptData>> ScriptMgr::GetAll() const {
 }

 void ScriptMgr::UpdateScriptCaches(ScriptKey sha, ScriptParams params) const {
-  shard_set->pool()->Await([&sha, &params](auto index, auto* pb) {
+  shard_set->pool()->AwaitBrief([&sha, &params](auto index, auto* pb) {
     ServerState::tlocal()->SetScriptParams(sha, params);
   });
 }
@@ -277,8 +277,8 @@ template <typename T> void UpdateMax(T* maxv, T current) {
 }

 void SetMasterFlagOnAllThreads(bool is_master) {
-  auto cb = [is_master](auto* pb) { ServerState::tlocal()->is_master = is_master; };
-  shard_set->pool()->Await(cb);
+  auto cb = [is_master](unsigned, auto*) { ServerState::tlocal()->is_master = is_master; };
+  shard_set->pool()->AwaitBrief(cb);
 }

 std::optional<cron::cronexpr> InferSnapshotCronExpr() {
@@ -616,7 +616,7 @@ std::optional<fb2::Fiber> Pause(std::vector<facade::Listener*> listeners, facade
   // command that did not pause on the new state yet we will pause after waking up.
   DispatchTracker tracker{std::move(listeners), conn, true /* ignore paused commands */,
                           true /*ignore blocking*/};
-  shard_set->pool()->Await([&tracker, pause_state](util::ProactorBase* pb) {
+  shard_set->pool()->AwaitBrief([&tracker, pause_state](unsigned, util::ProactorBase*) {
     // Commands don't suspend before checking the pause state, so
     // it's impossible to deadlock on waiting for a command that will be paused.
     tracker.TrackOnThread();
@@ -628,7 +628,7 @@ std::optional<fb2::Fiber> Pause(std::vector<facade::Listener*> listeners, facade
   const absl::Duration kDispatchTimeout = absl::Seconds(1);
   if (!tracker.Wait(kDispatchTimeout)) {
     LOG(WARNING) << "Couldn't wait for commands to finish dispatching in " << kDispatchTimeout;
-    shard_set->pool()->Await([pause_state](util::ProactorBase* pb) {
+    shard_set->pool()->AwaitBrief([pause_state](unsigned, util::ProactorBase*) {
       ServerState::tlocal()->SetPauseState(pause_state, false);
     });
     return std::nullopt;
@@ -821,8 +821,9 @@ void ServerFamily::JoinSnapshotSchedule() {
 void ServerFamily::Shutdown() {
   VLOG(1) << "ServerFamily::Shutdown";

-  if (load_result_.valid())
-    load_result_.wait();
+  if (load_result_) {
+    std::exchange(load_result_, std::nullopt)->Get();
+  }

   JoinSnapshotSchedule();

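Note: since load_result_ is now an optional future, Shutdown drains it exactly
once: std::exchange empties the member before Get() blocks, so nothing can wait
on the same future twice. The idiom in isolation:

    if (load_result_) {
      // take the future out of the member, then block for completion
      std::exchange(load_result_, std::nullopt)->Get();
    }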
@@ -864,14 +865,14 @@ struct AggregateLoadResult {
 // Load starts as many fibers as there are files to load each one separately.
 // It starts one more fiber that waits for all load fibers to finish and returns the first
 // error (if any occured) with a future.
-fb2::Future<GenericError> ServerFamily::Load(const std::string& load_path) {
+std::optional<fb2::Future<GenericError>> ServerFamily::Load(const std::string& load_path) {
   auto paths_result = snapshot_storage_->LoadPaths(load_path);
   if (!paths_result) {
     LOG(ERROR) << "Failed to load snapshot: " << paths_result.error().Format();

-    fb2::Promise<GenericError> ec_promise;
-    ec_promise.set_value(paths_result.error());
-    return ec_promise.get_future();
+    fb2::Future<GenericError> future;
+    future.Resolve(paths_result.error());
+    return future;
   }

   std::vector<std::string> paths = *paths_result;
@@ -913,12 +914,11 @@ fb2::Future<GenericError> ServerFamily::Load(const std::string& load_path) {
     load_fibers.push_back(proactor->LaunchFiber(std::move(load_fiber)));
   }

-  fb2::Promise<GenericError> ec_promise;
-  fb2::Future<GenericError> ec_future = ec_promise.get_future();
+  fb2::Future<GenericError> future;

   // Run fiber that empties the channel and sets ec_promise.
   auto load_join_fiber = [this, aggregated_result, load_fibers = std::move(load_fibers),
-                          ec_promise = std::move(ec_promise)]() mutable {
+                          future]() mutable {
     for (auto& fiber : load_fibers) {
       fiber.Join();
     }
@@ -932,11 +932,11 @@ fb2::Future<GenericError> ServerFamily::Load(const std::string& load_path) {

     LOG(INFO) << "Load finished, num keys read: " << aggregated_result->keys_read;
     service_.SwitchState(GlobalState::LOADING, GlobalState::ACTIVE);
-    ec_promise.set_value(*(aggregated_result->first_error));
+    future.Resolve(*(aggregated_result->first_error));
   };
   pool.GetNextProactor()->Dispatch(std::move(load_join_fiber));

-  return ec_future;
+  return future;
 }

 void ServerFamily::SnapshotScheduling() {
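Note: the helio update replaces the fb2::Promise/fb2::Future pair with a single
fb2::Future that owns its shared state: copies share that state, Resolve()
fulfills it on the producer side, and Get() blocks the consumer. A minimal
sketch assuming only what these hunks show (copy capture, Resolve, Get):

    fb2::Future<GenericError> future;
    auto join_fiber = [future]() mutable {  // the copy shares the state
      // ... join the load fibers ...
      future.Resolve(GenericError{});       // wakes anyone blocked in Get()
    };
    pool.GetNextProactor()->Dispatch(std::move(join_fiber));
    // meanwhile the caller may block:
    GenericError ec = future.Get();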
@@ -1784,7 +1784,8 @@ static void MergeDbSliceStats(const DbSlice::Stats& src, Metrics* dest) {
 }

 void ServerFamily::ResetStat() {
-  shard_set->pool()->Await([registry = service_.mutable_registry(), this](unsigned index, auto*) {
+  shard_set->pool()->AwaitBrief(
+      [registry = service_.mutable_registry(), this](unsigned index, auto*) {
         registry->ResetCallStats(index);
         SinkReplyBuilder::ResetThreadLocalStats();
         auto& stats = tl_facade_stats->conn_stats;
@@ -185,7 +185,7 @@ class ServerFamily {

   // Load snapshot from file (.rdb file or summary.dfs file) and return
   // future with error_code.
-  util::fb2::Future<GenericError> Load(const std::string& file_name);
+  std::optional<util::fb2::Future<GenericError>> Load(const std::string& file_name);

   bool TEST_IsSaving() const;

@@ -288,7 +288,7 @@ class ServerFamily {
   void StopAllClusterReplicas();

   util::fb2::Fiber snapshot_schedule_fb_;
-  util::fb2::Future<GenericError> load_result_;
+  std::optional<util::fb2::Future<GenericError>> load_result_;

   uint32_t stats_caching_task_ = 0;
   Service& service_;
@@ -505,7 +505,7 @@ string StringValue::Get() && {
   auto prev = exchange(v_, monostate{});
   if (holds_alternative<string>(prev))
     return std::move(std::get<string>(prev));
-  return std::get<util::fb2::Future<std::string>>(prev).get();
+  return std::get<util::fb2::Future<std::string>>(prev).Get();
 }

 bool StringValue::IsEmpty() const {
@@ -304,8 +304,9 @@ unsigned BaseFamilyTest::NumLocked() {
 }

 void BaseFamilyTest::ClearMetrics() {
-  shard_set->pool()->Await(
-      [](auto*) { ServerState::tlocal()->stats = ServerState::Stats(shard_set->size()); });
+  shard_set->pool()->AwaitBrief([](unsigned, auto*) {
+    ServerState::tlocal()->stats = ServerState::Stats(shard_set->size());
+  });
 }

 void BaseFamilyTest::WaitUntilLocked(DbIndex db_index, string_view key, double timeout) {
@@ -35,7 +35,7 @@ void OpManager::Close() {

 util::fb2::Future<std::string> OpManager::Read(EntryId id, DiskSegment segment) {
   // Fill pages for prepared read as it has no penalty and potentially covers more small segments
-  return PrepareRead(segment.FillPages()).ForId(id, segment).futures.emplace_back().get_future();
+  return PrepareRead(segment.FillPages()).ForId(id, segment).futures.emplace_back();
 }

 void OpManager::Delete(EntryId id) {
@@ -98,7 +98,7 @@ void OpManager::ProcessRead(size_t offset, std::string_view value) {
   auto key_value = value.substr(ko.segment.offset - info->segment.offset, ko.segment.length);

   for (auto& fut : ko.futures)
-    fut.set_value(std::string{key_value});
+    fut.Resolve(std::string{key_value});

   ReportFetched(Borrowed(ko.id), key_value, ko.segment);
 }
@@ -58,7 +58,7 @@ class OpManager {

   OwnedEntryId id;
   DiskSegment segment;
-  absl::InlinedVector<util::fb2::Promise<std::string>, 1> futures;
+  absl::InlinedVector<util::fb2::Future<std::string>, 1> futures;
 };

 // Describes an ongoing read operation for a fixed segment
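Note: with the state inside the future, OpManager stores the futures themselves
and Read() returns a copy of the freshly emplaced one; ProcessRead later calls
Resolve() on each stored copy. A sketch of the pairing (Subscribe/Complete are
illustrative names; only the container comes from the diff):

    absl::InlinedVector<util::fb2::Future<std::string>, 1> futures;

    util::fb2::Future<std::string> Subscribe() {
      return futures.emplace_back();  // caller and entry share one state
    }

    void Complete(std::string value) {
      for (auto& fut : futures)
        fut.Resolve(value);           // unblocks every waiting reader
    }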
@@ -58,7 +58,7 @@ TEST_F(OpManagerTest, SimpleStashesWithReads) {
   for (unsigned i = 0; i < 100; i++) {
     EXPECT_GE(stashed_[i].offset, i > 0);
     EXPECT_EQ(stashed_[i].length, 10 + (i > 9));
-    EXPECT_EQ(Read(i, stashed_[i]).get(), absl::StrCat("VALUE", i, "real"));
+    EXPECT_EQ(Read(i, stashed_[i]).Get(), absl::StrCat("VALUE", i, "real"));
     EXPECT_EQ(fetched_.extract(i).mapped(), absl::StrCat("VALUE", i, "real"));
   }

@@ -80,7 +80,7 @@ TEST_F(OpManagerTest, DeleteAfterReads) {
   Delete(stashed_[0u]);

   for (auto& fut : reads)
-    EXPECT_EQ(fut.get(), "DATA");
+    EXPECT_EQ(fut.Get(), "DATA");

   Close();
 });
@@ -111,7 +111,7 @@ TEST_F(OpManagerTest, ReadSamePageDifferentOffsets) {
     futures.emplace_back(Read(absl::StrCat("k", i), number_segments[i]));

   for (size_t i = 0; i < 100; i++)
-    EXPECT_EQ(futures[i].get(), std::to_string(i));
+    EXPECT_EQ(futures[i].Get(), std::to_string(i));

   Close();
 });