Mirror of https://github.com/dragonflydb/dragonfly.git, synced 2025-05-11 10:25:47 +02:00
server: disable single shard tx optimization on scheduling (#4647)
Signed-off-by: adi_holden <adi@dragonflydb.io>
parent 9e52438862
commit 86e12013f0

3 changed files with 16 additions and 3 deletions
.github/workflows/ci.yml (vendored): 2 changes
@@ -32,7 +32,7 @@ jobs:
           path: ~/.cache/pre-commit
           key: pre-commit|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
       - name: Run pre-commit checks
-        run:
+        run: |
           source venv/bin/activate
           pre-commit run --show-diff-on-failure --color=always --from-ref HEAD^ --to-ref HEAD
         shell: bash
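Note on the workflow change: without a block scalar indicator, YAML parses the indented lines under `run:` as one folded plain scalar, so the two shell commands would be joined into a single line. `run: |` makes the value a literal block scalar that preserves the newlines, so `source venv/bin/activate` and the `pre-commit run ...` invocation execute as separate commands.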
@@ -265,7 +265,7 @@ void BaseFamilyTest::ResetService() {
     LOG(ERROR) << "Deadlock detected!!!!";
     absl::SetFlag(&FLAGS_alsologtostderr, true);
     fb2::Mutex m;
-    shard_set->pool()->AwaitFiberOnAll([&m](unsigned index, ProactorBase* base) {
+    shard_set->pool()->AwaitFiberOnAll([&m, this](unsigned index, ProactorBase* base) {
       ThisFiber::SetName("Watchdog");
       std::unique_lock lk(m);
       LOG(ERROR) << "Proactor " << index << ":\n";
@@ -293,6 +293,14 @@ void BaseFamilyTest::ResetService() {
                ->trans_locks) {
           LOG(ERROR) << "Key " << k_v.first << " " << k_v.second;
         }
+
+        LOG(ERROR) << "Transaction for shard " << es->shard_id();
+        for (auto& conn : connections_) {
+          auto* context = conn.second->cmd_cntx();
+          if (context->transaction && context->transaction->IsActive(es->shard_id())) {
+            LOG(ERROR) << context->transaction->DebugId(es->shard_id());
+          }
+        }
       }
     });
   }
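The `[&m]` to `[&m, this]` capture change and the new loop above extend the deadlock watchdog: besides the per-shard lock table, it now also prints, for every test connection, the transactions still active on the shard being inspected. Below is a minimal, self-contained model of that pattern. It is not Dragonfly's actual API: std::thread and std::mutex stand in for the fb2 fiber/proactor machinery, and Transaction, Connection and DeadlockWatchdog are simplified, hypothetical stand-ins.

#include <cstdio>
#include <mutex>
#include <thread>
#include <utility>
#include <vector>

struct Transaction {
  unsigned shard_mask = 0;  // bit i set => active on shard i
  bool IsActive(unsigned shard_id) const { return shard_mask & (1u << shard_id); }
};

struct Connection {
  Transaction* transaction = nullptr;  // null when nothing is in flight
};

class DeadlockWatchdog {
 public:
  explicit DeadlockWatchdog(std::vector<Connection*> connections)
      : connections_(std::move(connections)) {}

  // Mirrors AwaitFiberOnAll([&m, this](...) {...}): one worker per shard, each
  // holding the shared mutex while it prints so reports don't interleave. The
  // lambda must capture `this` to reach the connections_ member.
  void DumpActiveTransactions(unsigned num_shards) {
    std::mutex m;
    std::vector<std::thread> workers;
    for (unsigned shard_id = 0; shard_id < num_shards; ++shard_id) {
      workers.emplace_back([&m, this, shard_id] {
        std::unique_lock lk(m);
        std::printf("Transactions for shard %u:\n", shard_id);
        for (Connection* conn : connections_) {
          if (conn->transaction && conn->transaction->IsActive(shard_id))
            std::printf("  txn active (mask=%#x)\n", conn->transaction->shard_mask);
        }
      });
    }
    for (auto& w : workers) w.join();
  }

 private:
  std::vector<Connection*> connections_;
};

int main() {
  Transaction t1{0b01}, t2{0b11};
  Connection c1{&t1}, c2{&t2}, c3{};
  DeadlockWatchdog wd({&c1, &c2, &c3});
  wd.DumpActiveTransactions(2);
}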
@@ -556,6 +556,7 @@ string Transaction::DebugId(std::optional<ShardId> sid) const {
     absl::StrAppend(&res, ":", multi_->cmd_seq_num);
   }
   absl::StrAppend(&res, " {id=", trans_id(this));
+  absl::StrAppend(&res, " {cb_ptr=", absl::StrFormat("%p", static_cast<const void*>(cb_ptr_)));
   if (sid) {
     absl::StrAppend(&res, ",mask[", *sid, "]=", int(shard_data_[SidToId(*sid)].local_mask),
                     ",is_armed=", DEBUG_IsArmedInShard(*sid),
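One detail of the cb_ptr line worth noting: printf-style "%p" formatting is specified for void pointers, hence the static_cast<const void*> around cb_ptr_. A tiny standard-library-only sketch of the same append-a-pointer pattern (the free function DebugId here is illustrative, not the repo's method):

#include <iostream>
#include <sstream>
#include <string>

// Illustrative stand-in for the DebugId pattern: build a human-readable
// debug string that includes an id and a callback pointer. Streaming a
// const void* prints its address, much like absl::StrFormat("%p", ...).
std::string DebugId(unsigned id, const void* cb_ptr) {
  std::ostringstream os;
  os << "{id=" << id << " cb_ptr=" << cb_ptr << "}";
  return os.str();
}

int main() {
  int dummy = 0;
  std::cout << DebugId(42, &dummy) << "\n";  // e.g. {id=42 cb_ptr=0x7ffc...}
}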
@@ -756,7 +757,11 @@ void Transaction::ScheduleInternal() {
   ScheduleContext schedule_ctx{this, optimistic_exec};

-  if (unique_shard_cnt_ == 1) {
+  // TODO: this optimization is disabled because issue #4648 revealed that it can
+  // lead to a transaction not being scheduled. To reproduce the bug, remove the
+  // `false` from the condition and run
+  // ./list_family_test --gtest_filter=*AwakeMulti on an Alpine machine.
+  if (false && unique_shard_cnt_ == 1) {
     // Single shard optimization. Note: we could apply the same optimization
     // to multi-shard transactions as well by creating a vector of ScheduleContext.
     schedule_queues[unique_shard_id_].queue.Push(&schedule_ctx);
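For background on what is being switched off: a transaction whose keys all map to a single shard can skip the general cross-shard scheduling protocol and simply push its ScheduleContext onto the owning shard's schedule queue. The sketch below models that hand-off with a mutex-guarded queue and std::thread; it assumes nothing about Dragonfly's real lock-free schedule_queues, and every name in it is a simplified stand-in.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>

struct ScheduleContext {
  int txn_id;  // stand-in for the transaction being scheduled
};

// Mutex-guarded stand-in for a per-shard schedule queue: producers Push,
// the shard's own thread Pops and performs the actual scheduling.
class ShardScheduleQueue {
 public:
  void Push(ScheduleContext* ctx) {
    {
      std::lock_guard lk(mu_);
      q_.push(ctx);
    }
    cv_.notify_one();
  }

  ScheduleContext* Pop() {
    std::unique_lock lk(mu_);
    cv_.wait(lk, [this] { return !q_.empty(); });
    ScheduleContext* ctx = q_.front();
    q_.pop();
    return ctx;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::queue<ScheduleContext*> q_;
};

int main() {
  ShardScheduleQueue queue;  // the real design keeps one queue per shard
  std::thread shard_thread([&] {
    ScheduleContext* ctx = queue.Pop();
    std::printf("shard scheduled txn %d\n", ctx->txn_id);
  });
  ScheduleContext ctx{7};
  queue.Push(&ctx);  // single-shard fast path: no cross-shard coordination
  shard_thread.join();
}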