Mirror of https://github.com/dragonflydb/dragonfly.git, synced 2025-05-10 18:05:44 +02:00
fix: Remove incremental locking (#1094)
commit b345604226
parent 023abfa46e
5 changed files with 15 additions and 118 deletions
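
The commit has no description, but the hunks below tell the story: the LOCK_INCREMENTAL multi mode, the MultiData::keys buffer with its AddLocks()/IsIncrLocks() helpers, and the per-hop lock acquisition in RunInShard() are all deleted, leaving only the lock-ahead scheme in which every key is counted in multi_->lock_counts before the transaction is scheduled. Below is a minimal standalone sketch of the two bookkeeping styles; LockCounts and MultiState are made-up stand-ins, not Dragonfly's actual types.

    // Illustrative only: these types are stand-ins, not Dragonfly's API.
    #include <array>
    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    enum Mode { SHARED = 0, EXCLUSIVE = 1 };
    using LockCounts = std::unordered_map<std::string, std::array<unsigned, 2>>;

    struct MultiState {
      LockCounts lock_counts;            // per-key, per-mode counters (cf. multi_->lock_counts)
      std::vector<std::string> pending;  // keys seen but not yet counted (incremental style only)
    };

    // Lock-ahead: all keys are known before scheduling, so they are counted once, up front.
    void RecordLocksAhead(MultiState& m, const std::vector<std::string>& keys, Mode mode) {
      for (const auto& k : keys)
        m.lock_counts[k][mode]++;
    }

    // Incremental style (removed by this commit): each hop only appends the keys it saw,
    // and a later pass turns them into counts, much like the deleted MultiData::AddLocks().
    void RecordKeysForHop(MultiState& m, const std::vector<std::string>& keys) {
      m.pending.insert(m.pending.end(), keys.begin(), keys.end());
    }

    void FlushPendingLocks(MultiState& m, Mode mode) {
      for (auto& k : m.pending)
        m.lock_counts[std::move(k)][mode]++;
      m.pending.clear();
    }

    int main() {
      MultiState m;
      RecordLocksAhead(m, {"a", "b"}, EXCLUSIVE);  // the surviving path: count everything up front
      RecordKeysForHop(m, {"c"});                  // the removed path: collect per hop...
      FlushPendingLocks(m, EXCLUSIVE);             // ...then convert to counts at schedule time
      return m.lock_counts.size() == 3 ? 0 : 1;
    }
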
@@ -130,16 +130,11 @@ void Transaction::InitShardData(absl::Span<const PerShardCache> shard_index, siz
     sd.arg_count = si.args.size();
     sd.arg_start = args_.size();
 
-    if (multi_) {
-      // Multi transactions can re-intitialize on different shards, so clear ACTIVE flag.
+    // Multi transactions can re-intitialize on different shards, so clear ACTIVE flag.
+    if (multi_)
       sd.local_mask &= ~ACTIVE;
-
-      // If we increase locks, clear KEYLOCK_ACQUIRED to track new locks.
-      if (multi_->IsIncrLocks())
-        sd.local_mask &= ~KEYLOCK_ACQUIRED;
-    }
 
-    if (sd.arg_count == 0 && !si.requested_active)
+    if (sd.arg_count == 0)
       continue;
 
     sd.local_mask |= ACTIVE;
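
sd.local_mask in the hunk above is a plain bit mask: ACTIVE and KEYLOCK_ACQUIRED are per-shard flags that get set, tested and cleared individually. A tiny sketch of that idiom follows; the flag values are invented for illustration and are not Dragonfly's real constants.

    #include <cstdint>
    #include <cstdio>

    // Invented values; the real ACTIVE / KEYLOCK_ACQUIRED constants live in Dragonfly's
    // transaction code and may differ.
    constexpr uint32_t ACTIVE = 1 << 0;
    constexpr uint32_t KEYLOCK_ACQUIRED = 1 << 1;

    int main() {
      uint32_t local_mask = ACTIVE | KEYLOCK_ACQUIRED;

      local_mask &= ~ACTIVE;                        // clear one flag, keep the rest
      local_mask |= ACTIVE;                         // set it again
      bool locked = local_mask & KEYLOCK_ACQUIRED;  // test a flag

      std::printf("locked=%d mask=0x%x\n", static_cast<int>(locked),
                  static_cast<unsigned>(local_mask));
    }
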
@@ -163,9 +158,7 @@ void Transaction::InitMultiData(KeyIndex key_index) {
   if (multi_->mode == NON_ATOMIC)
     return;
 
   // TODO: determine correct locking mode for transactions, scripts and regular commands.
   IntentLock::Mode mode = Mode();
-  multi_->keys.clear();
-
   auto& tmp_uniques = tmp_space.uniq_keys;
   tmp_uniques.clear();
@@ -174,16 +167,12 @@ void Transaction::InitMultiData(KeyIndex key_index) {
     if (auto [_, inserted] = tmp_uniques.insert(key); !inserted)
       return;
 
-    if (multi_->IsIncrLocks()) {
-      multi_->keys.emplace_back(key);
-    } else {
-      multi_->lock_counts[key][mode]++;
-    }
+    multi_->lock_counts[key][mode]++;
   };
 
   // With EVAL, we call this function for EVAL itself as well as for each command
   // for eval. currently, we lock everything only during the eval call.
-  if (multi_->IsIncrLocks() || !multi_->locks_recorded) {
+  if (!multi_->locks_recorded) {
     for (size_t i = key_index.start; i < key_index.end; i += key_index.step)
       lock_key(ArgS(full_args_, i));
     if (key_index.bonus)
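
The surviving lock_key lambda above walks the command's key positions (start, end, step), skips keys it has already seen via a scratch set, and bumps a per-key counter for the lock mode. A rough standalone equivalent, with simplified stand-ins for full_args_, tmp_space.uniq_keys and multi_->lock_counts:

    #include <array>
    #include <cstddef>
    #include <string>
    #include <string_view>
    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    int main() {
      // Simplified stand-ins for the transaction's members.
      std::vector<std::string> full_args = {"MSET", "k1", "v1", "k2", "v2", "k1", "v3"};
      std::unordered_set<std::string_view> uniq_keys;
      std::unordered_map<std::string_view, std::array<unsigned, 2>> lock_counts;
      const std::size_t mode = 1;  // say, the exclusive mode

      auto lock_key = [&](std::string_view key) {
        if (auto [_, inserted] = uniq_keys.insert(key); !inserted)
          return;  // key already counted for this command
        lock_counts[key][mode]++;
      };

      // For MSET the keys sit at positions 1, 3, 5, ... => start=1, end=size, step=2.
      for (std::size_t i = 1; i < full_args.size(); i += 2)
        lock_key(full_args[i]);

      // lock_counts now holds one exclusive count each for "k1" and "k2";
      // the repeated "k1" was filtered out by uniq_keys.
    }
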
@@ -192,7 +181,7 @@ void Transaction::InitMultiData(KeyIndex key_index) {
 
   multi_->locks_recorded = true;
   DCHECK(IsAtomicMulti());
-  DCHECK(multi_->mode == GLOBAL || !multi_->keys.empty() || !multi_->lock_counts.empty());
+  DCHECK(multi_->mode == GLOBAL || !multi_->lock_counts.empty());
 }
 
 void Transaction::StoreKeysInArgs(KeyIndex key_index, bool rev_mapping) {
@@ -384,24 +373,6 @@ void Transaction::StartMultiLockedAhead(DbIndex dbid, CmdArgList keys) {
   ScheduleInternal();
 }
 
-void Transaction::StartMultiLockedIncr(DbIndex dbid, const vector<bool>& shards) {
-  DCHECK(multi_);
-  DCHECK(shard_data_.empty());  // Make sure default InitByArgs didn't run.
-  DCHECK(std::any_of(shards.begin(), shards.end(), [](bool s) { return s; }));
-
-  multi_->mode = LOCK_INCREMENTAL;
-  InitBase(dbid, {});
-
-  auto& shard_index = tmp_space.GetShardIndex(shard_set->size());
-  for (size_t i = 0; i < shards.size(); i++)
-    shard_index[i].requested_active = shards[i];
-
-  shard_data_.resize(shard_index.size());
-  InitShardData(shard_index, 0, false);
-
-  ScheduleInternal();
-}
-
 void Transaction::StartMultiNonAtomic() {
   DCHECK(multi_);
   multi_->mode = NON_ATOMIC;
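
The deleted StartMultiLockedIncr() above pre-marked shards as active without passing any args: it copied a vector<bool> of shards into per-shard requested_active flags, which is presumably why the first hunk could also drop the !si.requested_active check in InitShardData(). A loose model of that removed bookkeeping; PerShardCache and ShardData here are simplified stand-ins, not the real structs.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct PerShardCache {          // stand-in for the per-shard init cache
      bool requested_active = false;
    };
    struct ShardData {              // stand-in for the per-shard runtime state
      unsigned local_mask = 0;
    };
    constexpr unsigned ACTIVE = 1;  // invented flag value

    int main() {
      // The caller declared which shards the multi transaction will run on.
      std::vector<bool> shards = {true, false, true};

      std::vector<PerShardCache> shard_index(shards.size());
      for (std::size_t i = 0; i < shards.size(); i++)
        shard_index[i].requested_active = shards[i];

      // A shard became ACTIVE either because it had args or because it was
      // explicitly requested; the second condition is what the commit removes.
      std::vector<ShardData> shard_data(shard_index.size());
      for (std::size_t i = 0; i < shard_index.size(); i++)
        if (shard_index[i].requested_active)
          shard_data[i].local_mask |= ACTIVE;

      assert(shard_data[0].local_mask & ACTIVE);
      assert(!(shard_data[1].local_mask & ACTIVE));
    }
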
@@ -461,7 +432,6 @@ bool Transaction::RunInShard(EngineShard* shard) {
 
   bool was_suspended = sd.local_mask & SUSPENDED_Q;
   bool awaked_prerun = sd.local_mask & AWAKED_Q;
-  bool incremental_lock = multi_ && multi_->IsIncrLocks();
 
   // For multi we unlock transaction (i.e. its keys) in UnlockMulti() call.
   // Therefore we differentiate between concluding, which says that this specific
@@ -472,15 +442,6 @@ bool Transaction::RunInShard(EngineShard* shard) {
   bool should_release = is_concluding && !IsAtomicMulti();
   IntentLock::Mode mode = Mode();
 
-  // We make sure that we lock exactly once for each (multi-hop) transaction inside
-  // transactions that lock incrementally.
-  if (!IsGlobal() && incremental_lock && ((sd.local_mask & KEYLOCK_ACQUIRED) == 0)) {
-    DCHECK(!awaked_prerun);  // we should not have a blocking transaction inside multi block.
-
-    sd.local_mask |= KEYLOCK_ACQUIRED;
-    shard->db_slice().Acquire(mode, GetLockArgs(idx));
-  }
-
   DCHECK(IsGlobal() || (sd.local_mask & KEYLOCK_ACQUIRED) || (multi_ && multi_->mode == GLOBAL));
 
   /*************************************************************************/
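
With the incremental path gone, key locks for atomic multi transactions are recorded before the hops run, and KEYLOCK_ACQUIRED only marks whether a shard currently holds its locks so that the release (see the CancelShardCb hunk further down) happens exactly once. A minimal sketch of that flag-guarded acquire/release pairing, with a toy lock table standing in for db_slice():

    #include <cassert>
    #include <string>
    #include <unordered_map>
    #include <vector>

    constexpr unsigned KEYLOCK_ACQUIRED = 1 << 1;  // invented flag value

    struct ToyLockTable {                          // stand-in for the shard's lock bookkeeping
      std::unordered_map<std::string, int> counts;
      void Acquire(const std::vector<std::string>& keys) {
        for (const auto& k : keys) counts[k]++;
      }
      void Release(const std::vector<std::string>& keys) {
        for (const auto& k : keys) {
          auto it = counts.find(k);
          assert(it != counts.end());
          if (--it->second == 0) counts.erase(it);
        }
      }
    };

    int main() {
      ToyLockTable table;
      unsigned local_mask = 0;
      std::vector<std::string> lock_args = {"k1", "k2"};

      // Acquire exactly once, and remember that we did.
      if ((local_mask & KEYLOCK_ACQUIRED) == 0) {
        table.Acquire(lock_args);
        local_mask |= KEYLOCK_ACQUIRED;
      }

      // Release only if this shard actually holds the locks, then clear the
      // flag so a second cleanup pass becomes a no-op.
      if (local_mask & KEYLOCK_ACQUIRED) {
        table.Release(lock_args);
        local_mask &= ~KEYLOCK_ACQUIRED;
      }
      assert(table.counts.empty());
    }
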
@@ -620,7 +581,6 @@ void Transaction::ScheduleInternal() {
       coordinator_state_ |= COORD_SCHED;
       // If we granted all locks, we can run out of order.
       if (!ooo_disabled && lock_granted_cnt.load(memory_order_relaxed) == num_shards) {
-        // Currently we don't support OOO for incremental locking. Sp far they are global.
         coordinator_state_ |= COORD_OOO;
       }
       VLOG(2) << "Scheduled " << DebugId()
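
The out-of-order fast path kept above is a simple tally: each shard that granted its locks bumps an atomic counter, and the coordinator compares it against the number of shards before setting COORD_OOO. A small sketch of that pattern; the thread-per-shard setup and the flag value are invented, only the counter comparison mirrors the diff.

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    int main() {
      const unsigned num_shards = 4;
      std::atomic<unsigned> lock_granted_cnt{0};

      // Each "shard" reports that it granted its locks immediately.
      std::vector<std::thread> shards;
      for (unsigned i = 0; i < num_shards; i++)
        shards.emplace_back([&] { lock_granted_cnt.fetch_add(1, std::memory_order_relaxed); });
      for (auto& t : shards) t.join();

      // Coordinator side: if every shard granted, the transaction may run out of order.
      constexpr unsigned COORD_OOO = 1 << 0;  // invented bit
      unsigned coordinator_state = 0;
      if (lock_granted_cnt.load(std::memory_order_relaxed) == num_shards)
        coordinator_state |= COORD_OOO;

      std::printf("ooo=%d\n", (coordinator_state & COORD_OOO) ? 1 : 0);
    }
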
@@ -667,18 +627,6 @@ void Transaction::ScheduleInternal() {
   }
 }
 
-void Transaction::MultiData::AddLocks(IntentLock::Mode mode) {
-  DCHECK(IsIncrLocks());
-  for (auto& key : keys) {
-    lock_counts[std::move(key)][mode]++;
-  }
-  keys.clear();
-}
-
-bool Transaction::MultiData::IsIncrLocks() const {
-  return mode == LOCK_INCREMENTAL;
-}
-
 // Optimized "Schedule and execute" function for the most common use-case of a single hop
 // transactions like set/mset/mget etc. Does not apply for more complicated cases like RENAME or
 // BLPOP where a data must be read from multiple shards before performing another hop.
@@ -734,9 +682,6 @@ OpStatus Transaction::ScheduleSingleHop(RunnableType cb) {
     if (!IsAtomicMulti())  // Multi schedule in advance.
      ScheduleInternal();
 
-    if (multi_ && multi_->IsIncrLocks())
-      multi_->AddLocks(Mode());
-
     ExecuteAsync();
   }
 
@@ -808,9 +753,6 @@ void Transaction::Schedule() {
   if (multi_ && multi_->role == SQUASHED_STUB)
     return;
 
-  if (multi_ && multi_->IsIncrLocks())
-    multi_->AddLocks(Mode());
-
   if (!IsAtomicMulti())
     ScheduleInternal();
 }
@@ -1101,7 +1043,7 @@ bool Transaction::CancelShardCb(EngineShard* shard) {
   if (sd.local_mask & KEYLOCK_ACQUIRED) {
     auto mode = Mode();
     auto lock_args = GetLockArgs(shard->shard_id());
-    DCHECK(lock_args.args.size() > 0 || (multi_ && multi_->mode == LOCK_INCREMENTAL));
+    DCHECK(lock_args.args.size() > 0);
     shard->db_slice().Release(mode, lock_args);
     sd.local_mask &= ~KEYLOCK_ACQUIRED;
   }
@@ -1115,7 +1057,7 @@ bool Transaction::CancelShardCb(EngineShard* shard) {
 
 // runs in engine-shard thread.
 ArgSlice Transaction::GetShardArgs(ShardId sid) const {
-  DCHECK(!args_.empty() || (multi_ && multi_->IsIncrLocks()));
+  DCHECK(!args_.empty());
 
   // We can read unique_shard_cnt_ only because ShardArgsInShard is called after IsArmedInShard
   // barrier.