mirror of https://github.com/dragonflydb/dragonfly.git
synced 2025-05-10 18:05:44 +02:00
chore(server): cleanup replication shard sync execution (#4374)

Signed-off-by: adi_holden <adi@dragonflydb.io>

This commit is contained in:
parent c4be62edc8
commit 22994cf3b7

8 changed files with 14 additions and 32 deletions
@@ -94,7 +94,7 @@ class ClusterShardMigration
       if (tx_data->opcode == journal::Op::PING) {
         // TODO check about ping logic
       } else {
-        ExecuteTxWithNoShardSync(std::move(*tx_data), cntx);
+        ExecuteTx(std::move(*tx_data), cntx);
       }
     }
 
@@ -125,11 +125,10 @@ class ClusterShardMigration
   }
 
  private:
-  void ExecuteTxWithNoShardSync(TransactionData&& tx_data, Context* cntx) {
+  void ExecuteTx(TransactionData&& tx_data, Context* cntx) {
     if (cntx->IsCancelled()) {
       return;
     }
-    CHECK(tx_data.shard_cnt <= 1);  // we don't support sync for multishard execution
     if (!tx_data.IsGlobalCmd()) {
       executor_.Execute(tx_data.dbid, tx_data.command);
     } else {
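Note: the rename from ExecuteTxWithNoShardSync to ExecuteTx, together with the dropped CHECK on tx_data.shard_cnt, reflects that a slot-migration flow applies its journal stream serially against a single shard, so there is no cross-shard synchronization step left on this path. A minimal sketch of that execution model, using hypothetical stand-in types rather than Dragonfly's actual classes:

// Sketch only: a per-flow executor that applies journal transactions
// strictly in arrival order. Because every entry in a migration flow
// targets the same shard, no shard-sync barrier is required.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct TxData {
  uint32_t dbid = 0;
  std::vector<std::string> command;  // e.g. {"SET", "key", "val"}
};

class FlowExecutor {
 public:
  void Execute(TxData&& tx) {
    // A real executor would dispatch to the shard's command handler.
    std::cout << "db " << tx.dbid << ":";
    for (const auto& part : tx.command) std::cout << ' ' << part;
    std::cout << '\n';
  }
};

int main() {
  FlowExecutor exec;
  exec.Execute({0, {"SET", "k1", "v1"}});
  exec.Execute({0, {"DEL", "k1"}});
}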
@@ -60,7 +60,6 @@ void TransactionData::AddEntry(journal::ParsedEntry&& entry) {
     case journal::Op::EXPIRED:
     case journal::Op::COMMAND:
       command = std::move(entry.cmd);
-      shard_cnt = entry.shard_cnt;
       dbid = entry.dbid;
       txid = entry.txid;
       return;
@@ -47,7 +47,6 @@ struct TransactionData {
 
   TxId txid{0};
   DbIndex dbid{0};
-  uint32_t shard_cnt{0};
   journal::ParsedEntry::CmdData command;
 
   journal::Op opcode = journal::Op::NOOP;
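The replica-side TransactionData no longer carries shard_cnt at all: AddEntry still consumes the parsed journal entry, it simply stops copying the field. A self-contained sketch of the reduced flow, with stand-in types in place of the real journal structures:

// Sketch only: shard_cnt still arrives on the wire but is not copied
// into the replica's per-transaction state.
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

enum class Op { NOOP, COMMAND, EXPIRED, PING };

struct ParsedEntry {
  Op opcode = Op::NOOP;
  uint64_t txid = 0;
  uint32_t dbid = 0;
  uint32_t shard_cnt = 0;  // parsed, then ignored below
  std::vector<std::string> cmd;
};

struct TransactionData {
  uint64_t txid{0};
  uint32_t dbid{0};
  std::vector<std::string> command;
  Op opcode = Op::NOOP;

  void AddEntry(ParsedEntry&& entry) {
    opcode = entry.opcode;
    switch (entry.opcode) {
      case Op::EXPIRED:
      case Op::COMMAND:
        command = std::move(entry.cmd);
        dbid = entry.dbid;  // shard_cnt is intentionally not copied
        txid = entry.txid;
        return;
      default:
        return;
    }
  }
};

int main() {
  ParsedEntry e{Op::COMMAND, 42, 0, 3, {"SET", "k", "v"}};
  TransactionData tx;
  tx.AddEntry(std::move(e));
}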
@@ -28,7 +28,8 @@ struct EntryBase {
   TxId txid;
   Op opcode;
   DbIndex dbid;
-  uint32_t shard_cnt;
+  uint32_t shard_cnt;  // This field is no longer used by the replica, but we continue to serialize
+                       // and deserialize it to maintain backward compatibility.
   std::optional<cluster::SlotId> slot;
   LSN lsn{0};
 };
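Keeping shard_cnt in EntryBase even though replicas ignore it leaves the journal wire format unchanged, so a new replica can still follow an old master and vice versa. The pattern, sketched with a hypothetical text serializer (Dragonfly's real codec is binary; this only shows the shape):

// Sketch of the backward-compatibility pattern: a field that no longer
// drives behavior is still written and read, so peers that expect it at
// a fixed position in the stream keep parsing correctly.
#include <cstdint>
#include <iostream>
#include <sstream>

struct EntryBase {
  uint64_t txid = 0;
  uint32_t dbid = 0;
  uint32_t shard_cnt = 0;  // unused by new replicas, kept on the wire
};

void Serialize(std::ostream& os, const EntryBase& e) {
  os << e.txid << ' ' << e.dbid << ' ' << e.shard_cnt << ' ';
}

EntryBase Deserialize(std::istream& is) {
  EntryBase e;
  is >> e.txid >> e.dbid >> e.shard_cnt;  // read and then ignore shard_cnt
  return e;
}

int main() {
  std::stringstream wire;
  Serialize(wire, {7, 0, 1});
  EntryBase e = Deserialize(wire);
  return e.txid == 7 ? 0 : 1;
}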
@@ -966,7 +966,8 @@ void DflyShardReplica::ExecuteTx(TransactionData&& tx_data, Context* cntx) {
     return;
   }
 
-  bool inserted_by_me = multi_shard_exe_->InsertTxToSharedMap(tx_data.txid, tx_data.shard_cnt);
+  bool inserted_by_me =
+      multi_shard_exe_->InsertTxToSharedMap(tx_data.txid, master_context_.num_flows);
 
   auto& multi_shard_data = multi_shard_exe_->Find(tx_data.txid);
 
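For genuinely multi-shard transactions the replica still rendezvouses its flows, but the barrier is now sized by master_context_.num_flows, which the replica already knows from the replication handshake, instead of a per-transaction shard_cnt shipped in every journal entry. A simplified, single-threaded sketch of that counter map (the real structure is concurrent; all names here are illustrative):

// Sketch only: the first flow to see a txid inserts a counter sized by
// the total number of flows; each flow then "arrives", and the last
// arrival completes the barrier.
#include <cstdint>
#include <iostream>
#include <unordered_map>

class MultiShardExecution {
 public:
  explicit MultiShardExecution(uint32_t num_flows) : num_flows_(num_flows) {}

  // Returns true if this call created the entry for txid.
  bool InsertTxToSharedMap(uint64_t txid) {
    auto [it, inserted] = counters_.try_emplace(txid, num_flows_);
    return inserted;
  }

  // Returns true when all flows have arrived for txid.
  bool Arrive(uint64_t txid) {
    return --counters_.at(txid) == 0;
  }

 private:
  uint32_t num_flows_;
  std::unordered_map<uint64_t, uint32_t> counters_;
};

int main() {
  MultiShardExecution exe(2);            // two replication flows
  exe.InsertTxToSharedMap(5);            // first flow creates the entry
  exe.InsertTxToSharedMap(5);            // second flow: entry already exists
  std::cout << exe.Arrive(5) << '\n';    // 0: one flow still pending
  std::cout << exe.Arrive(5) << '\n';    // 1: barrier complete
}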
@@ -399,17 +399,12 @@ OpStatus OpMSet(const OpArgs& op_args, const ShardArgs& args) {
     if (stored * 2 == args.Size()) {
       RecordJournal(op_args, "MSET", args, op_args.tx->GetUniqueShardCnt());
       DCHECK_EQ(result, OpStatus::OK);
-      return result;
+    } else if (stored > 0) {
+      vector<string_view> store_args(args.begin(), args.end());
+      store_args.resize(stored * 2);
+      RecordJournal(op_args, "MSET", store_args, op_args.tx->GetUniqueShardCnt());
     }
-
-    // Even without changes, we have to send a dummy command like PING for the
-    // replica to ack
-    string_view cmd = stored == 0 ? "PING" : "MSET";
-    vector<string_view> store_args(args.begin(), args.end());
-    store_args.resize(stored * 2);
-    RecordJournal(op_args, cmd, store_args, op_args.tx->GetUniqueShardCnt());
   }
 
   return result;
 }
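With the dummy-PING ack mechanism gone, OpMSet journals only what actually happened: the full argument list on complete success, the stored prefix on partial success, and nothing at all when no key was written. A compact sketch of that rule with a stand-in RecordJournal:

// Sketch of the new journaling rule for MSET. `stored` is the number of
// key/value pairs that were written, so `stored * 2` is the length of
// the argument prefix that should be replicated.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

void RecordJournal(const std::string& cmd, const std::vector<std::string>& args) {
  std::cout << "journal: " << cmd << " (" << args.size() << " args)\n";
}

void JournalMSet(const std::vector<std::string>& args, size_t stored) {
  if (stored * 2 == args.size()) {
    RecordJournal("MSET", args);       // everything was stored
  } else if (stored > 0) {
    std::vector<std::string> prefix(args.begin(), args.begin() + stored * 2);
    RecordJournal("MSET", prefix);     // journal only the stored prefix
  }                                    // stored == 0: journal nothing
}

int main() {
  JournalMSet({"k1", "v1", "k2", "v2"}, 2);  // full success
  JournalMSet({"k1", "v1", "k2", "v2"}, 1);  // partial success
  JournalMSet({"k1", "v1"}, 0);              // nothing stored
}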
@@ -704,7 +704,7 @@ void Transaction::RunCallback(EngineShard* shard) {
 
   // Log to journal only once the command finished running
   if ((coordinator_state_ & COORD_CONCLUDING) || (multi_ && multi_->concluding)) {
-    LogAutoJournalOnShard(shard, result);
+    LogAutoJournalOnShard(shard);
     MaybeInvokeTrackingCb();
   }
 }
@@ -1346,7 +1346,7 @@ OpStatus Transaction::RunSquashedMultiCb(RunnableType cb) {
   auto result = cb(this, shard);
   db_slice.OnCbFinish();
 
-  LogAutoJournalOnShard(shard, result);
+  LogAutoJournalOnShard(shard);
   MaybeInvokeTrackingCb();
 
   DCHECK_EQ(result.flags, 0);  // if it's sophisticated, we shouldn't squash it
@@ -1438,7 +1438,7 @@ optional<string_view> Transaction::GetWakeKey(ShardId sid) const {
   return ArgS(full_args_, sd.wake_key_pos);
 }
 
-void Transaction::LogAutoJournalOnShard(EngineShard* shard, RunnableResult result) {
+void Transaction::LogAutoJournalOnShard(EngineShard* shard) {
   // TODO: For now, we ignore non shard coordination.
   if (shard == nullptr)
     return;
@@ -1455,20 +1455,8 @@ void Transaction::LogAutoJournalOnShard(EngineShard* shard, RunnableResult resul
   if (journal == nullptr)
     return;
 
-  if (result.status != OpStatus::OK) {
-    // We log NOOP even for NO_AUTOJOURNAL commands because the non-success status could have been
-    // due to OOM in a single shard, while other shards succeeded
-    journal->RecordEntry(txid_, journal::Op::NOOP, db_index_, unique_shard_cnt_,
-                         unique_slot_checker_.GetUniqueSlotId(), journal::Entry::Payload{}, true);
-    return;
-  }
-
   // If autojournaling was disabled and not re-enabled the callback is writing to journal.
-  // We do not allow preemption in callbacks and therefor the call to RecordJournal from
-  // from callbacks does not allow await.
-  // To make sure we flush the changes to sync we call TriggerJournalWriteToSink here.
   if ((cid_->opt_mask() & CO::NO_AUTOJOURNAL) && !re_enabled_auto_journal_) {
-    TriggerJournalWriteToSink();
     return;
   }
 
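Since LogAutoJournalOnShard no longer receives the RunnableResult, it cannot (and no longer needs to) emit NOOP padding for failed commands; all that remains is deciding whether the command is auto-journaled at all. A condensed sketch of the remaining control flow; the flag names and the write-command gate are assumptions for illustration, not the actual CO:: mask values:

// Sketch only: no result inspection and no NOOP padding. The function
// skips commands that are not auto-journaled and records the rest.
#include <cstdint>
#include <iostream>

constexpr uint32_t CO_WRITE = 1 << 0;           // hypothetical flag values
constexpr uint32_t CO_NO_AUTOJOURNAL = 1 << 1;

struct Command {
  uint32_t opt_mask = 0;
  const char* name = "";
};

void LogAutoJournal(const Command& cmd, bool re_enabled_auto_journal) {
  if ((cmd.opt_mask & CO_WRITE) == 0)
    return;  // only write commands are journaled
  if ((cmd.opt_mask & CO_NO_AUTOJOURNAL) && !re_enabled_auto_journal)
    return;  // the command's own callback writes to the journal
  std::cout << "journal " << cmd.name << '\n';
}

int main() {
  LogAutoJournal({CO_WRITE, "SET"}, false);                       // journaled
  LogAutoJournal({CO_WRITE | CO_NO_AUTOJOURNAL, "SPOP"}, false);  // skipped
}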
@@ -559,7 +559,7 @@ class Transaction {
 
   // Log command in shard's journal, if this is a write command with auto-journaling enabled.
   // Should be called immediately after the last hop.
-  void LogAutoJournalOnShard(EngineShard* shard, RunnableResult shard_result);
+  void LogAutoJournalOnShard(EngineShard* shard);
 
   // Whether the callback can be run directly on this thread without dispatching on the shard queue
   bool CanRunInlined() const;