//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
    InstrItins(mf.getTarget().getInstrItineraryData()),
    Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()), LoopRegs(MLI, MDT) {
  DbgValueVec.clear();
}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;
  InsertPos = end;
  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
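/// For illustration, the kind of sequence this looks through (hypothetical
/// IR values):
///   %int = ptrtoint i8* %obj to i64
///   %sum = add i64 %int, 16
///   %ptr = inttoptr i64 %sum to i8*
/// Starting from %sum, the add's constant second operand means the first
/// operand leads to %int, and the ptrtoint hands the walk back to %obj.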
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}

/// getUnderlyingObject - This is a wrapper around GetUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
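/// For illustration (hypothetical IR): GetUnderlyingObject alone stops when
/// it reaches an inttoptr such as
///   %ptr = inttoptr i64 %sum to i8*
/// whereas this wrapper hands %sum to getUnderlyingObjectFromInt and then
/// resumes the normal pointer walk from whatever it finds.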
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = GetUnderlyingObject(V);
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}

/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
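/// For illustration (hypothetical): if the result register of a
/// long-latency divide is live-out, modeling the exit as a user of that
/// register keeps the divide from being scheduled so late that its latency
/// extends past the end of the block.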
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      Uses[Reg].push_back(&ExitSU);
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].push_back(&ExitSU);
      }
  }
}

void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
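  // For illustration (hypothetical example, not tied to a target): two
  // distinct spill slots are known not to alias each other or IR values,
  // so they land in the NonAlias maps; references through arbitrary
  // pointers may alias each other and go in the Alias maps.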
  // Keep track of dangling debug references to registers.
  std::vector<std::pair<MachineInstr*, unsigned> >
    DanglingDebugValue(TRI->getNumRegs(),
                       std::make_pair(static_cast<MachineInstr*>(0), 0));

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValueVec.clear();

  // Model data dependencies between instructions being scheduled and the
  // exit SU.
  AddSchedBarrierDeps();

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    assert(Defs[i].empty() && "Only BuildGraph should push/pop Defs");
  }

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    // DBG_VALUEs do not have SUnits built, so just remember these for later
    // reinsertion.
    if (MI->isDebugValue()) {
      if (MI->getNumOperands()==3 && MI->getOperand(0).isReg() &&
          MI->getOperand(0).getReg())
        DanglingDebugValue[MI->getOperand(0).getReg()] =
          std::make_pair(MI, DbgValueVec.size());
      DbgValueVec.push_back(MI);
      continue;
    }
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);
    SU->isCall = TID.isCall();
    SU->isCommutable = TID.isCommutable();

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");

      if (MO.isDef() && DanglingDebugValue[Reg].first!=0) {
        SU->DbgInstrList.push_back(DanglingDebugValue[Reg].first);
        DbgValueVec[DanglingDebugValue[Reg].second] = 0;
        DanglingDebugValue[Reg] = std::make_pair((MachineInstr*)0, 0);
      }

      std::vector<SUnit *> &UseList = Uses[Reg];
      // Defs are pushed in the order they are visited and never reordered.
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      // there's no cost for reusing registers.
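      // Illustrative sketch (hypothetical pseudo-instructions); because we
      // walk bottom-up, later instructions are already in the lists:
      //   (a) r1 = add r2, r3
      //   (b) r2 = sub r4, r5   ; must not move above (a)'s use of r2
      //   (c) r1 = mul r6, r7   ; must not move above (a)'s def of r1
      // When (a) is visited, (b) receives an anti edge (latency 0) on r2
      // and (c) an output edge (latency 1) on r1, as described above.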
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU == &ExitSU)
          continue;
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &MemDefList = Defs[*Alias];
        for (unsigned i = 0, e = MemDefList.size(); i != e; ++i) {
          SUnit *DefSU = MemDefList[i];
          if (DefSU == &ExitSU)
            continue;
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU == SU)
            continue;
          unsigned LDataLatency = DataLatency;
          // Optionally add in a special extra latency for nodes that
          // feed addresses.
          // TODO: Do this for register aliases too.
          // TODO: Perhaps we should get rid of
          // SpecialAddressLatency and just move this into
          // adjustSchedDependency for the targets that care about it.
          if (SpecialAddressLatency != 0 && !UnitLatencies &&
              UseSU != &ExitSU) {
            MachineInstr *UseMI = UseSU->getInstr();
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
            assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
            if (RegUseIndex >= 0 &&
                (UseTID.mayLoad() || UseTID.mayStore()) &&
                (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
              LDataLatency += SpecialAddressLatency;
          }
          // Adjust the dependence latency using operand def/use
          // information (if any), and then allow the target to
          // perform its own adjustments.
          const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
          if (!UnitLatencies) {
            ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
            ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
          }
          UseSU->addPred(dep);
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU == SU)
              continue;
            const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
              ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
            }
            UseSU->addPred(dep);
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                // isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }
        UseList.clear();
        if (!MO.isDead())
          DefList.clear();

        // Calls will not be reordered because of chain dependencies (see
        // below). Since call operands are dead, calls may continue to be added
        // to the DefList making dependence checking quadratic in the size of
        // the block. Instead, we leave only one call at the back of the
        // DefList.
        if (SU->isCall) {
          while (!DefList.empty() && DefList.back()->isCall)
            DefList.pop_back();
        }
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }

418 // Chain dependencies used to enforce memory order should have
419 // latency of 0 (except for true dependency of Store followed by
420 // aliased Load... we estimate that with a single cycle of latency
421 // assuming the hardware will bypass)
422 // Note that isStoreToStackSlot and isLoadFromStackSLot are not usable
423 // after stack slots are lowered to actual addresses.
424 // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
425 // produce more precise dependence information.
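    // For illustration (hypothetical instructions): a store to a stack slot
    // followed by a load from the same slot stays ordered via an Order edge
    // with STORE_LOAD_LATENCY (one cycle, assuming a store-to-load bypass),
    // while other memory orderings use latency 0.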
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (TID.isCall() || MI->hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
           E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (TID.mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmp
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Order, 0,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
    } else if (TID.mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  // Compute the latency for the node.
  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;

    // Simplistic target-independent heuristic: assume that loads take
    // extra time.
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
  } else {
    SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
  }
}

void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  if (!InstrItins || InstrItins->isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    const MachineOperand &MO = DefMI->getOperand(DefIdx);
    if (MO.isReg() && MO.isImplicit() &&
        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
      // This is an implicit def, getOperandLatency() won't return the correct
      // latency. e.g.
      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
      // What we want is to compute latency between def of %D6/%D7 and use of
      // %Q3 instead.
      DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
    }
    MachineInstr *UseMI = Use->getInstr();
    // For all uses of the register, calculate the maximum latency.
    int Latency = -1;
    if (UseMI) {
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
                                              UseMI, i);
        Latency = std::max(Latency, UseCycle);
      }
    } else {
      // If UseMI is null, then it must be a scheduling barrier.
      if (!InstrItins || InstrItins->isEmpty())
        return;
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
    }

    // If we found a latency, then replace the existing dependence latency.
    if (Latency >= 0)
      dep.setLatency(Latency);
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // First reinsert any remaining debug_values; these are either constants,
  // or refer to live-in registers. The beginning of the block is the right
  // place for the latter. The former might reasonably be placed elsewhere
  // using some kind of ordering algorithm, but right now it doesn't matter.
  for (int i = DbgValueVec.size()-1; i >= 0; --i)
    if (DbgValueVec[i])
      BB->insert(InsertPos, DbgValueVec[i]);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
    for (unsigned j = 0, f = SU->DbgInstrList.size(); j != f; ++j)
      BB->insert(InsertPos, SU->DbgInstrList[j]);
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!DbgValueVec.empty()) {
    for (int i = DbgValueVec.size()-1; i >= 0; --i)
      if (DbgValueVec[i] != 0) {
        Begin = DbgValueVec[i];
        break;
      }
  } else if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  DbgValueVec.clear();
  return BB;
}