//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
#include <cctype>
#include <cstring>

using namespace llvm;
static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));
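// For example, passing -disable-sched-hazard to llc turns off the pre-RA
// scheduler's hazard recognizer; see usePreRAHazardRecognizer below.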
TargetInstrInfo::~TargetInstrInfo() {
}
const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overloaded in the target code to do that.
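/// For example, with ";" as the separator string and a maximum instruction
/// length of 4 bytes (illustrative values; both actually come from the
/// target's MCAsmInfo), "foo $0, $1; bar $2" counts as two instructions and
/// yields an estimate of 8 bytes.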
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
  }

  return Length;
}
/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
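/// If NewDest is the layout successor of the truncated block, no branch is
/// emitted and control simply falls through.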
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest isn't immediately after MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}
// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
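// For example, commuting "%v2 = ADD %v0, %v1" yields "%v2 = ADD %v1, %v0":
// kill flags and sub-register indices travel with the swapped registers, and
// a def tied to one of the sources is re-pointed at the register that
// replaces its old partner. (Illustrative operands; the indices come from
// findCommutedOpIndices.)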
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    assert(MI->isCommutable() && "Precondition violation: MI must be commutable.");
    return nullptr;
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}
/// findCommutedOpIndices - If specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
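/// For example, for "v0 = op v1, v2" this reports the indices of v1 and v2,
/// the first two operands after the defs; targets whose commutable
/// instructions use a different operand layout must override this.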
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}
bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}
bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}
bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!MF.getTarget().getDataLayout()->isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}
bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}
// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
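// For example, "%v1 = COPY %v0" with FoldIdx == 0 returns %v1's register
// class when %v0's class is a subclass of (or equal to) it, so the fold can
// use a spill slot sized for that class. (Illustrative vregs.)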
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}
void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  llvm_unreachable("Not a MachO target");
}
bool TargetInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                           ArrayRef<unsigned> Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
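// Fold a frame index into a STACKMAP or PATCHPOINT by rewriting the affected
// live-value operands: each folded register operand is replaced by the
// four-operand sequence <StackMaps::IndirectMemRefOp, SpillSize, FrameIndex,
// SpillOffset>, which StackMaps later encodes as an indirect memory location.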
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI->getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
    PatchPointOpers opers(MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the meta data, and function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI->getOperand(i));

  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
        TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
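/// For example, folding a stack slot into "%v1 = COPY %v0" at the def turns
/// the copy into a store of %v0 to the slot; STACKMAP and PATCHPOINT get the
/// operand rewrite described above, and everything else is delegated to the
/// target's foldMemoryOperandImpl. (Illustrative vregs.)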
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr *LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  }

  if (!NewMI) return nullptr;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}
bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
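// Return the stack pointer adjustment implied by a call frame setup/destroy
// pseudo. For a downward-growing stack, the setup pseudo's immediate is
// returned unchanged and the destroy pseudo's is negated, so the result is
// the signed change applied to SP.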
int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
    TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  int FrameSetupOpcode = getCallFrameSetupOpcode();
  int FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (MI->getOpcode() != FrameSetupOpcode &&
      MI->getOpcode() != FrameDestroyOpcode)
    return 0;

  int SPAdj = MI->getOperand(0).getImm();

  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
    return true;

  return false;
}
// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}
// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}
// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//
int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}
//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//
unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}
/// Return the default expected latency for a def based on its opcode.
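/// For example, a transient instruction such as a COPY gets latency 0, a
/// load gets the scheduling model's LoadLatency, a high-latency def (as
/// reported by isHighLatencyDef) gets HighLatency, and everything else
/// defaults to one cycle.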
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}
unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 0;
}
unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}
bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData,
    const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}
/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI may
/// be NULL for an unknown use.
///
/// Minimum latency is used for scheduling groups, while expected latency is
/// for instruction cost and critical path.
///
/// Depending on the subtarget's itinerary properties, this may or may not need
/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
/// UseIdx to compute min latency.
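/// The fallback chain is: the def-only latency from computeDefOperandLatency,
/// then the itinerary's operand-to-operand latency (or the def's operand
/// cycle when UseMI is null), and finally the stage latency clamped from
/// below by defaultDefLatency.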
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}
bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}
bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}