//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>

using namespace llvm;
static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));
TargetInstrInfo::~TargetInstrInfo() {
}
const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}
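
// Example (illustrative; GR32/GR64 are x86 class names used only for
// concreteness): an operand declared with the TableGen ptr_rc marker takes
// the isLookupPtrRegClass() path above, so the same query may return GR32 on
// a 32-bit subtarget and GR64 on a 64-bit one:
//
//   const TargetRegisterClass *RC = TII.getRegClass(MCID, 1, TRI, MF);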
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // Check for a comment before counting, so a comment at the start of an
    // instruction suppresses it until the next separator or newline.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}
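
// Worked example: assuming MaxInstLength is 4, SeparatorString is ";", and
// the comment string is "#", the call
//
//   unsigned Len = TII.getInlineAsmLength("nop ; nop\n# note\nret", MAI);
//
// counts "nop", "nop", and "ret" but not the comment line, so Len == 12.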
/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}
// commuteInstruction - The default implementation of this method just exchanges
// the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    assert(MI->isCommutable() && "Precondition violation: MI must be commutable.");
    return nullptr;
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
  // If the destination is tied to either of the commuted source registers,
  // it must be updated as well.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  return MI;
}
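
// Example (hypothetical virtual registers): commuting
//   %vreg0 = ADD %vreg1<kill>, %vreg2
// with Idx1 = 1 and Idx2 = 2 yields
//   %vreg0 = ADD %vreg2, %vreg1<kill>
// The kill/undef/internal-read flags travel with their registers, and if the
// def had been tied to operand Idx1, Reg0 would be rewritten to %vreg2 by the
// TIED_TO handling above.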
/// findCommutedOpIndices - If the specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}
bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}
bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
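
// Example (ARM-style predication, purely illustrative): a predicable
// instruction carries a condition-code immediate and a flags-register
// operand, e.g. pred:14 (always) and pred:%noreg. Predicating it on
// "EQ on CPSR" rewrites those two operands in place to pred:0 and
// pred:%CPSR, and the function returns true.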
bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}
bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;
  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!MF.getTarget().getDataLayout()->isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}
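
// Worked example (hypothetical 16-byte register class): a sub-register index
// covering bits [32, 64) has getSubRegIdxSize() == 32 and
// getSubRegIdxOffset() == 32, giving Size = 4 and Offset = 4. On a big-endian
// target the offset is mirrored: Offset = 16 - (4 + 4) = 8.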
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}
bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}
// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}
void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  llvm_unreachable("Not a MachO target");
}
bool TargetInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                           ArrayRef<unsigned> Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI->getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
    PatchPointOpers opers(MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI->getOperand(i));

  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
        TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}
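
// Example (hypothetical stackmap): folding the live value %vreg1 of
//   STACKMAP 0, 0, %vreg1, ...
// into frame index FI replaces the register operand with the four-operand
// tuple <IndirectMemRefOp, SpillSize, FI, SpillOffset>, which stackmap
// lowering later records as an indirect memory location.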
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
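
// Example (illustrative): with Ops = {0} on
//   %vreg0 = COPY %vreg1
// the folded operand is the def, so Flags is MOStore and the copy becomes a
// storeRegToStackSlot of %vreg1 into FI; Ops = {1} would instead produce a
// loadRegFromStackSlot of %vreg0 from FI.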
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr *LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
  }

  if (!NewMI) return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}
bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // rematerialized.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
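
// Example (illustrative; MOV32ri is an x86 opcode used only for
// concreteness): a constant materialization such as
//   %vreg0 = MOV32ri 42
// passes every check above (one virtual def, no uses, no side effects) and is
// trivially rematerializable, whereas any instruction that reads a virtual
// register or loads from mutable memory is rejected.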
int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
    TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (MI->getOpcode() != FrameSetupOpcode &&
      MI->getOpcode() != FrameDestroyOpcode)
    return 0;

  int SPAdj = MI->getOperand(0).getImm();

  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
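
// Worked example (hypothetical downward-growing stack): for
//   ADJCALLSTACKDOWN 16
// the opcode is the frame-setup opcode, so neither negation applies and this
// returns +16; the matching
//   ADJCALLSTACKUP 16
// is the frame-destroy opcode and returns -16.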
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
    return true;

  return false;
}
// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}
// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}
// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//
int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;
  if (!N->isMachineOpcode())
    return 1;
  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}
//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//
unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;
  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 0;
}
/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}
unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 1;
}
unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}
bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData,
    const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}
/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI may
/// be NULL for an unknown use.
///
/// Depending on the subtarget's itinerary properties, this may or may not need
/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
/// UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}
bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}
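
// Example (hypothetical registers): for
//   %vreg3 = REG_SEQUENCE %vreg1, sub0, %vreg2, sub1
// InputRegs receives the pairs {%vreg1, no subreg, sub0} and
// {%vreg2, no subreg, sub1}.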
bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
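
// Example (hypothetical registers): for
//   %vreg2 = EXTRACT_SUBREG %vreg1.sub1, sub0
// InputReg is filled in as Reg = %vreg1, SubReg = sub1, SubIdx = sub0.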
bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");

  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
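
// Example (hypothetical registers): for
//   %vreg3 = INSERT_SUBREG %vreg1, %vreg2, sub0
// BaseReg becomes %vreg1 (no subreg) and InsertedReg becomes %vreg2 with
// SubIdx = sub0.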