//===-- TargetInstrInfoImpl.cpp - Target Instruction Information ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfoImpl class; it just provides default
// implementations of various methods.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing it with an unconditional branch to NewDest.
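/// A minimal usage sketch (the names FirstTerm and DestMBB are hypothetical;
/// callers such as tail-merging code supply their own):
///   TII->ReplaceTailWithBranchTo(FirstTerm, DestMBB);
/// where FirstTerm is an iterator to the first dead instruction in its block
/// and DestMBB is the block the new unconditional branch should target.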
void
TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                             MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save the debug location before the tail instructions are erased.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert an unconditional branch
  // to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

// commuteInstruction - The default implementation of this method just exchanges
// the two operands returned by findCommutedOpIndices.
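// For a typical "vreg0 = OP vreg1, vreg2" this swaps the vreg1 and vreg2
// operands. A hypothetical caller might do (sketch):
//   if (MachineInstr *NewMI = TII->commuteInstruction(MI, /*NewMI=*/true))
//     ... insert NewMI and erase MI ...
// whereas commuteInstruction(MI, false) commutes MI in place and returns MI.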
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
                                                      bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return 0;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Don't know how to commute: " << *MI;
    report_fatal_error(Msg.str());
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  bool ChangeReg0 = false;
  if (HasDef && MI->getOperand(0).getReg() == Reg1) {
    // Must be two address instruction!
    assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
           "Expecting a two-address instruction!");
    Reg2IsKill = false;
    ChangeReg0 = true;
  }

  if (NewMI) {
    // Create a new instruction.
    unsigned Reg0 = HasDef
      ? (ChangeReg0 ? Reg2 : MI->getOperand(0).getReg()) : 0;
    bool Reg0IsDead = HasDef ? MI->getOperand(0).isDead() : false;
    MachineFunction &MF = *MI->getParent()->getParent();
    if (HasDef)
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
    else
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
  }

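  // Commute in place: swap the two source registers and their kill flags, and
  // retarget the tied def (operand 0) to Reg2 if it previously matched Reg1.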
  if (ChangeReg0)
    MI->getOperand(0).setReg(Reg2);
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}

/// findCommutedOpIndices - If specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
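/// For example, for a plain two-source instruction "vreg0 = OP vreg1, vreg2"
/// the default implementation below sets SrcOpIdx1 = 1 and SrcOpIdx2 = 2 (the
/// first two operands after the defs).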
bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
                                                unsigned &SrcOpIdx1,
                                                unsigned &SrcOpIdx2) const {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

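// Default PredicateInstruction implementation: rewrite each operand that the
// MCInstrDesc marks as a predicate with the corresponding operand from Pred
// (register, immediate, or basic block), returning true if anything changed.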
bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

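// Default hasLoadFromStackSlot: scan the instruction's memory operands for a
// load from a fixed stack slot (a FixedStackPseudoSourceValue) and, if found,
// report that memory operand and its frame index.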
bool TargetInstrInfoImpl::hasLoadFromStackSlot(const MachineInstr *MI,
                                               const MachineMemOperand *&MMO,
                                               int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

bool TargetInstrInfoImpl::hasStoreToStackSlot(const MachineInstr *MI,
                                              const MachineMemOperand *&MMO,
                                              int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        unsigned DestReg,
                                        unsigned SubIdx,
                                        const MachineInstr *Orig,
                                        const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
                                      const MachineInstr *MI1,
                                      const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
                                             MachineFunction &MF) const {
  assert(!Orig->getDesc().isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
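// Folding is only attempted when the operand being folded is a virtual
// register with no sub-register index and its register class is compatible
// with the other (live) operand of the COPY.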
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return 0;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return 0;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : 0;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return 0;
}

bool TargetInstrInfoImpl::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
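/// A sketched call site (OpIdx and StackSlotFI are placeholders for the
/// caller's operand index and frame index):
///   SmallVector<unsigned, 1> Ops;
///   Ops.push_back(OpIdx);
///   if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, Ops, StackSlotFI))
///     ... on success, remove the original MI from its block ...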
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // Ask the target to do the actual folding.
  if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->getDesc().mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->getDesc().mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(
                      MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
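/// For example (sketch), a caller holding a foldable load LoadMI and a user MI
/// might write:
///   if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, Ops, LoadMI))
///     ... remove MI, and LoadMI too if it has no remaining uses ...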
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  NewMI->setMemRefs(LoadMI->memoperands_begin(),
                    LoadMI->memoperands_end());

  return NewMI;
}

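// Conservative target-independent check behind isTriviallyReMaterializable:
// reject anything that stores, has unmodeled side effects, is inline asm,
// loads from possibly varying memory, defines anything other than its first
// operand, or reads registers whose values may change before the
// rematerialization point.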
bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  const MCInstrDesc &MCID = MI->getDesc();

  // Avoid instructions obviously unsafe for remat.
  if (MCID.isNotDuplicable() || MCID.mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MCID.mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.def_empty(Reg))
          return false;
        BitVector AllocatableRegs = TRI.getAllocatableSet(MF, 0);
        if (AllocatableRegs.test(Reg))
          return false;
        // Check for a def among the register's aliases too.
        for (const unsigned *Alias = TRI.getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          if (!MRI.def_empty(AliasReg))
            return false;
          if (AllocatableRegs.test(AliasReg))
            return false;
        }
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def, and that in the first operand.
    if (MO.isDef() != (i == 0))
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
                                               const MachineBasicBlock *MBB,
                                               const MachineFunction &MF) const{
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfoImpl::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}