//===-- TargetInstrInfoImpl.cpp - Target Instruction Information ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfoImpl class; it just provides default
// implementations of various methods.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PostRAHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                             MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug location before erasing the instructions.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

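// Usage sketch (hedged; the pass and variable names below are hypothetical,
// not taken from this file): a tail-merging style transformation that has
// proven everything from FirstDup to the end of MBB is already available at
// the start of SuccBB could retarget the block with
//
//   TII->ReplaceTailWithBranchTo(FirstDup, SuccBB);
//
// after which MBB ends in an unconditional branch to SuccBB (or falls through
// to it) and has SuccBB as its only successor.
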
// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
                                                      bool NewMI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  bool HasDef = TID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return 0;

  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Don't know how to commute: " << *MI;
    report_fatal_error(Msg.str());
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  bool ChangeReg0 = false;
  if (HasDef && MI->getOperand(0).getReg() == Reg1) {
    // Must be two address instruction!
    assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
           "Expecting a two-address instruction!");
    Reg2IsKill = false;
    ChangeReg0 = true;
  }

  if (NewMI) {
    // Create a new instruction.
    unsigned Reg0 = HasDef
      ? (ChangeReg0 ? Reg2 : MI->getOperand(0).getReg()) : 0;
    bool Reg0IsDead = HasDef ? MI->getOperand(0).isDead() : false;
    MachineFunction &MF = *MI->getParent()->getParent();
    if (HasDef)
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
    else
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
  }

  if (ChangeReg0)
    MI->getOperand(0).setReg(Reg2);
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}

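// Example (illustration only): for a generic three-address instruction
//
//   %v0 = OP %v1<kill>, %v2
//
// the in-place path (NewMI == false) rewrites the sources to give
//
//   %v0 = OP %v2, %v1<kill>
//
// with each operand's kill flag travelling with its register. Targets whose
// commutable instructions do not follow the "def, src1, src2" layout assumed
// by findCommutedOpIndices below must override both hooks.
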
/// findCommutedOpIndices - If specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
                                                unsigned &SrcOpIdx1,
                                                unsigned &SrcOpIdx2) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = TID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (TID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

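// Example (hedged; the operand layout is ARM-like and purely illustrative):
// for an instruction whose descriptor marks two trailing predicate operands,
// a condition-code immediate followed by a condition register, passing a
// Pred vector containing { imm:NE, reg:CPSR } overwrites those two operands
// in place, turning the unconditional form into one that executes only when
// the predicate holds.
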
void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        unsigned DestReg,
                                        unsigned SubIdx,
                                        const MachineInstr *Orig,
                                        const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

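// Usage sketch (hedged; variable names are hypothetical): a rematerialization
// client that would rather re-execute a cheap defining instruction than spill
// it can clone that def right before the use:
//
//   TII->reMaterialize(*UseMBB, UseMI, NewReg, /*SubIdx=*/0, DefMI, *TRI);
//
// which inserts a copy of DefMI ahead of UseMI with its def rewritten to
// NewReg.
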
bool TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
                                           const MachineInstr *MI1) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
                                             MachineFunction &MF) const {
  assert(!Orig->getDesc().isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

unsigned
TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
  unsigned FnSize = 0;
  for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    const MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I)
      FnSize += GetInstSizeInBytes(I);
  }
  return FnSize;
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return 0;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return 0;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : 0;

  const TargetRegisterClass *LiveRC = MRI.getRegClass(LiveReg);
  if (RC == LiveRC || RC->hasSubClass(LiveRC))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return 0;
}

bool TargetInstrInfoImpl::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // Ask the target to do the actual folding.
  if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->getDesc().mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->getDesc().mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                              Flags, /*Offset=*/0,
                              MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);

  return --Pos;
}

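// Example (illustration only): if virtual register %v0 has been assigned
// stack slot FI and MI is
//
//   %v0 = COPY %v1<kill>
//
// then foldMemoryOperand(MI, {0}, FI) takes the COPY path above: the folded
// operand is a def, so it emits a store of %v1 to FI via storeRegToStackSlot
// and returns the new store. Folding the use side instead (Ops = {1}, with
// %v1 living in FI) reloads the slot straight into %v0. Either way the caller
// still has to erase the original COPY.
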
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  NewMI->setMemRefs(LoadMI->memoperands_begin(),
                    LoadMI->memoperands_end());

  return NewMI;
}

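// Illustration (hedged, X86-flavoured): folding a rematerializable load such
// as "%v1 = MOV32rm <ga:@G>" into the use operand of "%v2 = ADD32rr %v0, %v1"
// would, on a target whose foldMemoryOperandImpl supports it, produce a
// memory-form "%v2 = ADD32rm %v0, <ga:@G>"; copying LoadMI's memoperands onto
// the folded instruction preserves the memory-access information for later
// passes.
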
bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  const TargetInstrDesc &TID = MI->getDesc();

  // Avoid instructions obviously unsafe for remat.
  if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable() ||
      TID.mayStore())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (TID.mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.def_empty(Reg))
          return false;
        BitVector AllocatableRegs = TRI.getAllocatableSet(MF, 0);
        if (AllocatableRegs.test(Reg))
          return false;
        // Check for a def among the register's aliases too.
        for (const unsigned *Alias = TRI.getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          if (!MRI.def_empty(AliasReg))
            return false;
          if (AllocatableRegs.test(AliasReg))
            return false;
        }
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def, and that in the first operand.
    if (MO.isDef() != (i == 0))
      return false;

    // For the def, it should be the only def of that register.
    if (MO.isDef() && (llvm::next(MRI.def_begin(Reg)) != MRI.def_end() ||
                       MRI.isLiveIn(Reg)))
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

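// Illustrative outcomes (hedged; the opcode is X86-flavoured): a constant
// materialization like "%v0 = MOV32ri 42" or a load from an immutable fixed
// stack slot (e.g. a spilled incoming argument) passes all of the checks
// above, while anything that stores, reads potentially varying memory,
// defines a physical register, or reads another virtual register is rejected.
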
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
                                               const MachineBasicBlock *MBB,
                                               const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

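// For example, an instruction that adjusts the stack pointer, such as a
// call-frame setup pseudo on a typical target, is treated as a boundary here,
// so the scheduler never has to model a dependence between it and every
// stack-slot reference around it.
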
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
  return (ScheduleHazardRecognizer *)new PostRAHazardRecognizer(II);
}

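// A target with its own post-RA hazard model would typically override this
// hook; a minimal sketch, assuming a hypothetical MyTargetHazardRecognizer
// derived from ScheduleHazardRecognizer:
//
//   ScheduleHazardRecognizer *MyTargetInstrInfo::
//   CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
//     return new MyTargetHazardRecognizer(II);
//   }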