//===-- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef X86INSTRUCTIONINFO_H
#define X86INSTRUCTIONINFO_H

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86RegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Target/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "X86GenInstrInfo.inc"

namespace llvm {
  class X86RegisterInfo;
  class X86TargetMachine;

namespace X86 {
  // X86 specific condition code. These correspond to X86_*_COND in
  // X86InstrInfo.td. They must be kept in sync.
  enum CondCode {
    COND_A  = 0,
    COND_AE = 1,
    COND_B  = 2,
    COND_BE = 3,
    COND_E  = 4,
    COND_G  = 5,
    COND_GE = 6,
    COND_L  = 7,
    COND_LE = 8,
    COND_NE = 9,
    COND_NO = 10,
    COND_NP = 11,
    COND_NS = 12,
    COND_O  = 13,
    COND_P  = 14,
    COND_S  = 15,

    // Artificial condition codes. These are used by AnalyzeBranch
    // to indicate a block terminated with two conditional branches to
    // the same location. This occurs in code using FCMP_OEQ or FCMP_UNE,
    // which can't be represented on x86 with a single condition. These
    // are never used in MachineInstrs.
    COND_NE_OR_P,
    COND_NP_OR_E,

    COND_INVALID
  };

  // Turn condition code into conditional branch opcode.
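  // For example, COND_E selects the JE family of branches and COND_NE the
  // JNE family; the exact branch opcode names are defined in X86InstrInfo.td.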
  unsigned GetCondBranchFromCond(CondCode CC);

  // Turn CMov opcode into condition code.
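  // For example, a 32-bit "cmove" (conditional move if equal) opcode maps
  // back to COND_E; opcodes that are not conditional moves yield COND_INVALID.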
  CondCode getCondFromCMovOpc(unsigned Opc);

  /// GetOppositeBranchCondition - Return the inverse of the specified cond,
  /// e.g. turning COND_E to COND_NE.
  CondCode GetOppositeBranchCondition(X86::CondCode CC);
}  // end namespace X86

/// isGlobalStubReference - Return true if the specified TargetFlag operand is
/// a reference to a stub for a global, not the global itself.
inline static bool isGlobalStubReference(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_DLLIMPORT:                      // dllimport stub.
  case X86II::MO_GOTPCREL:                       // rip-relative GOT reference.
  case X86II::MO_GOT:                            // normal GOT reference.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_NONLAZY:                 // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Hidden $non_lazy_ptr ref.
    return true;
  default:
    return false;
  }
}

/// isGlobalRelativeToPICBase - Return true if the specified global value
/// reference is relative to a 32-bit PIC base (X86ISD::GlobalBaseReg). If this
/// is true, the addressing mode has the PIC base register added in (e.g. EBX).
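///
/// For example, with GOT-style PIC on 32-bit ELF a local global is addressed
/// as "sym@GOTOFF(%ebx)", where %ebx holds the PIC base.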
inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_GOTOFF:                         // isPICStyleGOT: local global.
  case X86II::MO_GOT:                            // isPICStyleGOT: other global.
  case X86II::MO_PIC_BASE_OFFSET:                // Darwin local global.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Darwin/32 external global.
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Darwin/32 hidden global.
  case X86II::MO_TLVP:                           // Darwin TLS (thread-local variable pointer) ref.
    return true;
  default:
    return false;
  }
}

inline static bool isScale(const MachineOperand &MO) {
  return MO.isImm() &&
    (MO.getImm() == 1 || MO.getImm() == 2 ||
     MO.getImm() == 4 || MO.getImm() == 8);
}
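
// An X86 memory reference is described by five consecutive operands: base
// register, scale amount, index register, displacement, and segment register.
// isLeaMem checks only the first four, since an LEA carries no segment operand.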
inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
  if (MI->getOperand(Op).isFI()) return true;
  return Op+4 <= MI->getNumOperands() &&
    MI->getOperand(Op  ).isReg() && isScale(MI->getOperand(Op+1)) &&
    MI->getOperand(Op+2).isReg() &&
    (MI->getOperand(Op+3).isImm() ||
     MI->getOperand(Op+3).isGlobal() ||
     MI->getOperand(Op+3).isCPI() ||
     MI->getOperand(Op+3).isJTI());
}

inline static bool isMem(const MachineInstr *MI, unsigned Op) {
  if (MI->getOperand(Op).isFI()) return true;
  return Op+5 <= MI->getNumOperands() &&
    MI->getOperand(Op+4).isReg() &&
    isLeaMem(MI, Op);
}

class X86InstrInfo : public X86GenInstrInfo {
  X86TargetMachine &TM;
  const X86RegisterInfo RI;

  /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
  /// RegOp2MemOpTable2, RegOp2MemOpTable3 - Load / store folding opcode maps.
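  ///
  /// Each table maps a register-form opcode to a (memory-form opcode, flags)
  /// pair; the flags carry per-entry details such as alignment requirements.
  /// (Rough sketch of the layout; see the AddTableEntry calls for the exact
  /// encoding.)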
  typedef DenseMap<unsigned,
    std::pair<unsigned, unsigned> > RegOp2MemOpTableType;
  RegOp2MemOpTableType RegOp2MemOpTable2Addr;
  RegOp2MemOpTableType RegOp2MemOpTable0;
  RegOp2MemOpTableType RegOp2MemOpTable1;
  RegOp2MemOpTableType RegOp2MemOpTable2;
  RegOp2MemOpTableType RegOp2MemOpTable3;

  /// MemOp2RegOpTable - Load / store unfolding opcode map.
  ///
  typedef DenseMap<unsigned,
    std::pair<unsigned, unsigned> > MemOp2RegOpTableType;
  MemOp2RegOpTableType MemOp2RegOpTable;

  static void AddTableEntry(RegOp2MemOpTableType &R2MTable,
                            MemOp2RegOpTableType &M2RTable,
                            unsigned RegOp, unsigned MemOp, unsigned Flags);

public:
  explicit X86InstrInfo(X86TargetMachine &tm);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  ///
  virtual const X86RegisterInfo &getRegisterInfo() const { return RI; }

  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
  /// extension instruction. That is, it's like a copy where it's legal for the
  /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
  /// true, then it's expected the pre-extension value is available as a subreg
  /// of the result register. This also returns the sub-register index in
  /// SubIdx.
  virtual bool isCoalescableExtInstr(const MachineInstr &MI,
                                     unsigned &SrcReg, unsigned &DstReg,
                                     unsigned &SubIdx) const;

  unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                     int &FrameIndex) const;

  unsigned isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const;
  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
                                    int &FrameIndex) const;

  bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                         AliasAnalysis *AA) const;
  void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                     unsigned DestReg, unsigned SubIdx,
                     const MachineInstr *Orig,
                     const TargetRegisterInfo &TRI) const;

  /// Given an operand within a MachineInstr, insert preceding code to put it
  /// into the right format for a particular kind of LEA instruction. This may
  /// involve using an appropriate super-register instead (with an implicit use
  /// of the original) or creating a new virtual register and inserting COPY
  /// instructions to get the data into the right class.
  ///
  /// Reference parameters are set to indicate how the caller should add this
  /// operand to the LEA instruction.
  bool classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
                      unsigned LEAOpcode, bool AllowSP,
                      unsigned &NewSrc, bool &isKill,
                      bool &isUndef, MachineOperand &ImplicitOp) const;

  /// convertToThreeAddress - This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
  /// may be able to convert a two-address instruction into a true
  /// three-address instruction on demand. This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if
  /// they would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the new instruction.
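  ///
  /// For example, a two-address "EAX = ADD32rr EAX, EBX" whose tied source
  /// must stay live can instead be emitted as an LEA that computes EAX + EBX
  /// into a fresh register (illustrative; the implementation chooses the
  /// actual LEA form).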
  virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
                                              MachineBasicBlock::iterator &MBBI,
                                              LiveVariables *LV) const;

  /// commuteInstruction - We have a few instructions that must be hacked on to
  /// commute them.
  ///
  virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const;

  // Branch analysis.
  virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const;
  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                const SmallVectorImpl<MachineOperand> &Cond,
                                DebugLoc DL) const;
  virtual bool canInsertSelect(const MachineBasicBlock&,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               unsigned, unsigned, int&, int&, int&) const;
  virtual void insertSelect(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI, DebugLoc DL,
                            unsigned DstReg,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            unsigned TrueReg, unsigned FalseReg) const;
  virtual void copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const;
  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned SrcReg, bool isKill, int FrameIndex,
                                   const TargetRegisterClass *RC,
                                   const TargetRegisterInfo *TRI) const;

  virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
                              SmallVectorImpl<MachineOperand> &Addr,
                              const TargetRegisterClass *RC,
                              MachineInstr::mmo_iterator MMOBegin,
                              MachineInstr::mmo_iterator MMOEnd,
                              SmallVectorImpl<MachineInstr*> &NewMIs) const;

  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    unsigned DestReg, int FrameIndex,
                                    const TargetRegisterClass *RC,
                                    const TargetRegisterInfo *TRI) const;

  virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                               SmallVectorImpl<MachineOperand> &Addr,
                               const TargetRegisterClass *RC,
                               MachineInstr::mmo_iterator MMOBegin,
                               MachineInstr::mmo_iterator MMOEnd,
                               SmallVectorImpl<MachineInstr*> &NewMIs) const;

  virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;

  /// foldMemoryOperand - If this target supports it, fold a load or store of
  /// the specified stack slot into the specified machine instruction for the
  /// specified operand(s). If this is possible, the target should perform the
  /// folding and return true, otherwise it should return false. If it folds
  /// the instruction, it is likely that the MachineInstr the iterator
  /// references has been changed.
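  ///
  /// For example, a reload feeding an ADD32rr can typically be folded into the
  /// memory form ADD32rm addressing the stack slot directly (illustrative; the
  /// folding tables above define what is actually legal).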
  virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
                                              MachineInstr *MI,
                                              const SmallVectorImpl<unsigned> &Ops,
                                              int FrameIndex) const;

  /// foldMemoryOperand - Same as the previous version except it allows folding
  /// of any load and store from / to any address, not just from a specific
  /// stack slot.
  virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
                                              MachineInstr *MI,
                                              const SmallVectorImpl<unsigned> &Ops,
                                              MachineInstr *LoadMI) const;

  /// canFoldMemoryOperand - Returns true for the specified load / store if
  /// folding is possible.
  virtual bool canFoldMemoryOperand(const MachineInstr *,
                                    const SmallVectorImpl<unsigned> &) const;

  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
  /// a store or a load and a store into two or more instructions. If this is
  /// possible, returns true as well as the new instructions by reference.
  virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                   unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                                   SmallVectorImpl<MachineInstr*> &NewMIs) const;

  virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                   SmallVectorImpl<SDNode*> &NewNodes) const;

  /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
                                              bool UnfoldLoad, bool UnfoldStore,
                                              unsigned *LoadRegIndex = 0) const;

  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
  /// to determine if two loads are loading from the same base address. It
  /// should only return true if the base pointers are the same and the
  /// only difference between the two addresses is the offset. It also returns
  /// the offsets by reference.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                       int64_t &Offset1, int64_t &Offset2) const;

  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
  /// should be scheduled together. On some targets, if two loads are loading
  /// from addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load
  /// offsets from the common base address. It returns true if it decides it's
  /// desirable to schedule the two loads together. "NumLoads" is the number of
  /// loads that have already been scheduled after Load1.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const;

  virtual bool shouldScheduleAdjacent(MachineInstr *First,
                                      MachineInstr *Second) const LLVM_OVERRIDE;

  virtual void getNoopForMachoTarget(MCInst &NopInst) const;

  virtual
  bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;

  /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;

  static bool isX86_64ExtendedReg(const MachineOperand &MO) {
    if (!MO.isReg()) return false;
    return X86II::isX86_64ExtendedReg(MO.getReg());
  }

  /// getGlobalBaseReg - Return a virtual register initialized with the
  /// global base register value. Output instructions required to
  /// initialize the register in the function entry block, if necessary.
  ///
  unsigned getGlobalBaseReg(MachineFunction *MF) const;
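
  /// Execution domain hooks. These let the ExecutionDepsFix pass switch
  /// equivalent SSE/AVX instructions between the integer and floating-point
  /// domains (e.g. MOVAPS vs. MOVAPD vs. MOVDQA) to avoid domain-crossing
  /// penalties.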
  std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr *MI) const;

  void setExecutionDomain(MachineInstr *MI, unsigned Domain) const;
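
  /// Partial-register-update hooks. Some SSE instructions (e.g. CVTSS2SDrr,
  /// SQRTSSr) write only part of an XMM register and therefore carry a false
  /// dependence on its old value; breakPartialRegDependency may insert a
  /// dependency-breaking instruction (such as an xorps of the register with
  /// itself) when getPartialRegUpdateClearance reports too little clearance.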
  unsigned getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
                                        const TargetRegisterInfo *TRI) const;
  void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
                                 const TargetRegisterInfo *TRI) const;

  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
                                      MachineInstr *MI,
                                      unsigned OpNum,
                                      const SmallVectorImpl<MachineOperand> &MOs,
                                      unsigned Size, unsigned Alignment) const;

  bool isHighLatencyDef(int opc) const;

  bool hasHighOperandLatency(const InstrItineraryData *ItinData,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr *DefMI, unsigned DefIdx,
                             const MachineInstr *UseMI, unsigned UseIdx) const;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if having two register operands, and the value it
  /// compares against in CmpValue. Return true if the comparison instruction
  /// can be analyzed.
  virtual bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                              unsigned &SrcReg2,
                              int &CmpMask, int &CmpValue) const;

  /// optimizeCompareInstr - Check if there exists an earlier instruction that
  /// operates on the same source operands and sets flags in the same way as
  /// Compare; remove Compare if possible.
  virtual bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
                                    unsigned SrcReg2, int CmpMask, int CmpValue,
                                    const MachineRegisterInfo *MRI) const;

  /// optimizeLoadInstr - Try to remove the load by folding it to a register
  /// operand at the use. We fold the load instructions if and only if the
  /// def and use are in the same BB. We only look at one load and see
  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
  /// defined by the load we are trying to fold. DefMI returns the machine
  /// instruction that defines FoldAsLoadDefReg, and the function returns
  /// the machine instruction generated due to folding.
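  ///
  /// For example, a "%vreg = MOV32rm <addr>" whose only use is a CMP32rr in
  /// the same block can typically be folded into a CMP32rm of that address
  /// (illustrative; folding legality is still checked against the tables).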
  virtual MachineInstr *optimizeLoadInstr(MachineInstr *MI,
                                          const MachineRegisterInfo *MRI,
                                          unsigned &FoldAsLoadDefReg,
                                          MachineInstr *&DefMI) const;

private:
  MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc,
                                             MachineFunction::iterator &MFI,
                                             MachineBasicBlock::iterator &MBBI,
                                             LiveVariables *LV) const;

  /// isFrameOperand - Return true and the FrameIndex if the specified
  /// operand and following operands form a reference to the stack frame.
  bool isFrameOperand(const MachineInstr *MI, unsigned int Op,
                      int &FrameIndex) const;

};

} // End llvm namespace

#endif