//===-- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef X86INSTRUCTIONINFO_H
#define X86INSTRUCTIONINFO_H

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86RegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Target/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "X86GenInstrInfo.inc"
namespace llvm {
  class X86RegisterInfo;
  class X86TargetMachine;
namespace X86 {
  // X86 specific condition code. These correspond to X86_*_COND in
  // X86InstrInfo.td. They must be kept in synch.
  enum CondCode {
    COND_A  = 0,
    COND_AE = 1,
    COND_B  = 2,
    COND_BE = 3,
    COND_E  = 4,
    COND_G  = 5,
    COND_GE = 6,
    COND_L  = 7,
    COND_LE = 8,
    COND_NE = 9,
    COND_NO = 10,
    COND_NP = 11,
    COND_NS = 12,
    COND_O  = 13,
    COND_P  = 14,
    COND_S  = 15,
    LAST_VALID_COND = COND_S,

    // Artificial condition codes. These are used by AnalyzeBranch
    // to indicate a block terminated with two conditional branches to
    // the same location. This occurs in code using FCMP_OEQ or FCMP_UNE,
    // which can't be represented on x86 with a single condition. These
    // are never used in MachineInstrs.
    COND_NE_OR_P,
    COND_NP_OR_E,

    COND_INVALID
  };
  // Turn condition code into conditional branch opcode.
  unsigned GetCondBranchFromCond(CondCode CC);

  // Turn CMov opcode into condition code.
  CondCode getCondFromCMovOpc(unsigned Opc);
  /// GetOppositeBranchCondition - Return the inverse of the specified cond,
  /// e.g. turning COND_E to COND_NE.
  CondCode GetOppositeBranchCondition(X86::CondCode CC);
} // end namespace X86
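// Illustrative sketch (not part of the interface): a client that wants to
// branch on the inverse of an existing condition would typically combine the
// helpers above, e.g. for a hypothetical CMOV-like MachineInstr *MI:
//   X86::CondCode CC = X86::getCondFromCMovOpc(MI->getOpcode());
//   if (CC != X86::COND_INVALID) {
//     X86::CondCode Inverse = X86::GetOppositeBranchCondition(CC);
//     unsigned BranchOpc = X86::GetCondBranchFromCond(Inverse);
//     // ... build the conditional branch with BranchOpc ...
//   }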
/// isGlobalStubReference - Return true if the specified TargetFlag operand is
/// a reference to a stub for a global, not the global itself.
inline static bool isGlobalStubReference(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_DLLIMPORT:                      // dllimport stub.
  case X86II::MO_GOTPCREL:                       // rip-relative GOT reference.
  case X86II::MO_GOT:                            // normal GOT reference.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_NONLAZY:                 // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Hidden $non_lazy_ptr ref.
    return true;
  default:
    return false;
  }
}
/// isGlobalRelativeToPICBase - Return true if the specified global value
/// reference is relative to a 32-bit PIC base (X86ISD::GlobalBaseReg). If this
/// is true, the addressing mode has the PIC base register added in (e.g. EBX).
inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_GOTOFF:                         // isPICStyleGOT: local global.
  case X86II::MO_GOT:                            // isPICStyleGOT: other global.
  case X86II::MO_PIC_BASE_OFFSET:                // Darwin local global.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Darwin/32 external global.
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Darwin/32 hidden global.
  case X86II::MO_TLVP:                           // ??? Pretty sure..
    return true;
  default:
    return false;
  }
}
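// Illustrative sketch: both predicates above are normally fed the target
// flags of a global-address machine operand, e.g. for a hypothetical
// MachineOperand MO taken from an instruction being inspected:
//   if (MO.isGlobal() && isGlobalStubReference(MO.getTargetFlags()))
//     ; // MO refers to a stub (GOT entry, $non_lazy_ptr, ...), not the
//       // global itself, so an extra load is involved.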
inline static bool isScale(const MachineOperand &MO) {
  return MO.isImm() &&
    (MO.getImm() == 1 || MO.getImm() == 2 ||
     MO.getImm() == 4 || MO.getImm() == 8);
}
inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
  if (MI->getOperand(Op).isFI()) return true;
  return Op+X86::AddrSegmentReg <= MI->getNumOperands() &&
    MI->getOperand(Op+X86::AddrBaseReg).isReg() &&
    isScale(MI->getOperand(Op+X86::AddrScaleAmt)) &&
    MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
    (MI->getOperand(Op+X86::AddrDisp).isImm() ||
     MI->getOperand(Op+X86::AddrDisp).isGlobal() ||
     MI->getOperand(Op+X86::AddrDisp).isCPI() ||
     MI->getOperand(Op+X86::AddrDisp).isJTI());
}
inline static bool isMem(const MachineInstr *MI, unsigned Op) {
  if (MI->getOperand(Op).isFI()) return true;
  return Op+X86::AddrNumOperands <= MI->getNumOperands() &&
    MI->getOperand(Op+X86::AddrSegmentReg).isReg() &&
    isLeaMem(MI, Op);
}
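// For reference: a full x86 memory reference is modelled as five consecutive
// operands starting at index Op, addressed via the offsets used above (they
// are defined in X86BaseInfo.h):
//   Op + X86::AddrBaseReg    - base register
//   Op + X86::AddrScaleAmt   - scale immediate (1, 2, 4 or 8)
//   Op + X86::AddrIndexReg   - index register
//   Op + X86::AddrDisp       - displacement (imm, global, CPI or JTI)
//   Op + X86::AddrSegmentReg - segment register
// isLeaMem checks only the first four (LEA takes no segment); isMem also
// requires the segment operand, hence the X86::AddrNumOperands bound.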
class X86InstrInfo final : public X86GenInstrInfo {
  X86TargetMachine &TM;
  const X86RegisterInfo RI;

  /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
  /// RegOp2MemOpTable2, RegOp2MemOpTable3 - Load / store folding opcode maps.
  ///
  typedef DenseMap<unsigned,
                   std::pair<unsigned, unsigned> > RegOp2MemOpTableType;
  RegOp2MemOpTableType RegOp2MemOpTable2Addr;
  RegOp2MemOpTableType RegOp2MemOpTable0;
  RegOp2MemOpTableType RegOp2MemOpTable1;
  RegOp2MemOpTableType RegOp2MemOpTable2;
  RegOp2MemOpTableType RegOp2MemOpTable3;

  /// MemOp2RegOpTable - Load / store unfolding opcode map.
  ///
  typedef DenseMap<unsigned,
                   std::pair<unsigned, unsigned> > MemOp2RegOpTableType;
  MemOp2RegOpTableType MemOp2RegOpTable;

  static void AddTableEntry(RegOp2MemOpTableType &R2MTable,
                            MemOp2RegOpTableType &M2RTable,
                            unsigned RegOp, unsigned MemOp, unsigned Flags);

  virtual void anchor();
public:
  explicit X86InstrInfo(X86TargetMachine &tm);
  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  ///
  const X86RegisterInfo &getRegisterInfo() const { return RI; }
  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
  /// extension instruction. That is, it's like a copy where it's legal for the
  /// source to overlap the destination, e.g. X86::MOVSX64rr32. If this returns
  /// true, then it's expected the pre-extension value is available as a subreg
  /// of the result register. This also returns the sub-register index in
  /// SubIdx.
  bool isCoalescableExtInstr(const MachineInstr &MI,
                             unsigned &SrcReg, unsigned &DstReg,
                             unsigned &SubIdx) const override;
  unsigned isLoadFromStackSlot(const MachineInstr *MI,
                               int &FrameIndex) const override;
  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                     int &FrameIndex) const override;

  unsigned isStoreToStackSlot(const MachineInstr *MI,
                              int &FrameIndex) const override;
  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
                                    int &FrameIndex) const override;
  bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                         AliasAnalysis *AA) const override;
  void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                     unsigned DestReg, unsigned SubIdx,
                     const MachineInstr *Orig,
                     const TargetRegisterInfo &TRI) const override;
  /// Given an operand within a MachineInstr, insert preceding code to put it
  /// into the right format for a particular kind of LEA instruction. This may
  /// involve using an appropriate super-register instead (with an implicit use
  /// of the original) or creating a new virtual register and inserting COPY
  /// instructions to get the data into the right class.
  ///
  /// Reference parameters are set to indicate how the caller should add this
  /// operand to the LEA instruction.
  bool classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
                      unsigned LEAOpcode, bool AllowSP,
                      unsigned &NewSrc, bool &isKill,
                      bool &isUndef, MachineOperand &ImplicitOp) const;
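  // For example (sketch): when a 16-bit two-address instruction is being
  // rewritten as a 32-bit LEA, a GR16 source register is not directly usable,
  // so this either switches NewSrc to the containing 32-bit super-register
  // (reporting an implicit use of the original via ImplicitOp) or emits a COPY
  // into a fresh 32-bit virtual register and returns that in NewSrc.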
  /// convertToThreeAddress - This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
  /// may be able to convert a two-address instruction into a true
  /// three-address instruction on demand. This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if
  /// they would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the new instruction.
  ///
  MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
                                      MachineBasicBlock::iterator &MBBI,
                                      LiveVariables *LV) const override;
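  // Illustrative example of the rewrite this enables (sketch, not emitted
  // here): a two-address add such as
  //   %vreg2<def,tied1> = ADD32rr %vreg0<tied0>, %vreg1
  // can become the three-address form
  //   %vreg2<def> = LEA32r %vreg0, 1, %vreg1, 0, %noreg
  // so the register allocator need not insert a copy to satisfy the tie.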
  /// commuteInstruction - We have a few instructions that must be hacked on to
  /// commute them.
  ///
  MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const override;
  bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;
  bool isUnpredicatedTerminator(const MachineInstr *MI) const override;
  bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;
  unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
  unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB,
                        const SmallVectorImpl<MachineOperand> &Cond,
                        DebugLoc DL) const override;
  bool canInsertSelect(const MachineBasicBlock&,
                       const SmallVectorImpl<MachineOperand> &Cond,
                       unsigned, unsigned, int&, int&, int&) const override;
  void insertSelect(MachineBasicBlock &MBB,
                    MachineBasicBlock::iterator MI, DebugLoc DL,
                    unsigned DstReg,
                    const SmallVectorImpl<MachineOperand> &Cond,
                    unsigned TrueReg, unsigned FalseReg) const override;
  void copyPhysReg(MachineBasicBlock &MBB,
                   MachineBasicBlock::iterator MI, DebugLoc DL,
                   unsigned DestReg, unsigned SrcReg,
                   bool KillSrc) const override;
  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI,
                           unsigned SrcReg, bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
                      SmallVectorImpl<MachineOperand> &Addr,
                      const TargetRegisterClass *RC,
                      MachineInstr::mmo_iterator MMOBegin,
                      MachineInstr::mmo_iterator MMOEnd,
                      SmallVectorImpl<MachineInstr*> &NewMIs) const;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            unsigned DestReg, int FrameIndex,
                            const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                       SmallVectorImpl<MachineOperand> &Addr,
                       const TargetRegisterClass *RC,
                       MachineInstr::mmo_iterator MMOBegin,
                       MachineInstr::mmo_iterator MMOEnd,
                       SmallVectorImpl<MachineInstr*> &NewMIs) const;

  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
  /// foldMemoryOperand - If this target supports it, fold a load or store of
  /// the specified stack slot into the specified machine instruction for the
  /// specified operand(s). If this is possible, the target should perform the
  /// folding and return true, otherwise it should return false. If it folds
  /// the instruction, it is likely that the MachineInstr the iterator
  /// references has been changed.
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
                                      MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      int FrameIndex) const override;
  /// foldMemoryOperand - Same as the previous version except it allows folding
  /// of any load and store from / to any address, not just from a specific
  /// stack slot.
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
                                      MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) const override;
  /// canFoldMemoryOperand - Returns true for the specified load / store if
  /// folding is possible.
  bool canFoldMemoryOperand(const MachineInstr *,
                            const SmallVectorImpl<unsigned> &) const override;
  /// unfoldMemoryOperand - Separate a single instruction which folded a load
  /// or a store or a load and a store into two or more instructions. If this
  /// is possible, returns true as well as the new instructions by reference.
  bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                           unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                           SmallVectorImpl<MachineInstr*> &NewMIs) const override;

  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                           SmallVectorImpl<SDNode*> &NewNodes) const override;
  /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore,
                                      unsigned *LoadRegIndex = nullptr) const override;
  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
  /// to determine if two loads are loading from the same base address. It
  /// should only return true if the base pointers are the same and the
  /// only difference between the two addresses is the offset. It also returns
  /// the offsets by reference.
  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
                               int64_t &Offset2) const override;
  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
  /// should be scheduled together. On some targets, if two loads are loading
  /// from addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load
  /// offsets from the common base address. It returns true if it decides it's
  /// desirable to schedule the two loads together. "NumLoads" is the number of
  /// loads that have already been scheduled after Load1.
  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                               int64_t Offset1, int64_t Offset2,
                               unsigned NumLoads) const override;
  bool shouldScheduleAdjacent(MachineInstr *First,
                              MachineInstr *Second) const override;
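  // On x86 this is used to keep macro-fusable pairs adjacent, e.g. a CMP or
  // TEST immediately followed by the conditional branch that consumes its
  // EFLAGS result.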
  void getNoopForMachoTarget(MCInst &NopInst) const override;

  bool
  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
  /// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
  /// that would clobber the EFLAGS condition register. Note the result may be
  /// conservative. If it cannot definitely determine the safety after visiting
  /// a few instructions in each direction it assumes it's not safe.
  bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) const;
  static bool isX86_64ExtendedReg(const MachineOperand &MO) {
    if (!MO.isReg()) return false;
    return X86II::isX86_64ExtendedReg(MO.getReg());
  }
  /// getGlobalBaseReg - Return a virtual register initialized with the
  /// global base register value. Output instructions required to
  /// initialize the register in the function entry block, if necessary.
  ///
  unsigned getGlobalBaseReg(MachineFunction *MF) const;
  std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr *MI) const override;

  void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;
  unsigned
  getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
                               const TargetRegisterInfo *TRI) const override;
  unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
                                const TargetRegisterInfo *TRI) const override;
  void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
                                 const TargetRegisterInfo *TRI) const override;
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
                                      MachineInstr *MI,
                                      unsigned OpNum,
                                      const SmallVectorImpl<MachineOperand> &MOs,
                                      unsigned Size, unsigned Alignment) const;
  bool isHighLatencyDef(int opc) const override;

  bool hasHighOperandLatency(const InstrItineraryData *ItinData,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr *DefMI, unsigned DefIdx,
                             const MachineInstr *UseMI,
                             unsigned UseIdx) const override;
  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if having two register operands, and the value it
  /// compares against in CmpValue. Return true if the comparison instruction
  /// can be analyzed.
  bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                      unsigned &SrcReg2, int &CmpMask,
                      int &CmpValue) const override;
  /// optimizeCompareInstr - Check if there exists an earlier instruction that
  /// operates on the same source operands and sets flags in the same way as
  /// Compare; remove Compare if possible.
  bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
                            unsigned SrcReg2, int CmpMask, int CmpValue,
                            const MachineRegisterInfo *MRI) const override;
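  // Illustrative example (sketch): given
  //   %vreg1<def> = SUB32rr %vreg2, %vreg3   (implicitly defines EFLAGS)
  //   CMP32rr %vreg2, %vreg3                 (would set EFLAGS the same way)
  // followed by a conditional branch on those flags, the CMP is redundant and
  // can be removed so the branch uses the EFLAGS produced by the SUB.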
  /// optimizeLoadInstr - Try to remove the load by folding it to a register
  /// operand at the use. We fold the load instructions if and only if the
  /// def and use are in the same BB. We only look at one load and see
  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
  /// defined by the load we are trying to fold. DefMI returns the machine
  /// instruction that defines FoldAsLoadDefReg, and the function returns
  /// the machine instruction generated due to folding.
  MachineInstr *optimizeLoadInstr(MachineInstr *MI,
                                  const MachineRegisterInfo *MRI,
                                  unsigned &FoldAsLoadDefReg,
                                  MachineInstr *&DefMI) const override;
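  // Illustrative example (sketch): with both instructions in the same block,
  //   %vreg1<def> = MOV32rm <fi#0>, 1, %noreg, 0, %noreg   (FoldAsLoadDefReg)
  //   %vreg2<def> = ADD32rr %vreg0, %vreg1
  // the load can be folded into its single use, producing
  //   %vreg2<def> = ADD32rm %vreg0, <fi#0>, 1, %noreg, 0, %noreg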
private:
  MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc,
                                             MachineFunction::iterator &MFI,
                                             MachineBasicBlock::iterator &MBBI,
                                             LiveVariables *LV) const;
  /// isFrameOperand - Return true and the FrameIndex if the specified
  /// operand and following operands form a reference to the stack frame.
  bool isFrameOperand(const MachineInstr *MI, unsigned int Op,
                      int &FrameIndex) const;
};

} // End llvm namespace

#endif