X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86InstrInfo.h;h=f5b0cc9152727d4d3d83660b64051b1cb459e7bb;hb=3328adda6b9386c4442b5ec71eeaaf41e8df58b5;hp=88df88295162dc8efdbd31d814d74ec267eef239;hpb=d10fd9791c20fd8368fa0ce94b626b769c6c8ba0;p=oota-llvm.git

diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index 88df8829516..f5b0cc91527 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -15,9 +15,10 @@
 #define X86INSTRUCTIONINFO_H
 
 #include "llvm/Target/TargetInstrInfo.h"
+#include "X86.h"
 #include "X86RegisterInfo.h"
-#include "llvm/ADT/IndexedMap.h"
-#include "llvm/Target/MRegisterInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Target/TargetRegisterInfo.h"
 
 namespace llvm {
   class X86RegisterInfo;
@@ -43,9 +44,18 @@ namespace X86 {
     COND_O  = 13,
     COND_P  = 14,
     COND_S  = 15,
+
+    // Artificial condition codes. These are used by AnalyzeBranch
+    // to indicate a block terminated with two conditional branches to
+    // the same location. This occurs in code using FCMP_OEQ or FCMP_UNE,
+    // which can't be represented on x86 with a single condition. These
+    // are never used in MachineInstrs.
+    COND_NE_OR_P,
+    COND_NP_OR_E,
+
     COND_INVALID
   };
-
+
   // Turn condition code into conditional branch opcode.
   unsigned GetCondBranchFromCond(CondCode CC);
 
@@ -216,40 +226,79 @@
     // SpecialFP - Special instruction forms. Dispatch by opcode explicitly.
     SpecialFP  = 7 << FPTypeShift,
 
-    // Bits 19 -> 23 are unused
+    // Lock prefix
+    LOCKShift = 19,
+    LOCK = 1 << LOCKShift,
+
+    // Segment override prefixes. Currently we just need the ability to
+    // address stuff in the gs and fs segments.
+    SegOvrShift = 20,
+    SegOvrMask = 3 << SegOvrShift,
+    FS = 1 << SegOvrShift,
+    GS = 2 << SegOvrShift,
+
+    // Bits 22 -> 23 are unused
     OpcodeShift = 24,
     OpcodeMask = 0xFF << OpcodeShift
   };
 }
 
-class X86InstrInfo : public TargetInstrInfo {
+inline static bool isScale(const MachineOperand &MO) {
+  return MO.isImm() &&
+    (MO.getImm() == 1 || MO.getImm() == 2 ||
+     MO.getImm() == 4 || MO.getImm() == 8);
+}
+
+inline static bool isMem(const MachineInstr *MI, unsigned Op) {
+  if (MI->getOperand(Op).isFI()) return true;
+  return Op+4 <= MI->getNumOperands() &&
+    MI->getOperand(Op  ).isReg() && isScale(MI->getOperand(Op+1)) &&
+    MI->getOperand(Op+2).isReg() &&
+    (MI->getOperand(Op+3).isImm() ||
+     MI->getOperand(Op+3).isGlobal() ||
+     MI->getOperand(Op+3).isCPI() ||
+     MI->getOperand(Op+3).isJTI());
+}
+
+class X86InstrInfo : public TargetInstrInfoImpl {
   X86TargetMachine &TM;
   const X86RegisterInfo RI;
-  mutable IndexedMap<MachineInstr*> MachineInstrMap;
-
-  /// isDefinedInEntryBlock - Goes through the entry block to see if the given
-  /// virtual register is indeed defined in the entry block.
-  ///
-  bool isDefinedInEntryBlock(const MachineBasicBlock &Entry,
-                             unsigned VReg) const;
+
+  /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
+  /// RegOp2MemOpTable2 - Load / store folding opcode maps.
+  ///
+  DenseMap<unsigned*, unsigned> RegOp2MemOpTable2Addr;
+  DenseMap<unsigned*, unsigned> RegOp2MemOpTable0;
+  DenseMap<unsigned*, unsigned> RegOp2MemOpTable1;
+  DenseMap<unsigned*, unsigned> RegOp2MemOpTable2;
+
+  /// MemOp2RegOpTable - Load / store unfolding opcode map.
+  ///
+  DenseMap<unsigned*, std::pair<unsigned,unsigned> > MemOp2RegOpTable;
+
 public:
-  X86InstrInfo(X86TargetMachine &tm);
+  explicit X86InstrInfo(X86TargetMachine &tm);
 
   /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
   /// such, whenever a client has an instance of instruction info, it should
   /// always be able to get register info as well (through this method).
   ///
-  virtual const MRegisterInfo &getRegisterInfo() const { return RI; }
+  virtual const X86RegisterInfo &getRegisterInfo() const { return RI; }
 
-  // Return true if the instruction is a register to register move and
-  // leave the source and dest operands in the passed parameters.
-  //
-  bool isMoveInstr(const MachineInstr& MI, unsigned& sourceReg,
-                   unsigned& destReg) const;
-  unsigned isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const;
-  unsigned isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const;
-  bool isReallyTriviallyReMaterializable(MachineInstr *MI) const;
-  bool isReallySideEffectFree(MachineInstr *MI) const;
+  /// Return true if the instruction is a register to register move and return
+  /// the source and dest operands and their sub-register indices by reference.
+  virtual bool isMoveInstr(const MachineInstr &MI,
+                           unsigned &SrcReg, unsigned &DstReg,
+                           unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
+
+  unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+  unsigned isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+
+  bool isReallyTriviallyReMaterializable(const MachineInstr *MI) const;
+  void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+                     unsigned DestReg, const MachineInstr *Orig) const;
+
+  bool isInvariantLoad(const MachineInstr *MI) const;
 
   /// convertToThreeAddress - This method must be implemented by targets that
   /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
@@ -263,41 +312,139 @@ public:
   ///
   virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
                                               MachineBasicBlock::iterator &MBBI,
-                                              LiveVariables &LV) const;
+                                              LiveVariables *LV) const;
 
   /// commuteInstruction - We have a few instructions that must be hacked on to
   /// commute them.
   ///
-  virtual MachineInstr *commuteInstruction(MachineInstr *MI) const;
+  virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const;
 
   // Branch analysis.
   virtual bool isUnpredicatedTerminator(const MachineInstr* MI) const;
   virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                              MachineBasicBlock *&FBB,
-                             std::vector<MachineOperand> &Cond) const;
+                             SmallVectorImpl<MachineOperand> &Cond,
+                             bool AllowModify) const;
   virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
   virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                 MachineBasicBlock *FBB,
-                                const std::vector<MachineOperand> &Cond) const;
-  virtual void copyRegToReg(MachineBasicBlock &MBB,
+                            const SmallVectorImpl<MachineOperand> &Cond) const;
+  virtual bool copyRegToReg(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI,
                             unsigned DestReg, unsigned SrcReg,
                             const TargetRegisterClass *DestRC,
                             const TargetRegisterClass *SrcRC) const;
-  virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
-  virtual bool ReverseBranchCondition(std::vector<MachineOperand> &Cond) const;
+  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MI,
+                                   unsigned SrcReg, bool isKill, int FrameIndex,
+                                   const TargetRegisterClass *RC) const;
+
+  virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
+                              SmallVectorImpl<MachineOperand> &Addr,
+                              const TargetRegisterClass *RC,
+                              SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+                                    MachineBasicBlock::iterator MI,
+                                    unsigned DestReg, int FrameIndex,
+                                    const TargetRegisterClass *RC) const;
+
+  virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
+                               SmallVectorImpl<MachineOperand> &Addr,
+                               const TargetRegisterClass *RC,
+                               SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+  virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                         MachineBasicBlock::iterator MI,
+                                const std::vector<CalleeSavedInfo> &CSI) const;
 
-  const TargetRegisterClass *getPointerRegClass() const;
+  virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                           MachineBasicBlock::iterator MI,
+                                const std::vector<CalleeSavedInfo> &CSI) const;
+
+  /// foldMemoryOperand - If this target supports it, fold a load or store of
+  /// the specified stack slot into the specified machine instruction for the
+  /// specified operand(s). If this is possible, the target should perform the
+  /// folding and return the new instruction, otherwise it should return NULL.
+  /// If it folds the instruction, it is likely that the MachineInstr the
+  /// iterator references has been changed.
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                          const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
+
+  /// foldMemoryOperand - Same as the previous version except it allows folding
+  /// of any load and store from / to any address, not just from a specific
+  /// stack slot.
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                          const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const;
+
+  /// canFoldMemoryOperand - Returns true if the specified load / store
+  /// folding is possible.
+  virtual bool canFoldMemoryOperand(const MachineInstr*,
+                                    const SmallVectorImpl<unsigned> &) const;
+
+  /// unfoldMemoryOperand - Separate a single instruction which folded a load
+  /// or a store or a load and a store into two or more instructions. If this
+  /// is possible, returns true as well as the new instructions by reference.
+  virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+                              unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
+                              SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+  virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+                              SmallVectorImpl<SDNode*> &NewNodes) const;
+
+  /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
+  /// instruction after load / store are unfolded from an instruction of the
+  /// specified opcode. It returns zero if the specified unfolding is not
+  /// possible.
+  virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
+                                      bool UnfoldLoad, bool UnfoldStore) const;
+
+  virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
+  virtual
+  bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
+
+  /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
+  /// instruction that defines the specified register class.
+  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
 
   // getBaseOpcodeFor - This function returns the "base" X86 opcode for the
   // specified machine instruction.
   //
-  unsigned char getBaseOpcodeFor(const TargetInstrDescriptor *TID) const {
+  unsigned char getBaseOpcodeFor(const TargetInstrDesc *TID) const {
     return TID->TSFlags >> X86II::OpcodeShift;
   }
-  unsigned char getBaseOpcodeFor(MachineOpCode Opcode) const {
+  unsigned char getBaseOpcodeFor(unsigned Opcode) const {
     return getBaseOpcodeFor(&get(Opcode));
   }
+
+  static bool isX86_64NonExtLowByteReg(unsigned reg) {
+    return (reg == X86::SPL || reg == X86::BPL ||
+            reg == X86::SIL || reg == X86::DIL);
+  }
+
+  static unsigned sizeOfImm(const TargetInstrDesc *Desc);
+  static bool isX86_64ExtendedReg(const MachineOperand &MO);
+  static unsigned determineREX(const MachineInstr &MI);
+
+  /// GetInstSize - Returns the size of the specified MachineInstr.
+  ///
+  virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+
+  /// getGlobalBaseReg - Return a virtual register initialized with the
+  /// global base register value. Output instructions required to
+  /// initialize the register in the function entry block, if necessary.
+  ///
+  unsigned getGlobalBaseReg(MachineFunction *MF) const;
+
+private:
+  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                      MachineInstr* MI,
+                                      unsigned OpNum,
+                           const SmallVectorImpl<MachineOperand> &MOs) const;
 };
 
 } // End llvm namespace
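
A note on the TSFlags layout introduced above: the X86II enum packs the lock prefix bit (bit 19), the two-bit segment-override field (bits 20-21), and the base opcode byte (bits 24-31) into a single per-instruction flags word, and getBaseOpcodeFor() recovers the opcode by shifting the top byte out. The following minimal sketch mirrors that packing outside of LLVM; the constants copy the enum values from this header, while baseOpcode() and the sample TSFlags word in main() are hypothetical illustrations, not LLVM code.

#include <cstdint>
#include <cstdio>

// These constants mirror the X86II bit-field layout in the header above.
constexpr uint32_t LOCKShift   = 19;
constexpr uint32_t LOCK        = 1u << LOCKShift;
constexpr uint32_t SegOvrShift = 20;
constexpr uint32_t SegOvrMask  = 3u << SegOvrShift;
constexpr uint32_t FS          = 1u << SegOvrShift;
constexpr uint32_t GS          = 2u << SegOvrShift;
constexpr uint32_t OpcodeShift = 24;

// Mirrors getBaseOpcodeFor(): the raw opcode byte lives in bits 24-31.
static unsigned char baseOpcode(uint32_t TSFlags) {
  return static_cast<unsigned char>(TSFlags >> OpcodeShift);
}

int main() {
  // Hypothetical TSFlags word for a locked instruction with a gs override
  // whose base opcode byte is 0x87.
  uint32_t TSFlags = (0x87u << OpcodeShift) | LOCK | GS;

  std::printf("base opcode: 0x%02X\n", (unsigned)baseOpcode(TSFlags));
  std::printf("LOCK prefix: %s\n", (TSFlags & LOCK) ? "yes" : "no");
  const uint32_t Seg = TSFlags & SegOvrMask;
  std::printf("segment:     %s\n",
              Seg == FS ? "fs" : Seg == GS ? "gs" : "none");
  return 0;
}

Keeping the opcode in the top byte is what lets getBaseOpcodeFor() get away with a bare right shift and no mask.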
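
Similarly, the isScale()/isMem() helpers above validate the four-operand address tuple the x86 backend uses for memory references: a base register, a scale immediate (1, 2, 4 or 8), an index register, and a displacement that may be an immediate, global, constant-pool index, or jump-table index (a bare frame index also counts as a memory operand). A standalone sketch of the same check, assuming a hypothetical Operand type in place of MachineOperand:

#include <vector>

// Hypothetical stand-ins for MachineOperand; not LLVM types.
enum OpKind { Reg, Imm, Global, CPI, JTI, FrameIdx };
struct Operand { OpKind Kind; long Val; };

// Mirrors isScale() above: a scale must be an immediate 1, 2, 4 or 8.
static bool isScale(const Operand &MO) {
  return MO.Kind == Imm &&
         (MO.Val == 1 || MO.Val == 2 || MO.Val == 4 || MO.Val == 8);
}

// Mirrors isMem() above: either a bare frame index, or the four-operand
// base-reg / scale / index-reg / displacement tuple.
static bool isMem(const std::vector<Operand> &Ops, unsigned Op) {
  if (Ops[Op].Kind == FrameIdx) return true;
  if (Op + 4 > Ops.size()) return false;
  const Operand &Disp = Ops[Op + 3];
  return Ops[Op].Kind == Reg && isScale(Ops[Op + 1]) &&
         Ops[Op + 2].Kind == Reg &&
         (Disp.Kind == Imm || Disp.Kind == Global ||
          Disp.Kind == CPI || Disp.Kind == JTI);
}

int main() {
  // [base + 4*index + 16]: base reg, scale 4, index reg, displacement 16.
  std::vector<Operand> Addr = {{Reg, 1}, {Imm, 4}, {Reg, 2}, {Imm, 16}};
  return isMem(Addr, 0) ? 0 : 1;  // exits 0: a well-formed address tuple
}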
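
Finally, the RegOp2MemOpTable*/MemOp2RegOpTable members above realize folding as a pair of opcode maps: folding looks up the memory-form opcode for a register-form opcode, and unfolding inverts that lookup. A toy sketch of the scheme, using std::unordered_map in place of LLVM's DenseMap; the opcode names, table contents, and the folded-operand index are all hypothetical:

#include <cstdio>
#include <unordered_map>
#include <utility>

// Hypothetical opcode numbers standing in for real X86 instruction opcodes.
enum { ADD32rr = 1, ADD32rm = 2 };

int main() {
  // Register form -> memory form, as in the RegOp2MemOpTable* maps:
  // consulted when folding a load into an instruction's register operand.
  std::unordered_map<unsigned, unsigned> RegOp2MemOp = {{ADD32rr, ADD32rm}};

  // Memory form -> (register form, operand index), as in MemOp2RegOpTable:
  // consulted when unfolding the load back out. Built here as the inverse
  // map; operand index 2 (the folded operand) is a made-up value.
  std::unordered_map<unsigned, std::pair<unsigned, unsigned>> MemOp2RegOp;
  for (const auto &E : RegOp2MemOp)
    MemOp2RegOp[E.second] = std::make_pair(E.first, 2u);

  std::printf("fold:   %u -> %u\n", (unsigned)ADD32rr, RegOp2MemOp[ADD32rr]);
  std::printf("unfold: %u -> %u (operand %u)\n", (unsigned)ADD32rm,
              MemOp2RegOp[ADD32rm].first, MemOp2RegOp[ADD32rm].second);
  return 0;
}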