return hasReservedCallFrame(MF) || hasFP(MF);
}
- // needsFrameIndexResolution - Do we need to perform FI resolution for
- // this function? Normally, this is required only when the function
- // has any stack objects. However, targets may want to override this.
- virtual bool needsFrameIndexResolution(const MachineFunction &MF) const;
-
/// getFrameIndexOffset - Returns the displacement from the frame register to
/// the stack frame of the specified index.
virtual int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
///
void PEI::replaceFrameIndices(MachineFunction &Fn) {
- const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
- if (!TFI.needsFrameIndexResolution(Fn)) return;
+ if (!Fn.getFrameInfo()->hasStackObjects()) return; // Nothing to do?
// Store SPAdj at exit of a basic block.
SmallVector<int, 8> SPState;
continue;
}
+ // If we are looking at a call sequence, we need to keep track of
+ // the SP adjustment made by each instruction in the sequence.
+ // This includes both the frame setup/destroy pseudos (handled above),
+ // as well as other instructions that have side effects w.r.t the SP.
+ if (InsideCallSequence)
+ SPAdj += TII.getSPAdjust(I);
+
MachineInstr *MI = I;
bool DoIncr = true;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
break;
}
- // If we are looking at a call sequence, we need to keep track of
- // the SP adjustment made by each instruction in the sequence.
- // This includes both the frame setup/destroy pseudos (handled above),
- // as well as other instructions that have side effects w.r.t the SP.
- // Note that this must come after eliminateFrameIndex, because
- // if I itself referred to a frame index, we shouldn't count its own
- // adjustment.
- if (MI && InsideCallSequence)
- SPAdj += TII.getSPAdjust(MI);
-
if (DoIncr && I != BB->end()) ++I;
// Update register states.
FrameReg = RI->getFrameRegister(MF);
return getFrameIndexOffset(MF, FI);
}
-
-bool TargetFrameLowering::needsFrameIndexResolution(
- const MachineFunction &MF) const {
- return MF.getFrameInfo()->hasStackObjects();
-}
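The SP-tracking change restored above is easiest to see in isolation: once a call-frame-setup pseudo is entered, every instruction's stack-pointer effect must be accumulated until the matching destroy, so frame-index offsets stay correct inside the sequence. A minimal sketch of that bookkeeping, with a hypothetical Insn record standing in for MachineInstr and a field standing in for TII.getSPAdjust(I):

#include <vector>

// Simplified model of the loop in PEI::replaceFrameIndices: SPAdj is the
// number of bytes the stack pointer has moved inside a call sequence, and
// it should be back to zero once every sequence has been closed.
struct Insn {
  bool FrameSetup = false;   // stands in for the frame-setup pseudo
  bool FrameDestroy = false; // stands in for the frame-destroy pseudo
  int SPAdjust = 0;          // stands in for TII.getSPAdjust(I)
};

static int finalSPAdj(const std::vector<Insn> &Block) {
  int SPAdj = 0;
  bool InsideCallSequence = false;
  for (const Insn &I : Block) {
    if (I.FrameSetup)
      InsideCallSequence = true;
    if (InsideCallSequence)
      SPAdj += I.SPAdjust; // pushes, pops, and the pseudos themselves
    if (I.FrameDestroy)
      InsideCallSequence = false;
  }
  return SPAdj; // nonzero here would mean an unbalanced call frame
}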
-set(LLVM_TARGET_DEFINITIONS X86.td)\r
-\r
-tablegen(LLVM X86GenRegisterInfo.inc -gen-register-info)\r
-tablegen(LLVM X86GenDisassemblerTables.inc -gen-disassembler)\r
-tablegen(LLVM X86GenInstrInfo.inc -gen-instr-info)\r
-tablegen(LLVM X86GenAsmWriter.inc -gen-asm-writer)\r
-tablegen(LLVM X86GenAsmWriter1.inc -gen-asm-writer -asmwriternum=1)\r
-tablegen(LLVM X86GenAsmMatcher.inc -gen-asm-matcher)\r
-tablegen(LLVM X86GenDAGISel.inc -gen-dag-isel)\r
-tablegen(LLVM X86GenFastISel.inc -gen-fast-isel)\r
-tablegen(LLVM X86GenCallingConv.inc -gen-callingconv)\r
-tablegen(LLVM X86GenSubtargetInfo.inc -gen-subtarget)\r
-add_public_tablegen_target(X86CommonTableGen)\r
-\r
-set(sources\r
- X86AsmPrinter.cpp\r
- X86CallFrameOptimization.cpp\r
- X86FastISel.cpp\r
- X86FloatingPoint.cpp\r
- X86FrameLowering.cpp\r
- X86ISelDAGToDAG.cpp\r
- X86ISelLowering.cpp\r
- X86InstrInfo.cpp\r
- X86MCInstLower.cpp\r
- X86MachineFunctionInfo.cpp\r
- X86PadShortFunction.cpp\r
- X86RegisterInfo.cpp\r
- X86SelectionDAGInfo.cpp\r
- X86Subtarget.cpp\r
- X86TargetMachine.cpp\r
- X86TargetObjectFile.cpp\r
- X86TargetTransformInfo.cpp\r
- X86VZeroUpper.cpp\r
- X86FixupLEAs.cpp\r
- )\r
-\r
-if( CMAKE_CL_64 )\r
- enable_language(ASM_MASM)\r
- ADD_CUSTOM_COMMAND(\r
- OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj\r
- MAIN_DEPENDENCY X86CompilationCallback_Win64.asm\r
- COMMAND ${CMAKE_ASM_MASM_COMPILER} /Fo ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj /c ${CMAKE_CURRENT_SOURCE_DIR}/X86CompilationCallback_Win64.asm\r
- )\r
- set(sources ${sources} ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj)\r
-endif()\r
-\r
-add_llvm_target(X86CodeGen ${sources})\r
-\r
-add_subdirectory(AsmParser)\r
-add_subdirectory(Disassembler)\r
-add_subdirectory(InstPrinter)\r
-add_subdirectory(MCTargetDesc)\r
-add_subdirectory(TargetInfo)\r
-add_subdirectory(Utils)\r
+set(LLVM_TARGET_DEFINITIONS X86.td)
+
+tablegen(LLVM X86GenRegisterInfo.inc -gen-register-info)
+tablegen(LLVM X86GenDisassemblerTables.inc -gen-disassembler)
+tablegen(LLVM X86GenInstrInfo.inc -gen-instr-info)
+tablegen(LLVM X86GenAsmWriter.inc -gen-asm-writer)
+tablegen(LLVM X86GenAsmWriter1.inc -gen-asm-writer -asmwriternum=1)
+tablegen(LLVM X86GenAsmMatcher.inc -gen-asm-matcher)
+tablegen(LLVM X86GenDAGISel.inc -gen-dag-isel)
+tablegen(LLVM X86GenFastISel.inc -gen-fast-isel)
+tablegen(LLVM X86GenCallingConv.inc -gen-callingconv)
+tablegen(LLVM X86GenSubtargetInfo.inc -gen-subtarget)
+add_public_tablegen_target(X86CommonTableGen)
+
+set(sources
+ X86AsmPrinter.cpp
+ X86FastISel.cpp
+ X86FloatingPoint.cpp
+ X86FrameLowering.cpp
+ X86ISelDAGToDAG.cpp
+ X86ISelLowering.cpp
+ X86InstrInfo.cpp
+ X86MCInstLower.cpp
+ X86MachineFunctionInfo.cpp
+ X86PadShortFunction.cpp
+ X86RegisterInfo.cpp
+ X86SelectionDAGInfo.cpp
+ X86Subtarget.cpp
+ X86TargetMachine.cpp
+ X86TargetObjectFile.cpp
+ X86TargetTransformInfo.cpp
+ X86VZeroUpper.cpp
+ X86FixupLEAs.cpp
+ )
+
+if( CMAKE_CL_64 )
+ enable_language(ASM_MASM)
+ ADD_CUSTOM_COMMAND(
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj
+ MAIN_DEPENDENCY X86CompilationCallback_Win64.asm
+ COMMAND ${CMAKE_ASM_MASM_COMPILER} /Fo ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj /c ${CMAKE_CURRENT_SOURCE_DIR}/X86CompilationCallback_Win64.asm
+ )
+ set(sources ${sources} ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj)
+endif()
+
+add_llvm_target(X86CodeGen ${sources})
+
+add_subdirectory(AsmParser)
+add_subdirectory(Disassembler)
+add_subdirectory(InstPrinter)
+add_subdirectory(MCTargetDesc)
+add_subdirectory(TargetInfo)
+add_subdirectory(Utils)
/// to eliminate execution delays in some Atom processors.
FunctionPass *createX86FixupLEAs();
-/// createX86CallFrameOptimization - Return a pass that optimizes
-/// the code-size of x86 call sequences. This is done by replacing
-/// esp-relative movs with pushes.
-FunctionPass *createX86CallFrameOptimization();
-
} // End llvm namespace
#endif
+++ /dev/null
-//===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences -----===//\r
-//\r
-// The LLVM Compiler Infrastructure\r
-//\r
-// This file is distributed under the University of Illinois Open Source\r
-// License. See LICENSE.TXT for details.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-//\r
-// This file defines a pass that optimizes call sequences on x86.\r
-// Currently, it converts movs of function parameters onto the stack into \r
-// pushes. This is beneficial for two main reasons:\r
-// 1) The push instruction encoding is much smaller than an esp-relative mov\r
-// 2) It is possible to push memory arguments directly. So, if the\r
-// transformation is performed pre-reg-alloc, it can help relieve\r
-// register pressure.\r
-//\r
-//===----------------------------------------------------------------------===//\r
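As a hedged illustration of the rewrite this (now deleted) pass performed, consider a hypothetical 32-bit caller; the assembly below is approximate and depends on target options:

// On 32-bit x86, all three arguments are passed on the stack.
extern "C" void callee(int a, int b, int c);
void caller() { callee(1, 2, 3); }

// With a reserved call frame, codegen stores the arguments with
// esp-relative movs, roughly:
//   subl  $12, %esp
//   movl  $1, (%esp)
//   movl  $2, 4(%esp)
//   movl  $3, 8(%esp)
//   calll callee
// The pass rewrote those movs into pushes (emitted in reverse order),
// each with a much shorter encoding:
//   pushl $3
//   pushl $2
//   pushl $1
//   calll callee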
-\r
-#include <algorithm>\r
-\r
-#include "X86.h"\r
-#include "X86InstrInfo.h"\r
-#include "X86Subtarget.h"\r
-#include "X86MachineFunctionInfo.h"\r
-#include "llvm/ADT/Statistic.h"\r
-#include "llvm/CodeGen/MachineFunctionPass.h"\r
-#include "llvm/CodeGen/MachineInstrBuilder.h"\r
-#include "llvm/CodeGen/MachineRegisterInfo.h"\r
-#include "llvm/CodeGen/Passes.h"\r
-#include "llvm/IR/Function.h"\r
-#include "llvm/Support/Debug.h"\r
-#include "llvm/Support/raw_ostream.h"\r
-#include "llvm/Target/TargetInstrInfo.h"\r
-\r
-using namespace llvm;\r
-\r
-#define DEBUG_TYPE "x86-cf-opt"\r
-\r
-cl::opt<bool> NoX86CFOpt("no-x86-call-frame-opt",\r
- cl::desc("Avoid optimizing x86 call frames for size"),\r
- cl::init(false), cl::Hidden);\r
-\r
-namespace {\r
-class X86CallFrameOptimization : public MachineFunctionPass {\r
-public:\r
- X86CallFrameOptimization() : MachineFunctionPass(ID) {}\r
-\r
- bool runOnMachineFunction(MachineFunction &MF) override;\r
-\r
-private:\r
- bool shouldPerformTransformation(MachineFunction &MF);\r
-\r
- bool adjustCallSequence(MachineFunction &MF, MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator I);\r
-\r
- MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,\r
- unsigned Reg);\r
-\r
- const char *getPassName() const override {\r
- return "X86 Optimize Call Frame";\r
- }\r
-\r
- const TargetInstrInfo *TII;\r
- const TargetFrameLowering *TFL;\r
- const MachineRegisterInfo *MRI;\r
- static char ID;\r
-};\r
-\r
-char X86CallFrameOptimization::ID = 0;\r
-}\r
-\r
-FunctionPass *llvm::createX86CallFrameOptimization() {\r
- return new X86CallFrameOptimization();\r
-}\r
-\r
-// This checks whether the transformation is legal and profitable\r
-bool X86CallFrameOptimization::shouldPerformTransformation(MachineFunction &MF) {\r
- if (NoX86CFOpt.getValue())\r
- return false;\r
-\r
-  // We currently only support call sequences where *all* parameters\r
- // are passed on the stack.\r
- // No point in running this in 64-bit mode, since some arguments are\r
- // passed in-register in all common calling conventions, so the pattern\r
- // we're looking for will never match.\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
- if (STI.is64Bit())\r
- return false;\r
-\r
- // You would expect straight-line code between call-frame setup and\r
- // call-frame destroy. You would be wrong. There are circumstances (e.g.\r
- // CMOV_GR8 expansion of a select that feeds a function call!) where we can\r
- // end up with the setup and the destroy in different basic blocks.\r
- // This is bad, and breaks SP adjustment.\r
- // So, check that all of the frames in the function are closed inside\r
- // the same block, and, for good measure, that there are no nested frames.\r
- int FrameSetupOpcode = TII->getCallFrameSetupOpcode();\r
- int FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();\r
- for (MachineBasicBlock &BB : MF) {\r
- bool InsideFrameSequence = false;\r
- for (MachineInstr &MI : BB) {\r
- if (MI.getOpcode() == FrameSetupOpcode) {\r
- if (InsideFrameSequence)\r
- return false;\r
- InsideFrameSequence = true;\r
- }\r
- else if (MI.getOpcode() == FrameDestroyOpcode) {\r
- if (!InsideFrameSequence)\r
- return false;\r
- InsideFrameSequence = false;\r
- }\r
- }\r
-\r
- if (InsideFrameSequence)\r
- return false;\r
- }\r
-\r
- // Now that we know the transformation is legal, check if it is\r
- // profitable.\r
- // TODO: Add a heuristic that actually looks at the function,\r
- // and enable this for more cases.\r
-\r
- // This transformation is always a win when we expected to have\r
- // a reserved call frame. Under other circumstances, it may be either \r
- // a win or a loss, and requires a heuristic.\r
- // For now, enable it only for the relatively clear win cases.\r
- bool CannotReserveFrame = MF.getFrameInfo()->hasVarSizedObjects();\r
- if (CannotReserveFrame)\r
- return true;\r
-\r
- // For now, don't even try to evaluate the profitability when\r
- // not optimizing for size.\r
- AttributeSet FnAttrs = MF.getFunction()->getAttributes();\r
- bool OptForSize =\r
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex,\r
- Attribute::OptimizeForSize) ||\r
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);\r
-\r
- if (!OptForSize)\r
- return false;\r
-\r
- // Stack re-alignment can make this unprofitable even in terms of size.\r
- // As mentioned above, a better heuristic is needed. For now, don't do this\r
- // when the required alignment is above 8. (4 would be the safe choice, but\r
- // some experimentation showed 8 is generally good).\r
- if (TFL->getStackAlignment() > 8)\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {\r
- TII = MF.getSubtarget().getInstrInfo();\r
- TFL = MF.getSubtarget().getFrameLowering();\r
- MRI = &MF.getRegInfo();\r
-\r
- if (!shouldPerformTransformation(MF))\r
- return false;\r
-\r
- int FrameSetupOpcode = TII->getCallFrameSetupOpcode();\r
-\r
- bool Changed = false;\r
-\r
- for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)\r
- for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)\r
- if (I->getOpcode() == FrameSetupOpcode)\r
- Changed |= adjustCallSequence(MF, *BB, I);\r
-\r
- return Changed;\r
-}\r
-\r
-bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,\r
- MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator I) {\r
-\r
- // Check that this particular call sequence is amenable to the\r
- // transformation.\r
- const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(\r
- MF.getSubtarget().getRegisterInfo());\r
- unsigned StackPtr = RegInfo.getStackRegister();\r
- int FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();\r
-\r
- // We expect to enter this at the beginning of a call sequence\r
- assert(I->getOpcode() == TII->getCallFrameSetupOpcode());\r
- MachineBasicBlock::iterator FrameSetup = I++;\r
-\r
- \r
- // For globals in PIC mode, we can have some LEAs here.\r
- // Ignore them, they don't bother us.\r
- // TODO: Extend this to something that covers more cases.\r
- while (I->getOpcode() == X86::LEA32r)\r
- ++I;\r
- \r
- // We expect a copy instruction here.\r
- // TODO: The copy instruction is a lowering artifact.\r
- // We should also support a copy-less version, where the stack\r
- // pointer is used directly.\r
- if (!I->isCopy() || !I->getOperand(0).isReg())\r
- return false;\r
- MachineBasicBlock::iterator SPCopy = I++;\r
- StackPtr = SPCopy->getOperand(0).getReg();\r
-\r
- // Scan the call setup sequence for the pattern we're looking for.\r
- // We only handle a simple case - a sequence of MOV32mi or MOV32mr\r
- // instructions, that push a sequence of 32-bit values onto the stack, with\r
- // no gaps between them.\r
- SmallVector<MachineInstr*, 4> MovVector(4, nullptr);\r
- unsigned int MaxAdjust = FrameSetup->getOperand(0).getImm() / 4;\r
- if (MaxAdjust > 4)\r
- MovVector.resize(MaxAdjust, nullptr);\r
-\r
- do {\r
- int Opcode = I->getOpcode();\r
- if (Opcode != X86::MOV32mi && Opcode != X86::MOV32mr)\r
- break;\r
-\r
- // We only want movs of the form:\r
- // movl imm/r32, k(%esp)\r
- // If we run into something else, bail.\r
- // Note that AddrBaseReg may, counter to its name, not be a register,\r
- // but rather a frame index.\r
- // TODO: Support the fi case. This should probably work now that we\r
- // have the infrastructure to track the stack pointer within a call\r
- // sequence.\r
- if (!I->getOperand(X86::AddrBaseReg).isReg() ||\r
- (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||\r
- !I->getOperand(X86::AddrScaleAmt).isImm() ||\r
- (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||\r
- (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||\r
- (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||\r
- !I->getOperand(X86::AddrDisp).isImm())\r
- return false;\r
-\r
- int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();\r
- assert(StackDisp >= 0 && "Negative stack displacement when passing parameters");\r
-\r
- // We really don't want to consider the unaligned case.\r
- if (StackDisp % 4)\r
- return false;\r
- StackDisp /= 4;\r
-\r
- assert((size_t)StackDisp < MovVector.size() &&\r
- "Function call has more parameters than the stack is adjusted for.");\r
-\r
- // If the same stack slot is being filled twice, something's fishy.\r
- if (MovVector[StackDisp] != nullptr)\r
- return false;\r
- MovVector[StackDisp] = I;\r
-\r
- ++I;\r
- } while (I != MBB.end());\r
-\r
- // We now expect the end of the sequence - a call and a stack adjust.\r
- if (I == MBB.end())\r
- return false;\r
-\r
- // For PCrel calls, we expect an additional COPY of the basereg.\r
- // If we find one, skip it.\r
- if (I->isCopy()) {\r
- if (I->getOperand(1).getReg() ==\r
- MF.getInfo<X86MachineFunctionInfo>()->getGlobalBaseReg())\r
- ++I;\r
- else\r
- return false;\r
- }\r
-\r
- if (!I->isCall())\r
- return false;\r
- MachineBasicBlock::iterator Call = I;\r
- if ((++I)->getOpcode() != FrameDestroyOpcode)\r
- return false;\r
-\r
- // Now, go through the vector, and see that we don't have any gaps,\r
- // but only a series of 32-bit MOVs.\r
- \r
- int64_t ExpectedDist = 0;\r
- auto MMI = MovVector.begin(), MME = MovVector.end();\r
- for (; MMI != MME; ++MMI, ExpectedDist += 4)\r
- if (*MMI == nullptr)\r
- break;\r
- \r
- // If the call had no parameters, do nothing\r
- if (!ExpectedDist)\r
- return false;\r
-\r
-  // We are either at the last parameter or at a gap.\r
-  // Make sure it's not a gap.\r
- for (; MMI != MME; ++MMI)\r
- if (*MMI != nullptr)\r
- return false;\r
-\r
- // Ok, we can in fact do the transformation for this call.\r
- // Do not remove the FrameSetup instruction, but adjust the parameters.\r
- // PEI will end up finalizing the handling of this.\r
- FrameSetup->getOperand(1).setImm(ExpectedDist);\r
-\r
- DebugLoc DL = I->getDebugLoc();\r
- // Now, iterate through the vector in reverse order, and replace the movs\r
-  // with pushes. MOVmi/MOVmr don't have any defs, so no need to\r
- // replace uses.\r
- for (int Idx = (ExpectedDist / 4) - 1; Idx >= 0; --Idx) {\r
- MachineBasicBlock::iterator MOV = *MovVector[Idx];\r
- MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);\r
- if (MOV->getOpcode() == X86::MOV32mi) {\r
- unsigned PushOpcode = X86::PUSHi32;\r
- // If the operand is a small (8-bit) immediate, we can use a\r
- // PUSH instruction with a shorter encoding.\r
- // Note that isImm() may fail even though this is a MOVmi, because\r
- // the operand can also be a symbol.\r
- if (PushOp.isImm()) {\r
- int64_t Val = PushOp.getImm();\r
- if (isInt<8>(Val))\r
- PushOpcode = X86::PUSH32i8;\r
- }\r
- BuildMI(MBB, Call, DL, TII->get(PushOpcode)).addOperand(PushOp);\r
- } else {\r
- unsigned int Reg = PushOp.getReg();\r
-\r
- // If PUSHrmm is not slow on this target, try to fold the source of the\r
- // push into the instruction.\r
- const X86Subtarget &ST = MF.getTarget().getSubtarget<X86Subtarget>();\r
- bool SlowPUSHrmm = ST.isAtom() || ST.isSLM();\r
-\r
- // Check that this is legal to fold. Right now, we're extremely\r
- // conservative about that.\r
- MachineInstr *DefMov = nullptr;\r
- if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {\r
- MachineInstr *Push = BuildMI(MBB, Call, DL, TII->get(X86::PUSH32rmm));\r
-\r
- unsigned NumOps = DefMov->getDesc().getNumOperands();\r
- for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)\r
- Push->addOperand(DefMov->getOperand(i));\r
-\r
- DefMov->eraseFromParent();\r
- } else {\r
- BuildMI(MBB, Call, DL, TII->get(X86::PUSH32r)).addReg(Reg).getInstr();\r
- }\r
- }\r
-\r
- MBB.erase(MOV);\r
- }\r
-\r
- // The stack-pointer copy is no longer used in the call sequences.\r
- // There should not be any other users, but we can't commit to that, so:\r
- if (MRI->use_empty(SPCopy->getOperand(0).getReg()))\r
- SPCopy->eraseFromParent();\r
-\r
- // Once we've done this, we need to make sure PEI doesn't assume a reserved\r
- // frame.\r
- X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();\r
- FuncInfo->setHasPushSequences(true);\r
-\r
- return true;\r
-}\r
-\r
-MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(\r
- MachineBasicBlock::iterator FrameSetup, unsigned Reg) {\r
- // Do an extremely restricted form of load folding.\r
- // ISel will often create patterns like:\r
- // movl 4(%edi), %eax\r
- // movl 8(%edi), %ecx\r
- // movl 12(%edi), %edx\r
- // movl %edx, 8(%esp)\r
- // movl %ecx, 4(%esp)\r
- // movl %eax, (%esp)\r
- // call\r
- // Get rid of those with prejudice.\r
- if (!TargetRegisterInfo::isVirtualRegister(Reg))\r
- return nullptr;\r
-\r
- // Make sure this is the only use of Reg.\r
- if (!MRI->hasOneNonDBGUse(Reg))\r
- return nullptr;\r
-\r
- MachineBasicBlock::iterator DefMI = MRI->getVRegDef(Reg);\r
-\r
- // Make sure the def is a MOV from memory.\r
-  // If the def is in another block, give up.\r
- if (DefMI->getOpcode() != X86::MOV32rm ||\r
- DefMI->getParent() != FrameSetup->getParent())\r
- return nullptr;\r
-\r
- // Be careful with movs that load from a stack slot, since it may get\r
- // resolved incorrectly.\r
- // TODO: Again, we already have the infrastructure, so this should work.\r
- if (!DefMI->getOperand(1).isReg())\r
- return nullptr;\r
-\r
- // Now, make sure everything else up until the ADJCALLSTACK is a sequence\r
- // of MOVs. To be less conservative would require duplicating a lot of the\r
- // logic from PeepholeOptimizer.\r
- // FIXME: A possibly better approach would be to teach the PeepholeOptimizer\r
- // to be smarter about folding into pushes. \r
- for (auto I = DefMI; I != FrameSetup; ++I)\r
- if (I->getOpcode() != X86::MOV32rm)\r
- return nullptr;\r
-\r
- return DefMI;\r
-}\r
-//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//\r
-//\r
-// The LLVM Compiler Infrastructure\r
-//\r
-// This file is distributed under the University of Illinois Open Source\r
-// License. See LICENSE.TXT for details.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-//\r
-// This file defines the X86-specific support for the FastISel class. Much\r
-// of the target-specific code is generated by tablegen in the file\r
-// X86GenFastISel.inc, which is #included here.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-\r
-#include "X86.h"\r
-#include "X86CallingConv.h"\r
-#include "X86InstrBuilder.h"\r
-#include "X86InstrInfo.h"\r
-#include "X86MachineFunctionInfo.h"\r
-#include "X86RegisterInfo.h"\r
-#include "X86Subtarget.h"\r
-#include "X86TargetMachine.h"\r
-#include "llvm/Analysis/BranchProbabilityInfo.h"\r
-#include "llvm/CodeGen/Analysis.h"\r
-#include "llvm/CodeGen/FastISel.h"\r
-#include "llvm/CodeGen/FunctionLoweringInfo.h"\r
-#include "llvm/CodeGen/MachineConstantPool.h"\r
-#include "llvm/CodeGen/MachineFrameInfo.h"\r
-#include "llvm/CodeGen/MachineRegisterInfo.h"\r
-#include "llvm/IR/CallSite.h"\r
-#include "llvm/IR/CallingConv.h"\r
-#include "llvm/IR/DerivedTypes.h"\r
-#include "llvm/IR/GetElementPtrTypeIterator.h"\r
-#include "llvm/IR/GlobalAlias.h"\r
-#include "llvm/IR/GlobalVariable.h"\r
-#include "llvm/IR/Instructions.h"\r
-#include "llvm/IR/IntrinsicInst.h"\r
-#include "llvm/IR/Operator.h"\r
-#include "llvm/Support/ErrorHandling.h"\r
-#include "llvm/Target/TargetOptions.h"\r
-using namespace llvm;\r
-\r
-namespace {\r
-\r
-class X86FastISel final : public FastISel {\r
- /// Subtarget - Keep a pointer to the X86Subtarget around so that we can\r
- /// make the right decision when generating code for different targets.\r
- const X86Subtarget *Subtarget;\r
-\r
- /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87\r
- /// floating point ops.\r
- /// When SSE is available, use it for f32 operations.\r
- /// When SSE2 is available, use it for f64 operations.\r
- bool X86ScalarSSEf64;\r
- bool X86ScalarSSEf32;\r
-\r
-public:\r
- explicit X86FastISel(FunctionLoweringInfo &funcInfo,\r
- const TargetLibraryInfo *libInfo)\r
- : FastISel(funcInfo, libInfo) {\r
- Subtarget = &TM.getSubtarget<X86Subtarget>();\r
- X86ScalarSSEf64 = Subtarget->hasSSE2();\r
- X86ScalarSSEf32 = Subtarget->hasSSE1();\r
- }\r
-\r
- bool fastSelectInstruction(const Instruction *I) override;\r
-\r
- /// \brief The specified machine instr operand is a vreg, and that\r
- /// vreg is being provided by the specified load instruction. If possible,\r
- /// try to fold the load as an operand to the instruction, returning true if\r
- /// possible.\r
- bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,\r
- const LoadInst *LI) override;\r
-\r
- bool fastLowerArguments() override;\r
- bool fastLowerCall(CallLoweringInfo &CLI) override;\r
- bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;\r
-\r
-#include "X86GenFastISel.inc"\r
-\r
-private:\r
- bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT, DebugLoc DL);\r
-\r
- bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, MachineMemOperand *MMO,\r
- unsigned &ResultReg);\r
-\r
- bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM,\r
- MachineMemOperand *MMO = nullptr, bool Aligned = false);\r
- bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,\r
- const X86AddressMode &AM,\r
- MachineMemOperand *MMO = nullptr, bool Aligned = false);\r
-\r
- bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,\r
- unsigned &ResultReg);\r
-\r
- bool X86SelectAddress(const Value *V, X86AddressMode &AM);\r
- bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);\r
-\r
- bool X86SelectLoad(const Instruction *I);\r
-\r
- bool X86SelectStore(const Instruction *I);\r
-\r
- bool X86SelectRet(const Instruction *I);\r
-\r
- bool X86SelectCmp(const Instruction *I);\r
-\r
- bool X86SelectZExt(const Instruction *I);\r
-\r
- bool X86SelectBranch(const Instruction *I);\r
-\r
- bool X86SelectShift(const Instruction *I);\r
-\r
- bool X86SelectDivRem(const Instruction *I);\r
-\r
- bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);\r
-\r
- bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);\r
-\r
- bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);\r
-\r
- bool X86SelectSelect(const Instruction *I);\r
-\r
- bool X86SelectTrunc(const Instruction *I);\r
-\r
- bool X86SelectFPExt(const Instruction *I);\r
- bool X86SelectFPTrunc(const Instruction *I);\r
-\r
- const X86InstrInfo *getInstrInfo() const {\r
- return getTargetMachine()->getSubtargetImpl()->getInstrInfo();\r
- }\r
- const X86TargetMachine *getTargetMachine() const {\r
- return static_cast<const X86TargetMachine *>(&TM);\r
- }\r
-\r
- bool handleConstantAddresses(const Value *V, X86AddressMode &AM);\r
-\r
- unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);\r
- unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);\r
- unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);\r
- unsigned fastMaterializeConstant(const Constant *C) override;\r
-\r
- unsigned fastMaterializeAlloca(const AllocaInst *C) override;\r
-\r
- unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;\r
-\r
- /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is\r
- /// computed in an SSE register, not on the X87 floating point stack.\r
- bool isScalarFPTypeInSSEReg(EVT VT) const {\r
-    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 when SSE2 available\r
-           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 when SSE1 available\r
- }\r
-\r
- bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);\r
-\r
- bool IsMemcpySmall(uint64_t Len);\r
-\r
- bool TryEmitSmallMemcpy(X86AddressMode DestAM,\r
- X86AddressMode SrcAM, uint64_t Len);\r
-\r
- bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,\r
- const Value *Cond);\r
-};\r
-\r
-} // end anonymous namespace.\r
-\r
-static std::pair<X86::CondCode, bool>\r
-getX86ConditionCode(CmpInst::Predicate Predicate) {\r
- X86::CondCode CC = X86::COND_INVALID;\r
- bool NeedSwap = false;\r
- switch (Predicate) {\r
- default: break;\r
- // Floating-point Predicates\r
- case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;\r
- case CmpInst::FCMP_OLT: NeedSwap = true; // fall-through\r
- case CmpInst::FCMP_OGT: CC = X86::COND_A; break;\r
- case CmpInst::FCMP_OLE: NeedSwap = true; // fall-through\r
- case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;\r
- case CmpInst::FCMP_UGT: NeedSwap = true; // fall-through\r
- case CmpInst::FCMP_ULT: CC = X86::COND_B; break;\r
- case CmpInst::FCMP_UGE: NeedSwap = true; // fall-through\r
- case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;\r
- case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;\r
- case CmpInst::FCMP_UNO: CC = X86::COND_P; break;\r
- case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;\r
- case CmpInst::FCMP_OEQ: // fall-through\r
- case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;\r
-\r
- // Integer Predicates\r
- case CmpInst::ICMP_EQ: CC = X86::COND_E; break;\r
- case CmpInst::ICMP_NE: CC = X86::COND_NE; break;\r
- case CmpInst::ICMP_UGT: CC = X86::COND_A; break;\r
- case CmpInst::ICMP_UGE: CC = X86::COND_AE; break;\r
- case CmpInst::ICMP_ULT: CC = X86::COND_B; break;\r
- case CmpInst::ICMP_ULE: CC = X86::COND_BE; break;\r
- case CmpInst::ICMP_SGT: CC = X86::COND_G; break;\r
- case CmpInst::ICMP_SGE: CC = X86::COND_GE; break;\r
- case CmpInst::ICMP_SLT: CC = X86::COND_L; break;\r
- case CmpInst::ICMP_SLE: CC = X86::COND_LE; break;\r
- }\r
-\r
- return std::make_pair(CC, NeedSwap);\r
-}\r
-\r
-static std::pair<unsigned, bool>\r
-getX86SSEConditionCode(CmpInst::Predicate Predicate) {\r
- unsigned CC;\r
- bool NeedSwap = false;\r
-\r
- // SSE Condition code mapping:\r
- // 0 - EQ\r
- // 1 - LT\r
- // 2 - LE\r
- // 3 - UNORD\r
- // 4 - NEQ\r
- // 5 - NLT\r
- // 6 - NLE\r
- // 7 - ORD\r
- switch (Predicate) {\r
- default: llvm_unreachable("Unexpected predicate");\r
- case CmpInst::FCMP_OEQ: CC = 0; break;\r
- case CmpInst::FCMP_OGT: NeedSwap = true; // fall-through\r
- case CmpInst::FCMP_OLT: CC = 1; break;\r
- case CmpInst::FCMP_OGE: NeedSwap = true; // fall-through\r
- case CmpInst::FCMP_OLE: CC = 2; break;\r
- case CmpInst::FCMP_UNO: CC = 3; break;\r
- case CmpInst::FCMP_UNE: CC = 4; break;\r
- case CmpInst::FCMP_ULE: NeedSwap = true; // fall-through\r
- case CmpInst::FCMP_UGE: CC = 5; break;\r
- case CmpInst::FCMP_ULT: NeedSwap = true; // fall-through\r
- case CmpInst::FCMP_UGT: CC = 6; break;\r
- case CmpInst::FCMP_ORD: CC = 7; break;\r
- case CmpInst::FCMP_UEQ:\r
- case CmpInst::FCMP_ONE: CC = 8; break;\r
- }\r
-\r
- return std::make_pair(CC, NeedSwap);\r
-}\r
-\r
-/// \brief Check if it is possible to fold the condition from the XALU intrinsic\r
-/// into the user. The condition code will only be updated on success.\r
-bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,\r
- const Value *Cond) {\r
- if (!isa<ExtractValueInst>(Cond))\r
- return false;\r
-\r
- const auto *EV = cast<ExtractValueInst>(Cond);\r
- if (!isa<IntrinsicInst>(EV->getAggregateOperand()))\r
- return false;\r
-\r
- const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());\r
- MVT RetVT;\r
- const Function *Callee = II->getCalledFunction();\r
- Type *RetTy =\r
- cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);\r
- if (!isTypeLegal(RetTy, RetVT))\r
- return false;\r
-\r
- if (RetVT != MVT::i32 && RetVT != MVT::i64)\r
- return false;\r
-\r
- X86::CondCode TmpCC;\r
- switch (II->getIntrinsicID()) {\r
- default: return false;\r
- case Intrinsic::sadd_with_overflow:\r
- case Intrinsic::ssub_with_overflow:\r
- case Intrinsic::smul_with_overflow:\r
- case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;\r
- case Intrinsic::uadd_with_overflow:\r
- case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;\r
- }\r
-\r
- // Check if both instructions are in the same basic block.\r
- if (II->getParent() != I->getParent())\r
- return false;\r
-\r
- // Make sure nothing is in the way\r
- BasicBlock::const_iterator Start = I;\r
- BasicBlock::const_iterator End = II;\r
- for (auto Itr = std::prev(Start); Itr != End; --Itr) {\r
- // We only expect extractvalue instructions between the intrinsic and the\r
- // instruction to be selected.\r
- if (!isa<ExtractValueInst>(Itr))\r
- return false;\r
-\r
- // Check that the extractvalue operand comes from the intrinsic.\r
- const auto *EVI = cast<ExtractValueInst>(Itr);\r
- if (EVI->getAggregateOperand() != II)\r
- return false;\r
- }\r
-\r
- CC = TmpCC;\r
- return true;\r
-}\r
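A minimal source-level example of the shape this function matches, assuming Clang's usual lowering of the checked-arithmetic builtin to the llvm.sadd.with.overflow intrinsic (addChecked is a hypothetical name):

// The extractvalue of the overflow bit that feeds a branch or select is
// the condition foldX86XALUIntrinsic folds into X86::COND_O.
bool addChecked(int A, int B, int &Sum) {
  return __builtin_sadd_overflow(A, B, &Sum); // -> llvm.sadd.with.overflow.i32
}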
-\r
-bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {\r
- EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true);\r
- if (evt == MVT::Other || !evt.isSimple())\r
- // Unhandled type. Halt "fast" selection and bail.\r
- return false;\r
-\r
- VT = evt.getSimpleVT();\r
- // For now, require SSE/SSE2 for performing floating-point operations,\r
- // since x87 requires additional work.\r
- if (VT == MVT::f64 && !X86ScalarSSEf64)\r
- return false;\r
- if (VT == MVT::f32 && !X86ScalarSSEf32)\r
- return false;\r
- // Similarly, no f80 support yet.\r
- if (VT == MVT::f80)\r
- return false;\r
- // We only handle legal types. For example, on x86-32 the instruction\r
- // selector contains all of the 64-bit instructions from x86-64,\r
- // under the assumption that i64 won't be used if the target doesn't\r
- // support it.\r
- return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);\r
-}\r
-\r
-#include "X86GenCallingConv.inc"\r
-\r
-/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.\r
-/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.\r
-/// Return true and set the result register by reference if it is possible.\r
-bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,\r
- MachineMemOperand *MMO, unsigned &ResultReg) {\r
- // Get opcode and regclass of the output for the given load instruction.\r
- unsigned Opc = 0;\r
- const TargetRegisterClass *RC = nullptr;\r
- switch (VT.getSimpleVT().SimpleTy) {\r
- default: return false;\r
- case MVT::i1:\r
- case MVT::i8:\r
- Opc = X86::MOV8rm;\r
- RC = &X86::GR8RegClass;\r
- break;\r
- case MVT::i16:\r
- Opc = X86::MOV16rm;\r
- RC = &X86::GR16RegClass;\r
- break;\r
- case MVT::i32:\r
- Opc = X86::MOV32rm;\r
- RC = &X86::GR32RegClass;\r
- break;\r
- case MVT::i64:\r
- // Must be in x86-64 mode.\r
- Opc = X86::MOV64rm;\r
- RC = &X86::GR64RegClass;\r
- break;\r
- case MVT::f32:\r
- if (X86ScalarSSEf32) {\r
- Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;\r
- RC = &X86::FR32RegClass;\r
- } else {\r
- Opc = X86::LD_Fp32m;\r
- RC = &X86::RFP32RegClass;\r
- }\r
- break;\r
- case MVT::f64:\r
- if (X86ScalarSSEf64) {\r
- Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;\r
- RC = &X86::FR64RegClass;\r
- } else {\r
- Opc = X86::LD_Fp64m;\r
- RC = &X86::RFP64RegClass;\r
- }\r
- break;\r
- case MVT::f80:\r
- // No f80 support yet.\r
- return false;\r
- }\r
-\r
- ResultReg = createResultReg(RC);\r
- MachineInstrBuilder MIB =\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);\r
- addFullAddress(MIB, AM);\r
- if (MMO)\r
- MIB->addMemOperand(*FuncInfo.MF, MMO);\r
- return true;\r
-}\r
-\r
-/// X86FastEmitStore - Emit a machine instruction to store a value Val of\r
-/// type VT. The address is either pre-computed, consisting of a base ptr,\r
-/// Ptr, and a displacement offset, or a GlobalAddress, i.e. V.\r
-/// Return true if it is possible.\r
-bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,\r
- const X86AddressMode &AM,\r
- MachineMemOperand *MMO, bool Aligned) {\r
- // Get opcode and regclass of the output for the given store instruction.\r
- unsigned Opc = 0;\r
- switch (VT.getSimpleVT().SimpleTy) {\r
- case MVT::f80: // No f80 support yet.\r
- default: return false;\r
- case MVT::i1: {\r
- // Mask out all but lowest bit.\r
- unsigned AndResult = createResultReg(&X86::GR8RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(X86::AND8ri), AndResult)\r
- .addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);\r
- ValReg = AndResult;\r
- }\r
- // FALLTHROUGH, handling i1 as i8.\r
- case MVT::i8: Opc = X86::MOV8mr; break;\r
- case MVT::i16: Opc = X86::MOV16mr; break;\r
- case MVT::i32: Opc = X86::MOV32mr; break;\r
- case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.\r
- case MVT::f32:\r
- Opc = X86ScalarSSEf32 ?\r
- (Subtarget->hasAVX() ? X86::VMOVSSmr : X86::MOVSSmr) : X86::ST_Fp32m;\r
- break;\r
- case MVT::f64:\r
- Opc = X86ScalarSSEf64 ?\r
- (Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;\r
- break;\r
- case MVT::v4f32:\r
- if (Aligned)\r
- Opc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;\r
- else\r
- Opc = Subtarget->hasAVX() ? X86::VMOVUPSmr : X86::MOVUPSmr;\r
- break;\r
- case MVT::v2f64:\r
- if (Aligned)\r
- Opc = Subtarget->hasAVX() ? X86::VMOVAPDmr : X86::MOVAPDmr;\r
- else\r
- Opc = Subtarget->hasAVX() ? X86::VMOVUPDmr : X86::MOVUPDmr;\r
- break;\r
- case MVT::v4i32:\r
- case MVT::v2i64:\r
- case MVT::v8i16:\r
- case MVT::v16i8:\r
- if (Aligned)\r
- Opc = Subtarget->hasAVX() ? X86::VMOVDQAmr : X86::MOVDQAmr;\r
- else\r
- Opc = Subtarget->hasAVX() ? X86::VMOVDQUmr : X86::MOVDQUmr;\r
- break;\r
- }\r
-\r
- MachineInstrBuilder MIB =\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));\r
- addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill));\r
- if (MMO)\r
- MIB->addMemOperand(*FuncInfo.MF, MMO);\r
-\r
- return true;\r
-}\r
-\r
-bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,\r
- const X86AddressMode &AM,\r
- MachineMemOperand *MMO, bool Aligned) {\r
- // Handle 'null' like i32/i64 0.\r
- if (isa<ConstantPointerNull>(Val))\r
- Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));\r
-\r
- // If this is a store of a simple constant, fold the constant into the store.\r
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {\r
- unsigned Opc = 0;\r
- bool Signed = true;\r
- switch (VT.getSimpleVT().SimpleTy) {\r
- default: break;\r
- case MVT::i1: Signed = false; // FALLTHROUGH to handle as i8.\r
- case MVT::i8: Opc = X86::MOV8mi; break;\r
- case MVT::i16: Opc = X86::MOV16mi; break;\r
- case MVT::i32: Opc = X86::MOV32mi; break;\r
- case MVT::i64:\r
- // Must be a 32-bit sign extended value.\r
- if (isInt<32>(CI->getSExtValue()))\r
- Opc = X86::MOV64mi32;\r
- break;\r
- }\r
-\r
- if (Opc) {\r
- MachineInstrBuilder MIB =\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));\r
- addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()\r
- : CI->getZExtValue());\r
- if (MMO)\r
- MIB->addMemOperand(*FuncInfo.MF, MMO);\r
- return true;\r
- }\r
- }\r
-\r
- unsigned ValReg = getRegForValue(Val);\r
- if (ValReg == 0)\r
- return false;\r
-\r
- bool ValKill = hasTrivialKill(Val);\r
- return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned);\r
-}\r
-\r
-/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of\r
-/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.\r
-/// ISD::SIGN_EXTEND).\r
-bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,\r
- unsigned Src, EVT SrcVT,\r
- unsigned &ResultReg) {\r
- unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,\r
- Src, /*TODO: Kill=*/false);\r
- if (RR == 0)\r
- return false;\r
-\r
- ResultReg = RR;\r
- return true;\r
-}\r
-\r
-bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {\r
- // Handle constant address.\r
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {\r
- // Can't handle alternate code models yet.\r
- if (TM.getCodeModel() != CodeModel::Small)\r
- return false;\r
-\r
- // Can't handle TLS yet.\r
- if (GV->isThreadLocal())\r
- return false;\r
-\r
- // RIP-relative addresses can't have additional register operands, so if\r
- // we've already folded stuff into the addressing mode, just force the\r
- // global value into its own register, which we can use as the basereg.\r
- if (!Subtarget->isPICStyleRIPRel() ||\r
- (AM.Base.Reg == 0 && AM.IndexReg == 0)) {\r
- // Okay, we've committed to selecting this global. Set up the address.\r
- AM.GV = GV;\r
-\r
- // Allow the subtarget to classify the global.\r
- unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);\r
-\r
- // If this reference is relative to the pic base, set it now.\r
- if (isGlobalRelativeToPICBase(GVFlags)) {\r
- // FIXME: How do we know Base.Reg is free??\r
- AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);\r
- }\r
-\r
- // Unless the ABI requires an extra load, return a direct reference to\r
- // the global.\r
- if (!isGlobalStubReference(GVFlags)) {\r
- if (Subtarget->isPICStyleRIPRel()) {\r
- // Use rip-relative addressing if we can. Above we verified that the\r
- // base and index registers are unused.\r
- assert(AM.Base.Reg == 0 && AM.IndexReg == 0);\r
- AM.Base.Reg = X86::RIP;\r
- }\r
- AM.GVOpFlags = GVFlags;\r
- return true;\r
- }\r
-\r
- // Ok, we need to do a load from a stub. If we've already loaded from\r
- // this stub, reuse the loaded pointer, otherwise emit the load now.\r
- DenseMap<const Value *, unsigned>::iterator I = LocalValueMap.find(V);\r
- unsigned LoadReg;\r
- if (I != LocalValueMap.end() && I->second != 0) {\r
- LoadReg = I->second;\r
- } else {\r
- // Issue load from stub.\r
- unsigned Opc = 0;\r
- const TargetRegisterClass *RC = nullptr;\r
- X86AddressMode StubAM;\r
- StubAM.Base.Reg = AM.Base.Reg;\r
- StubAM.GV = GV;\r
- StubAM.GVOpFlags = GVFlags;\r
-\r
- // Prepare for inserting code in the local-value area.\r
- SavePoint SaveInsertPt = enterLocalValueArea();\r
-\r
- if (TLI.getPointerTy() == MVT::i64) {\r
- Opc = X86::MOV64rm;\r
- RC = &X86::GR64RegClass;\r
-\r
- if (Subtarget->isPICStyleRIPRel())\r
- StubAM.Base.Reg = X86::RIP;\r
- } else {\r
- Opc = X86::MOV32rm;\r
- RC = &X86::GR32RegClass;\r
- }\r
-\r
- LoadReg = createResultReg(RC);\r
- MachineInstrBuilder LoadMI =\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), LoadReg);\r
- addFullAddress(LoadMI, StubAM);\r
-\r
- // Ok, back to normal mode.\r
- leaveLocalValueArea(SaveInsertPt);\r
-\r
- // Prevent loading GV stub multiple times in same MBB.\r
- LocalValueMap[V] = LoadReg;\r
- }\r
-\r
- // Now construct the final address. Note that the Disp, Scale,\r
- // and Index values may already be set here.\r
- AM.Base.Reg = LoadReg;\r
- AM.GV = nullptr;\r
- return true;\r
- }\r
- }\r
-\r
- // If all else fails, try to materialize the value in a register.\r
- if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {\r
- if (AM.Base.Reg == 0) {\r
- AM.Base.Reg = getRegForValue(V);\r
- return AM.Base.Reg != 0;\r
- }\r
- if (AM.IndexReg == 0) {\r
- assert(AM.Scale == 1 && "Scale with no index!");\r
- AM.IndexReg = getRegForValue(V);\r
- return AM.IndexReg != 0;\r
- }\r
- }\r
-\r
- return false;\r
-}\r
-\r
-/// X86SelectAddress - Attempt to fill in an address from the given value.\r
-///\r
-bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {\r
- SmallVector<const Value *, 32> GEPs;\r
-redo_gep:\r
- const User *U = nullptr;\r
- unsigned Opcode = Instruction::UserOp1;\r
- if (const Instruction *I = dyn_cast<Instruction>(V)) {\r
- // Don't walk into other basic blocks; it's possible we haven't\r
- // visited them yet, so the instructions may not yet be assigned\r
- // virtual registers.\r
- if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||\r
- FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {\r
- Opcode = I->getOpcode();\r
- U = I;\r
- }\r
- } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {\r
- Opcode = C->getOpcode();\r
- U = C;\r
- }\r
-\r
- if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))\r
- if (Ty->getAddressSpace() > 255)\r
- // Fast instruction selection doesn't support the special\r
- // address spaces.\r
- return false;\r
-\r
- switch (Opcode) {\r
- default: break;\r
- case Instruction::BitCast:\r
- // Look past bitcasts.\r
- return X86SelectAddress(U->getOperand(0), AM);\r
-\r
- case Instruction::IntToPtr:\r
- // Look past no-op inttoptrs.\r
- if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())\r
- return X86SelectAddress(U->getOperand(0), AM);\r
- break;\r
-\r
- case Instruction::PtrToInt:\r
- // Look past no-op ptrtoints.\r
- if (TLI.getValueType(U->getType()) == TLI.getPointerTy())\r
- return X86SelectAddress(U->getOperand(0), AM);\r
- break;\r
-\r
- case Instruction::Alloca: {\r
- // Do static allocas.\r
- const AllocaInst *A = cast<AllocaInst>(V);\r
- DenseMap<const AllocaInst *, int>::iterator SI =\r
- FuncInfo.StaticAllocaMap.find(A);\r
- if (SI != FuncInfo.StaticAllocaMap.end()) {\r
- AM.BaseType = X86AddressMode::FrameIndexBase;\r
- AM.Base.FrameIndex = SI->second;\r
- return true;\r
- }\r
- break;\r
- }\r
-\r
- case Instruction::Add: {\r
- // Adds of constants are common and easy enough.\r
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {\r
- uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();\r
- // They have to fit in the 32-bit signed displacement field though.\r
- if (isInt<32>(Disp)) {\r
- AM.Disp = (uint32_t)Disp;\r
- return X86SelectAddress(U->getOperand(0), AM);\r
- }\r
- }\r
- break;\r
- }\r
-\r
- case Instruction::GetElementPtr: {\r
- X86AddressMode SavedAM = AM;\r
-\r
- // Pattern-match simple GEPs.\r
- uint64_t Disp = (int32_t)AM.Disp;\r
- unsigned IndexReg = AM.IndexReg;\r
- unsigned Scale = AM.Scale;\r
- gep_type_iterator GTI = gep_type_begin(U);\r
- // Iterate through the indices, folding what we can. Constants can be\r
- // folded, and one dynamic index can be handled, if the scale is supported.\r
- for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();\r
- i != e; ++i, ++GTI) {\r
- const Value *Op = *i;\r
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {\r
- const StructLayout *SL = DL.getStructLayout(STy);\r
- Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());\r
- continue;\r
- }\r
-\r
-      // An array/variable index is always of the form i*S where S is the\r
- // constant scale size. See if we can push the scale into immediates.\r
- uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());\r
- for (;;) {\r
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {\r
- // Constant-offset addressing.\r
- Disp += CI->getSExtValue() * S;\r
- break;\r
- }\r
- if (canFoldAddIntoGEP(U, Op)) {\r
- // A compatible add with a constant operand. Fold the constant.\r
- ConstantInt *CI =\r
- cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));\r
- Disp += CI->getSExtValue() * S;\r
- // Iterate on the other operand.\r
- Op = cast<AddOperator>(Op)->getOperand(0);\r
- continue;\r
- }\r
- if (IndexReg == 0 &&\r
- (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&\r
- (S == 1 || S == 2 || S == 4 || S == 8)) {\r
- // Scaled-index addressing.\r
- Scale = S;\r
- IndexReg = getRegForGEPIndex(Op).first;\r
- if (IndexReg == 0)\r
- return false;\r
- break;\r
- }\r
- // Unsupported.\r
- goto unsupported_gep;\r
- }\r
- }\r
-\r
- // Check for displacement overflow.\r
- if (!isInt<32>(Disp))\r
- break;\r
-\r
- AM.IndexReg = IndexReg;\r
- AM.Scale = Scale;\r
- AM.Disp = (uint32_t)Disp;\r
- GEPs.push_back(V);\r
-\r
- if (const GetElementPtrInst *GEP =\r
- dyn_cast<GetElementPtrInst>(U->getOperand(0))) {\r
- // Ok, the GEP indices were covered by constant-offset and scaled-index\r
- // addressing. Update the address state and move on to examining the base.\r
- V = GEP;\r
- goto redo_gep;\r
- } else if (X86SelectAddress(U->getOperand(0), AM)) {\r
- return true;\r
- }\r
-\r
-    // If we couldn't merge the gep value into this addr mode, revert to\r
- // our address and just match the value instead of completely failing.\r
- AM = SavedAM;\r
-\r
- for (SmallVectorImpl<const Value *>::reverse_iterator\r
- I = GEPs.rbegin(), E = GEPs.rend(); I != E; ++I)\r
- if (handleConstantAddresses(*I, AM))\r
- return true;\r
-\r
- return false;\r
- unsupported_gep:\r
- // Ok, the GEP indices weren't all covered.\r
- break;\r
- }\r
- }\r
-\r
- return handleConstantAddresses(V, AM);\r
-}\r
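A sketch of the kind of GEP the matcher above folds into a single x86 addressing mode (loadAt is hypothetical; whether the fold happens also depends on what already occupies the base and index registers):

// p[i + 4] is GEP p, i+4: the constant folds into the displacement and the
// dynamic index uses scaled-index addressing, i.e. 16(%base,%index,4).
int loadAt(int *p, long i) { return p[i + 4]; }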
-\r
-/// X86SelectCallAddress - Attempt to fill in an address from the given value.\r
-///\r
-bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {\r
- const User *U = nullptr;\r
- unsigned Opcode = Instruction::UserOp1;\r
- const Instruction *I = dyn_cast<Instruction>(V);\r
- // Record if the value is defined in the same basic block.\r
- //\r
- // This information is crucial to know whether or not folding an\r
- // operand is valid.\r
- // Indeed, FastISel generates or reuses a virtual register for all\r
- // operands of all instructions it selects. Obviously, the definition and\r
- // its uses must use the same virtual register otherwise the produced\r
- // code is incorrect.\r
- // Before instruction selection, FunctionLoweringInfo::set sets the virtual\r
- // registers for values that are alive across basic blocks. This ensures\r
-  // that the values are consistently set across basic blocks, even\r
- // if different instruction selection mechanisms are used (e.g., a mix of\r
- // SDISel and FastISel).\r
- // For values local to a basic block, the instruction selection process\r
- // generates these virtual registers with whatever method is appropriate\r
- // for its needs. In particular, FastISel and SDISel do not share the way\r
- // local virtual registers are set.\r
-  // Therefore, it is impossible (or at least unsafe) to share values\r
-  // between basic blocks unless they use the same instruction selection\r
-  // method, which is not guaranteed for X86.\r
-  // Moreover, things like hasOneUse could not be used accurately if we\r
-  // allowed references to values across basic blocks when they are not\r
-  // initially alive across basic blocks.\r
- bool InMBB = true;\r
- if (I) {\r
- Opcode = I->getOpcode();\r
- U = I;\r
- InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();\r
- } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {\r
- Opcode = C->getOpcode();\r
- U = C;\r
- }\r
-\r
- switch (Opcode) {\r
- default: break;\r
- case Instruction::BitCast:\r
- // Look past bitcasts if its operand is in the same BB.\r
- if (InMBB)\r
- return X86SelectCallAddress(U->getOperand(0), AM);\r
- break;\r
-\r
- case Instruction::IntToPtr:\r
- // Look past no-op inttoptrs if its operand is in the same BB.\r
- if (InMBB &&\r
- TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())\r
- return X86SelectCallAddress(U->getOperand(0), AM);\r
- break;\r
-\r
- case Instruction::PtrToInt:\r
- // Look past no-op ptrtoints if its operand is in the same BB.\r
- if (InMBB &&\r
- TLI.getValueType(U->getType()) == TLI.getPointerTy())\r
- return X86SelectCallAddress(U->getOperand(0), AM);\r
- break;\r
- }\r
-\r
- // Handle constant address.\r
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {\r
- // Can't handle alternate code models yet.\r
- if (TM.getCodeModel() != CodeModel::Small)\r
- return false;\r
-\r
- // RIP-relative addresses can't have additional register operands.\r
- if (Subtarget->isPICStyleRIPRel() &&\r
- (AM.Base.Reg != 0 || AM.IndexReg != 0))\r
- return false;\r
-\r
- // Can't handle DLL Import.\r
- if (GV->hasDLLImportStorageClass())\r
- return false;\r
-\r
- // Can't handle TLS.\r
- if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))\r
- if (GVar->isThreadLocal())\r
- return false;\r
-\r
- // Okay, we've committed to selecting this global. Set up the basic address.\r
- AM.GV = GV;\r
-\r
- // No ABI requires an extra load for anything other than DLLImport, which\r
- // we rejected above. Return a direct reference to the global.\r
- if (Subtarget->isPICStyleRIPRel()) {\r
- // Use rip-relative addressing if we can. Above we verified that the\r
- // base and index registers are unused.\r
- assert(AM.Base.Reg == 0 && AM.IndexReg == 0);\r
- AM.Base.Reg = X86::RIP;\r
- } else if (Subtarget->isPICStyleStubPIC()) {\r
- AM.GVOpFlags = X86II::MO_PIC_BASE_OFFSET;\r
- } else if (Subtarget->isPICStyleGOT()) {\r
- AM.GVOpFlags = X86II::MO_GOTOFF;\r
- }\r
-\r
- return true;\r
- }\r
-\r
- // If all else fails, try to materialize the value in a register.\r
- if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {\r
- if (AM.Base.Reg == 0) {\r
- AM.Base.Reg = getRegForValue(V);\r
- return AM.Base.Reg != 0;\r
- }\r
- if (AM.IndexReg == 0) {\r
- assert(AM.Scale == 1 && "Scale with no index!");\r
- AM.IndexReg = getRegForValue(V);\r
- return AM.IndexReg != 0;\r
- }\r
- }\r
-\r
- return false;\r
-}\r
-\r
-\r
-/// X86SelectStore - Select and emit code to implement store instructions.\r
-bool X86FastISel::X86SelectStore(const Instruction *I) {\r
- // Atomic stores need special handling.\r
- const StoreInst *S = cast<StoreInst>(I);\r
-\r
- if (S->isAtomic())\r
- return false;\r
-\r
- const Value *Val = S->getValueOperand();\r
- const Value *Ptr = S->getPointerOperand();\r
-\r
- MVT VT;\r
- if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))\r
- return false;\r
-\r
- unsigned Alignment = S->getAlignment();\r
- unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());\r
- if (Alignment == 0) // Ensure that codegen never sees alignment 0\r
- Alignment = ABIAlignment;\r
- bool Aligned = Alignment >= ABIAlignment;\r
-\r
- X86AddressMode AM;\r
- if (!X86SelectAddress(Ptr, AM))\r
- return false;\r
-\r
- return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);\r
-}\r
-\r
-/// X86SelectRet - Select and emit code to implement ret instructions.\r
-bool X86FastISel::X86SelectRet(const Instruction *I) {\r
- const ReturnInst *Ret = cast<ReturnInst>(I);\r
- const Function &F = *I->getParent()->getParent();\r
- const X86MachineFunctionInfo *X86MFInfo =\r
- FuncInfo.MF->getInfo<X86MachineFunctionInfo>();\r
-\r
- if (!FuncInfo.CanLowerReturn)\r
- return false;\r
-\r
- CallingConv::ID CC = F.getCallingConv();\r
- if (CC != CallingConv::C &&\r
- CC != CallingConv::Fast &&\r
- CC != CallingConv::X86_FastCall &&\r
- CC != CallingConv::X86_64_SysV)\r
- return false;\r
-\r
- if (Subtarget->isCallingConvWin64(CC))\r
- return false;\r
-\r
- // Don't handle popping bytes on return for now.\r
- if (X86MFInfo->getBytesToPopOnReturn() != 0)\r
- return false;\r
-\r
- // fastcc with -tailcallopt is intended to provide a guaranteed\r
- // tail call optimization. Fastisel doesn't know how to do that.\r
- if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)\r
- return false;\r
-\r
- // Let SDISel handle vararg functions.\r
- if (F.isVarArg())\r
- return false;\r
-\r
- // Build a list of return value registers.\r
- SmallVector<unsigned, 4> RetRegs;\r
-\r
- if (Ret->getNumOperands() > 0) {\r
- SmallVector<ISD::OutputArg, 4> Outs;\r
- GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);\r
-\r
- // Analyze operands of the call, assigning locations to each operand.\r
- SmallVector<CCValAssign, 16> ValLocs;\r
- CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());\r
- CCInfo.AnalyzeReturn(Outs, RetCC_X86);\r
-\r
- const Value *RV = Ret->getOperand(0);\r
- unsigned Reg = getRegForValue(RV);\r
- if (Reg == 0)\r
- return false;\r
-\r
- // Only handle a single return value for now.\r
- if (ValLocs.size() != 1)\r
- return false;\r
-\r
- CCValAssign &VA = ValLocs[0];\r
-\r
- // Don't bother handling odd stuff for now.\r
- if (VA.getLocInfo() != CCValAssign::Full)\r
- return false;\r
- // Only handle register returns for now.\r
- if (!VA.isRegLoc())\r
- return false;\r
-\r
- // The calling-convention tables for x87 returns don't tell\r
- // the whole story.\r
- if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)\r
- return false;\r
-\r
- unsigned SrcReg = Reg + VA.getValNo();\r
- EVT SrcVT = TLI.getValueType(RV->getType());\r
- EVT DstVT = VA.getValVT();\r
- // Special handling for extended integers.\r
- if (SrcVT != DstVT) {\r
- if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)\r
- return false;\r
-\r
- if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())\r
- return false;\r
-\r
- assert(DstVT == MVT::i32 && "X86 should always ext to i32");\r
-\r
- if (SrcVT == MVT::i1) {\r
- if (Outs[0].Flags.isSExt())\r
- return false;\r
- SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);\r
- SrcVT = MVT::i8;\r
- }\r
- unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :\r
- ISD::SIGN_EXTEND;\r
- SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,\r
- SrcReg, /*TODO: Kill=*/false);\r
- }\r
-\r
- // Make the copy.\r
- unsigned DstReg = VA.getLocReg();\r
- const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);\r
- // Avoid a cross-class copy. This is very unlikely.\r
- if (!SrcRC->contains(DstReg))\r
- return false;\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);\r
-\r
- // Add register to return instruction.\r
- RetRegs.push_back(VA.getLocReg());\r
- }\r
-\r
- // The x86-64 ABI for returning structs by value requires that we copy\r
- // the sret argument into %rax for the return. We saved the argument into\r
- // a virtual register in the entry block, so now we copy the value out\r
- // and into %rax. We also do the same with %eax for Win32.\r
- if (F.hasStructRetAttr() &&\r
- (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {\r
- unsigned Reg = X86MFInfo->getSRetReturnReg();\r
- assert(Reg &&\r
- "SRetReturnReg should have been set in LowerFormalArguments()!");\r
- unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);\r
- RetRegs.push_back(RetReg);\r
- }\r
-\r
- // Now emit the RET.\r
- MachineInstrBuilder MIB =\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));\r
- for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)\r
- MIB.addReg(RetRegs[i], RegState::Implicit);\r
- return true;\r
-}\r
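For context on the sret copy above: under the x86-64 SysV ABI, a function that returns a large struct in memory receives the destination address as a hidden argument and must hand that same address back in %rax. A minimal C++ sketch of the shape (illustrative; the hidden pointer is normally invisible in source):

    struct Big { long Vals[8]; };

    // What the lowering above enforces: the hidden sret pointer arrives as an
    // implicit first argument and is copied back out in RAX before RETQ.
    static Big *sretReturnShape(Big *SretPtr /* hidden arg */) {
      return SretPtr; // the COPY into X86::RAX emitted above
    }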
-\r
-/// X86SelectLoad - Select and emit code to implement load instructions.\r
-///\r
-bool X86FastISel::X86SelectLoad(const Instruction *I) {\r
- const LoadInst *LI = cast<LoadInst>(I);\r
-\r
- // Atomic loads need special handling.\r
- if (LI->isAtomic())\r
- return false;\r
-\r
- MVT VT;\r
- if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))\r
- return false;\r
-\r
- const Value *Ptr = LI->getPointerOperand();\r
-\r
- X86AddressMode AM;\r
- if (!X86SelectAddress(Ptr, AM))\r
- return false;\r
-\r
- unsigned ResultReg = 0;\r
- if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg))\r
- return false;\r
-\r
- updateValueMap(I, ResultReg);\r
- return true;\r
-}\r
-\r
-static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {\r
- bool HasAVX = Subtarget->hasAVX();\r
- bool X86ScalarSSEf32 = Subtarget->hasSSE1();\r
- bool X86ScalarSSEf64 = Subtarget->hasSSE2();\r
-\r
- switch (VT.getSimpleVT().SimpleTy) {\r
- default: return 0;\r
- case MVT::i8: return X86::CMP8rr;\r
- case MVT::i16: return X86::CMP16rr;\r
- case MVT::i32: return X86::CMP32rr;\r
- case MVT::i64: return X86::CMP64rr;\r
- case MVT::f32:\r
- return X86ScalarSSEf32 ? (HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr) : 0;\r
- case MVT::f64:\r
- return X86ScalarSSEf64 ? (HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr) : 0;\r
- }\r
-}\r
-\r
-/// X86ChooseCmpImmediateOpcode - If we have a comparison with a constant as\r
-/// the RHS of the comparison, return an opcode that works for the compare\r
-/// (e.g. CMP32ri); otherwise return 0.\r
-static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {\r
- switch (VT.getSimpleVT().SimpleTy) {\r
- // Otherwise, we can't fold the immediate into this comparison.\r
- default: return 0;\r
- case MVT::i8: return X86::CMP8ri;\r
- case MVT::i16: return X86::CMP16ri;\r
- case MVT::i32: return X86::CMP32ri;\r
- case MVT::i64:\r
- // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext\r
- // field.\r
- if ((int)RHSC->getSExtValue() == RHSC->getSExtValue())\r
- return X86::CMP64ri32;\r
- return 0;\r
- }\r
-}\r
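The i64 guard above works because CMP64ri32 sign-extends its 32-bit immediate to 64 bits before comparing. A freestanding sketch of that fit test (fitsInSExt32 is a hypothetical name):

    #include <cstdint>

    // True if V round-trips through int32_t, i.e. it is representable as a
    // sign-extended 32-bit immediate -- the same test as the (int) cast above.
    static bool fitsInSExt32(int64_t V) {
      return V == static_cast<int64_t>(static_cast<int32_t>(V));
    }
    // fitsInSExt32(-1)        -> true
    // fitsInSExt32(1LL << 31) -> false (2147483648 needs a 64-bit immediate)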
-\r
-bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,\r
- EVT VT, DebugLoc CurDbgLoc) {\r
- unsigned Op0Reg = getRegForValue(Op0);\r
- if (Op0Reg == 0) return false;\r
-\r
- // Handle 'null' like i32/i64 0.\r
- if (isa<ConstantPointerNull>(Op1))\r
- Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));\r
-\r
- // We have two options: compare with register or immediate. If the RHS of\r
- // the compare is an immediate that we can fold into this compare, use\r
- // CMPri, otherwise use CMPrr.\r
- if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {\r
- if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareImmOpc))\r
- .addReg(Op0Reg)\r
- .addImm(Op1C->getSExtValue());\r
- return true;\r
- }\r
- }\r
-\r
- unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);\r
- if (CompareOpc == 0) return false;\r
-\r
- unsigned Op1Reg = getRegForValue(Op1);\r
- if (Op1Reg == 0) return false;\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc))\r
- .addReg(Op0Reg)\r
- .addReg(Op1Reg);\r
-\r
- return true;\r
-}\r
-\r
-bool X86FastISel::X86SelectCmp(const Instruction *I) {\r
- const CmpInst *CI = cast<CmpInst>(I);\r
-\r
- MVT VT;\r
- if (!isTypeLegal(I->getOperand(0)->getType(), VT))\r
- return false;\r
-\r
- // Try to optimize or fold the cmp.\r
- CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);\r
- unsigned ResultReg = 0;\r
- switch (Predicate) {\r
- default: break;\r
- case CmpInst::FCMP_FALSE: {\r
- ResultReg = createResultReg(&X86::GR32RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),\r
- ResultReg);\r
- ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,\r
- X86::sub_8bit);\r
- if (!ResultReg)\r
- return false;\r
- break;\r
- }\r
- case CmpInst::FCMP_TRUE: {\r
- ResultReg = createResultReg(&X86::GR8RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),\r
- ResultReg).addImm(1);\r
- break;\r
- }\r
- }\r
-\r
- if (ResultReg) {\r
- updateValueMap(I, ResultReg);\r
- return true;\r
- }\r
-\r
- const Value *LHS = CI->getOperand(0);\r
- const Value *RHS = CI->getOperand(1);\r
-\r
- // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.\r
- // We don't have to materialize a zero constant for this case and can just use\r
- // %x again on the RHS.\r
- if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {\r
- const auto *RHSC = dyn_cast<ConstantFP>(RHS);\r
- if (RHSC && RHSC->isNullValue())\r
- RHS = LHS;\r
- }\r
-\r
- // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.\r
- static unsigned SETFOpcTable[2][3] = {\r
- { X86::SETEr, X86::SETNPr, X86::AND8rr },\r
- { X86::SETNEr, X86::SETPr, X86::OR8rr }\r
- };\r
- unsigned *SETFOpc = nullptr;\r
- switch (Predicate) {\r
- default: break;\r
- case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break;\r
- case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break;\r
- }\r
-\r
- ResultReg = createResultReg(&X86::GR8RegClass);\r
- if (SETFOpc) {\r
- if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))\r
- return false;\r
-\r
- unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);\r
- unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),\r
- FlagReg1);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),\r
- FlagReg2);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]),\r
- ResultReg).addReg(FlagReg1).addReg(FlagReg2);\r
- updateValueMap(I, ResultReg);\r
- return true;\r
- }\r
-\r
- X86::CondCode CC;\r
- bool SwapArgs;\r
- std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);\r
- assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");\r
- unsigned Opc = X86::getSETFromCond(CC);\r
-\r
- if (SwapArgs)\r
- std::swap(LHS, RHS);\r
-\r
- // Emit a compare of LHS/RHS.\r
- if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))\r
- return false;\r
-\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);\r
- updateValueMap(I, ResultReg);\r
- return true;\r
-}\r
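The SETE/SETNP/AND and SETNE/SETP/OR triples above follow from how UCOMISS/UCOMISD map a comparison onto EFLAGS: unordered sets ZF=PF=CF=1, greater clears all three, less sets only CF, and equal sets only ZF. A small C++ model of why OEQ needs two setcc's (illustrative only):

    #include <cmath>

    // fcmp oeq == "ZF set and PF clear" after UCOMISS, hence SETEr + SETNPr
    // combined with AND8rr; a single condition code cannot express it.
    static bool fcmpOEQ(float A, float B) {
      bool Unordered = std::isnan(A) || std::isnan(B);
      bool ZF = Unordered || (A == B); // ZF covers both equal and unordered
      bool PF = Unordered;             // PF singles out the unordered case
      return ZF && !PF;
    }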
-\r
-bool X86FastISel::X86SelectZExt(const Instruction *I) {\r
- EVT DstVT = TLI.getValueType(I->getType());\r
- if (!TLI.isTypeLegal(DstVT))\r
- return false;\r
-\r
- unsigned ResultReg = getRegForValue(I->getOperand(0));\r
- if (ResultReg == 0)\r
- return false;\r
-\r
- // Handle zero-extension from i1 to i8, which is common.\r
- MVT SrcVT = TLI.getSimpleValueType(I->getOperand(0)->getType());\r
- if (SrcVT.SimpleTy == MVT::i1) {\r
- // Set the high bits to zero.\r
- ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);\r
- SrcVT = MVT::i8;\r
-\r
- if (ResultReg == 0)\r
- return false;\r
- }\r
-\r
- if (DstVT == MVT::i64) {\r
- // Handle extension to 64-bits via sub-register shenanigans.\r
- unsigned MovInst;\r
-\r
- switch (SrcVT.SimpleTy) {\r
- case MVT::i8: MovInst = X86::MOVZX32rr8; break;\r
- case MVT::i16: MovInst = X86::MOVZX32rr16; break;\r
- case MVT::i32: MovInst = X86::MOV32rr; break;\r
- default: llvm_unreachable("Unexpected zext to i64 source type");\r
- }\r
-\r
- unsigned Result32 = createResultReg(&X86::GR32RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32)\r
- .addReg(ResultReg);\r
-\r
- ResultReg = createResultReg(&X86::GR64RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG),\r
- ResultReg)\r
- .addImm(0).addReg(Result32).addImm(X86::sub_32bit);\r
- } else if (DstVT != MVT::i8) {\r
- ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,\r
- ResultReg, /*Kill=*/true);\r
- if (ResultReg == 0)\r
- return false;\r
- }\r
-\r
- updateValueMap(I, ResultReg);\r
- return true;\r
-}\r
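The SUBREG_TO_REG step above is free at runtime because on x86-64 any write to a 32-bit register implicitly zeroes bits 63:32, so the 32-bit MOVZX/MOV already produces the full 64-bit zero-extended value. In source terms:

    #include <cstdint>

    // Typically compiles to a single 32-bit mov on x86-64: writing the low
    // half of the register clears the upper half, so no extra zext is needed.
    static uint64_t zext32To64(uint32_t X) {
      return X; // implicit zero-extension, modeled by SUBREG_TO_REG
    }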
-\r
-bool X86FastISel::X86SelectBranch(const Instruction *I) {\r
- // Unconditional branches are selected by tablegen-generated code.\r
- // Handle a conditional branch.\r
- const BranchInst *BI = cast<BranchInst>(I);\r
- MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];\r
- MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];\r
-\r
- // Fold the common case of a conditional branch with a comparison\r
- // in the same block (values defined on other blocks may not have\r
- // initialized registers).\r
- X86::CondCode CC;\r
- if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {\r
- if (CI->hasOneUse() && CI->getParent() == I->getParent()) {\r
- EVT VT = TLI.getValueType(CI->getOperand(0)->getType());\r
-\r
- // Try to optimize or fold the cmp.\r
- CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);\r
- switch (Predicate) {\r
- default: break;\r
- case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true;\r
- case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true;\r
- }\r
-\r
- const Value *CmpLHS = CI->getOperand(0);\r
- const Value *CmpRHS = CI->getOperand(1);\r
-\r
- // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.\r
- // We don't have to materialize a zero constant for this case and can just\r
- // use %x again on the RHS.\r
- if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {\r
- const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);\r
- if (CmpRHSC && CmpRHSC->isNullValue())\r
- CmpRHS = CmpLHS;\r
- }\r
-\r
- // Try to take advantage of fallthrough opportunities.\r
- if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {\r
- std::swap(TrueMBB, FalseMBB);\r
- Predicate = CmpInst::getInversePredicate(Predicate);\r
- }\r
-\r
- // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition\r
- // code check. Instead two branch instructions are required to check all\r
- // the flags. First we change the predicate to a supported condition code,\r
- // which will be the first branch. Later on we will emit the second\r
- // branch.\r
- bool NeedExtraBranch = false;\r
- switch (Predicate) {\r
- default: break;\r
- case CmpInst::FCMP_OEQ:\r
- std::swap(TrueMBB, FalseMBB); // fall-through\r
- case CmpInst::FCMP_UNE:\r
- NeedExtraBranch = true;\r
- Predicate = CmpInst::FCMP_ONE;\r
- break;\r
- }\r
-\r
- bool SwapArgs;\r
- unsigned BranchOpc;\r
- std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);\r
- assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");\r
-\r
- BranchOpc = X86::GetCondBranchFromCond(CC);\r
- if (SwapArgs)\r
- std::swap(CmpLHS, CmpRHS);\r
-\r
- // Emit a compare of the LHS and RHS, setting the flags.\r
- if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))\r
- return false;\r
-\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))\r
- .addMBB(TrueMBB);\r
-\r
- // X86 requires a second branch to handle UNE (and OEQ, which is mapped\r
- // to UNE above).\r
- if (NeedExtraBranch) {\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_1))\r
- .addMBB(TrueMBB);\r
- }\r
-\r
- // Obtain the branch weight and add the TrueBB to the successor list.\r
- uint32_t BranchWeight = 0;\r
- if (FuncInfo.BPI)\r
- BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),\r
- TrueMBB->getBasicBlock());\r
- FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);\r
-\r
- // Emit an unconditional branch to the FalseBB; fastEmitBranch obtains the\r
- // branch weight and adds FalseBB to the successor list.\r
- fastEmitBranch(FalseMBB, DbgLoc);\r
-\r
- return true;\r
- }\r
- } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {\r
- // Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which\r
- // typically happen for _Bool and C++ bools.\r
- MVT SourceVT;\r
- if (TI->hasOneUse() && TI->getParent() == I->getParent() &&\r
- isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {\r
- unsigned TestOpc = 0;\r
- switch (SourceVT.SimpleTy) {\r
- default: break;\r
- case MVT::i8: TestOpc = X86::TEST8ri; break;\r
- case MVT::i16: TestOpc = X86::TEST16ri; break;\r
- case MVT::i32: TestOpc = X86::TEST32ri; break;\r
- case MVT::i64: TestOpc = X86::TEST64ri32; break;\r
- }\r
- if (TestOpc) {\r
- unsigned OpReg = getRegForValue(TI->getOperand(0));\r
- if (OpReg == 0) return false;\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))\r
- .addReg(OpReg).addImm(1);\r
-\r
- unsigned JmpOpc = X86::JNE_1;\r
- if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {\r
- std::swap(TrueMBB, FalseMBB);\r
- JmpOpc = X86::JE_1;\r
- }\r
-\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc))\r
- .addMBB(TrueMBB);\r
- fastEmitBranch(FalseMBB, DbgLoc);\r
- uint32_t BranchWeight = 0;\r
- if (FuncInfo.BPI)\r
- BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),\r
- TrueMBB->getBasicBlock());\r
- FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);\r
- return true;\r
- }\r
- }\r
- } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {\r
- // Request a register for the condition even though we don't use it\r
- // directly; otherwise the intrinsic might be completely optimized away.\r
- unsigned TmpReg = getRegForValue(BI->getCondition());\r
- if (TmpReg == 0)\r
- return false;\r
-\r
- unsigned BranchOpc = X86::GetCondBranchFromCond(CC);\r
-\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))\r
- .addMBB(TrueMBB);\r
- fastEmitBranch(FalseMBB, DbgLoc);\r
- uint32_t BranchWeight = 0;\r
- if (FuncInfo.BPI)\r
- BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),\r
- TrueMBB->getBasicBlock());\r
- FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);\r
- return true;\r
- }\r
-\r
- // Otherwise do a clumsy setcc and re-test it.\r
- // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used\r
- // in an explicit cast, so make sure to handle that correctly.\r
- unsigned OpReg = getRegForValue(BI->getCondition());\r
- if (OpReg == 0) return false;\r
-\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))\r
- .addReg(OpReg).addImm(1);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_1))\r
- .addMBB(TrueMBB);\r
- fastEmitBranch(FalseMBB, DbgLoc);\r
- uint32_t BranchWeight = 0;\r
- if (FuncInfo.BPI)\r
- BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),\r
- TrueMBB->getBasicBlock());\r
- FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);\r
- return true;\r
-}\r
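The fallback path masks with 1 because, per the note above, an i1 condition lives in an 8-bit register whose upper seven bits may be garbage. The emitted TEST8ri/JNE pair behaves like this sketch:

    // Sketch of the setcc re-test fallback: branch on bit 0 only.
    static void branchOnI1(unsigned char Cond,
                           void (*TrueMBB)(), void (*FalseMBB)()) {
      if (Cond & 1)  // TEST8ri Cond, 1 ; JNE_1 TrueMBB
        TrueMBB();
      else           // fall through / unconditional branch
        FalseMBB();
    }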
-\r
-bool X86FastISel::X86SelectShift(const Instruction *I) {\r
- unsigned CReg = 0, OpReg = 0;\r
- const TargetRegisterClass *RC = nullptr;\r
- if (I->getType()->isIntegerTy(8)) {\r
- CReg = X86::CL;\r
- RC = &X86::GR8RegClass;\r
- switch (I->getOpcode()) {\r
- case Instruction::LShr: OpReg = X86::SHR8rCL; break;\r
- case Instruction::AShr: OpReg = X86::SAR8rCL; break;\r
- case Instruction::Shl: OpReg = X86::SHL8rCL; break;\r
- default: return false;\r
- }\r
- } else if (I->getType()->isIntegerTy(16)) {\r
- CReg = X86::CX;\r
- RC = &X86::GR16RegClass;\r
- switch (I->getOpcode()) {\r
- case Instruction::LShr: OpReg = X86::SHR16rCL; break;\r
- case Instruction::AShr: OpReg = X86::SAR16rCL; break;\r
- case Instruction::Shl: OpReg = X86::SHL16rCL; break;\r
- default: return false;\r
- }\r
- } else if (I->getType()->isIntegerTy(32)) {\r
- CReg = X86::ECX;\r
- RC = &X86::GR32RegClass;\r
- switch (I->getOpcode()) {\r
- case Instruction::LShr: OpReg = X86::SHR32rCL; break;\r
- case Instruction::AShr: OpReg = X86::SAR32rCL; break;\r
- case Instruction::Shl: OpReg = X86::SHL32rCL; break;\r
- default: return false;\r
- }\r
- } else if (I->getType()->isIntegerTy(64)) {\r
- CReg = X86::RCX;\r
- RC = &X86::GR64RegClass;\r
- switch (I->getOpcode()) {\r
- case Instruction::LShr: OpReg = X86::SHR64rCL; break;\r
- case Instruction::AShr: OpReg = X86::SAR64rCL; break;\r
- case Instruction::Shl: OpReg = X86::SHL64rCL; break;\r
- default: return false;\r
- }\r
- } else {\r
- return false;\r
- }\r
-\r
- MVT VT;\r
- if (!isTypeLegal(I->getType(), VT))\r
- return false;\r
-\r
- unsigned Op0Reg = getRegForValue(I->getOperand(0));\r
- if (Op0Reg == 0) return false;\r
-\r
- unsigned Op1Reg = getRegForValue(I->getOperand(1));\r
- if (Op1Reg == 0) return false;\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),\r
- CReg).addReg(Op1Reg);\r
-\r
- // The shift instruction uses X86::CL. If we defined a super-register\r
- // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.\r
- if (CReg != X86::CL)\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::KILL), X86::CL)\r
- .addReg(CReg, RegState::Kill);\r
-\r
- unsigned ResultReg = createResultReg(RC);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)\r
- .addReg(Op0Reg);\r
- updateValueMap(I, ResultReg);\r
- return true;\r
-}\r
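Variable-count shifts on x86 read their count exclusively from CL, which is why the count is first copied into CL/CX/ECX/RCX and then narrowed to CL with the KILL; the hardware masks the count (mod 32, or mod 64 for 64-bit operands). The resulting semantics, as a sketch:

    #include <cstdint>

    // SHL32rCL shifts by CL modulo 32; the COPY into ECX above is how the
    // count reaches the fixed CL register the instruction implicitly reads.
    static uint32_t shl32(uint32_t Val, uint8_t Count) {
      return Val << (Count & 31); // hardware masks the count to 5 bits
    }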
-\r
-bool X86FastISel::X86SelectDivRem(const Instruction *I) {\r
- const static unsigned NumTypes = 4; // i8, i16, i32, i64\r
- const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem\r
- const static bool S = true; // IsSigned\r
- const static bool U = false; // !IsSigned\r
- const static unsigned Copy = TargetOpcode::COPY;\r
- // For the X86 DIV/IDIV instruction, in most cases the dividend\r
- // (numerator) must be in a specific register pair highreg:lowreg,\r
- // producing the quotient in lowreg and the remainder in highreg.\r
- // For most data types, to set up the instruction, the dividend is\r
- // copied into lowreg, and lowreg is sign-extended or zero-extended\r
- // into highreg. The exception is i8, where the dividend is defined\r
- // as a single register rather than a register pair, and we\r
- // therefore directly sign-extend or zero-extend the dividend into\r
- // lowreg, instead of copying, and ignore the highreg.\r
- const static struct DivRemEntry {\r
- // The following portion depends only on the data type.\r
- const TargetRegisterClass *RC;\r
- unsigned LowInReg; // low part of the register pair\r
- unsigned HighInReg; // high part of the register pair\r
- // The following portion depends on both the data type and the operation.\r
- struct DivRemResult {\r
- unsigned OpDivRem; // The specific DIV/IDIV opcode to use.\r
- unsigned OpSignExtend; // Opcode for sign-extending lowreg into\r
- // highreg, or copying a zero into highreg.\r
- unsigned OpCopy; // Opcode for copying dividend into lowreg, or\r
- // zero/sign-extending into lowreg for i8.\r
- unsigned DivRemResultReg; // Register containing the desired result.\r
- bool IsOpSigned; // Whether to use signed or unsigned form.\r
- } ResultTable[NumOps];\r
- } OpTable[NumTypes] = {\r
- { &X86::GR8RegClass, X86::AX, 0, {\r
- { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv\r
- { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem\r
- { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv\r
- { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem\r
- }\r
- }, // i8\r
- { &X86::GR16RegClass, X86::AX, X86::DX, {\r
- { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv\r
- { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem\r
- { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv\r
- { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem\r
- }\r
- }, // i16\r
- { &X86::GR32RegClass, X86::EAX, X86::EDX, {\r
- { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv\r
- { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem\r
- { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv\r
- { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem\r
- }\r
- }, // i32\r
- { &X86::GR64RegClass, X86::RAX, X86::RDX, {\r
- { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv\r
- { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem\r
- { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv\r
- { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem\r
- }\r
- }, // i64\r
- };\r
-\r
- MVT VT;\r
- if (!isTypeLegal(I->getType(), VT))\r
- return false;\r
-\r
- unsigned TypeIndex, OpIndex;\r
- switch (VT.SimpleTy) {\r
- default: return false;\r
- case MVT::i8: TypeIndex = 0; break;\r
- case MVT::i16: TypeIndex = 1; break;\r
- case MVT::i32: TypeIndex = 2; break;\r
- case MVT::i64: TypeIndex = 3;\r
- if (!Subtarget->is64Bit())\r
- return false;\r
- break;\r
- }\r
-\r
- switch (I->getOpcode()) {\r
- default: llvm_unreachable("Unexpected div/rem opcode");\r
- case Instruction::SDiv: OpIndex = 0; break;\r
- case Instruction::SRem: OpIndex = 1; break;\r
- case Instruction::UDiv: OpIndex = 2; break;\r
- case Instruction::URem: OpIndex = 3; break;\r
- }\r
-\r
- const DivRemEntry &TypeEntry = OpTable[TypeIndex];\r
- const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];\r
- unsigned Op0Reg = getRegForValue(I->getOperand(0));\r
- if (Op0Reg == 0)\r
- return false;\r
- unsigned Op1Reg = getRegForValue(I->getOperand(1));\r
- if (Op1Reg == 0)\r
- return false;\r
-\r
- // Move op0 into low-order input register.\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);\r
- // Zero-extend or sign-extend into high-order input register.\r
- if (OpEntry.OpSignExtend) {\r
- if (OpEntry.IsOpSigned)\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(OpEntry.OpSignExtend));\r
- else {\r
- unsigned Zero32 = createResultReg(&X86::GR32RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(X86::MOV32r0), Zero32);\r
-\r
- // Copy the zero into the appropriate sub/super/identical physical\r
- // register. Unfortunately the operations needed are not uniform enough\r
- // to fit neatly into the table above.\r
- if (VT.SimpleTy == MVT::i16) {\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Copy), TypeEntry.HighInReg)\r
- .addReg(Zero32, 0, X86::sub_16bit);\r
- } else if (VT.SimpleTy == MVT::i32) {\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Copy), TypeEntry.HighInReg)\r
- .addReg(Zero32);\r
- } else if (VT.SimpleTy == MVT::i64) {\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)\r
- .addImm(0).addReg(Zero32).addImm(X86::sub_32bit);\r
- }\r
- }\r
- }\r
- // Generate the DIV/IDIV instruction.\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);\r
- // For i8 remainder, we can't reference AH directly, as we'll end\r
- // up with bogus copies like %R9B = COPY %AH. Reference AX\r
- // instead to prevent AH references in a REX instruction.\r
- //\r
- // The current assumption of the fast register allocator is that isel\r
- // won't generate explicit references to the GPR8_NOREX registers. If\r
- // the allocator and/or the backend get enhanced to be more robust in\r
- // that regard, this can be, and should be, removed.\r
- unsigned ResultReg = 0;\r
- if ((I->getOpcode() == Instruction::SRem ||\r
- I->getOpcode() == Instruction::URem) &&\r
- OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {\r
- unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);\r
- unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Copy), SourceSuperReg).addReg(X86::AX);\r
-\r
- // Shift AX right by 8 bits instead of using AH.\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri),\r
- ResultSuperReg).addReg(SourceSuperReg).addImm(8);\r
-\r
- // Now reference the 8-bit subreg of the result.\r
- ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,\r
- /*Kill=*/true, X86::sub_8bit);\r
- }\r
- // Copy the result out of the physreg if we haven't already.\r
- if (!ResultReg) {\r
- ResultReg = createResultReg(TypeEntry.RC);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)\r
- .addReg(OpEntry.DivRemResultReg);\r
- }\r
- updateValueMap(I, ResultReg);\r
-\r
- return true;\r
-}\r
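To make the table concrete: for an i32 unsigned divide, op0 is copied into EAX, EDX is zeroed via MOV32r0 plus a copy, and DIV32r leaves the quotient in EAX and the remainder in EDX. A C++ model of that register-pair contract:

    #include <cstdint>

    struct DivRem32 { uint32_t Quot, Rem; };

    // Models `div r/m32`: EDX:EAX is divided by the operand; the quotient
    // lands in EAX and the remainder in EDX (the UDiv/URem rows above).
    static DivRem32 udiv32(uint32_t Dividend, uint32_t Divisor) {
      uint64_t EDX_EAX = Dividend; // EDX zeroed, EAX = Dividend
      return { static_cast<uint32_t>(EDX_EAX / Divisor),
               static_cast<uint32_t>(EDX_EAX % Divisor) };
    }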
-\r
-/// \brief Emit a conditional move instruction (if they are supported) to lower\r
-/// the select.\r
-bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {\r
- // Check if the subtarget supports these instructions.\r
- if (!Subtarget->hasCMov())\r
- return false;\r
-\r
- // FIXME: Add support for i8.\r
- if (RetVT < MVT::i16 || RetVT > MVT::i64)\r
- return false;\r
-\r
- const Value *Cond = I->getOperand(0);\r
- const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);\r
- bool NeedTest = true;\r
- X86::CondCode CC = X86::COND_NE;\r
-\r
- // Optimize conditions coming from a compare if both instructions are in the\r
- // same basic block (values defined in other basic blocks may not have\r
- // initialized registers).\r
- const auto *CI = dyn_cast<CmpInst>(Cond);\r
- if (CI && (CI->getParent() == I->getParent())) {\r
- CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);\r
-\r
- // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.\r
- static unsigned SETFOpcTable[2][3] = {\r
- { X86::SETNPr, X86::SETEr , X86::TEST8rr },\r
- { X86::SETPr, X86::SETNEr, X86::OR8rr }\r
- };\r
- unsigned *SETFOpc = nullptr;\r
- switch (Predicate) {\r
- default: break;\r
- case CmpInst::FCMP_OEQ:\r
- SETFOpc = &SETFOpcTable[0][0];\r
- Predicate = CmpInst::ICMP_NE;\r
- break;\r
- case CmpInst::FCMP_UNE:\r
- SETFOpc = &SETFOpcTable[1][0];\r
- Predicate = CmpInst::ICMP_NE;\r
- break;\r
- }\r
-\r
- bool NeedSwap;\r
- std::tie(CC, NeedSwap) = getX86ConditionCode(Predicate);\r
- assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");\r
-\r
- const Value *CmpLHS = CI->getOperand(0);\r
- const Value *CmpRHS = CI->getOperand(1);\r
- if (NeedSwap)\r
- std::swap(CmpLHS, CmpRHS);\r
-\r
- EVT CmpVT = TLI.getValueType(CmpLHS->getType());\r
- // Emit a compare of the LHS and RHS, setting the flags.\r
- if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))\r
- return false;\r
-\r
- if (SETFOpc) {\r
- unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);\r
- unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),\r
- FlagReg1);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),\r
- FlagReg2);\r
- auto const &II = TII.get(SETFOpc[2]);\r
- if (II.getNumDefs()) {\r
- unsigned TmpReg = createResultReg(&X86::GR8RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)\r
- .addReg(FlagReg2).addReg(FlagReg1);\r
- } else {\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)\r
- .addReg(FlagReg2).addReg(FlagReg1);\r
- }\r
- }\r
- NeedTest = false;\r
- } else if (foldX86XALUIntrinsic(CC, I, Cond)) {\r
- // Request a register for the condition even though we don't use it\r
- // directly; otherwise the intrinsic might be completely optimized away.\r
- unsigned TmpReg = getRegForValue(Cond);\r
- if (TmpReg == 0)\r
- return false;\r
-\r
- NeedTest = false;\r
- }\r
-\r
- if (NeedTest) {\r
- // Selects operate on i1; however, CondReg is 8 bits wide and may contain\r
- // garbage. Only the least significant bit is guaranteed to be accurate, so\r
- // reading more than the LSB may yield a nonzero value even when the LSB is\r
- // zero. Therefore, we have to truncate CondReg to i1 for the select, which\r
- // is achieved by performing a TEST against 1.\r
- unsigned CondReg = getRegForValue(Cond);\r
- if (CondReg == 0)\r
- return false;\r
- bool CondIsKill = hasTrivialKill(Cond);\r
-\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))\r
- .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);\r
- }\r
-\r
- const Value *LHS = I->getOperand(1);\r
- const Value *RHS = I->getOperand(2);\r
-\r
- unsigned RHSReg = getRegForValue(RHS);\r
- bool RHSIsKill = hasTrivialKill(RHS);\r
-\r
- unsigned LHSReg = getRegForValue(LHS);\r
- bool LHSIsKill = hasTrivialKill(LHS);\r
-\r
- if (!LHSReg || !RHSReg)\r
- return false;\r
-\r
- unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());\r
- unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,\r
- LHSReg, LHSIsKill);\r
- updateValueMap(I, ResultReg);\r
- return true;\r
-}\r
-\r
-/// \brief Emit SSE instructions to lower the select.\r
-///\r
-/// Try to use SSE1/SSE2 instructions to simulate a select without branches.\r
-/// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary\r
-/// SSE instructions are available.\r
-bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {\r
- // Optimize conditions coming from a compare if both instructions are in the\r
- // same basic block (values defined in other basic blocks may not have\r
- // initialized registers).\r
- const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));\r
- if (!CI || (CI->getParent() != I->getParent()))\r
- return false;\r
-\r
- if (I->getType() != CI->getOperand(0)->getType() ||\r
- !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||\r
- (Subtarget->hasSSE2() && RetVT == MVT::f64)))\r
- return false;\r
-\r
- const Value *CmpLHS = CI->getOperand(0);\r
- const Value *CmpRHS = CI->getOperand(1);\r
- CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);\r
-\r
- // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.\r
- // We don't have to materialize a zero constant for this case and can just use\r
- // %x again on the RHS.\r
- if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {\r
- const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);\r
- if (CmpRHSC && CmpRHSC->isNullValue())\r
- CmpRHS = CmpLHS;\r
- }\r
-\r
- unsigned CC;\r
- bool NeedSwap;\r
- std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);\r
- if (CC > 7)\r
- return false;\r
-\r
- if (NeedSwap)\r
- std::swap(CmpLHS, CmpRHS);\r
-\r
- static unsigned OpcTable[2][2][4] = {\r
- { { X86::CMPSSrr, X86::FsANDPSrr, X86::FsANDNPSrr, X86::FsORPSrr },\r
- { X86::VCMPSSrr, X86::VFsANDPSrr, X86::VFsANDNPSrr, X86::VFsORPSrr } },\r
- { { X86::CMPSDrr, X86::FsANDPDrr, X86::FsANDNPDrr, X86::FsORPDrr },\r
- { X86::VCMPSDrr, X86::VFsANDPDrr, X86::VFsANDNPDrr, X86::VFsORPDrr } }\r
- };\r
-\r
- bool HasAVX = Subtarget->hasAVX();\r
- unsigned *Opc = nullptr;\r
- switch (RetVT.SimpleTy) {\r
- default: return false;\r
- case MVT::f32: Opc = &OpcTable[0][HasAVX][0]; break;\r
- case MVT::f64: Opc = &OpcTable[1][HasAVX][0]; break;\r
- }\r
-\r
- const Value *LHS = I->getOperand(1);\r
- const Value *RHS = I->getOperand(2);\r
-\r
- unsigned LHSReg = getRegForValue(LHS);\r
- bool LHSIsKill = hasTrivialKill(LHS);\r
-\r
- unsigned RHSReg = getRegForValue(RHS);\r
- bool RHSIsKill = hasTrivialKill(RHS);\r
-\r
- unsigned CmpLHSReg = getRegForValue(CmpLHS);\r
- bool CmpLHSIsKill = hasTrivialKill(CmpLHS);\r
-\r
- unsigned CmpRHSReg = getRegForValue(CmpRHS);\r
- bool CmpRHSIsKill = hasTrivialKill(CmpRHS);\r
-\r
- if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)\r
- return false;\r
-\r
- const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);\r
- unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,\r
- CmpRHSReg, CmpRHSIsKill, CC);\r
- unsigned AndReg = fastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,\r
- LHSReg, LHSIsKill);\r
- unsigned AndNReg = fastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,\r
- RHSReg, RHSIsKill);\r
- unsigned ResultReg = fastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,\r
- AndReg, /*IsKill=*/true);\r
- updateValueMap(I, ResultReg);\r
- return true;\r
-}\r
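The four opcodes per row implement the classic branchless select: the CMP produces an all-ones or all-zeros mask, and AND/ANDN/OR then pick one operand bitwise. A sketch of the same computation on raw bit patterns (illustrative):

    #include <cstdint>
    #include <cstring>

    // Mask = cmp ? ~0 : 0; Result = (Mask & LHS) | (~Mask & RHS) -- the
    // CMPSSrr / FsANDPSrr / FsANDNPSrr / FsORPSrr sequence in scalar form.
    static float sseSelect(bool Cond, float LHSVal, float RHSVal) {
      uint32_t Mask = Cond ? ~0u : 0u, L, R;
      std::memcpy(&L, &LHSVal, sizeof L);
      std::memcpy(&R, &RHSVal, sizeof R);
      uint32_t Bits = (Mask & L) | (~Mask & R);
      float Result;
      std::memcpy(&Result, &Bits, sizeof Result);
      return Result;
    }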
-\r
-bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {\r
- // These are pseudo CMOV instructions and will be later expanded into\r
- // control flow.\r
- unsigned Opc;\r
- switch (RetVT.SimpleTy) {\r
- default: return false;\r
- case MVT::i8: Opc = X86::CMOV_GR8; break;\r
- case MVT::i16: Opc = X86::CMOV_GR16; break;\r
- case MVT::i32: Opc = X86::CMOV_GR32; break;\r
- case MVT::f32: Opc = X86::CMOV_FR32; break;\r
- case MVT::f64: Opc = X86::CMOV_FR64; break;\r
- }\r
-\r
- const Value *Cond = I->getOperand(0);\r
- X86::CondCode CC = X86::COND_NE;\r
-\r
- // Optimize conditions coming from a compare if both instructions are in the\r
- // same basic block (values defined in other basic blocks may not have\r
- // initialized registers).\r
- const auto *CI = dyn_cast<CmpInst>(Cond);\r
- if (CI && (CI->getParent() == I->getParent())) {\r
- bool NeedSwap;\r
- std::tie(CC, NeedSwap) = getX86ConditionCode(CI->getPredicate());\r
- if (CC > X86::LAST_VALID_COND)\r
- return false;\r
-\r
- const Value *CmpLHS = CI->getOperand(0);\r
- const Value *CmpRHS = CI->getOperand(1);\r
-\r
- if (NeedSwap)\r
- std::swap(CmpLHS, CmpRHS);\r
-\r
- EVT CmpVT = TLI.getValueType(CmpLHS->getType());\r
- if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))\r
- return false;\r
- } else {\r
- unsigned CondReg = getRegForValue(Cond);\r
- if (CondReg == 0)\r
- return false;\r
- bool CondIsKill = hasTrivialKill(Cond);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))\r
- .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);\r
- }\r
-\r
- const Value *LHS = I->getOperand(1);\r
- const Value *RHS = I->getOperand(2);\r
-\r
- unsigned LHSReg = getRegForValue(LHS);\r
- bool LHSIsKill = hasTrivialKill(LHS);\r
-\r
- unsigned RHSReg = getRegForValue(RHS);\r
- bool RHSIsKill = hasTrivialKill(RHS);\r
-\r
- if (!LHSReg || !RHSReg)\r
- return false;\r
-\r
- const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);\r
-\r
- unsigned ResultReg =\r
- fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);\r
- updateValueMap(I, ResultReg);\r
- return true;\r
-}\r
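These pseudos exist because a real CMOV is not available for every type or subtarget; a later expansion pass rewrites each one as a branch diamond. Conceptually:

    // What CMOV_GR32 and friends become once real control flow is created:
    // a conditional branch around one of two assignments, joined by a PHI.
    static int pseudoCMovShape(bool CC, int TrueVal, int FalseVal) {
      int Result;
      if (CC)               // JCC over the false block
        Result = TrueVal;
      else
        Result = FalseVal;
      return Result;        // join block (PHI of the two copies)
    }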
-\r
-bool X86FastISel::X86SelectSelect(const Instruction *I) {\r
- MVT RetVT;\r
- if (!isTypeLegal(I->getType(), RetVT))\r
- return false;\r
-\r
- // Check if we can fold the select.\r
- if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {\r
- CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);\r
- const Value *Opnd = nullptr;\r
- switch (Predicate) {\r
- default: break;\r
- case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;\r
- case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break;\r
- }\r
- // No need for a select anymore - this is an unconditional move.\r
- if (Opnd) {\r
- unsigned OpReg = getRegForValue(Opnd);\r
- if (OpReg == 0)\r
- return false;\r
- bool OpIsKill = hasTrivialKill(Opnd);\r
- const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);\r
- unsigned ResultReg = createResultReg(RC);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), ResultReg)\r
- .addReg(OpReg, getKillRegState(OpIsKill));\r
- updateValueMap(I, ResultReg);\r
- return true;\r
- }\r
- }\r
-\r
- // First try to use real conditional move instructions.\r
- if (X86FastEmitCMoveSelect(RetVT, I))\r
- return true;\r
-\r
- // Try to use a sequence of SSE instructions to simulate a conditional move.\r
- if (X86FastEmitSSESelect(RetVT, I))\r
- return true;\r
-\r
- // Fall-back to pseudo conditional move instructions, which will be later\r
- // converted to control-flow.\r
- if (X86FastEmitPseudoSelect(RetVT, I))\r
- return true;\r
-\r
- return false;\r
-}\r
-\r
-bool X86FastISel::X86SelectFPExt(const Instruction *I) {\r
- // fpext from float to double.\r
- if (X86ScalarSSEf64 &&\r
- I->getType()->isDoubleTy()) {\r
- const Value *V = I->getOperand(0);\r
- if (V->getType()->isFloatTy()) {\r
- unsigned OpReg = getRegForValue(V);\r
- if (OpReg == 0) return false;\r
- unsigned ResultReg = createResultReg(&X86::FR64RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(X86::CVTSS2SDrr), ResultReg)\r
- .addReg(OpReg);\r
- updateValueMap(I, ResultReg);\r
- return true;\r
- }\r
- }\r
-\r
- return false;\r
-}\r
-\r
-bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {\r
- if (X86ScalarSSEf64) {\r
- if (I->getType()->isFloatTy()) {\r
- const Value *V = I->getOperand(0);\r
- if (V->getType()->isDoubleTy()) {\r
- unsigned OpReg = getRegForValue(V);\r
- if (OpReg == 0) return false;\r
- unsigned ResultReg = createResultReg(&X86::FR32RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(X86::CVTSD2SSrr), ResultReg)\r
- .addReg(OpReg);\r
- updateValueMap(I, ResultReg);\r
- return true;\r
- }\r
- }\r
- }\r
-\r
- return false;\r
-}\r
-\r
-bool X86FastISel::X86SelectTrunc(const Instruction *I) {\r
- EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());\r
- EVT DstVT = TLI.getValueType(I->getType());\r
-\r
- // This code only handles truncation to byte.\r
- if (DstVT != MVT::i8 && DstVT != MVT::i1)\r
- return false;\r
- if (!TLI.isTypeLegal(SrcVT))\r
- return false;\r
-\r
- unsigned InputReg = getRegForValue(I->getOperand(0));\r
- if (!InputReg)\r
- // Unhandled operand. Halt "fast" selection and bail.\r
- return false;\r
-\r
- if (SrcVT == MVT::i8) {\r
- // Truncate from i8 to i1; no code needed.\r
- updateValueMap(I, InputReg);\r
- return true;\r
- }\r
-\r
- if (!Subtarget->is64Bit()) {\r
- // If we're on x86-32, we can't extract an i8 from a general register.\r
- // First issue a copy to GR16_ABCD or GR32_ABCD.\r
- const TargetRegisterClass *CopyRC =\r
- (SrcVT == MVT::i16) ? &X86::GR16_ABCDRegClass : &X86::GR32_ABCDRegClass;\r
- unsigned CopyReg = createResultReg(CopyRC);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), CopyReg).addReg(InputReg);\r
- InputReg = CopyReg;\r
- }\r
-\r
- // Issue an extract_subreg.\r
- unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,\r
- InputReg, /*Kill=*/true,\r
- X86::sub_8bit);\r
- if (!ResultReg)\r
- return false;\r
-\r
- updateValueMap(I, ResultReg);\r
- return true;\r
-}\r
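The ABCD copy above exists because in 32-bit mode only EAX/EBX/ECX/EDX expose a low-byte subregister (AL/BL/CL/DL), while on x86-64 every GPR does. The operation itself is just a low-byte extract:

    #include <cstdint>

    // The sub_8bit extract emitted above: keep the low byte. The *_ABCD
    // register-class copy is purely an x86-32 encoding restriction.
    static uint8_t truncToI8(uint32_t X) {
      return static_cast<uint8_t>(X);
    }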
-\r
-bool X86FastISel::IsMemcpySmall(uint64_t Len) {\r
- return Len <= (Subtarget->is64Bit() ? 32 : 16);\r
-}\r
-\r
-bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,\r
- X86AddressMode SrcAM, uint64_t Len) {\r
-\r
- // Make sure we don't bloat code by inlining very large memcpys.\r
- if (!IsMemcpySmall(Len))\r
- return false;\r
-\r
- bool i64Legal = Subtarget->is64Bit();\r
-\r
- // We don't care about alignment here since we just emit integer accesses.\r
- while (Len) {\r
- MVT VT;\r
- if (Len >= 8 && i64Legal)\r
- VT = MVT::i64;\r
- else if (Len >= 4)\r
- VT = MVT::i32;\r
- else if (Len >= 2)\r
- VT = MVT::i16;\r
- else\r
- VT = MVT::i8;\r
-\r
- unsigned Reg;\r
- bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);\r
- RV &= X86FastEmitStore(VT, Reg, /*Kill=*/true, DestAM);\r
- assert(RV && "Failed to emit load or store??");\r
-\r
- unsigned Size = VT.getSizeInBits()/8;\r
- Len -= Size;\r
- DestAM.Disp += Size;\r
- SrcAM.Disp += Size;\r
- }\r
-\r
- return true;\r
-}\r
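The loop above always emits the widest access that still fits, so a 15-byte copy on x86-64 becomes i64+i32+i16+i8. The same strategy as a freestanding sketch (hypothetical helper; like the original, it ignores alignment):

    #include <cstdint>
    #include <cstring>

    // Mirror of TryEmitSmallMemcpy's chunking: widest first, then narrower.
    static void smallMemcpy(char *Dst, const char *Src, uint64_t Len,
                            bool I64Legal) {
      while (Len) {
        uint64_t Size = (Len >= 8 && I64Legal) ? 8
                      : (Len >= 4) ? 4
                      : (Len >= 2) ? 2 : 1;
        std::memcpy(Dst, Src, Size); // stands in for one load/store pair
        Dst += Size; Src += Size; Len -= Size;
      }
    }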
-\r
-bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {\r
- // FIXME: Handle more intrinsics.\r
- switch (II->getIntrinsicID()) {\r
- default: return false;\r
- case Intrinsic::frameaddress: {\r
- Type *RetTy = II->getCalledFunction()->getReturnType();\r
-\r
- MVT VT;\r
- if (!isTypeLegal(RetTy, VT))\r
- return false;\r
-\r
- unsigned Opc;\r
- const TargetRegisterClass *RC = nullptr;\r
-\r
- switch (VT.SimpleTy) {\r
- default: llvm_unreachable("Invalid result type for frameaddress.");\r
- case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;\r
- case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;\r
- }\r
-\r
- // This needs to be set before we call getPtrSizedFrameRegister, otherwise\r
- // we get the wrong frame register.\r
- MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();\r
- MFI->setFrameAddressIsTaken(true);\r
-\r
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(\r
- TM.getSubtargetImpl()->getRegisterInfo());\r
- unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*(FuncInfo.MF));\r
- assert(((FrameReg == X86::RBP && VT == MVT::i64) ||\r
- (FrameReg == X86::EBP && VT == MVT::i32)) &&\r
- "Invalid Frame Register!");\r
-\r
- // Always make a copy of the frame register into a vreg first, so that we\r
- // never directly reference the frame register (the\r
- // TwoAddressInstructionPass doesn't like that).\r
- unsigned SrcReg = createResultReg(RC);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);\r
-\r
- // Now recursively load from the frame address.\r
- // movq (%rbp), %rax\r
- // movq (%rax), %rax\r
- // movq (%rax), %rax\r
- // ...\r
- unsigned DestReg;\r
- unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();\r
- while (Depth--) {\r
- DestReg = createResultReg(RC);\r
- addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Opc), DestReg), SrcReg);\r
- SrcReg = DestReg;\r
- }\r
-\r
- updateValueMap(II, SrcReg);\r
- return true;\r
- }\r
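The movq chain in the comment is the standard saved-frame-pointer walk: each frame's slot 0 holds the caller's frame pointer. The same walk in C++ (a sketch assuming frame pointers are preserved, which setFrameAddressIsTaken guarantees here; __builtin_frame_address is a GCC/Clang builtin):

    // Walk Depth frames up the saved-RBP chain, like the emitted loads.
    static void *frameAddress(unsigned Depth) {
      void **FP = static_cast<void **>(__builtin_frame_address(0));
      while (Depth--)
        FP = static_cast<void **>(*FP); // movq (%rbp), %rax
      return FP;
    }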
- case Intrinsic::memcpy: {\r
- const MemCpyInst *MCI = cast<MemCpyInst>(II);\r
- // Don't handle volatile or variable length memcpys.\r
- if (MCI->isVolatile())\r
- return false;\r
-\r
- if (isa<ConstantInt>(MCI->getLength())) {\r
- // Small memcpys are common enough that we want to do them\r
- // without a call if possible.\r
- uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();\r
- if (IsMemcpySmall(Len)) {\r
- X86AddressMode DestAM, SrcAM;\r
- if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||\r
- !X86SelectAddress(MCI->getRawSource(), SrcAM))\r
- return false;\r
- TryEmitSmallMemcpy(DestAM, SrcAM, Len);\r
- return true;\r
- }\r
- }\r
-\r
- unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;\r
- if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))\r
- return false;\r
-\r
- if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)\r
- return false;\r
-\r
- return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);\r
- }\r
- case Intrinsic::memset: {\r
- const MemSetInst *MSI = cast<MemSetInst>(II);\r
-\r
- if (MSI->isVolatile())\r
- return false;\r
-\r
- unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;\r
- if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))\r
- return false;\r
-\r
- if (MSI->getDestAddressSpace() > 255)\r
- return false;\r
-\r
- return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);\r
- }\r
- case Intrinsic::stackprotector: {\r
- // Emit code to store the stack guard onto the stack.\r
- EVT PtrTy = TLI.getPointerTy();\r
-\r
- const Value *Op1 = II->getArgOperand(0); // The guard's value.\r
- const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));\r
-\r
- MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();\r
- MFI->setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);\r
-\r
- // Grab the frame index.\r
- X86AddressMode AM;\r
- if (!X86SelectAddress(Slot, AM)) return false;\r
- if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;\r
- return true;\r
- }\r
- case Intrinsic::dbg_declare: {\r
- const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);\r
- X86AddressMode AM;\r
- assert(DI->getAddress() && "Null address should be checked earlier!");\r
- if (!X86SelectAddress(DI->getAddress(), AM))\r
- return false;\r
- const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);\r
- // FIXME may need to add RegState::Debug to any registers produced,\r
- // although ESP/EBP should be the only ones at the moment.\r
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM)\r
- .addImm(0)\r
- .addMetadata(DI->getVariable())\r
- .addMetadata(DI->getExpression());\r
- return true;\r
- }\r
- case Intrinsic::trap: {\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP));\r
- return true;\r
- }\r
- case Intrinsic::sqrt: {\r
- if (!Subtarget->hasSSE1())\r
- return false;\r
-\r
- Type *RetTy = II->getCalledFunction()->getReturnType();\r
-\r
- MVT VT;\r
- if (!isTypeLegal(RetTy, VT))\r
- return false;\r
-\r
- // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT\r
- // is not generated by FastISel yet.\r
- // FIXME: Update this code once tablegen can handle it.\r
- static const unsigned SqrtOpc[2][2] = {\r
- {X86::SQRTSSr, X86::VSQRTSSr},\r
- {X86::SQRTSDr, X86::VSQRTSDr}\r
- };\r
- bool HasAVX = Subtarget->hasAVX();\r
- unsigned Opc;\r
- const TargetRegisterClass *RC;\r
- switch (VT.SimpleTy) {\r
- default: return false;\r
- case MVT::f32: Opc = SqrtOpc[0][HasAVX]; RC = &X86::FR32RegClass; break;\r
- case MVT::f64: Opc = SqrtOpc[1][HasAVX]; RC = &X86::FR64RegClass; break;\r
- }\r
-\r
- const Value *SrcVal = II->getArgOperand(0);\r
- unsigned SrcReg = getRegForValue(SrcVal);\r
-\r
- if (SrcReg == 0)\r
- return false;\r
-\r
- unsigned ImplicitDefReg = 0;\r
- if (HasAVX) {\r
- ImplicitDefReg = createResultReg(RC);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);\r
- }\r
-\r
- unsigned ResultReg = createResultReg(RC);\r
- MachineInstrBuilder MIB;\r
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),\r
- ResultReg);\r
-\r
- if (ImplicitDefReg)\r
- MIB.addReg(ImplicitDefReg);\r
-\r
- MIB.addReg(SrcReg);\r
-\r
- updateValueMap(II, ResultReg);\r
- return true;\r
- }\r
- case Intrinsic::sadd_with_overflow:\r
- case Intrinsic::uadd_with_overflow:\r
- case Intrinsic::ssub_with_overflow:\r
- case Intrinsic::usub_with_overflow:\r
- case Intrinsic::smul_with_overflow:\r
- case Intrinsic::umul_with_overflow: {\r
- // This implements the basic lowering of the xalu with overflow intrinsics\r
- // into add/sub/mul followed by either seto or setb.\r
- const Function *Callee = II->getCalledFunction();\r
- auto *Ty = cast<StructType>(Callee->getReturnType());\r
- Type *RetTy = Ty->getTypeAtIndex(0U);\r
- Type *CondTy = Ty->getTypeAtIndex(1);\r
-\r
- MVT VT;\r
- if (!isTypeLegal(RetTy, VT))\r
- return false;\r
-\r
- if (VT < MVT::i8 || VT > MVT::i64)\r
- return false;\r
-\r
- const Value *LHS = II->getArgOperand(0);\r
- const Value *RHS = II->getArgOperand(1);\r
-\r
- // Canonicalize immediate to the RHS.\r
- if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&\r
- isCommutativeIntrinsic(II))\r
- std::swap(LHS, RHS);\r
-\r
- bool UseIncDec = false;\r
- if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isOne())\r
- UseIncDec = true;\r
-\r
- unsigned BaseOpc, CondOpc;\r
- switch (II->getIntrinsicID()) {\r
- default: llvm_unreachable("Unexpected intrinsic!");\r
- case Intrinsic::sadd_with_overflow:\r
- BaseOpc = UseIncDec ? unsigned(X86ISD::INC) : unsigned(ISD::ADD);\r
- CondOpc = X86::SETOr;\r
- break;\r
- case Intrinsic::uadd_with_overflow:\r
- BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break;\r
- case Intrinsic::ssub_with_overflow:\r
- BaseOpc = UseIncDec ? unsigned(X86ISD::DEC) : unsigned(ISD::SUB);\r
- CondOpc = X86::SETOr;\r
- break;\r
- case Intrinsic::usub_with_overflow:\r
- BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break;\r
- case Intrinsic::smul_with_overflow:\r
- BaseOpc = X86ISD::SMUL; CondOpc = X86::SETOr; break;\r
- case Intrinsic::umul_with_overflow:\r
- BaseOpc = X86ISD::UMUL; CondOpc = X86::SETOr; break;\r
- }\r
-\r
- unsigned LHSReg = getRegForValue(LHS);\r
- if (LHSReg == 0)\r
- return false;\r
- bool LHSIsKill = hasTrivialKill(LHS);\r
-\r
- unsigned ResultReg = 0;\r
- // Check if we have an immediate version.\r
- if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {\r
- static const unsigned Opc[2][4] = {\r
- { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },\r
- { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }\r
- };\r
-\r
- if (BaseOpc == X86ISD::INC || BaseOpc == X86ISD::DEC) {\r
- ResultReg = createResultReg(TLI.getRegClassFor(VT));\r
- bool IsDec = BaseOpc == X86ISD::DEC;\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)\r
- .addReg(LHSReg, getKillRegState(LHSIsKill));\r
- } else\r
- ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,\r
- CI->getZExtValue());\r
- }\r
-\r
- unsigned RHSReg;\r
- bool RHSIsKill;\r
- if (!ResultReg) {\r
- RHSReg = getRegForValue(RHS);\r
- if (RHSReg == 0)\r
- return false;\r
- RHSIsKill = hasTrivialKill(RHS);\r
- ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,\r
- RHSIsKill);\r
- }\r
-\r
- // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit\r
- // it manually.\r
- if (BaseOpc == X86ISD::UMUL && !ResultReg) {\r
- static const unsigned MULOpc[] =\r
- { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };\r
- static const unsigned Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };\r
- // First copy the first operand into RAX, which is an implicit input to\r
- // the X86::MUL*r instruction.\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])\r
- .addReg(LHSReg, getKillRegState(LHSIsKill));\r
- ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],\r
- TLI.getRegClassFor(VT), RHSReg, RHSIsKill);\r
- } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {\r
- static const unsigned MULOpc[] =\r
- { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };\r
- if (VT == MVT::i8) {\r
- // Copy the first operand into AL, which is an implicit input to the\r
- // X86::IMUL8r instruction.\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), X86::AL)\r
- .addReg(LHSReg, getKillRegState(LHSIsKill));\r
- ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,\r
- RHSIsKill);\r
- } else\r
- ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],\r
- TLI.getRegClassFor(VT), LHSReg, LHSIsKill,\r
- RHSReg, RHSIsKill);\r
- }\r
-\r
- if (!ResultReg)\r
- return false;\r
-\r
- unsigned ResultReg2 = FuncInfo.CreateRegs(CondTy);\r
- assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc),\r
- ResultReg2);\r
-\r
- updateValueMap(II, ResultReg, 2);\r
- return true;\r
- }\r
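The lowering pairs the arithmetic instruction with SETO (signed overflow, OF) or SETB (unsigned carry, CF) to produce the i1 second result. That two-result shape matches the overflow builtins; a sketch assuming GCC/Clang's __builtin_add_overflow:

    #include <cstdint>

    struct UAdd32 { uint32_t Sum; bool Carry; };

    // uadd.with.overflow.i32 as lowered above: ADD32rr, then SETBr reads CF.
    static UAdd32 uaddWithOverflow(uint32_t A, uint32_t B) {
      uint32_t Sum;
      bool Carry = __builtin_add_overflow(A, B, &Sum); // carry-out == CF
      return { Sum, Carry };
    }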
- case Intrinsic::x86_sse_cvttss2si:\r
- case Intrinsic::x86_sse_cvttss2si64:\r
- case Intrinsic::x86_sse2_cvttsd2si:\r
- case Intrinsic::x86_sse2_cvttsd2si64: {\r
- bool IsInputDouble;\r
- switch (II->getIntrinsicID()) {\r
- default: llvm_unreachable("Unexpected intrinsic.");\r
- case Intrinsic::x86_sse_cvttss2si:\r
- case Intrinsic::x86_sse_cvttss2si64:\r
- if (!Subtarget->hasSSE1())\r
- return false;\r
- IsInputDouble = false;\r
- break;\r
- case Intrinsic::x86_sse2_cvttsd2si:\r
- case Intrinsic::x86_sse2_cvttsd2si64:\r
- if (!Subtarget->hasSSE2())\r
- return false;\r
- IsInputDouble = true;\r
- break;\r
- }\r
-\r
- Type *RetTy = II->getCalledFunction()->getReturnType();\r
- MVT VT;\r
- if (!isTypeLegal(RetTy, VT))\r
- return false;\r
-\r
- static const unsigned CvtOpc[2][2][2] = {\r
- { { X86::CVTTSS2SIrr, X86::VCVTTSS2SIrr },\r
- { X86::CVTTSS2SI64rr, X86::VCVTTSS2SI64rr } },\r
- { { X86::CVTTSD2SIrr, X86::VCVTTSD2SIrr },\r
- { X86::CVTTSD2SI64rr, X86::VCVTTSD2SI64rr } }\r
- };\r
- bool HasAVX = Subtarget->hasAVX();\r
- unsigned Opc;\r
- switch (VT.SimpleTy) {\r
- default: llvm_unreachable("Unexpected result type.");\r
- case MVT::i32: Opc = CvtOpc[IsInputDouble][0][HasAVX]; break;\r
- case MVT::i64: Opc = CvtOpc[IsInputDouble][1][HasAVX]; break;\r
- }\r
-\r
- // Check if we can fold insertelement instructions into the convert.\r
- const Value *Op = II->getArgOperand(0);\r
- while (auto *IE = dyn_cast<InsertElementInst>(Op)) {\r
- const Value *Index = IE->getOperand(2);\r
- if (!isa<ConstantInt>(Index))\r
- break;\r
- unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();\r
-\r
- if (Idx == 0) {\r
- Op = IE->getOperand(1);\r
- break;\r
- }\r
- Op = IE->getOperand(0);\r
- }\r
-\r
- unsigned Reg = getRegForValue(Op);\r
- if (Reg == 0)\r
- return false;\r
-\r
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)\r
- .addReg(Reg);\r
-\r
- updateValueMap(II, ResultReg);\r
- return true;\r
- }\r
- }\r
-}\r
-\r
-bool X86FastISel::fastLowerArguments() {\r
- if (!FuncInfo.CanLowerReturn)\r
- return false;\r
-\r
- const Function *F = FuncInfo.Fn;\r
- if (F->isVarArg())\r
- return false;\r
-\r
- CallingConv::ID CC = F->getCallingConv();\r
- if (CC != CallingConv::C)\r
- return false;\r
-\r
- if (Subtarget->isCallingConvWin64(CC))\r
- return false;\r
-\r
- if (!Subtarget->is64Bit())\r
- return false;\r
-\r
- // Only handle simple cases, i.e. up to 6 i32/i64 scalar arguments.\r
- unsigned GPRCnt = 0;\r
- unsigned FPRCnt = 0;\r
- unsigned Idx = 0;\r
- for (auto const &Arg : F->args()) {\r
- // The first argument is at index 1.\r
- ++Idx;\r
- if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||\r
- F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||\r
- F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||\r
- F->getAttributes().hasAttribute(Idx, Attribute::Nest))\r
- return false;\r
-\r
- Type *ArgTy = Arg.getType();\r
- if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())\r
- return false;\r
-\r
- EVT ArgVT = TLI.getValueType(ArgTy);\r
- if (!ArgVT.isSimple()) return false;\r
- switch (ArgVT.getSimpleVT().SimpleTy) {\r
- default: return false;\r
- case MVT::i32:\r
- case MVT::i64:\r
- ++GPRCnt;\r
- break;\r
- case MVT::f32:\r
- case MVT::f64:\r
- if (!Subtarget->hasSSE1())\r
- return false;\r
- ++FPRCnt;\r
- break;\r
- }\r
-\r
- if (GPRCnt > 6)\r
- return false;\r
-\r
- if (FPRCnt > 8)\r
- return false;\r
- }\r
-\r
- static const MCPhysReg GPR32ArgRegs[] = {\r
- X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D\r
- };\r
- static const MCPhysReg GPR64ArgRegs[] = {\r
- X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9\r
- };\r
- static const MCPhysReg XMMArgRegs[] = {\r
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,\r
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7\r
- };\r
-\r
- unsigned GPRIdx = 0;\r
- unsigned FPRIdx = 0;\r
- for (auto const &Arg : F->args()) {\r
- MVT VT = TLI.getSimpleValueType(Arg.getType());\r
- const TargetRegisterClass *RC = TLI.getRegClassFor(VT);\r
- unsigned SrcReg;\r
- switch (VT.SimpleTy) {\r
- default: llvm_unreachable("Unexpected value type.");\r
- case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;\r
- case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;\r
- case MVT::f32: // fall-through\r
- case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;\r
- }\r
- unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);\r
- // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.\r
- // Without this, EmitLiveInCopies may eliminate the livein if its only\r
- // use is a bitcast (which isn't turned into an instruction).\r
- unsigned ResultReg = createResultReg(RC);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), ResultReg)\r
- .addReg(DstReg, getKillRegState(true));\r
- updateValueMap(&Arg, ResultReg);\r
- }\r
- return true;\r
-}\r
-\r
-static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget,\r
- CallingConv::ID CC,\r
- ImmutableCallSite *CS) {\r
- if (Subtarget->is64Bit())\r
- return 0;\r
- if (Subtarget->getTargetTriple().isOSMSVCRT())\r
- return 0;\r
- if (CC == CallingConv::Fast || CC == CallingConv::GHC ||\r
- CC == CallingConv::HiPE)\r
- return 0;\r
- if (CS && !CS->paramHasAttr(1, Attribute::StructRet))\r
- return 0;\r
- if (CS && CS->paramHasAttr(1, Attribute::InReg))\r
- return 0;\r
- return 4;\r
-}\r
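The 4 bytes accounted for here are the hidden sret slot: on x86-32, a function returning a struct in memory receives a pointer to the result as an extra stack argument, and outside the listed exceptions (64-bit, MSVC runtimes, fastcc/GHC/HiPE, inreg sret) the callee pops that slot itself with `ret $4`. Illustrative shape (hypothetical types):

    struct Big { int Vals[4]; };

    // The caller pushes &result as a hidden argument; on most x86-32 targets
    // the callee pops those 4 bytes on return, which this function reports.
    static Big makeBig() {
      Big B = { {1, 2, 3, 4} };
      return B; // stored through the hidden sret pointer before returning
    }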
-\r
-bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {\r
- auto &OutVals = CLI.OutVals;\r
- auto &OutFlags = CLI.OutFlags;\r
- auto &OutRegs = CLI.OutRegs;\r
- auto &Ins = CLI.Ins;\r
- auto &InRegs = CLI.InRegs;\r
- CallingConv::ID CC = CLI.CallConv;\r
- bool &IsTailCall = CLI.IsTailCall;\r
- bool IsVarArg = CLI.IsVarArg;\r
- const Value *Callee = CLI.Callee;\r
- const char *SymName = CLI.SymName;\r
-\r
- bool Is64Bit = Subtarget->is64Bit();\r
- bool IsWin64 = Subtarget->isCallingConvWin64(CC);\r
-\r
- // Handle only C, fastcc, and webkit_js calling conventions for now.\r
- switch (CC) {\r
- default: return false;\r
- case CallingConv::C:\r
- case CallingConv::Fast:\r
- case CallingConv::WebKit_JS:\r
- case CallingConv::X86_FastCall:\r
- case CallingConv::X86_64_Win64:\r
- case CallingConv::X86_64_SysV:\r
- break;\r
- }\r
-\r
- // Allow SelectionDAG isel to handle tail calls.\r
- if (IsTailCall)\r
- return false;\r
-\r
- // fastcc with -tailcallopt is intended to provide a guaranteed\r
- // tail call optimization. Fastisel doesn't know how to do that.\r
- if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)\r
- return false;\r
-\r
- // We don't know how to handle Win64 varargs yet. Nothing special is needed\r
- // for x86-32, and the special handling for x86-64 is implemented.\r
- if (IsVarArg && IsWin64)\r
- return false;\r
-\r
- // Don't know about inalloca yet.\r
- if (CLI.CS && CLI.CS->hasInAllocaArgument())\r
- return false;\r
-\r
- // Fast-isel doesn't know about callee-pop yet.\r
- if (X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,\r
- TM.Options.GuaranteedTailCallOpt))\r
- return false;\r
-\r
- SmallVector<MVT, 16> OutVTs;\r
- SmallVector<unsigned, 16> ArgRegs;\r
-\r
- // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra\r
- // instruction. This is safe because it is common to all FastISel supported\r
- // calling conventions on x86.\r
- for (int i = 0, e = OutVals.size(); i != e; ++i) {\r
- Value *&Val = OutVals[i];\r
- ISD::ArgFlagsTy Flags = OutFlags[i];\r
- if (auto *CI = dyn_cast<ConstantInt>(Val)) {\r
- if (CI->getBitWidth() < 32) {\r
- if (Flags.isSExt())\r
- Val = ConstantExpr::getSExt(CI, Type::getInt32Ty(CI->getContext()));\r
- else\r
- Val = ConstantExpr::getZExt(CI, Type::getInt32Ty(CI->getContext()));\r
- }\r
- }\r
-\r
- // Passing bools around ends up doing a trunc to i1 and passing it.\r
- // Codegen this as an argument + "and 1".\r
- MVT VT;\r
- auto *TI = dyn_cast<TruncInst>(Val);\r
- unsigned ResultReg;\r
- if (TI && TI->getType()->isIntegerTy(1) && CLI.CS &&\r
- (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&\r
- TI->hasOneUse()) {\r
- Value *PrevVal = TI->getOperand(0);\r
- ResultReg = getRegForValue(PrevVal);\r
-\r
- if (!ResultReg)\r
- return false;\r
-\r
- if (!isTypeLegal(PrevVal->getType(), VT))\r
- return false;\r
-\r
- ResultReg =\r
- fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);\r
- } else {\r
- if (!isTypeLegal(Val->getType(), VT))\r
- return false;\r
- ResultReg = getRegForValue(Val);\r
- }\r
-\r
- if (!ResultReg)\r
- return false;\r
-\r
- ArgRegs.push_back(ResultReg);\r
- OutVTs.push_back(VT);\r
- }\r
-\r
- // Analyze operands of the call, assigning locations to each operand.\r
- SmallVector<CCValAssign, 16> ArgLocs;\r
- CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());\r
-\r
- // Allocate shadow area for Win64\r
- if (IsWin64)\r
- CCInfo.AllocateStack(32, 8);\r
-\r
- CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);\r
-\r
- // Get a count of how many bytes are to be pushed on the stack.\r
- unsigned NumBytes = CCInfo.getNextStackOffset();\r
-\r
- // Issue CALLSEQ_START\r
- unsigned AdjStackDown = TII.getCallFrameSetupOpcode();\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))\r
- .addImm(NumBytes).addImm(0);\r
-\r
- // Walk the register/memloc assignments, inserting copies/loads.\r
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(\r
- TM.getSubtargetImpl()->getRegisterInfo());\r
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {\r
- CCValAssign const &VA = ArgLocs[i];\r
- const Value *ArgVal = OutVals[VA.getValNo()];\r
- MVT ArgVT = OutVTs[VA.getValNo()];\r
-\r
- if (ArgVT == MVT::x86mmx)\r
- return false;\r
-\r
- unsigned ArgReg = ArgRegs[VA.getValNo()];\r
-\r
- // Promote the value if needed.\r
- switch (VA.getLocInfo()) {\r
- case CCValAssign::Full: break;\r
- case CCValAssign::SExt: {\r
- assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&\r
- "Unexpected extend");\r
- bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,\r
- ArgVT, ArgReg);\r
- assert(Emitted && "Failed to emit a sext!"); (void)Emitted;\r
- ArgVT = VA.getLocVT();\r
- break;\r
- }\r
- case CCValAssign::ZExt: {\r
- assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&\r
- "Unexpected extend");\r
- bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,\r
- ArgVT, ArgReg);\r
- assert(Emitted && "Failed to emit a zext!"); (void)Emitted;\r
- ArgVT = VA.getLocVT();\r
- break;\r
- }\r
- case CCValAssign::AExt: {\r
- assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&\r
- "Unexpected extend");\r
- bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg,\r
- ArgVT, ArgReg);\r
- if (!Emitted)\r
- Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,\r
- ArgVT, ArgReg);\r
- if (!Emitted)\r
- Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,\r
- ArgVT, ArgReg);\r
-\r
-      assert(Emitted && "Failed to emit an aext!"); (void)Emitted;\r
- ArgVT = VA.getLocVT();\r
- break;\r
- }\r
- case CCValAssign::BCvt: {\r
- ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,\r
- /*TODO: Kill=*/false);\r
- assert(ArgReg && "Failed to emit a bitcast!");\r
- ArgVT = VA.getLocVT();\r
- break;\r
- }\r
- case CCValAssign::VExt:\r
- // VExt has not been implemented, so this should be impossible to reach\r
-      // for now. However, fall back to SelectionDAG isel once implemented.\r
- return false;\r
- case CCValAssign::AExtUpper:\r
- case CCValAssign::SExtUpper:\r
- case CCValAssign::ZExtUpper:\r
- case CCValAssign::FPExt:\r
- llvm_unreachable("Unexpected loc info!");\r
- case CCValAssign::Indirect:\r
- // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully\r
- // support this.\r
- return false;\r
- }\r
-\r
- if (VA.isRegLoc()) {\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);\r
- OutRegs.push_back(VA.getLocReg());\r
- } else {\r
- assert(VA.isMemLoc());\r
-\r
- // Don't emit stores for undef values.\r
- if (isa<UndefValue>(ArgVal))\r
- continue;\r
-\r
- unsigned LocMemOffset = VA.getLocMemOffset();\r
- X86AddressMode AM;\r
- AM.Base.Reg = RegInfo->getStackRegister();\r
- AM.Disp = LocMemOffset;\r
- ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];\r
- unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());\r
- MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(\r
- MachinePointerInfo::getStack(LocMemOffset), MachineMemOperand::MOStore,\r
- ArgVT.getStoreSize(), Alignment);\r
- if (Flags.isByVal()) {\r
- X86AddressMode SrcAM;\r
- SrcAM.Base.Reg = ArgReg;\r
- if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))\r
- return false;\r
- } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {\r
- // If this is a really simple value, emit this with the Value* version\r
- // of X86FastEmitStore. If it isn't simple, we don't want to do this,\r
- // as it can cause us to reevaluate the argument.\r
- if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))\r
- return false;\r
- } else {\r
- bool ValIsKill = hasTrivialKill(ArgVal);\r
- if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO))\r
- return false;\r
- }\r
- }\r
- }\r
-\r
-  // On ELF/PIC targets, calls through the PLT require the GOT base pointer\r
-  // to be live in EBX before the call.\r
- if (Subtarget->isPICStyleGOT()) {\r
- unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);\r
- }\r
-\r
- if (Is64Bit && IsVarArg && !IsWin64) {\r
- // From AMD64 ABI document:\r
- // For calls that may call functions that use varargs or stdargs\r
- // (prototype-less calls or calls to functions containing ellipsis (...) in\r
-    // the declaration) %al is used as a hidden argument to specify the number\r
-    // of SSE registers used. The contents of %al do not need to match exactly\r
-    // the number of registers, but must be an upper bound on the number of\r
-    // SSE registers used and is in the range 0 - 8 inclusive.\r
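-    // For instance, a variadic call passing a single double in %xmm0 may\r
-    // set %al to any value from 1 through 8; emitting the exact count below\r
-    // is simply the cheapest valid upper bound.\r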
-\r
- // Count the number of XMM registers allocated.\r
- static const MCPhysReg XMMArgRegs[] = {\r
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,\r
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7\r
- };\r
- unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);\r
- assert((Subtarget->hasSSE1() || !NumXMMRegs)\r
- && "SSE registers cannot be used when SSE is disabled");\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),\r
- X86::AL).addImm(NumXMMRegs);\r
- }\r
-\r
- // Materialize callee address in a register. FIXME: GV address can be\r
- // handled with a CALLpcrel32 instead.\r
- X86AddressMode CalleeAM;\r
- if (!X86SelectCallAddress(Callee, CalleeAM))\r
- return false;\r
-\r
- unsigned CalleeOp = 0;\r
- const GlobalValue *GV = nullptr;\r
- if (CalleeAM.GV != nullptr) {\r
- GV = CalleeAM.GV;\r
- } else if (CalleeAM.Base.Reg != 0) {\r
- CalleeOp = CalleeAM.Base.Reg;\r
- } else\r
- return false;\r
-\r
- // Issue the call.\r
- MachineInstrBuilder MIB;\r
- if (CalleeOp) {\r
- // Register-indirect call.\r
- unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;\r
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))\r
- .addReg(CalleeOp);\r
- } else {\r
- // Direct call.\r
- assert(GV && "Not a direct call");\r
- unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;\r
-\r
- // See if we need any target-specific flags on the GV operand.\r
- unsigned char OpFlags = 0;\r
-\r
- // On ELF targets, in both X86-64 and X86-32 mode, direct calls to\r
-    // external symbols must go through the PLT in PIC mode. If the symbol\r
- // has hidden or protected visibility, or if it is static or local, then\r
- // we don't need to use the PLT - we can directly call it.\r
- if (Subtarget->isTargetELF() &&\r
- TM.getRelocationModel() == Reloc::PIC_ &&\r
- GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {\r
- OpFlags = X86II::MO_PLT;\r
- } else if (Subtarget->isPICStyleStubAny() &&\r
- (GV->isDeclaration() || GV->isWeakForLinker()) &&\r
- (!Subtarget->getTargetTriple().isMacOSX() ||\r
- Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {\r
- // PC-relative references to external symbols should go through $stub,\r
- // unless we're building with the leopard linker or later, which\r
- // automatically synthesizes these stubs.\r
- OpFlags = X86II::MO_DARWIN_STUB;\r
- }\r
-\r
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));\r
- if (SymName)\r
- MIB.addExternalSymbol(SymName, OpFlags);\r
- else\r
- MIB.addGlobalAddress(GV, 0, OpFlags);\r
- }\r
-\r
- // Add a register mask operand representing the call-preserved registers.\r
- // Proper defs for return values will be added by setPhysRegsDeadExcept().\r
- MIB.addRegMask(TRI.getCallPreservedMask(CC));\r
-\r
- // Add an implicit use GOT pointer in EBX.\r
- if (Subtarget->isPICStyleGOT())\r
- MIB.addReg(X86::EBX, RegState::Implicit);\r
-\r
- if (Is64Bit && IsVarArg && !IsWin64)\r
- MIB.addReg(X86::AL, RegState::Implicit);\r
-\r
- // Add implicit physical register uses to the call.\r
- for (auto Reg : OutRegs)\r
- MIB.addReg(Reg, RegState::Implicit);\r
-\r
- // Issue CALLSEQ_END\r
- unsigned NumBytesForCalleeToPop =\r
- computeBytesPoppedByCallee(Subtarget, CC, CLI.CS);\r
- unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))\r
- .addImm(NumBytes).addImm(NumBytesForCalleeToPop);\r
-\r
- // Now handle call return values.\r
- SmallVector<CCValAssign, 16> RVLocs;\r
- CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,\r
- CLI.RetTy->getContext());\r
- CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);\r
-\r
- // Copy all of the result registers out of their specified physreg.\r
- unsigned ResultReg = FuncInfo.CreateRegs(CLI.RetTy);\r
- for (unsigned i = 0; i != RVLocs.size(); ++i) {\r
- CCValAssign &VA = RVLocs[i];\r
- EVT CopyVT = VA.getValVT();\r
- unsigned CopyReg = ResultReg + i;\r
-\r
- // If this is x86-64, and we disabled SSE, we can't return FP values\r
- if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&\r
- ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {\r
- report_fatal_error("SSE register return with SSE disabled");\r
- }\r
-\r
- // If we prefer to use the value in xmm registers, copy it out as f80 and\r
- // use a truncate to move it from fp stack reg to xmm reg.\r
- if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&\r
- isScalarFPTypeInSSEReg(VA.getValVT())) {\r
- CopyVT = MVT::f80;\r
- CopyReg = createResultReg(&X86::RFP80RegClass);\r
- }\r
-\r
- // Copy out the result.\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.getLocReg());\r
- InRegs.push_back(VA.getLocReg());\r
-\r
- // Round the f80 to the right size, which also moves it to the appropriate\r
- // xmm register. This is accomplished by storing the f80 value in memory\r
- // and then loading it back.\r
- if (CopyVT != VA.getValVT()) {\r
- EVT ResVT = VA.getValVT();\r
- unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;\r
- unsigned MemSize = ResVT.getSizeInBits()/8;\r
- int FI = MFI.CreateStackObject(MemSize, MemSize, false);\r
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Opc)), FI)\r
- .addReg(CopyReg);\r
- Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;\r
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Opc), ResultReg + i), FI);\r
- }\r
- }\r
-\r
- CLI.ResultReg = ResultReg;\r
- CLI.NumResultRegs = RVLocs.size();\r
- CLI.Call = MIB;\r
-\r
- return true;\r
-}\r
-\r
-bool\r
-X86FastISel::fastSelectInstruction(const Instruction *I) {\r
- switch (I->getOpcode()) {\r
- default: break;\r
- case Instruction::Load:\r
- return X86SelectLoad(I);\r
- case Instruction::Store:\r
- return X86SelectStore(I);\r
- case Instruction::Ret:\r
- return X86SelectRet(I);\r
- case Instruction::ICmp:\r
- case Instruction::FCmp:\r
- return X86SelectCmp(I);\r
- case Instruction::ZExt:\r
- return X86SelectZExt(I);\r
- case Instruction::Br:\r
- return X86SelectBranch(I);\r
- case Instruction::LShr:\r
- case Instruction::AShr:\r
- case Instruction::Shl:\r
- return X86SelectShift(I);\r
- case Instruction::SDiv:\r
- case Instruction::UDiv:\r
- case Instruction::SRem:\r
- case Instruction::URem:\r
- return X86SelectDivRem(I);\r
- case Instruction::Select:\r
- return X86SelectSelect(I);\r
- case Instruction::Trunc:\r
- return X86SelectTrunc(I);\r
- case Instruction::FPExt:\r
- return X86SelectFPExt(I);\r
- case Instruction::FPTrunc:\r
- return X86SelectFPTrunc(I);\r
- case Instruction::IntToPtr: // Deliberate fall-through.\r
- case Instruction::PtrToInt: {\r
- EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());\r
- EVT DstVT = TLI.getValueType(I->getType());\r
- if (DstVT.bitsGT(SrcVT))\r
- return X86SelectZExt(I);\r
- if (DstVT.bitsLT(SrcVT))\r
- return X86SelectTrunc(I);\r
- unsigned Reg = getRegForValue(I->getOperand(0));\r
- if (Reg == 0) return false;\r
- updateValueMap(I, Reg);\r
- return true;\r
- }\r
- }\r
-\r
- return false;\r
-}\r
-\r
-unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {\r
- if (VT > MVT::i64)\r
- return 0;\r
-\r
- uint64_t Imm = CI->getZExtValue();\r
- if (Imm == 0) {\r
- unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);\r
- switch (VT.SimpleTy) {\r
- default: llvm_unreachable("Unexpected value type");\r
- case MVT::i1:\r
- case MVT::i8:\r
- return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,\r
- X86::sub_8bit);\r
- case MVT::i16:\r
- return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,\r
- X86::sub_16bit);\r
- case MVT::i32:\r
- return SrcReg;\r
- case MVT::i64: {\r
- unsigned ResultReg = createResultReg(&X86::GR64RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)\r
- .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);\r
- return ResultReg;\r
- }\r
- }\r
- }\r
-\r
- unsigned Opc = 0;\r
- switch (VT.SimpleTy) {\r
- default: llvm_unreachable("Unexpected value type");\r
- case MVT::i1: VT = MVT::i8; // fall-through\r
- case MVT::i8: Opc = X86::MOV8ri; break;\r
- case MVT::i16: Opc = X86::MOV16ri; break;\r
- case MVT::i32: Opc = X86::MOV32ri; break;\r
- case MVT::i64: {\r
- if (isUInt<32>(Imm))\r
- Opc = X86::MOV32ri;\r
- else if (isInt<32>(Imm))\r
- Opc = X86::MOV64ri32;\r
- else\r
- Opc = X86::MOV64ri;\r
- break;\r
- }\r
- }\r
- if (VT == MVT::i64 && Opc == X86::MOV32ri) {\r
- unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);\r
- unsigned ResultReg = createResultReg(&X86::GR64RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)\r
- .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);\r
- return ResultReg;\r
- }\r
- return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);\r
-}\r
-\r
-unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {\r
- if (CFP->isNullValue())\r
- return fastMaterializeFloatZero(CFP);\r
-\r
- // Can't handle alternate code models yet.\r
- CodeModel::Model CM = TM.getCodeModel();\r
- if (CM != CodeModel::Small && CM != CodeModel::Large)\r
- return 0;\r
-\r
- // Get opcode and regclass of the output for the given load instruction.\r
- unsigned Opc = 0;\r
- const TargetRegisterClass *RC = nullptr;\r
- switch (VT.SimpleTy) {\r
- default: return 0;\r
- case MVT::f32:\r
- if (X86ScalarSSEf32) {\r
- Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;\r
- RC = &X86::FR32RegClass;\r
- } else {\r
- Opc = X86::LD_Fp32m;\r
- RC = &X86::RFP32RegClass;\r
- }\r
- break;\r
- case MVT::f64:\r
- if (X86ScalarSSEf64) {\r
- Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;\r
- RC = &X86::FR64RegClass;\r
- } else {\r
- Opc = X86::LD_Fp64m;\r
- RC = &X86::RFP64RegClass;\r
- }\r
- break;\r
- case MVT::f80:\r
- // No f80 support yet.\r
- return 0;\r
- }\r
-\r
- // MachineConstantPool wants an explicit alignment.\r
- unsigned Align = DL.getPrefTypeAlignment(CFP->getType());\r
- if (Align == 0) {\r
- // Alignment of vector types. FIXME!\r
- Align = DL.getTypeAllocSize(CFP->getType());\r
- }\r
-\r
- // x86-32 PIC requires a PIC base register for constant pools.\r
- unsigned PICBase = 0;\r
- unsigned char OpFlag = 0;\r
- if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic\r
- OpFlag = X86II::MO_PIC_BASE_OFFSET;\r
- PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);\r
- } else if (Subtarget->isPICStyleGOT()) {\r
- OpFlag = X86II::MO_GOTOFF;\r
- PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);\r
- } else if (Subtarget->isPICStyleRIPRel() &&\r
- TM.getCodeModel() == CodeModel::Small) {\r
- PICBase = X86::RIP;\r
- }\r
-\r
- // Create the load from the constant pool.\r
- unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);\r
- unsigned ResultReg = createResultReg(RC);\r
-\r
- if (CM == CodeModel::Large) {\r
- unsigned AddrReg = createResultReg(&X86::GR64RegClass);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),\r
- AddrReg)\r
- .addConstantPoolIndex(CPI, 0, OpFlag);\r
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Opc), ResultReg);\r
- addDirectMem(MIB, AddrReg);\r
- MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(\r
- MachinePointerInfo::getConstantPool(), MachineMemOperand::MOLoad,\r
- TM.getDataLayout()->getPointerSize(), Align);\r
- MIB->addMemOperand(*FuncInfo.MF, MMO);\r
- return ResultReg;\r
- }\r
-\r
- addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Opc), ResultReg),\r
- CPI, PICBase, OpFlag);\r
- return ResultReg;\r
-}\r
-\r
-unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {\r
- // Can't handle alternate code models yet.\r
- if (TM.getCodeModel() != CodeModel::Small)\r
- return 0;\r
-\r
- // Materialize addresses with LEA/MOV instructions.\r
- X86AddressMode AM;\r
- if (X86SelectAddress(GV, AM)) {\r
- // If the expression is just a basereg, then we're done, otherwise we need\r
- // to emit an LEA.\r
- if (AM.BaseType == X86AddressMode::RegBase &&\r
- AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)\r
- return AM.Base.Reg;\r
-\r
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));\r
- if (TM.getRelocationModel() == Reloc::Static &&\r
- TLI.getPointerTy() == MVT::i64) {\r
-      // The address may not fit in 32 bits, so we need to use an\r
-      // instruction with a 64-bit immediate.\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),\r
- ResultReg)\r
- .addGlobalAddress(GV);\r
- } else {\r
- unsigned Opc = TLI.getPointerTy() == MVT::i32\r
- ? (Subtarget->isTarget64BitILP32()\r
- ? X86::LEA64_32r : X86::LEA32r)\r
- : X86::LEA64r;\r
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Opc), ResultReg), AM);\r
- }\r
- return ResultReg;\r
- }\r
- return 0;\r
-}\r
-\r
-unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {\r
- EVT CEVT = TLI.getValueType(C->getType(), true);\r
-\r
- // Only handle simple types.\r
- if (!CEVT.isSimple())\r
- return 0;\r
- MVT VT = CEVT.getSimpleVT();\r
-\r
- if (const auto *CI = dyn_cast<ConstantInt>(C))\r
- return X86MaterializeInt(CI, VT);\r
- else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))\r
- return X86MaterializeFP(CFP, VT);\r
- else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))\r
- return X86MaterializeGV(GV, VT);\r
-\r
- return 0;\r
-}\r
-\r
-unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {\r
- // Fail on dynamic allocas. At this point, getRegForValue has already\r
- // checked its CSE maps, so if we're here trying to handle a dynamic\r
- // alloca, we're not going to succeed. X86SelectAddress has a\r
- // check for dynamic allocas, because it's called directly from\r
-  // various places, but fastMaterializeAlloca also needs a check\r
-  // in order to avoid recursion between getRegForValue,\r
-  // X86SelectAddress, and fastMaterializeAlloca.\r
- if (!FuncInfo.StaticAllocaMap.count(C))\r
- return 0;\r
- assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");\r
-\r
- X86AddressMode AM;\r
- if (!X86SelectAddress(C, AM))\r
- return 0;\r
- unsigned Opc = TLI.getPointerTy() == MVT::i32\r
- ? (Subtarget->isTarget64BitILP32()\r
- ? X86::LEA64_32r : X86::LEA32r)\r
- : X86::LEA64r;\r
- const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());\r
- unsigned ResultReg = createResultReg(RC);\r
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,\r
- TII.get(Opc), ResultReg), AM);\r
- return ResultReg;\r
-}\r
-\r
-unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {\r
- MVT VT;\r
- if (!isTypeLegal(CF->getType(), VT))\r
- return 0;\r
-\r
- // Get opcode and regclass for the given zero.\r
- unsigned Opc = 0;\r
- const TargetRegisterClass *RC = nullptr;\r
- switch (VT.SimpleTy) {\r
- default: return 0;\r
- case MVT::f32:\r
- if (X86ScalarSSEf32) {\r
- Opc = X86::FsFLD0SS;\r
- RC = &X86::FR32RegClass;\r
- } else {\r
- Opc = X86::LD_Fp032;\r
- RC = &X86::RFP32RegClass;\r
- }\r
- break;\r
- case MVT::f64:\r
- if (X86ScalarSSEf64) {\r
- Opc = X86::FsFLD0SD;\r
- RC = &X86::FR64RegClass;\r
- } else {\r
- Opc = X86::LD_Fp064;\r
- RC = &X86::RFP64RegClass;\r
- }\r
- break;\r
- case MVT::f80:\r
- // No f80 support yet.\r
- return 0;\r
- }\r
-\r
- unsigned ResultReg = createResultReg(RC);\r
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);\r
- return ResultReg;\r
-}\r
-\r
-\r
-bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,\r
- const LoadInst *LI) {\r
- const Value *Ptr = LI->getPointerOperand();\r
- X86AddressMode AM;\r
- if (!X86SelectAddress(Ptr, AM))\r
- return false;\r
-\r
- const X86InstrInfo &XII = (const X86InstrInfo &)TII;\r
-\r
- unsigned Size = DL.getTypeAllocSize(LI->getType());\r
- unsigned Alignment = LI->getAlignment();\r
-\r
- if (Alignment == 0) // Ensure that codegen never sees alignment 0\r
- Alignment = DL.getABITypeAlignment(LI->getType());\r
-\r
- SmallVector<MachineOperand, 8> AddrOps;\r
- AM.getFullAddress(AddrOps);\r
-\r
- MachineInstr *Result =\r
- XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps,\r
- Size, Alignment, /*AllowCommute=*/true);\r
- if (!Result)\r
- return false;\r
-\r
- Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));\r
- FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);\r
- MI->eraseFromParent();\r
- return true;\r
-}\r
-\r
-\r
-namespace llvm {\r
- FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,\r
- const TargetLibraryInfo *libInfo) {\r
- return new X86FastISel(funcInfo, libInfo);\r
- }\r
-}\r
+//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the X86-specific support for the FastISel class. Much
+// of the target-specific code is generated by tablegen in the file
+// X86GenFastISel.inc, which is #included here.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86CallingConv.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86RegisterInfo.h"
+#include "X86Subtarget.h"
+#include "X86TargetMachine.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+namespace {
+
+class X86FastISel final : public FastISel {
+ /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
+ /// make the right decision when generating code for different targets.
+ const X86Subtarget *Subtarget;
+
+ /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
+ /// floating point ops.
+ /// When SSE is available, use it for f32 operations.
+ /// When SSE2 is available, use it for f64 operations.
+ bool X86ScalarSSEf64;
+ bool X86ScalarSSEf32;
+
+public:
+ explicit X86FastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo)
+ : FastISel(funcInfo, libInfo) {
+ Subtarget = &TM.getSubtarget<X86Subtarget>();
+ X86ScalarSSEf64 = Subtarget->hasSSE2();
+ X86ScalarSSEf32 = Subtarget->hasSSE1();
+ }
+
+ bool fastSelectInstruction(const Instruction *I) override;
+
+ /// \brief The specified machine instr operand is a vreg, and that
+ /// vreg is being provided by the specified load instruction. If possible,
+  /// try to fold the load as an operand to the instruction, returning true on
+  /// success.
+ bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) override;
+
+ bool fastLowerArguments() override;
+ bool fastLowerCall(CallLoweringInfo &CLI) override;
+ bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
+
+#include "X86GenFastISel.inc"
+
+private:
+ bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT, DebugLoc DL);
+
+ bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, MachineMemOperand *MMO,
+ unsigned &ResultReg);
+
+ bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM,
+ MachineMemOperand *MMO = nullptr, bool Aligned = false);
+ bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
+ const X86AddressMode &AM,
+ MachineMemOperand *MMO = nullptr, bool Aligned = false);
+
+ bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
+ unsigned &ResultReg);
+
+ bool X86SelectAddress(const Value *V, X86AddressMode &AM);
+ bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
+
+ bool X86SelectLoad(const Instruction *I);
+
+ bool X86SelectStore(const Instruction *I);
+
+ bool X86SelectRet(const Instruction *I);
+
+ bool X86SelectCmp(const Instruction *I);
+
+ bool X86SelectZExt(const Instruction *I);
+
+ bool X86SelectBranch(const Instruction *I);
+
+ bool X86SelectShift(const Instruction *I);
+
+ bool X86SelectDivRem(const Instruction *I);
+
+ bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);
+
+ bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);
+
+ bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);
+
+ bool X86SelectSelect(const Instruction *I);
+
+ bool X86SelectTrunc(const Instruction *I);
+
+ bool X86SelectFPExt(const Instruction *I);
+ bool X86SelectFPTrunc(const Instruction *I);
+
+ const X86InstrInfo *getInstrInfo() const {
+ return getTargetMachine()->getSubtargetImpl()->getInstrInfo();
+ }
+ const X86TargetMachine *getTargetMachine() const {
+ return static_cast<const X86TargetMachine *>(&TM);
+ }
+
+ bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
+
+ unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
+ unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
+ unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
+ unsigned fastMaterializeConstant(const Constant *C) override;
+
+ unsigned fastMaterializeAlloca(const AllocaInst *C) override;
+
+ unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;
+
+ /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
+ /// computed in an SSE register, not on the X87 floating point stack.
+ bool isScalarFPTypeInSSEReg(EVT VT) const {
+ return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
+ (VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
+ }
+
+ bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);
+
+ bool IsMemcpySmall(uint64_t Len);
+
+ bool TryEmitSmallMemcpy(X86AddressMode DestAM,
+ X86AddressMode SrcAM, uint64_t Len);
+
+ bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
+ const Value *Cond);
+};
+
+} // end anonymous namespace.
+
+static std::pair<X86::CondCode, bool>
+getX86ConditionCode(CmpInst::Predicate Predicate) {
+ X86::CondCode CC = X86::COND_INVALID;
+ bool NeedSwap = false;
+ switch (Predicate) {
+ default: break;
+ // Floating-point Predicates
+ case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
+ case CmpInst::FCMP_OLT: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
+ case CmpInst::FCMP_OLE: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
+ case CmpInst::FCMP_UGT: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
+ case CmpInst::FCMP_UGE: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
+ case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
+ case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
+ case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
+ case CmpInst::FCMP_OEQ: // fall-through
+ case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
+
+ // Integer Predicates
+ case CmpInst::ICMP_EQ: CC = X86::COND_E; break;
+ case CmpInst::ICMP_NE: CC = X86::COND_NE; break;
+ case CmpInst::ICMP_UGT: CC = X86::COND_A; break;
+ case CmpInst::ICMP_UGE: CC = X86::COND_AE; break;
+ case CmpInst::ICMP_ULT: CC = X86::COND_B; break;
+ case CmpInst::ICMP_ULE: CC = X86::COND_BE; break;
+ case CmpInst::ICMP_SGT: CC = X86::COND_G; break;
+ case CmpInst::ICMP_SGE: CC = X86::COND_GE; break;
+ case CmpInst::ICMP_SLT: CC = X86::COND_L; break;
+ case CmpInst::ICMP_SLE: CC = X86::COND_LE; break;
+ }
+
+ return std::make_pair(CC, NeedSwap);
+}
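+
+// As an illustration: "icmp sgt" maps straight to COND_G (SETG/JG), while
+// "fcmp olt a, b" is rewritten (NeedSwap) as the "ogt" test on swapped
+// operands so that COND_A can be used: UCOMISS/UCOMISD set only CF/ZF/PF,
+// and the unsigned "above" conditions are the ones that correctly stay
+// false on unordered inputs.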
+
+static std::pair<unsigned, bool>
+getX86SSEConditionCode(CmpInst::Predicate Predicate) {
+ unsigned CC;
+ bool NeedSwap = false;
+
+ // SSE Condition code mapping:
+ // 0 - EQ
+ // 1 - LT
+ // 2 - LE
+ // 3 - UNORD
+ // 4 - NEQ
+ // 5 - NLT
+ // 6 - NLE
+ // 7 - ORD
+ switch (Predicate) {
+ default: llvm_unreachable("Unexpected predicate");
+ case CmpInst::FCMP_OEQ: CC = 0; break;
+ case CmpInst::FCMP_OGT: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_OLT: CC = 1; break;
+ case CmpInst::FCMP_OGE: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_OLE: CC = 2; break;
+ case CmpInst::FCMP_UNO: CC = 3; break;
+ case CmpInst::FCMP_UNE: CC = 4; break;
+ case CmpInst::FCMP_ULE: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_UGE: CC = 5; break;
+ case CmpInst::FCMP_ULT: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_UGT: CC = 6; break;
+ case CmpInst::FCMP_ORD: CC = 7; break;
+ case CmpInst::FCMP_UEQ:
+ case CmpInst::FCMP_ONE: CC = 8; break;
+ }
+
+ return std::make_pair(CC, NeedSwap);
+}
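+
+// The value returned here is the CMPSS/CMPSD predicate immediate; e.g.
+// "fcmp olt x, y" lowers roughly to "cmpltss %xmm1, %xmm0" (predicate 1),
+// producing an all-ones/all-zeros mask rather than EFLAGS. CC == 8
+// (FCMP_UEQ/FCMP_ONE) means no single predicate fits, so the caller must
+// combine two compares or bail out.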
+
+/// \brief Check if it is possible to fold the condition from the XALU intrinsic
+/// into the user. The condition code will only be updated on success.
+bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
+ const Value *Cond) {
+ if (!isa<ExtractValueInst>(Cond))
+ return false;
+
+ const auto *EV = cast<ExtractValueInst>(Cond);
+ if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
+ return false;
+
+ const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
+ MVT RetVT;
+ const Function *Callee = II->getCalledFunction();
+ Type *RetTy =
+ cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
+ if (!isTypeLegal(RetTy, RetVT))
+ return false;
+
+ if (RetVT != MVT::i32 && RetVT != MVT::i64)
+ return false;
+
+ X86::CondCode TmpCC;
+ switch (II->getIntrinsicID()) {
+ default: return false;
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;
+ }
+
+ // Check if both instructions are in the same basic block.
+ if (II->getParent() != I->getParent())
+ return false;
+
+ // Make sure nothing is in the way
+ BasicBlock::const_iterator Start = I;
+ BasicBlock::const_iterator End = II;
+ for (auto Itr = std::prev(Start); Itr != End; --Itr) {
+ // We only expect extractvalue instructions between the intrinsic and the
+ // instruction to be selected.
+ if (!isa<ExtractValueInst>(Itr))
+ return false;
+
+ // Check that the extractvalue operand comes from the intrinsic.
+ const auto *EVI = cast<ExtractValueInst>(Itr);
+ if (EVI->getAggregateOperand() != II)
+ return false;
+ }
+
+ CC = TmpCC;
+ return true;
+}
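+
+// A typical foldable pattern, sketched in IR:
+//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+//   %v = extractvalue { i32, i1 } %s, 0
+//   %o = extractvalue { i32, i1 } %s, 1
+//   br i1 %o, label %ovf, label %cont
+// The branch on %o can then reuse the flags set by the ADD as a JO: the
+// scan above admits only intervening extractvalues, which emit no machine
+// code and therefore cannot clobber EFLAGS.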
+
+bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
+ EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true);
+ if (evt == MVT::Other || !evt.isSimple())
+ // Unhandled type. Halt "fast" selection and bail.
+ return false;
+
+ VT = evt.getSimpleVT();
+ // For now, require SSE/SSE2 for performing floating-point operations,
+ // since x87 requires additional work.
+ if (VT == MVT::f64 && !X86ScalarSSEf64)
+ return false;
+ if (VT == MVT::f32 && !X86ScalarSSEf32)
+ return false;
+ // Similarly, no f80 support yet.
+ if (VT == MVT::f80)
+ return false;
+ // We only handle legal types. For example, on x86-32 the instruction
+ // selector contains all of the 64-bit instructions from x86-64,
+ // under the assumption that i64 won't be used if the target doesn't
+ // support it.
+ return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
+}
+
+#include "X86GenCallingConv.inc"
+
+/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
+/// The address is described by AM. On success, return true and pass the
+/// loaded value out through ResultReg.
+bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
+ MachineMemOperand *MMO, unsigned &ResultReg) {
+ // Get opcode and regclass of the output for the given load instruction.
+ unsigned Opc = 0;
+ const TargetRegisterClass *RC = nullptr;
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: return false;
+ case MVT::i1:
+ case MVT::i8:
+ Opc = X86::MOV8rm;
+ RC = &X86::GR8RegClass;
+ break;
+ case MVT::i16:
+ Opc = X86::MOV16rm;
+ RC = &X86::GR16RegClass;
+ break;
+ case MVT::i32:
+ Opc = X86::MOV32rm;
+ RC = &X86::GR32RegClass;
+ break;
+ case MVT::i64:
+ // Must be in x86-64 mode.
+ Opc = X86::MOV64rm;
+ RC = &X86::GR64RegClass;
+ break;
+ case MVT::f32:
+ if (X86ScalarSSEf32) {
+ Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
+ RC = &X86::FR32RegClass;
+ } else {
+ Opc = X86::LD_Fp32m;
+ RC = &X86::RFP32RegClass;
+ }
+ break;
+ case MVT::f64:
+ if (X86ScalarSSEf64) {
+ Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
+ RC = &X86::FR64RegClass;
+ } else {
+ Opc = X86::LD_Fp64m;
+ RC = &X86::RFP64RegClass;
+ }
+ break;
+ case MVT::f80:
+ // No f80 support yet.
+ return false;
+ }
+
+ ResultReg = createResultReg(RC);
+ MachineInstrBuilder MIB =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
+ addFullAddress(MIB, AM);
+ if (MMO)
+ MIB->addMemOperand(*FuncInfo.MF, MMO);
+ return true;
+}
+
+/// X86FastEmitStore - Emit a machine instruction to store a value of type VT.
+/// The address is described by the address mode AM. Return true if the store
+/// could be emitted.
+bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
+ const X86AddressMode &AM,
+ MachineMemOperand *MMO, bool Aligned) {
+ // Get opcode and regclass of the output for the given store instruction.
+ unsigned Opc = 0;
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::f80: // No f80 support yet.
+ default: return false;
+ case MVT::i1: {
+ // Mask out all but lowest bit.
+ unsigned AndResult = createResultReg(&X86::GR8RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(X86::AND8ri), AndResult)
+ .addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
+ ValReg = AndResult;
+ }
+ // FALLTHROUGH, handling i1 as i8.
+ case MVT::i8: Opc = X86::MOV8mr; break;
+ case MVT::i16: Opc = X86::MOV16mr; break;
+ case MVT::i32: Opc = X86::MOV32mr; break;
+ case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
+ case MVT::f32:
+ Opc = X86ScalarSSEf32 ?
+ (Subtarget->hasAVX() ? X86::VMOVSSmr : X86::MOVSSmr) : X86::ST_Fp32m;
+ break;
+ case MVT::f64:
+ Opc = X86ScalarSSEf64 ?
+ (Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
+ break;
+ case MVT::v4f32:
+ if (Aligned)
+ Opc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
+ else
+ Opc = Subtarget->hasAVX() ? X86::VMOVUPSmr : X86::MOVUPSmr;
+ break;
+ case MVT::v2f64:
+ if (Aligned)
+ Opc = Subtarget->hasAVX() ? X86::VMOVAPDmr : X86::MOVAPDmr;
+ else
+ Opc = Subtarget->hasAVX() ? X86::VMOVUPDmr : X86::MOVUPDmr;
+ break;
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v8i16:
+ case MVT::v16i8:
+ if (Aligned)
+ Opc = Subtarget->hasAVX() ? X86::VMOVDQAmr : X86::MOVDQAmr;
+ else
+ Opc = Subtarget->hasAVX() ? X86::VMOVDQUmr : X86::MOVDQUmr;
+ break;
+ }
+
+ MachineInstrBuilder MIB =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+ addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill));
+ if (MMO)
+ MIB->addMemOperand(*FuncInfo.MF, MMO);
+
+ return true;
+}
+
+bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
+ const X86AddressMode &AM,
+ MachineMemOperand *MMO, bool Aligned) {
+ // Handle 'null' like i32/i64 0.
+ if (isa<ConstantPointerNull>(Val))
+ Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));
+
+ // If this is a store of a simple constant, fold the constant into the store.
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
+ unsigned Opc = 0;
+ bool Signed = true;
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: break;
+ case MVT::i1: Signed = false; // FALLTHROUGH to handle as i8.
+ case MVT::i8: Opc = X86::MOV8mi; break;
+ case MVT::i16: Opc = X86::MOV16mi; break;
+ case MVT::i32: Opc = X86::MOV32mi; break;
+ case MVT::i64:
+ // Must be a 32-bit sign extended value.
+ if (isInt<32>(CI->getSExtValue()))
+ Opc = X86::MOV64mi32;
+ break;
+ }
+
+ if (Opc) {
+ MachineInstrBuilder MIB =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+ addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()
+ : CI->getZExtValue());
+ if (MMO)
+ MIB->addMemOperand(*FuncInfo.MF, MMO);
+ return true;
+ }
+ }
+
+ unsigned ValReg = getRegForValue(Val);
+ if (ValReg == 0)
+ return false;
+
+ bool ValKill = hasTrivialKill(Val);
+ return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned);
+}
+
+/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
+/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
+/// ISD::SIGN_EXTEND).
+bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
+ unsigned Src, EVT SrcVT,
+ unsigned &ResultReg) {
+ unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
+ Src, /*TODO: Kill=*/false);
+ if (RR == 0)
+ return false;
+
+ ResultReg = RR;
+ return true;
+}
+
+bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
+ // Handle constant address.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ // Can't handle alternate code models yet.
+ if (TM.getCodeModel() != CodeModel::Small)
+ return false;
+
+ // Can't handle TLS yet.
+ if (GV->isThreadLocal())
+ return false;
+
+ // RIP-relative addresses can't have additional register operands, so if
+ // we've already folded stuff into the addressing mode, just force the
+ // global value into its own register, which we can use as the basereg.
+ if (!Subtarget->isPICStyleRIPRel() ||
+ (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
+ // Okay, we've committed to selecting this global. Set up the address.
+ AM.GV = GV;
+
+ // Allow the subtarget to classify the global.
+ unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);
+
+ // If this reference is relative to the pic base, set it now.
+ if (isGlobalRelativeToPICBase(GVFlags)) {
+ // FIXME: How do we know Base.Reg is free??
+ AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
+ }
+
+ // Unless the ABI requires an extra load, return a direct reference to
+ // the global.
+ if (!isGlobalStubReference(GVFlags)) {
+ if (Subtarget->isPICStyleRIPRel()) {
+ // Use rip-relative addressing if we can. Above we verified that the
+ // base and index registers are unused.
+ assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
+ AM.Base.Reg = X86::RIP;
+ }
+ AM.GVOpFlags = GVFlags;
+ return true;
+ }
+
+ // Ok, we need to do a load from a stub. If we've already loaded from
+ // this stub, reuse the loaded pointer, otherwise emit the load now.
+ DenseMap<const Value *, unsigned>::iterator I = LocalValueMap.find(V);
+ unsigned LoadReg;
+ if (I != LocalValueMap.end() && I->second != 0) {
+ LoadReg = I->second;
+ } else {
+ // Issue load from stub.
+ unsigned Opc = 0;
+ const TargetRegisterClass *RC = nullptr;
+ X86AddressMode StubAM;
+ StubAM.Base.Reg = AM.Base.Reg;
+ StubAM.GV = GV;
+ StubAM.GVOpFlags = GVFlags;
+
+ // Prepare for inserting code in the local-value area.
+ SavePoint SaveInsertPt = enterLocalValueArea();
+
+ if (TLI.getPointerTy() == MVT::i64) {
+ Opc = X86::MOV64rm;
+ RC = &X86::GR64RegClass;
+
+ if (Subtarget->isPICStyleRIPRel())
+ StubAM.Base.Reg = X86::RIP;
+ } else {
+ Opc = X86::MOV32rm;
+ RC = &X86::GR32RegClass;
+ }
+
+ LoadReg = createResultReg(RC);
+ MachineInstrBuilder LoadMI =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), LoadReg);
+ addFullAddress(LoadMI, StubAM);
+
+ // Ok, back to normal mode.
+ leaveLocalValueArea(SaveInsertPt);
+
+ // Prevent loading GV stub multiple times in same MBB.
+ LocalValueMap[V] = LoadReg;
+ }
+
+ // Now construct the final address. Note that the Disp, Scale,
+ // and Index values may already be set here.
+ AM.Base.Reg = LoadReg;
+ AM.GV = nullptr;
+ return true;
+ }
+ }
+
+ // If all else fails, try to materialize the value in a register.
+ if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
+ if (AM.Base.Reg == 0) {
+ AM.Base.Reg = getRegForValue(V);
+ return AM.Base.Reg != 0;
+ }
+ if (AM.IndexReg == 0) {
+ assert(AM.Scale == 1 && "Scale with no index!");
+ AM.IndexReg = getRegForValue(V);
+ return AM.IndexReg != 0;
+ }
+ }
+
+ return false;
+}
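+
+// Example: with GOT-style PIC, a reference to global @g goes through a stub
+// load, roughly:
+//   movl g@GOT(%pic_base), %reg   ; emitted once, cached in LocalValueMap
+// after which AM.Base.Reg holds %reg and AM.GV is cleared, so the actual
+// access is a plain dereference of the loaded pointer.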
+
+/// X86SelectAddress - Attempt to fill in an address from the given value.
+///
+bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
+ SmallVector<const Value *, 32> GEPs;
+redo_gep:
+ const User *U = nullptr;
+ unsigned Opcode = Instruction::UserOp1;
+ if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ // Don't walk into other basic blocks; it's possible we haven't
+ // visited them yet, so the instructions may not yet be assigned
+ // virtual registers.
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
+ Opcode = C->getOpcode();
+ U = C;
+ }
+
+ if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
+ if (Ty->getAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
+ switch (Opcode) {
+ default: break;
+ case Instruction::BitCast:
+ // Look past bitcasts.
+ return X86SelectAddress(U->getOperand(0), AM);
+
+ case Instruction::IntToPtr:
+ // Look past no-op inttoptrs.
+ if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
+ return X86SelectAddress(U->getOperand(0), AM);
+ break;
+
+ case Instruction::PtrToInt:
+ // Look past no-op ptrtoints.
+ if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
+ return X86SelectAddress(U->getOperand(0), AM);
+ break;
+
+ case Instruction::Alloca: {
+ // Do static allocas.
+ const AllocaInst *A = cast<AllocaInst>(V);
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(A);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = SI->second;
+ return true;
+ }
+ break;
+ }
+
+ case Instruction::Add: {
+ // Adds of constants are common and easy enough.
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
+ uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
+ // They have to fit in the 32-bit signed displacement field though.
+ if (isInt<32>(Disp)) {
+ AM.Disp = (uint32_t)Disp;
+ return X86SelectAddress(U->getOperand(0), AM);
+ }
+ }
+ break;
+ }
+
+ case Instruction::GetElementPtr: {
+ X86AddressMode SavedAM = AM;
+
+ // Pattern-match simple GEPs.
+ uint64_t Disp = (int32_t)AM.Disp;
+ unsigned IndexReg = AM.IndexReg;
+ unsigned Scale = AM.Scale;
+ gep_type_iterator GTI = gep_type_begin(U);
+ // Iterate through the indices, folding what we can. Constants can be
+ // folded, and one dynamic index can be handled, if the scale is supported.
+ for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
+ i != e; ++i, ++GTI) {
+ const Value *Op = *i;
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ const StructLayout *SL = DL.getStructLayout(STy);
+ Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
+ continue;
+ }
+
+      // An array/variable index is always of the form i*S where S is the
+ // constant scale size. See if we can push the scale into immediates.
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
+ for (;;) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ Disp += CI->getSExtValue() * S;
+ break;
+ }
+ if (canFoldAddIntoGEP(U, Op)) {
+ // A compatible add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ Disp += CI->getSExtValue() * S;
+ // Iterate on the other operand.
+ Op = cast<AddOperator>(Op)->getOperand(0);
+ continue;
+ }
+ if (IndexReg == 0 &&
+ (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
+ (S == 1 || S == 2 || S == 4 || S == 8)) {
+ // Scaled-index addressing.
+ Scale = S;
+ IndexReg = getRegForGEPIndex(Op).first;
+ if (IndexReg == 0)
+ return false;
+ break;
+ }
+ // Unsupported.
+ goto unsupported_gep;
+ }
+ }
+
+ // Check for displacement overflow.
+ if (!isInt<32>(Disp))
+ break;
+
+ AM.IndexReg = IndexReg;
+ AM.Scale = Scale;
+ AM.Disp = (uint32_t)Disp;
+ GEPs.push_back(V);
+
+ if (const GetElementPtrInst *GEP =
+ dyn_cast<GetElementPtrInst>(U->getOperand(0))) {
+ // Ok, the GEP indices were covered by constant-offset and scaled-index
+ // addressing. Update the address state and move on to examining the base.
+ V = GEP;
+ goto redo_gep;
+ } else if (X86SelectAddress(U->getOperand(0), AM)) {
+ return true;
+ }
+
+ // If we couldn't merge the gep value into this addr mode, revert back to
+ // our address and just match the value instead of completely failing.
+ AM = SavedAM;
+
+ for (SmallVectorImpl<const Value *>::reverse_iterator
+ I = GEPs.rbegin(), E = GEPs.rend(); I != E; ++I)
+ if (handleConstantAddresses(*I, AM))
+ return true;
+
+ return false;
+ unsupported_gep:
+ // Ok, the GEP indices weren't all covered.
+ break;
+ }
+ }
+
+ return handleConstantAddresses(V, AM);
+}
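+
+// For instance, with "%i = add i64 %j, 3" feeding
+// "getelementptr i32* %p, i64 %i", the loop above folds the GEP, roughly,
+// to Base = %p, IndexReg = %j, Scale = 4, Disp = 12, i.e. one
+// "12(%p,%j,4)" memory operand.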
+
+/// X86SelectCallAddress - Attempt to fill in an address from the given value.
+///
+bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
+ const User *U = nullptr;
+ unsigned Opcode = Instruction::UserOp1;
+ const Instruction *I = dyn_cast<Instruction>(V);
+ // Record if the value is defined in the same basic block.
+ //
+ // This information is crucial to know whether or not folding an
+ // operand is valid.
+ // Indeed, FastISel generates or reuses a virtual register for all
+ // operands of all instructions it selects. Obviously, the definition and
+ // its uses must use the same virtual register otherwise the produced
+ // code is incorrect.
+ // Before instruction selection, FunctionLoweringInfo::set sets the virtual
+ // registers for values that are alive across basic blocks. This ensures
+  // that the values are consistently set across basic blocks, even
+ // if different instruction selection mechanisms are used (e.g., a mix of
+ // SDISel and FastISel).
+ // For values local to a basic block, the instruction selection process
+ // generates these virtual registers with whatever method is appropriate
+ // for its needs. In particular, FastISel and SDISel do not share the way
+ // local virtual registers are set.
+  // Therefore, it is impossible (or at least unsafe) to share values
+  // between basic blocks unless they use the same instruction selection
+  // method, which is not guaranteed for X86.
+  // Moreover, queries such as hasOneUse cannot be answered accurately if we
+  // allow references to values across basic blocks when they were not
+  // originally live across basic blocks.
+ bool InMBB = true;
+ if (I) {
+ Opcode = I->getOpcode();
+ U = I;
+ InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
+ Opcode = C->getOpcode();
+ U = C;
+ }
+
+ switch (Opcode) {
+ default: break;
+ case Instruction::BitCast:
+    // Look past bitcasts if the operand is in the same BB.
+ if (InMBB)
+ return X86SelectCallAddress(U->getOperand(0), AM);
+ break;
+
+ case Instruction::IntToPtr:
+    // Look past no-op inttoptrs if the operand is in the same BB.
+ if (InMBB &&
+ TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
+ return X86SelectCallAddress(U->getOperand(0), AM);
+ break;
+
+ case Instruction::PtrToInt:
+    // Look past no-op ptrtoints if the operand is in the same BB.
+ if (InMBB &&
+ TLI.getValueType(U->getType()) == TLI.getPointerTy())
+ return X86SelectCallAddress(U->getOperand(0), AM);
+ break;
+ }
+
+ // Handle constant address.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ // Can't handle alternate code models yet.
+ if (TM.getCodeModel() != CodeModel::Small)
+ return false;
+
+ // RIP-relative addresses can't have additional register operands.
+ if (Subtarget->isPICStyleRIPRel() &&
+ (AM.Base.Reg != 0 || AM.IndexReg != 0))
+ return false;
+
+ // Can't handle DLL Import.
+ if (GV->hasDLLImportStorageClass())
+ return false;
+
+ // Can't handle TLS.
+ if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (GVar->isThreadLocal())
+ return false;
+
+ // Okay, we've committed to selecting this global. Set up the basic address.
+ AM.GV = GV;
+
+ // No ABI requires an extra load for anything other than DLLImport, which
+ // we rejected above. Return a direct reference to the global.
+ if (Subtarget->isPICStyleRIPRel()) {
+ // Use rip-relative addressing if we can. Above we verified that the
+ // base and index registers are unused.
+ assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
+ AM.Base.Reg = X86::RIP;
+ } else if (Subtarget->isPICStyleStubPIC()) {
+ AM.GVOpFlags = X86II::MO_PIC_BASE_OFFSET;
+ } else if (Subtarget->isPICStyleGOT()) {
+ AM.GVOpFlags = X86II::MO_GOTOFF;
+ }
+
+ return true;
+ }
+
+ // If all else fails, try to materialize the value in a register.
+ if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
+ if (AM.Base.Reg == 0) {
+ AM.Base.Reg = getRegForValue(V);
+ return AM.Base.Reg != 0;
+ }
+ if (AM.IndexReg == 0) {
+ assert(AM.Scale == 1 && "Scale with no index!");
+ AM.IndexReg = getRegForValue(V);
+ return AM.IndexReg != 0;
+ }
+ }
+
+ return false;
+}
+
+
+/// X86SelectStore - Select and emit code to implement store instructions.
+bool X86FastISel::X86SelectStore(const Instruction *I) {
+ // Atomic stores need special handling.
+ const StoreInst *S = cast<StoreInst>(I);
+
+ if (S->isAtomic())
+ return false;
+
+ const Value *Val = S->getValueOperand();
+ const Value *Ptr = S->getPointerOperand();
+
+ MVT VT;
+ if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
+ return false;
+
+ unsigned Alignment = S->getAlignment();
+ unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());
+ if (Alignment == 0) // Ensure that codegen never sees alignment 0
+ Alignment = ABIAlignment;
+ bool Aligned = Alignment >= ABIAlignment;
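+  // Aligned only matters for the vector cases in X86FastEmitStore, where it
+  // selects MOVAPS/MOVAPD/MOVDQA over the unaligned MOVU* forms.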
+
+ X86AddressMode AM;
+ if (!X86SelectAddress(Ptr, AM))
+ return false;
+
+ return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);
+}
+
+/// X86SelectRet - Select and emit code to implement ret instructions.
+bool X86FastISel::X86SelectRet(const Instruction *I) {
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+ const Function &F = *I->getParent()->getParent();
+ const X86MachineFunctionInfo *X86MFInfo =
+ FuncInfo.MF->getInfo<X86MachineFunctionInfo>();
+
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ CallingConv::ID CC = F.getCallingConv();
+ if (CC != CallingConv::C &&
+ CC != CallingConv::Fast &&
+ CC != CallingConv::X86_FastCall &&
+ CC != CallingConv::X86_64_SysV)
+ return false;
+
+ if (Subtarget->isCallingConvWin64(CC))
+ return false;
+
+ // Don't handle popping bytes on return for now.
+ if (X86MFInfo->getBytesToPopOnReturn() != 0)
+ return false;
+
+ // fastcc with -tailcallopt is intended to provide a guaranteed
+ // tail call optimization. Fastisel doesn't know how to do that.
+ if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
+ return false;
+
+ // Let SDISel handle vararg functions.
+ if (F.isVarArg())
+ return false;
+
+ // Build a list of return value registers.
+ SmallVector<unsigned, 4> RetRegs;
+
+ if (Ret->getNumOperands() > 0) {
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ValLocs;
+ CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
+ CCInfo.AnalyzeReturn(Outs, RetCC_X86);
+
+ const Value *RV = Ret->getOperand(0);
+ unsigned Reg = getRegForValue(RV);
+ if (Reg == 0)
+ return false;
+
+ // Only handle a single return value for now.
+ if (ValLocs.size() != 1)
+ return false;
+
+ CCValAssign &VA = ValLocs[0];
+
+ // Don't bother handling odd stuff for now.
+ if (VA.getLocInfo() != CCValAssign::Full)
+ return false;
+ // Only handle register returns for now.
+ if (!VA.isRegLoc())
+ return false;
+
+ // The calling-convention tables for x87 returns don't tell
+ // the whole story.
+ if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
+ return false;
+
+ unsigned SrcReg = Reg + VA.getValNo();
+ EVT SrcVT = TLI.getValueType(RV->getType());
+ EVT DstVT = VA.getValVT();
+ // Special handling for extended integers.
+ if (SrcVT != DstVT) {
+ if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)
+ return false;
+
+ if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
+ return false;
+
+ assert(DstVT == MVT::i32 && "X86 should always ext to i32");
+
+ if (SrcVT == MVT::i1) {
+ if (Outs[0].Flags.isSExt())
+ return false;
+ SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);
+ SrcVT = MVT::i8;
+ }
+ unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
+ ISD::SIGN_EXTEND;
+ SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
+ SrcReg, /*TODO: Kill=*/false);
+ }
+
+ // Make the copy.
+ unsigned DstReg = VA.getLocReg();
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
+ // Avoid a cross-class copy. This is very unlikely.
+ if (!SrcRC->contains(DstReg))
+ return false;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
+
+ // Add register to return instruction.
+ RetRegs.push_back(VA.getLocReg());
+ }
+
+ // The x86-64 ABI for returning structs by value requires that we copy
+ // the sret argument into %rax for the return. We saved the argument into
+ // a virtual register in the entry block, so now we copy the value out
+ // and into %rax. We also do the same with %eax for Win32.
+ if (F.hasStructRetAttr() &&
+ (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
+ unsigned Reg = X86MFInfo->getSRetReturnReg();
+ assert(Reg &&
+ "SRetReturnReg should have been set in LowerFormalArguments()!");
+ unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
+ RetRegs.push_back(RetReg);
+ }
+
+ // Now emit the RET.
+ MachineInstrBuilder MIB =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
+ for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
+ MIB.addReg(RetRegs[i], RegState::Implicit);
+ return true;
+}
+
+/// X86SelectLoad - Select and emit code to implement load instructions.
+///
+bool X86FastISel::X86SelectLoad(const Instruction *I) {
+ const LoadInst *LI = cast<LoadInst>(I);
+
+ // Atomic loads need special handling.
+ if (LI->isAtomic())
+ return false;
+
+ MVT VT;
+ if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
+ return false;
+
+ const Value *Ptr = LI->getPointerOperand();
+
+ X86AddressMode AM;
+ if (!X86SelectAddress(Ptr, AM))
+ return false;
+
+ unsigned ResultReg = 0;
+ if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg))
+ return false;
+
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
+ bool HasAVX = Subtarget->hasAVX();
+ bool X86ScalarSSEf32 = Subtarget->hasSSE1();
+ bool X86ScalarSSEf64 = Subtarget->hasSSE2();
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: return 0;
+ case MVT::i8: return X86::CMP8rr;
+ case MVT::i16: return X86::CMP16rr;
+ case MVT::i32: return X86::CMP32rr;
+ case MVT::i64: return X86::CMP64rr;
+ case MVT::f32:
+ return X86ScalarSSEf32 ? (HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr) : 0;
+ case MVT::f64:
+ return X86ScalarSSEf64 ? (HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr) : 0;
+ }
+}
+
+/// X86ChooseCmpImmediateOpcode - If the RHS of the comparison is a constant
+/// immediate that we can fold into the compare, return an opcode that works
+/// for it (e.g. CMP32ri); otherwise return 0.
+static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
+ switch (VT.getSimpleVT().SimpleTy) {
+ // Otherwise, we can't fold the immediate into this comparison.
+ default: return 0;
+ case MVT::i8: return X86::CMP8ri;
+ case MVT::i16: return X86::CMP16ri;
+ case MVT::i32: return X86::CMP32ri;
+ case MVT::i64:
+ // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
+ // field.
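+    // For example, 0x7fffffff folds into CMP64ri32, but a 64-bit value of
+    // 0x80000000 does not: the 32-bit immediate would sign-extend to
+    // 0xffffffff80000000.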
+ if ((int)RHSC->getSExtValue() == RHSC->getSExtValue())
+ return X86::CMP64ri32;
+ return 0;
+ }
+}
+
+bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
+ EVT VT, DebugLoc CurDbgLoc) {
+ unsigned Op0Reg = getRegForValue(Op0);
+ if (Op0Reg == 0) return false;
+
+ // Handle 'null' like i32/i64 0.
+ if (isa<ConstantPointerNull>(Op1))
+ Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));
+
+ // We have two options: compare with register or immediate. If the RHS of
+ // the compare is an immediate that we can fold into this compare, use
+ // CMPri, otherwise use CMPrr.
+ if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
+ if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareImmOpc))
+ .addReg(Op0Reg)
+ .addImm(Op1C->getSExtValue());
+ return true;
+ }
+ }
+
+ unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
+ if (CompareOpc == 0) return false;
+
+ unsigned Op1Reg = getRegForValue(Op1);
+ if (Op1Reg == 0) return false;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc))
+ .addReg(Op0Reg)
+ .addReg(Op1Reg);
+
+ return true;
+}
+
+bool X86FastISel::X86SelectCmp(const Instruction *I) {
+ const CmpInst *CI = cast<CmpInst>(I);
+
+ MVT VT;
+ if (!isTypeLegal(I->getOperand(0)->getType(), VT))
+ return false;
+
+ // Try to optimize or fold the cmp.
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+ unsigned ResultReg = 0;
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_FALSE: {
+ ResultReg = createResultReg(&X86::GR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
+ ResultReg);
+ ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
+ X86::sub_8bit);
+ if (!ResultReg)
+ return false;
+ break;
+ }
+ case CmpInst::FCMP_TRUE: {
+ ResultReg = createResultReg(&X86::GR8RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
+ ResultReg).addImm(1);
+ break;
+ }
+ }
+
+ if (ResultReg) {
+ updateValueMap(I, ResultReg);
+ return true;
+ }
+
+ const Value *LHS = CI->getOperand(0);
+ const Value *RHS = CI->getOperand(1);
+
+ // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
+ // We don't have to materialize a zero constant for this case and can just use
+ // %x again on the RHS.
+ if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
+ const auto *RHSC = dyn_cast<ConstantFP>(RHS);
+ if (RHSC && RHSC->isNullValue())
+ RHS = LHS;
+ }
+
+ // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
+ static unsigned SETFOpcTable[2][3] = {
+ { X86::SETEr, X86::SETNPr, X86::AND8rr },
+ { X86::SETNEr, X86::SETPr, X86::OR8rr }
+ };
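+  // After UCOMISS/UCOMISD, ZF and PF encode the result: OEQ is "equal and
+  // ordered" (SETE AND SETNP), while UNE is "unequal or unordered"
+  // (SETNE OR SETP).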
+ unsigned *SETFOpc = nullptr;
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break;
+ case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break;
+ }
+
+ ResultReg = createResultReg(&X86::GR8RegClass);
+ if (SETFOpc) {
+ if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
+ return false;
+
+ unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
+ unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
+ FlagReg1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
+ FlagReg2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]),
+ ResultReg).addReg(FlagReg1).addReg(FlagReg2);
+ updateValueMap(I, ResultReg);
+ return true;
+ }
+
+ X86::CondCode CC;
+ bool SwapArgs;
+ std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
+ assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
+ unsigned Opc = X86::getSETFromCond(CC);
+
+ if (SwapArgs)
+ std::swap(LHS, RHS);
+
+ // Emit a compare of LHS/RHS.
+ if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
+ return false;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+bool X86FastISel::X86SelectZExt(const Instruction *I) {
+ EVT DstVT = TLI.getValueType(I->getType());
+ if (!TLI.isTypeLegal(DstVT))
+ return false;
+
+ unsigned ResultReg = getRegForValue(I->getOperand(0));
+ if (ResultReg == 0)
+ return false;
+
+ // Handle zero-extension from i1 to i8, which is common.
+ MVT SrcVT = TLI.getSimpleValueType(I->getOperand(0)->getType());
+ if (SrcVT.SimpleTy == MVT::i1) {
+ // Set the high bits to zero.
+ ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
+ SrcVT = MVT::i8;
+
+ if (ResultReg == 0)
+ return false;
+ }
+
+ if (DstVT == MVT::i64) {
+ // Handle extension to 64-bits via sub-register shenanigans.
+ unsigned MovInst;
+
+ switch (SrcVT.SimpleTy) {
+ case MVT::i8: MovInst = X86::MOVZX32rr8; break;
+ case MVT::i16: MovInst = X86::MOVZX32rr16; break;
+ case MVT::i32: MovInst = X86::MOV32rr; break;
+ default: llvm_unreachable("Unexpected zext to i64 source type");
+ }
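+    // On x86-64, writing a 32-bit register implicitly zeroes the upper 32
+    // bits, so the 32-bit move already yields a zero-extended value;
+    // SUBREG_TO_REG just retags it as a 64-bit register.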
+
+ unsigned Result32 = createResultReg(&X86::GR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32)
+ .addReg(ResultReg);
+
+ ResultReg = createResultReg(&X86::GR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG),
+ ResultReg)
+ .addImm(0).addReg(Result32).addImm(X86::sub_32bit);
+ } else if (DstVT != MVT::i8) {
+ ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
+ ResultReg, /*Kill=*/true);
+ if (ResultReg == 0)
+ return false;
+ }
+
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+bool X86FastISel::X86SelectBranch(const Instruction *I) {
+ // Unconditional branches are selected by tablegen-generated code.
+ // Handle a conditional branch.
+ const BranchInst *BI = cast<BranchInst>(I);
+ MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
+ MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
+
+ // Fold the common case of a conditional branch with a comparison
+ // in the same block (values defined on other blocks may not have
+ // initialized registers).
+ X86::CondCode CC;
+ if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
+ if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
+ EVT VT = TLI.getValueType(CI->getOperand(0)->getType());
+
+ // Try to optimize or fold the cmp.
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true;
+ case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true;
+ }
+
+ const Value *CmpLHS = CI->getOperand(0);
+ const Value *CmpRHS = CI->getOperand(1);
+
+ // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x,
+ // 0.0.
+ // We don't have to materialize a zero constant for this case and can just
+ // use %x again on the RHS.
+ if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
+ const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
+ if (CmpRHSC && CmpRHSC->isNullValue())
+ CmpRHS = CmpLHS;
+ }
+
+ // Try to take advantage of fallthrough opportunities.
+ if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
+ std::swap(TrueMBB, FalseMBB);
+ Predicate = CmpInst::getInversePredicate(Predicate);
+ }
+
+ // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
+ // code check. Instead two branch instructions are required to check all
+ // the flags. First we change the predicate to a supported condition code,
+      // which will be the first branch. Later on we will emit the second
+      // branch.
+ bool NeedExtraBranch = false;
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_OEQ:
+ std::swap(TrueMBB, FalseMBB); // fall-through
+ case CmpInst::FCMP_UNE:
+ NeedExtraBranch = true;
+ Predicate = CmpInst::FCMP_ONE;
+ break;
+ }
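+      // For example, "fcmp une" becomes JNE TrueMBB; JP TrueMBB; then the
+      // branch to FalseMBB, since ZF=0 means ordered-and-unequal and PF=1
+      // means unordered.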
+
+ bool SwapArgs;
+ unsigned BranchOpc;
+ std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
+ assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
+
+ BranchOpc = X86::GetCondBranchFromCond(CC);
+ if (SwapArgs)
+ std::swap(CmpLHS, CmpRHS);
+
+ // Emit a compare of the LHS and RHS, setting the flags.
+ if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))
+ return false;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
+ .addMBB(TrueMBB);
+
+ // X86 requires a second branch to handle UNE (and OEQ, which is mapped
+ // to UNE above).
+ if (NeedExtraBranch) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_1))
+ .addMBB(TrueMBB);
+ }
+
+ // Obtain the branch weight and add the TrueBB to the successor list.
+ uint32_t BranchWeight = 0;
+ if (FuncInfo.BPI)
+ BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
+ TrueMBB->getBasicBlock());
+ FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
+
+ // Emits an unconditional branch to the FalseBB, obtains the branch
+ // weight, and adds it to the successor list.
+ fastEmitBranch(FalseMBB, DbgLoc);
+
+ return true;
+ }
+ } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
+ // Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which
+ // typically happen for _Bool and C++ bools.
+ MVT SourceVT;
+ if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
+ isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
+ unsigned TestOpc = 0;
+ switch (SourceVT.SimpleTy) {
+ default: break;
+ case MVT::i8: TestOpc = X86::TEST8ri; break;
+ case MVT::i16: TestOpc = X86::TEST16ri; break;
+ case MVT::i32: TestOpc = X86::TEST32ri; break;
+ case MVT::i64: TestOpc = X86::TEST64ri32; break;
+ }
+ if (TestOpc) {
+ unsigned OpReg = getRegForValue(TI->getOperand(0));
+ if (OpReg == 0) return false;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
+ .addReg(OpReg).addImm(1);
+
+ unsigned JmpOpc = X86::JNE_1;
+ if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
+ std::swap(TrueMBB, FalseMBB);
+ JmpOpc = X86::JE_1;
+ }
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc))
+ .addMBB(TrueMBB);
+ fastEmitBranch(FalseMBB, DbgLoc);
+ uint32_t BranchWeight = 0;
+ if (FuncInfo.BPI)
+ BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
+ TrueMBB->getBasicBlock());
+ FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
+ return true;
+ }
+ }
+ } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {
+ // Fake request the condition, otherwise the intrinsic might be completely
+ // optimized away.
+ unsigned TmpReg = getRegForValue(BI->getCondition());
+ if (TmpReg == 0)
+ return false;
+
+ unsigned BranchOpc = X86::GetCondBranchFromCond(CC);
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
+ .addMBB(TrueMBB);
+ fastEmitBranch(FalseMBB, DbgLoc);
+ uint32_t BranchWeight = 0;
+ if (FuncInfo.BPI)
+ BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
+ TrueMBB->getBasicBlock());
+ FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
+ return true;
+ }
+
+ // Otherwise do a clumsy setcc and re-test it.
+ // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
+ // in an explicit cast, so make sure to handle that correctly.
+ unsigned OpReg = getRegForValue(BI->getCondition());
+ if (OpReg == 0) return false;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
+ .addReg(OpReg).addImm(1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_1))
+ .addMBB(TrueMBB);
+ fastEmitBranch(FalseMBB, DbgLoc);
+ uint32_t BranchWeight = 0;
+ if (FuncInfo.BPI)
+ BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
+ TrueMBB->getBasicBlock());
+ FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
+ return true;
+}
+
+bool X86FastISel::X86SelectShift(const Instruction *I) {
+ unsigned CReg = 0, OpReg = 0;
+ const TargetRegisterClass *RC = nullptr;
+ if (I->getType()->isIntegerTy(8)) {
+ CReg = X86::CL;
+ RC = &X86::GR8RegClass;
+ switch (I->getOpcode()) {
+ case Instruction::LShr: OpReg = X86::SHR8rCL; break;
+ case Instruction::AShr: OpReg = X86::SAR8rCL; break;
+ case Instruction::Shl: OpReg = X86::SHL8rCL; break;
+ default: return false;
+ }
+ } else if (I->getType()->isIntegerTy(16)) {
+ CReg = X86::CX;
+ RC = &X86::GR16RegClass;
+ switch (I->getOpcode()) {
+ case Instruction::LShr: OpReg = X86::SHR16rCL; break;
+ case Instruction::AShr: OpReg = X86::SAR16rCL; break;
+ case Instruction::Shl: OpReg = X86::SHL16rCL; break;
+ default: return false;
+ }
+ } else if (I->getType()->isIntegerTy(32)) {
+ CReg = X86::ECX;
+ RC = &X86::GR32RegClass;
+ switch (I->getOpcode()) {
+ case Instruction::LShr: OpReg = X86::SHR32rCL; break;
+ case Instruction::AShr: OpReg = X86::SAR32rCL; break;
+ case Instruction::Shl: OpReg = X86::SHL32rCL; break;
+ default: return false;
+ }
+ } else if (I->getType()->isIntegerTy(64)) {
+ CReg = X86::RCX;
+ RC = &X86::GR64RegClass;
+ switch (I->getOpcode()) {
+ case Instruction::LShr: OpReg = X86::SHR64rCL; break;
+ case Instruction::AShr: OpReg = X86::SAR64rCL; break;
+ case Instruction::Shl: OpReg = X86::SHL64rCL; break;
+ default: return false;
+ }
+ } else {
+ return false;
+ }
+
+ MVT VT;
+ if (!isTypeLegal(I->getType(), VT))
+ return false;
+
+ unsigned Op0Reg = getRegForValue(I->getOperand(0));
+ if (Op0Reg == 0) return false;
+
+ unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ if (Op1Reg == 0) return false;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
+ CReg).addReg(Op1Reg);
+
+ // The shift instruction uses X86::CL. If we defined a super-register
+ // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
+ if (CReg != X86::CL)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::KILL), X86::CL)
+ .addReg(CReg, RegState::Kill);
+
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)
+ .addReg(Op0Reg);
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+bool X86FastISel::X86SelectDivRem(const Instruction *I) {
+ const static unsigned NumTypes = 4; // i8, i16, i32, i64
+ const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem
+ const static bool S = true; // IsSigned
+ const static bool U = false; // !IsSigned
+ const static unsigned Copy = TargetOpcode::COPY;
+ // For the X86 DIV/IDIV instruction, in most cases the dividend
+ // (numerator) must be in a specific register pair highreg:lowreg,
+ // producing the quotient in lowreg and the remainder in highreg.
+ // For most data types, to set up the instruction, the dividend is
+ // copied into lowreg, and lowreg is sign-extended or zero-extended
+ // into highreg. The exception is i8, where the dividend is defined
+ // as a single register rather than a register pair, and we
+ // therefore directly sign-extend or zero-extend the dividend into
+ // lowreg, instead of copying, and ignore the highreg.
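+  // For example, "sdiv i32 %a, %b" becomes roughly:
+  //   copy %a into EAX; CDQ (sign-extend EAX into EDX); IDIV32r %b
+  // leaving the quotient in EAX and the remainder in EDX.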
+ const static struct DivRemEntry {
+ // The following portion depends only on the data type.
+ const TargetRegisterClass *RC;
+ unsigned LowInReg; // low part of the register pair
+ unsigned HighInReg; // high part of the register pair
+ // The following portion depends on both the data type and the operation.
+ struct DivRemResult {
+ unsigned OpDivRem; // The specific DIV/IDIV opcode to use.
+ unsigned OpSignExtend; // Opcode for sign-extending lowreg into
+ // highreg, or copying a zero into highreg.
+ unsigned OpCopy; // Opcode for copying dividend into lowreg, or
+ // zero/sign-extending into lowreg for i8.
+ unsigned DivRemResultReg; // Register containing the desired result.
+ bool IsOpSigned; // Whether to use signed or unsigned form.
+ } ResultTable[NumOps];
+ } OpTable[NumTypes] = {
+ { &X86::GR8RegClass, X86::AX, 0, {
+ { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv
+ { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem
+ { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv
+ { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem
+ }
+ }, // i8
+ { &X86::GR16RegClass, X86::AX, X86::DX, {
+ { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv
+ { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem
+ { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv
+ { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem
+ }
+ }, // i16
+ { &X86::GR32RegClass, X86::EAX, X86::EDX, {
+ { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv
+ { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem
+ { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv
+ { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem
+ }
+ }, // i32
+ { &X86::GR64RegClass, X86::RAX, X86::RDX, {
+ { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv
+ { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem
+ { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv
+ { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem
+ }
+ }, // i64
+ };
+
+ MVT VT;
+ if (!isTypeLegal(I->getType(), VT))
+ return false;
+
+ unsigned TypeIndex, OpIndex;
+ switch (VT.SimpleTy) {
+ default: return false;
+ case MVT::i8: TypeIndex = 0; break;
+ case MVT::i16: TypeIndex = 1; break;
+ case MVT::i32: TypeIndex = 2; break;
+ case MVT::i64: TypeIndex = 3;
+ if (!Subtarget->is64Bit())
+ return false;
+ break;
+ }
+
+ switch (I->getOpcode()) {
+ default: llvm_unreachable("Unexpected div/rem opcode");
+ case Instruction::SDiv: OpIndex = 0; break;
+ case Instruction::SRem: OpIndex = 1; break;
+ case Instruction::UDiv: OpIndex = 2; break;
+ case Instruction::URem: OpIndex = 3; break;
+ }
+
+ const DivRemEntry &TypeEntry = OpTable[TypeIndex];
+ const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
+ unsigned Op0Reg = getRegForValue(I->getOperand(0));
+ if (Op0Reg == 0)
+ return false;
+ unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ if (Op1Reg == 0)
+ return false;
+
+ // Move op0 into low-order input register.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
+ // Zero-extend or sign-extend into high-order input register.
+ if (OpEntry.OpSignExtend) {
+ if (OpEntry.IsOpSigned)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(OpEntry.OpSignExtend));
+ else {
+ unsigned Zero32 = createResultReg(&X86::GR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(X86::MOV32r0), Zero32);
+
+ // Copy the zero into the appropriate sub/super/identical physical
+ // register. Unfortunately the operations needed are not uniform enough
+ // to fit neatly into the table above.
+ if (VT.SimpleTy == MVT::i16) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Copy), TypeEntry.HighInReg)
+ .addReg(Zero32, 0, X86::sub_16bit);
+ } else if (VT.SimpleTy == MVT::i32) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Copy), TypeEntry.HighInReg)
+ .addReg(Zero32);
+ } else if (VT.SimpleTy == MVT::i64) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
+ .addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
+ }
+ }
+ }
+ // Generate the DIV/IDIV instruction.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
+ // For i8 remainder, we can't reference AH directly, as we'll end
+ // up with bogus copies like %R9B = COPY %AH. Reference AX
+ // instead to prevent AH references in a REX instruction.
+ //
+ // The current assumption of the fast register allocator is that isel
+ // won't generate explicit references to the GPR8_NOREX registers. If
+ // the allocator and/or the backend get enhanced to be more robust in
+ // that regard, this can be, and should be, removed.
+ unsigned ResultReg = 0;
+ if ((I->getOpcode() == Instruction::SRem ||
+ I->getOpcode() == Instruction::URem) &&
+ OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
+ unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);
+ unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Copy), SourceSuperReg).addReg(X86::AX);
+
+ // Shift AX right by 8 bits instead of using AH.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri),
+ ResultSuperReg).addReg(SourceSuperReg).addImm(8);
+
+ // Now reference the 8-bit subreg of the result.
+ ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
+ /*Kill=*/true, X86::sub_8bit);
+ }
+ // Copy the result out of the physreg if we haven't already.
+ if (!ResultReg) {
+ ResultReg = createResultReg(TypeEntry.RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)
+ .addReg(OpEntry.DivRemResultReg);
+ }
+ updateValueMap(I, ResultReg);
+
+ return true;
+}
+
+/// \brief Emit a conditional move instruction (if the subtarget supports it)
+/// to lower the select.
+bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
+ // Check if the subtarget supports these instructions.
+ if (!Subtarget->hasCMov())
+ return false;
+
+ // FIXME: Add support for i8.
+ if (RetVT < MVT::i16 || RetVT > MVT::i64)
+ return false;
+
+ const Value *Cond = I->getOperand(0);
+ const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
+ bool NeedTest = true;
+ X86::CondCode CC = X86::COND_NE;
+
+ // Optimize conditions coming from a compare if both instructions are in the
+ // same basic block (values defined in other basic blocks may not have
+ // initialized registers).
+ const auto *CI = dyn_cast<CmpInst>(Cond);
+ if (CI && (CI->getParent() == I->getParent())) {
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+
+ // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
+ static unsigned SETFOpcTable[2][3] = {
+ { X86::SETNPr, X86::SETEr , X86::TEST8rr },
+ { X86::SETPr, X86::SETNEr, X86::OR8rr }
+ };
+ unsigned *SETFOpc = nullptr;
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_OEQ:
+ SETFOpc = &SETFOpcTable[0][0];
+ Predicate = CmpInst::ICMP_NE;
+ break;
+ case CmpInst::FCMP_UNE:
+ SETFOpc = &SETFOpcTable[1][0];
+ Predicate = CmpInst::ICMP_NE;
+ break;
+ }
+
+ bool NeedSwap;
+ std::tie(CC, NeedSwap) = getX86ConditionCode(Predicate);
+ assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
+
+ const Value *CmpLHS = CI->getOperand(0);
+ const Value *CmpRHS = CI->getOperand(1);
+ if (NeedSwap)
+ std::swap(CmpLHS, CmpRHS);
+
+ EVT CmpVT = TLI.getValueType(CmpLHS->getType());
+ // Emit a compare of the LHS and RHS, setting the flags.
+ if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
+ return false;
+
+ if (SETFOpc) {
+ unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
+ unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
+ FlagReg1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
+ FlagReg2);
+ auto const &II = TII.get(SETFOpc[2]);
+ if (II.getNumDefs()) {
+ unsigned TmpReg = createResultReg(&X86::GR8RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)
+ .addReg(FlagReg2).addReg(FlagReg1);
+ } else {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+ .addReg(FlagReg2).addReg(FlagReg1);
+ }
+ }
+ NeedTest = false;
+ } else if (foldX86XALUIntrinsic(CC, I, Cond)) {
+ // Fake request the condition, otherwise the intrinsic might be completely
+ // optimized away.
+ unsigned TmpReg = getRegForValue(Cond);
+ if (TmpReg == 0)
+ return false;
+
+ NeedTest = false;
+ }
+
+ if (NeedTest) {
+    // Selects operate on i1, but CondReg is 8 bits wide and may contain
+    // garbage: only its least significant bit is guaranteed to be accurate.
+    // Reading more than the lsb could therefore yield a non-zero value even
+    // when the lsb is zero. Truncate CondReg to i1 for the select by
+    // performing a TEST against 1.
+ unsigned CondReg = getRegForValue(Cond);
+ if (CondReg == 0)
+ return false;
+ bool CondIsKill = hasTrivialKill(Cond);
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
+ .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);
+ }
+
+ const Value *LHS = I->getOperand(1);
+ const Value *RHS = I->getOperand(2);
+
+ unsigned RHSReg = getRegForValue(RHS);
+ bool RHSIsKill = hasTrivialKill(RHS);
+
+ unsigned LHSReg = getRegForValue(LHS);
+ bool LHSIsKill = hasTrivialKill(LHS);
+
+ if (!LHSReg || !RHSReg)
+ return false;
+
+ unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
+ LHSReg, LHSIsKill);
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+/// \brief Emit SSE instructions to lower the select.
+///
+/// Try to use SSE1/SSE2 instructions to simulate a select without branches.
+/// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
+/// SSE instructions are available.
+bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
+ // Optimize conditions coming from a compare if both instructions are in the
+ // same basic block (values defined in other basic blocks may not have
+ // initialized registers).
+ const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
+ if (!CI || (CI->getParent() != I->getParent()))
+ return false;
+
+ if (I->getType() != CI->getOperand(0)->getType() ||
+ !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
+ (Subtarget->hasSSE2() && RetVT == MVT::f64)))
+ return false;
+
+ const Value *CmpLHS = CI->getOperand(0);
+ const Value *CmpRHS = CI->getOperand(1);
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+
+ // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
+ // We don't have to materialize a zero constant for this case and can just use
+ // %x again on the RHS.
+ if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
+ const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
+ if (CmpRHSC && CmpRHSC->isNullValue())
+ CmpRHS = CmpLHS;
+ }
+
+ unsigned CC;
+ bool NeedSwap;
+ std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
+ if (CC > 7)
+ return false;
+
+ if (NeedSwap)
+ std::swap(CmpLHS, CmpRHS);
+
+ static unsigned OpcTable[2][2][4] = {
+ { { X86::CMPSSrr, X86::FsANDPSrr, X86::FsANDNPSrr, X86::FsORPSrr },
+ { X86::VCMPSSrr, X86::VFsANDPSrr, X86::VFsANDNPSrr, X86::VFsORPSrr } },
+ { { X86::CMPSDrr, X86::FsANDPDrr, X86::FsANDNPDrr, X86::FsORPDrr },
+ { X86::VCMPSDrr, X86::VFsANDPDrr, X86::VFsANDNPDrr, X86::VFsORPDrr } }
+ };
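+  // The sequence emitted below computes, e.g. for f32 without AVX:
+  //   %mask = CMPSSrr %cmplhs, %cmprhs, $cc  ; all-ones if true, else zero
+  //   %t    = FsANDPSrr %mask, %lhs          ; LHS where the mask is set
+  //   %f    = FsANDNPSrr %mask, %rhs         ; RHS where the mask is clear
+  //   %res  = FsORPSrr %f, %t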
+
+ bool HasAVX = Subtarget->hasAVX();
+ unsigned *Opc = nullptr;
+ switch (RetVT.SimpleTy) {
+ default: return false;
+ case MVT::f32: Opc = &OpcTable[0][HasAVX][0]; break;
+ case MVT::f64: Opc = &OpcTable[1][HasAVX][0]; break;
+ }
+
+ const Value *LHS = I->getOperand(1);
+ const Value *RHS = I->getOperand(2);
+
+ unsigned LHSReg = getRegForValue(LHS);
+ bool LHSIsKill = hasTrivialKill(LHS);
+
+ unsigned RHSReg = getRegForValue(RHS);
+ bool RHSIsKill = hasTrivialKill(RHS);
+
+ unsigned CmpLHSReg = getRegForValue(CmpLHS);
+ bool CmpLHSIsKill = hasTrivialKill(CmpLHS);
+
+ unsigned CmpRHSReg = getRegForValue(CmpRHS);
+ bool CmpRHSIsKill = hasTrivialKill(CmpRHS);
+
+  if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
+ return false;
+
+ const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
+ unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
+ CmpRHSReg, CmpRHSIsKill, CC);
+ unsigned AndReg = fastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
+ LHSReg, LHSIsKill);
+ unsigned AndNReg = fastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
+ RHSReg, RHSIsKill);
+ unsigned ResultReg = fastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
+ AndReg, /*IsKill=*/true);
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
+ // These are pseudo CMOV instructions and will be later expanded into control-
+ // flow.
+ unsigned Opc;
+ switch (RetVT.SimpleTy) {
+ default: return false;
+ case MVT::i8: Opc = X86::CMOV_GR8; break;
+ case MVT::i16: Opc = X86::CMOV_GR16; break;
+ case MVT::i32: Opc = X86::CMOV_GR32; break;
+ case MVT::f32: Opc = X86::CMOV_FR32; break;
+ case MVT::f64: Opc = X86::CMOV_FR64; break;
+ }
+
+ const Value *Cond = I->getOperand(0);
+ X86::CondCode CC = X86::COND_NE;
+
+ // Optimize conditions coming from a compare if both instructions are in the
+ // same basic block (values defined in other basic blocks may not have
+ // initialized registers).
+ const auto *CI = dyn_cast<CmpInst>(Cond);
+ if (CI && (CI->getParent() == I->getParent())) {
+ bool NeedSwap;
+ std::tie(CC, NeedSwap) = getX86ConditionCode(CI->getPredicate());
+ if (CC > X86::LAST_VALID_COND)
+ return false;
+
+ const Value *CmpLHS = CI->getOperand(0);
+ const Value *CmpRHS = CI->getOperand(1);
+
+ if (NeedSwap)
+ std::swap(CmpLHS, CmpRHS);
+
+ EVT CmpVT = TLI.getValueType(CmpLHS->getType());
+ if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
+ return false;
+ } else {
+ unsigned CondReg = getRegForValue(Cond);
+ if (CondReg == 0)
+ return false;
+ bool CondIsKill = hasTrivialKill(Cond);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
+ .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);
+ }
+
+ const Value *LHS = I->getOperand(1);
+ const Value *RHS = I->getOperand(2);
+
+ unsigned LHSReg = getRegForValue(LHS);
+ bool LHSIsKill = hasTrivialKill(LHS);
+
+ unsigned RHSReg = getRegForValue(RHS);
+ bool RHSIsKill = hasTrivialKill(RHS);
+
+ if (!LHSReg || !RHSReg)
+ return false;
+
+ const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
+
+ unsigned ResultReg =
+ fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+bool X86FastISel::X86SelectSelect(const Instruction *I) {
+ MVT RetVT;
+ if (!isTypeLegal(I->getType(), RetVT))
+ return false;
+
+ // Check if we can fold the select.
+ if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+ const Value *Opnd = nullptr;
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;
+ case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break;
+ }
+ // No need for a select anymore - this is an unconditional move.
+ if (Opnd) {
+ unsigned OpReg = getRegForValue(Opnd);
+ if (OpReg == 0)
+ return false;
+ bool OpIsKill = hasTrivialKill(Opnd);
+ const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(OpReg, getKillRegState(OpIsKill));
+ updateValueMap(I, ResultReg);
+ return true;
+ }
+ }
+
+ // First try to use real conditional move instructions.
+ if (X86FastEmitCMoveSelect(RetVT, I))
+ return true;
+
+ // Try to use a sequence of SSE instructions to simulate a conditional move.
+ if (X86FastEmitSSESelect(RetVT, I))
+ return true;
+
+ // Fall-back to pseudo conditional move instructions, which will be later
+ // converted to control-flow.
+ if (X86FastEmitPseudoSelect(RetVT, I))
+ return true;
+
+ return false;
+}
+
+bool X86FastISel::X86SelectFPExt(const Instruction *I) {
+ // fpext from float to double.
+ if (X86ScalarSSEf64 &&
+ I->getType()->isDoubleTy()) {
+ const Value *V = I->getOperand(0);
+ if (V->getType()->isFloatTy()) {
+ unsigned OpReg = getRegForValue(V);
+ if (OpReg == 0) return false;
+ unsigned ResultReg = createResultReg(&X86::FR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(X86::CVTSS2SDrr), ResultReg)
+ .addReg(OpReg);
+ updateValueMap(I, ResultReg);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
+ if (X86ScalarSSEf64) {
+ if (I->getType()->isFloatTy()) {
+ const Value *V = I->getOperand(0);
+ if (V->getType()->isDoubleTy()) {
+ unsigned OpReg = getRegForValue(V);
+ if (OpReg == 0) return false;
+ unsigned ResultReg = createResultReg(&X86::FR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(X86::CVTSD2SSrr), ResultReg)
+ .addReg(OpReg);
+ updateValueMap(I, ResultReg);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+bool X86FastISel::X86SelectTrunc(const Instruction *I) {
+ EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(I->getType());
+
+ // This code only handles truncation to byte.
+ if (DstVT != MVT::i8 && DstVT != MVT::i1)
+ return false;
+ if (!TLI.isTypeLegal(SrcVT))
+ return false;
+
+ unsigned InputReg = getRegForValue(I->getOperand(0));
+ if (!InputReg)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+
+ if (SrcVT == MVT::i8) {
+ // Truncate from i8 to i1; no code needed.
+ updateValueMap(I, InputReg);
+ return true;
+ }
+
+ if (!Subtarget->is64Bit()) {
+    // If we're on x86-32, we can't extract an i8 from a general register.
+ // First issue a copy to GR16_ABCD or GR32_ABCD.
+ const TargetRegisterClass *CopyRC =
+ (SrcVT == MVT::i16) ? &X86::GR16_ABCDRegClass : &X86::GR32_ABCDRegClass;
+ unsigned CopyReg = createResultReg(CopyRC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), CopyReg).addReg(InputReg);
+ InputReg = CopyReg;
+ }
+
+ // Issue an extract_subreg.
+ unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,
+ InputReg, /*Kill=*/true,
+ X86::sub_8bit);
+ if (!ResultReg)
+ return false;
+
+ updateValueMap(I, ResultReg);
+ return true;
+}
+
+bool X86FastISel::IsMemcpySmall(uint64_t Len) {
+ return Len <= (Subtarget->is64Bit() ? 32 : 16);
+}
+
+bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
+ X86AddressMode SrcAM, uint64_t Len) {
+
+  // Make sure we don't bloat code by inlining very large memcpys.
+ if (!IsMemcpySmall(Len))
+ return false;
+
+ bool i64Legal = Subtarget->is64Bit();
+
+ // We don't care about alignment here since we just emit integer accesses.
+ while (Len) {
+ MVT VT;
+ if (Len >= 8 && i64Legal)
+ VT = MVT::i64;
+ else if (Len >= 4)
+ VT = MVT::i32;
+ else if (Len >= 2)
+ VT = MVT::i16;
+ else
+ VT = MVT::i8;
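+    // For example, a 7-byte copy on x86-64 is emitted as an i32, then an
+    // i16, then an i8 load/store pair.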
+
+ unsigned Reg;
+    bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
+    RV &= X86FastEmitStore(VT, Reg, /*Kill=*/true, DestAM);
+    assert(RV && "Failed to emit load or store??");
+    (void)RV; // Silence the unused-variable warning in release builds.
+
+ unsigned Size = VT.getSizeInBits()/8;
+ Len -= Size;
+ DestAM.Disp += Size;
+ SrcAM.Disp += Size;
+ }
+
+ return true;
+}
+
+bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
+ // FIXME: Handle more intrinsics.
+ switch (II->getIntrinsicID()) {
+ default: return false;
+ case Intrinsic::frameaddress: {
+ Type *RetTy = II->getCalledFunction()->getReturnType();
+
+ MVT VT;
+ if (!isTypeLegal(RetTy, VT))
+ return false;
+
+ unsigned Opc;
+ const TargetRegisterClass *RC = nullptr;
+
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Invalid result type for frameaddress.");
+ case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
+ case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
+ }
+
+ // This needs to be set before we call getPtrSizedFrameRegister, otherwise
+ // we get the wrong frame register.
+ MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+
+ const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
+ TM.getSubtargetImpl()->getRegisterInfo());
+ unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*(FuncInfo.MF));
+ assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
+ (FrameReg == X86::EBP && VT == MVT::i32)) &&
+ "Invalid Frame Register!");
+
+    // Always make a copy of the frame register to a vreg first, so that we
+ // never directly reference the frame register (the TwoAddressInstruction-
+ // Pass doesn't like that).
+ unsigned SrcReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
+
+ // Now recursively load from the frame address.
+ // movq (%rbp), %rax
+ // movq (%rax), %rax
+ // movq (%rax), %rax
+ // ...
+ unsigned DestReg;
+ unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
+ while (Depth--) {
+ DestReg = createResultReg(RC);
+ addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), DestReg), SrcReg);
+ SrcReg = DestReg;
+ }
+
+ updateValueMap(II, SrcReg);
+ return true;
+ }
+ case Intrinsic::memcpy: {
+ const MemCpyInst *MCI = cast<MemCpyInst>(II);
+    // Don't handle volatile memcpys; variable-length ones are lowered to a
+    // libcall below.
+ if (MCI->isVolatile())
+ return false;
+
+ if (isa<ConstantInt>(MCI->getLength())) {
+      // Small memcpys are common enough that we want to do them
+ // without a call if possible.
+ uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
+ if (IsMemcpySmall(Len)) {
+ X86AddressMode DestAM, SrcAM;
+ if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
+ !X86SelectAddress(MCI->getRawSource(), SrcAM))
+ return false;
+ TryEmitSmallMemcpy(DestAM, SrcAM, Len);
+ return true;
+ }
+ }
+
+ unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
+ if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
+ return false;
+
+ if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
+ return false;
+
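+    // Lower to the memcpy libcall, forwarding dest, src, and length but
+    // dropping the intrinsic's trailing alignment and isvolatile operands.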
+ return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
+ }
+ case Intrinsic::memset: {
+ const MemSetInst *MSI = cast<MemSetInst>(II);
+
+ if (MSI->isVolatile())
+ return false;
+
+ unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
+ if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
+ return false;
+
+ if (MSI->getDestAddressSpace() > 255)
+ return false;
+
+ return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
+ }
+ case Intrinsic::stackprotector: {
+ // Emit code to store the stack guard onto the stack.
+ EVT PtrTy = TLI.getPointerTy();
+
+ const Value *Op1 = II->getArgOperand(0); // The guard's value.
+ const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));
+
+    FuncInfo.MF->getFrameInfo()->setStackProtectorIndex(
+        FuncInfo.StaticAllocaMap[Slot]);
+
+ // Grab the frame index.
+ X86AddressMode AM;
+ if (!X86SelectAddress(Slot, AM)) return false;
+ if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
+ return true;
+ }
+ case Intrinsic::dbg_declare: {
+ const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
+ X86AddressMode AM;
+ assert(DI->getAddress() && "Null address should be checked earlier!");
+ if (!X86SelectAddress(DI->getAddress(), AM))
+ return false;
+    // (Named to avoid shadowing the IntrinsicInst parameter II.)
+    const MCInstrDesc &DbgValDesc = TII.get(TargetOpcode::DBG_VALUE);
+    // FIXME may need to add RegState::Debug to any registers produced,
+    // although ESP/EBP should be the only ones at the moment.
+    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+                           DbgValDesc), AM)
+ .addImm(0)
+ .addMetadata(DI->getVariable())
+ .addMetadata(DI->getExpression());
+ return true;
+ }
+ case Intrinsic::trap: {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP));
+ return true;
+ }
+ case Intrinsic::sqrt: {
+ if (!Subtarget->hasSSE1())
+ return false;
+
+ Type *RetTy = II->getCalledFunction()->getReturnType();
+
+ MVT VT;
+ if (!isTypeLegal(RetTy, VT))
+ return false;
+
+ // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
+ // is not generated by FastISel yet.
+ // FIXME: Update this code once tablegen can handle it.
+ static const unsigned SqrtOpc[2][2] = {
+ {X86::SQRTSSr, X86::VSQRTSSr},
+ {X86::SQRTSDr, X86::VSQRTSDr}
+ };
+ bool HasAVX = Subtarget->hasAVX();
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (VT.SimpleTy) {
+ default: return false;
+ case MVT::f32: Opc = SqrtOpc[0][HasAVX]; RC = &X86::FR32RegClass; break;
+ case MVT::f64: Opc = SqrtOpc[1][HasAVX]; RC = &X86::FR64RegClass; break;
+ }
+
+ const Value *SrcVal = II->getArgOperand(0);
+ unsigned SrcReg = getRegForValue(SrcVal);
+
+ if (SrcReg == 0)
+ return false;
+
+ unsigned ImplicitDefReg = 0;
+ if (HasAVX) {
+ ImplicitDefReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
+ }
+
+ unsigned ResultReg = createResultReg(RC);
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ ResultReg);
+
+ if (ImplicitDefReg)
+ MIB.addReg(ImplicitDefReg);
+
+ MIB.addReg(SrcReg);
+
+ updateValueMap(II, ResultReg);
+ return true;
+ }
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::umul_with_overflow: {
+ // This implements the basic lowering of the xalu with overflow intrinsics
+ // into add/sub/mul followed by either seto or setb.
+ const Function *Callee = II->getCalledFunction();
+ auto *Ty = cast<StructType>(Callee->getReturnType());
+ Type *RetTy = Ty->getTypeAtIndex(0U);
+ Type *CondTy = Ty->getTypeAtIndex(1);
+
+ MVT VT;
+ if (!isTypeLegal(RetTy, VT))
+ return false;
+
+ if (VT < MVT::i8 || VT > MVT::i64)
+ return false;
+
+ const Value *LHS = II->getArgOperand(0);
+ const Value *RHS = II->getArgOperand(1);
+
+ // Canonicalize immediate to the RHS.
+ if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
+ isCommutativeIntrinsic(II))
+ std::swap(LHS, RHS);
+
+ bool UseIncDec = false;
+ if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isOne())
+ UseIncDec = true;
+
+ unsigned BaseOpc, CondOpc;
+ switch (II->getIntrinsicID()) {
+ default: llvm_unreachable("Unexpected intrinsic!");
+ case Intrinsic::sadd_with_overflow:
+ BaseOpc = UseIncDec ? unsigned(X86ISD::INC) : unsigned(ISD::ADD);
+ CondOpc = X86::SETOr;
+ break;
+ case Intrinsic::uadd_with_overflow:
+ BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break;
+ case Intrinsic::ssub_with_overflow:
+ BaseOpc = UseIncDec ? unsigned(X86ISD::DEC) : unsigned(ISD::SUB);
+ CondOpc = X86::SETOr;
+ break;
+ case Intrinsic::usub_with_overflow:
+ BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break;
+ case Intrinsic::smul_with_overflow:
+ BaseOpc = X86ISD::SMUL; CondOpc = X86::SETOr; break;
+ case Intrinsic::umul_with_overflow:
+ BaseOpc = X86ISD::UMUL; CondOpc = X86::SETOr; break;
+ }
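+    // SETOr reads OF, which is set on signed overflow; SETBr reads CF, which
+    // ADD/SUB set on unsigned carry/borrow.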
+
+ unsigned LHSReg = getRegForValue(LHS);
+ if (LHSReg == 0)
+ return false;
+ bool LHSIsKill = hasTrivialKill(LHS);
+
+ unsigned ResultReg = 0;
+ // Check if we have an immediate version.
+ if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
+ static const unsigned Opc[2][4] = {
+ { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
+ { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
+ };
+
+ if (BaseOpc == X86ISD::INC || BaseOpc == X86ISD::DEC) {
+ ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ bool IsDec = BaseOpc == X86ISD::DEC;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
+ .addReg(LHSReg, getKillRegState(LHSIsKill));
+ } else
+ ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
+ CI->getZExtValue());
+ }
+
+ unsigned RHSReg;
+ bool RHSIsKill;
+ if (!ResultReg) {
+ RHSReg = getRegForValue(RHS);
+ if (RHSReg == 0)
+ return false;
+ RHSIsKill = hasTrivialKill(RHS);
+ ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
+ RHSIsKill);
+ }
+
+ // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
+ // it manually.
+ if (BaseOpc == X86ISD::UMUL && !ResultReg) {
+ static const unsigned MULOpc[] =
+ { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
+ static const unsigned Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
+ // First copy the first operand into RAX, which is an implicit input to
+ // the X86::MUL*r instruction.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
+ .addReg(LHSReg, getKillRegState(LHSIsKill));
+ ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
+ TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
+ } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
+ static const unsigned MULOpc[] =
+ { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
+ if (VT == MVT::i8) {
+ // Copy the first operand into AL, which is an implicit input to the
+ // X86::IMUL8r instruction.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), X86::AL)
+ .addReg(LHSReg, getKillRegState(LHSIsKill));
+ ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
+ RHSIsKill);
+ } else
+ ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
+ TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
+ RHSReg, RHSIsKill);
+ }
+
+ if (!ResultReg)
+ return false;
+
+ unsigned ResultReg2 = FuncInfo.CreateRegs(CondTy);
+ assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc),
+ ResultReg2);
+
+ updateValueMap(II, ResultReg, 2);
+ return true;
+ }
+ case Intrinsic::x86_sse_cvttss2si:
+ case Intrinsic::x86_sse_cvttss2si64:
+ case Intrinsic::x86_sse2_cvttsd2si:
+ case Intrinsic::x86_sse2_cvttsd2si64: {
+ bool IsInputDouble;
+ switch (II->getIntrinsicID()) {
+ default: llvm_unreachable("Unexpected intrinsic.");
+ case Intrinsic::x86_sse_cvttss2si:
+ case Intrinsic::x86_sse_cvttss2si64:
+ if (!Subtarget->hasSSE1())
+ return false;
+ IsInputDouble = false;
+ break;
+ case Intrinsic::x86_sse2_cvttsd2si:
+ case Intrinsic::x86_sse2_cvttsd2si64:
+ if (!Subtarget->hasSSE2())
+ return false;
+ IsInputDouble = true;
+ break;
+ }
+
+ Type *RetTy = II->getCalledFunction()->getReturnType();
+ MVT VT;
+ if (!isTypeLegal(RetTy, VT))
+ return false;
+
+ static const unsigned CvtOpc[2][2][2] = {
+ { { X86::CVTTSS2SIrr, X86::VCVTTSS2SIrr },
+ { X86::CVTTSS2SI64rr, X86::VCVTTSS2SI64rr } },
+ { { X86::CVTTSD2SIrr, X86::VCVTTSD2SIrr },
+ { X86::CVTTSD2SI64rr, X86::VCVTTSD2SI64rr } }
+ };
+ bool HasAVX = Subtarget->hasAVX();
+ unsigned Opc;
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected result type.");
+ case MVT::i32: Opc = CvtOpc[IsInputDouble][0][HasAVX]; break;
+ case MVT::i64: Opc = CvtOpc[IsInputDouble][1][HasAVX]; break;
+ }
+
+ // Check if we can fold insertelement instructions into the convert.
+ const Value *Op = II->getArgOperand(0);
+ while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
+ const Value *Index = IE->getOperand(2);
+ if (!isa<ConstantInt>(Index))
+ break;
+ unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
+
+ if (Idx == 0) {
+ Op = IE->getOperand(1);
+ break;
+ }
+ Op = IE->getOperand(0);
+ }
+
+ unsigned Reg = getRegForValue(Op);
+ if (Reg == 0)
+ return false;
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(Reg);
+
+ updateValueMap(II, ResultReg);
+ return true;
+ }
+ }
+}
+
+bool X86FastISel::fastLowerArguments() {
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ const Function *F = FuncInfo.Fn;
+ if (F->isVarArg())
+ return false;
+
+ CallingConv::ID CC = F->getCallingConv();
+ if (CC != CallingConv::C)
+ return false;
+
+ if (Subtarget->isCallingConvWin64(CC))
+ return false;
+
+ if (!Subtarget->is64Bit())
+ return false;
+
+  // Only handle simple cases, i.e. up to 6 i32/i64 scalar arguments.
+ unsigned GPRCnt = 0;
+ unsigned FPRCnt = 0;
+ unsigned Idx = 0;
+ for (auto const &Arg : F->args()) {
+    // Attribute index 0 is the return value, so the first argument is at
+    // index 1.
+ ++Idx;
+ if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::Nest))
+ return false;
+
+ Type *ArgTy = Arg.getType();
+ if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
+ return false;
+
+ EVT ArgVT = TLI.getValueType(ArgTy);
+ if (!ArgVT.isSimple()) return false;
+ switch (ArgVT.getSimpleVT().SimpleTy) {
+ default: return false;
+ case MVT::i32:
+ case MVT::i64:
+ ++GPRCnt;
+ break;
+ case MVT::f32:
+ case MVT::f64:
+ if (!Subtarget->hasSSE1())
+ return false;
+ ++FPRCnt;
+ break;
+ }
+
+ if (GPRCnt > 6)
+ return false;
+
+ if (FPRCnt > 8)
+ return false;
+ }
+
+ static const MCPhysReg GPR32ArgRegs[] = {
+ X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
+ };
+ static const MCPhysReg GPR64ArgRegs[] = {
+ X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
+ };
+ static const MCPhysReg XMMArgRegs[] = {
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
+ X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
+ };
+
+ unsigned GPRIdx = 0;
+ unsigned FPRIdx = 0;
+ for (auto const &Arg : F->args()) {
+ MVT VT = TLI.getSimpleValueType(Arg.getType());
+ const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
+ unsigned SrcReg;
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type.");
+ case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
+ case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
+ case MVT::f32: // fall-through
+ case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
+ }
+ unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
+ // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
+ // Without this, EmitLiveInCopies may eliminate the livein if its only
+ // use is a bitcast (which isn't turned into an instruction).
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(DstReg, getKillRegState(true));
+ updateValueMap(&Arg, ResultReg);
+ }
+ return true;
+}
+
+static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget,
+ CallingConv::ID CC,
+ ImmutableCallSite *CS) {
+ if (Subtarget->is64Bit())
+ return 0;
+ if (Subtarget->getTargetTriple().isOSMSVCRT())
+ return 0;
+ if (CC == CallingConv::Fast || CC == CallingConv::GHC ||
+ CC == CallingConv::HiPE)
+ return 0;
+ if (CS && !CS->paramHasAttr(1, Attribute::StructRet))
+ return 0;
+ if (CS && CS->paramHasAttr(1, Attribute::InReg))
+ return 0;
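+  // Otherwise the callee pops the hidden 4-byte sret pointer off the stack
+  // on return, and the caller must account for those bytes.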
+ return 4;
+}
+
+bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
+ auto &OutVals = CLI.OutVals;
+ auto &OutFlags = CLI.OutFlags;
+ auto &OutRegs = CLI.OutRegs;
+ auto &Ins = CLI.Ins;
+ auto &InRegs = CLI.InRegs;
+ CallingConv::ID CC = CLI.CallConv;
+ bool &IsTailCall = CLI.IsTailCall;
+ bool IsVarArg = CLI.IsVarArg;
+ const Value *Callee = CLI.Callee;
+ const char *SymName = CLI.SymName;
+
+ bool Is64Bit = Subtarget->is64Bit();
+ bool IsWin64 = Subtarget->isCallingConvWin64(CC);
+
+  // Handle only the C, fastcc, webkit_js, x86_fastcall, and x86-64
+  // Win64/SysV calling conventions for now.
+ switch (CC) {
+ default: return false;
+ case CallingConv::C:
+ case CallingConv::Fast:
+ case CallingConv::WebKit_JS:
+ case CallingConv::X86_FastCall:
+ case CallingConv::X86_64_Win64:
+ case CallingConv::X86_64_SysV:
+ break;
+ }
+
+ // Allow SelectionDAG isel to handle tail calls.
+ if (IsTailCall)
+ return false;
+
+ // fastcc with -tailcallopt is intended to provide a guaranteed
+ // tail call optimization. Fastisel doesn't know how to do that.
+ if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
+ return false;
+
+ // Don't know how to handle Win64 varargs yet. Nothing special needed for
+ // x86-32. Special handling for x86-64 is implemented.
+ if (IsVarArg && IsWin64)
+ return false;
+
+ // Don't know about inalloca yet.
+ if (CLI.CS && CLI.CS->hasInAllocaArgument())
+ return false;
+
+ // Fast-isel doesn't know about callee-pop yet.
+ if (X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
+ TM.Options.GuaranteedTailCallOpt))
+ return false;
+
+ SmallVector<MVT, 16> OutVTs;
+ SmallVector<unsigned, 16> ArgRegs;
+
+ // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
+  // instruction. This is safe because all calling conventions that FastISel
+  // supports on x86 promote such arguments the same way.
+ for (int i = 0, e = OutVals.size(); i != e; ++i) {
+ Value *&Val = OutVals[i];
+ ISD::ArgFlagsTy Flags = OutFlags[i];
+ if (auto *CI = dyn_cast<ConstantInt>(Val)) {
+ if (CI->getBitWidth() < 32) {
+ if (Flags.isSExt())
+ Val = ConstantExpr::getSExt(CI, Type::getInt32Ty(CI->getContext()));
+ else
+ Val = ConstantExpr::getZExt(CI, Type::getInt32Ty(CI->getContext()));
+ }
+ }
+
+ // Passing bools around ends up doing a trunc to i1 and passing it.
+ // Codegen this as an argument + "and 1".
+ MVT VT;
+ auto *TI = dyn_cast<TruncInst>(Val);
+ unsigned ResultReg;
+ if (TI && TI->getType()->isIntegerTy(1) && CLI.CS &&
+ (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
+ TI->hasOneUse()) {
+ Value *PrevVal = TI->getOperand(0);
+ ResultReg = getRegForValue(PrevVal);
+
+ if (!ResultReg)
+ return false;
+
+ if (!isTypeLegal(PrevVal->getType(), VT))
+ return false;
+
+ ResultReg =
+ fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
+ } else {
+ if (!isTypeLegal(Val->getType(), VT))
+ return false;
+ ResultReg = getRegForValue(Val);
+ }
+
+ if (!ResultReg)
+ return false;
+
+ ArgRegs.push_back(ResultReg);
+ OutVTs.push_back(VT);
+ }
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());
+
+ // Allocate shadow area for Win64
+ if (IsWin64)
+ CCInfo.AllocateStack(32, 8);
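+  // The Win64 ABI requires the caller to reserve 32 bytes of stack (home
+  // space for the four register parameters) even when fewer arguments are
+  // passed.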
+
+ CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = CCInfo.getNextStackOffset();
+
+ // Issue CALLSEQ_START
+ unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
+ .addImm(NumBytes);
+
+ // Walk the register/memloc assignments, inserting copies/loads.
+ const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
+ TM.getSubtargetImpl()->getRegisterInfo());
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign const &VA = ArgLocs[i];
+ const Value *ArgVal = OutVals[VA.getValNo()];
+ MVT ArgVT = OutVTs[VA.getValNo()];
+
+ if (ArgVT == MVT::x86mmx)
+ return false;
+
+ unsigned ArgReg = ArgRegs[VA.getValNo()];
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::Full: break;
+ case CCValAssign::SExt: {
+ assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
+ "Unexpected extend");
+ bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
+ ArgVT, ArgReg);
+ assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
+ ArgVT = VA.getLocVT();
+ break;
+ }
+ case CCValAssign::ZExt: {
+ assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
+ "Unexpected extend");
+ bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
+ ArgVT, ArgReg);
+ assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
+ ArgVT = VA.getLocVT();
+ break;
+ }
+ case CCValAssign::AExt: {
+ assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
+ "Unexpected extend");
+ bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg,
+ ArgVT, ArgReg);
+ if (!Emitted)
+ Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
+ ArgVT, ArgReg);
+ if (!Emitted)
+ Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
+ ArgVT, ArgReg);
+
+      assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
+ ArgVT = VA.getLocVT();
+ break;
+ }
+ case CCValAssign::BCvt: {
+ ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
+ /*TODO: Kill=*/false);
+ assert(ArgReg && "Failed to emit a bitcast!");
+ ArgVT = VA.getLocVT();
+ break;
+ }
+ case CCValAssign::VExt:
+ // VExt has not been implemented, so this should be impossible to reach
+      // for now. However, fall back to SelectionDAG isel once it is implemented.
+ return false;
+ case CCValAssign::AExtUpper:
+ case CCValAssign::SExtUpper:
+ case CCValAssign::ZExtUpper:
+ case CCValAssign::FPExt:
+ llvm_unreachable("Unexpected loc info!");
+ case CCValAssign::Indirect:
+ // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
+ // support this.
+ return false;
+ }
+
+ if (VA.isRegLoc()) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
+ OutRegs.push_back(VA.getLocReg());
+ } else {
+ assert(VA.isMemLoc());
+
+ // Don't emit stores for undef values.
+ if (isa<UndefValue>(ArgVal))
+ continue;
+
+ unsigned LocMemOffset = VA.getLocMemOffset();
+ X86AddressMode AM;
+ AM.Base.Reg = RegInfo->getStackRegister();
+ AM.Disp = LocMemOffset;
+ ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];
+ unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
+ MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
+ MachinePointerInfo::getStack(LocMemOffset), MachineMemOperand::MOStore,
+ ArgVT.getStoreSize(), Alignment);
+ if (Flags.isByVal()) {
+ X86AddressMode SrcAM;
+ SrcAM.Base.Reg = ArgReg;
+ if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))
+ return false;
+ } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
+ // If this is a really simple value, emit this with the Value* version
+ // of X86FastEmitStore. If it isn't simple, we don't want to do this,
+ // as it can cause us to reevaluate the argument.
+ if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
+ return false;
+ } else {
+ bool ValIsKill = hasTrivialKill(ArgVal);
+ if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO))
+ return false;
+ }
+ }
+ }
+
+  // ELF/PIC requires the GOT pointer to be set up in EBX before making
+  // function calls through the PLT.
+ if (Subtarget->isPICStyleGOT()) {
+ unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
+ }
+
+ if (Is64Bit && IsVarArg && !IsWin64) {
+ // From AMD64 ABI document:
+ // For calls that may call functions that use varargs or stdargs
+ // (prototype-less calls or calls to functions containing ellipsis (...) in
+ // the declaration) %al is used as hidden argument to specify the number
+ // of SSE registers used. The contents of %al do not need to match exactly
+    // the number of registers, but must be an upper bound on the number of SSE
+ // registers used and is in the range 0 - 8 inclusive.
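+    //
+    // Illustrative example (not part of this patch): for printf("%f %f", x, y)
+    // with x and y in XMM0/XMM1, the move emitted below is "movb $2, %al";
+    // any upper bound in [2, 8] would also satisfy the ABI.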
+
+ // Count the number of XMM registers allocated.
+ static const MCPhysReg XMMArgRegs[] = {
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
+ X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
+ };
+ unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
+ assert((Subtarget->hasSSE1() || !NumXMMRegs)
+ && "SSE registers cannot be used when SSE is disabled");
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
+ X86::AL).addImm(NumXMMRegs);
+ }
+
+ // Materialize callee address in a register. FIXME: GV address can be
+ // handled with a CALLpcrel32 instead.
+ X86AddressMode CalleeAM;
+ if (!X86SelectCallAddress(Callee, CalleeAM))
+ return false;
+
+ unsigned CalleeOp = 0;
+ const GlobalValue *GV = nullptr;
+ if (CalleeAM.GV != nullptr) {
+ GV = CalleeAM.GV;
+ } else if (CalleeAM.Base.Reg != 0) {
+ CalleeOp = CalleeAM.Base.Reg;
+ } else
+ return false;
+
+ // Issue the call.
+ MachineInstrBuilder MIB;
+ if (CalleeOp) {
+ // Register-indirect call.
+ unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))
+ .addReg(CalleeOp);
+ } else {
+ // Direct call.
+ assert(GV && "Not a direct call");
+ unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
+
+ // See if we need any target-specific flags on the GV operand.
+ unsigned char OpFlags = 0;
+
+ // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
+    // external symbols must go through the PLT in PIC mode. If the symbol
+ // has hidden or protected visibility, or if it is static or local, then
+ // we don't need to use the PLT - we can directly call it.
+ if (Subtarget->isTargetELF() &&
+ TM.getRelocationModel() == Reloc::PIC_ &&
+ GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
+ OpFlags = X86II::MO_PLT;
+ } else if (Subtarget->isPICStyleStubAny() &&
+ (GV->isDeclaration() || GV->isWeakForLinker()) &&
+ (!Subtarget->getTargetTriple().isMacOSX() ||
+ Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
+ // PC-relative references to external symbols should go through $stub,
+ // unless we're building with the leopard linker or later, which
+ // automatically synthesizes these stubs.
+ OpFlags = X86II::MO_DARWIN_STUB;
+ }
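+    // For example (illustrative): with MO_PLT set above, the emitted
+    // instruction is "call foo@PLT", letting the dynamic linker resolve
+    // foo lazily.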
+
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
+ if (SymName)
+ MIB.addExternalSymbol(SymName, OpFlags);
+ else
+ MIB.addGlobalAddress(GV, 0, OpFlags);
+ }
+
+ // Add a register mask operand representing the call-preserved registers.
+ // Proper defs for return values will be added by setPhysRegsDeadExcept().
+ MIB.addRegMask(TRI.getCallPreservedMask(CC));
+
+ // Add an implicit use GOT pointer in EBX.
+ if (Subtarget->isPICStyleGOT())
+ MIB.addReg(X86::EBX, RegState::Implicit);
+
+ if (Is64Bit && IsVarArg && !IsWin64)
+ MIB.addReg(X86::AL, RegState::Implicit);
+
+ // Add implicit physical register uses to the call.
+ for (auto Reg : OutRegs)
+ MIB.addReg(Reg, RegState::Implicit);
+
+ // Issue CALLSEQ_END
+ unsigned NumBytesForCalleeToPop =
+ computeBytesPoppedByCallee(Subtarget, CC, CLI.CS);
+ unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
+ .addImm(NumBytes).addImm(NumBytesForCalleeToPop);
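+  // Illustrative note: for a callee-pop convention such as x86 stdcall, a
+  // callee taking 8 bytes of arguments returns with "ret $8"; that is the
+  // NumBytesForCalleeToPop recorded on the frame-destroy pseudo above.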
+
+ // Now handle call return values.
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
+ CLI.RetTy->getContext());
+ CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
+
+ // Copy all of the result registers out of their specified physreg.
+ unsigned ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ EVT CopyVT = VA.getValVT();
+ unsigned CopyReg = ResultReg + i;
+
+ // If this is x86-64, and we disabled SSE, we can't return FP values
+ if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
+ ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
+ report_fatal_error("SSE register return with SSE disabled");
+ }
+
+ // If we prefer to use the value in xmm registers, copy it out as f80 and
+ // use a truncate to move it from fp stack reg to xmm reg.
+ if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
+ isScalarFPTypeInSSEReg(VA.getValVT())) {
+ CopyVT = MVT::f80;
+ CopyReg = createResultReg(&X86::RFP80RegClass);
+ }
+
+ // Copy out the result.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.getLocReg());
+ InRegs.push_back(VA.getLocReg());
+
+ // Round the f80 to the right size, which also moves it to the appropriate
+ // xmm register. This is accomplished by storing the f80 value in memory
+ // and then loading it back.
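+    // Illustrative example (not part of this patch): an f64 returned in FP0
+    // is copied out as f80, stored with ST_Fp80m64, and reloaded with
+    // MOVSDrm, performing the f80 -> f64 rounding as a side effect.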
+ if (CopyVT != VA.getValVT()) {
+ EVT ResVT = VA.getValVT();
+ unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
+ unsigned MemSize = ResVT.getSizeInBits()/8;
+ int FI = MFI.CreateStackObject(MemSize, MemSize, false);
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc)), FI)
+ .addReg(CopyReg);
+ Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg + i), FI);
+ }
+ }
+
+ CLI.ResultReg = ResultReg;
+ CLI.NumResultRegs = RVLocs.size();
+ CLI.Call = MIB;
+
+ return true;
+}
+
+bool
+X86FastISel::fastSelectInstruction(const Instruction *I) {
+ switch (I->getOpcode()) {
+ default: break;
+ case Instruction::Load:
+ return X86SelectLoad(I);
+ case Instruction::Store:
+ return X86SelectStore(I);
+ case Instruction::Ret:
+ return X86SelectRet(I);
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ return X86SelectCmp(I);
+ case Instruction::ZExt:
+ return X86SelectZExt(I);
+ case Instruction::Br:
+ return X86SelectBranch(I);
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::Shl:
+ return X86SelectShift(I);
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+ case Instruction::SRem:
+ case Instruction::URem:
+ return X86SelectDivRem(I);
+ case Instruction::Select:
+ return X86SelectSelect(I);
+ case Instruction::Trunc:
+ return X86SelectTrunc(I);
+ case Instruction::FPExt:
+ return X86SelectFPExt(I);
+ case Instruction::FPTrunc:
+ return X86SelectFPTrunc(I);
+ case Instruction::IntToPtr: // Deliberate fall-through.
+ case Instruction::PtrToInt: {
+ EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(I->getType());
+ if (DstVT.bitsGT(SrcVT))
+ return X86SelectZExt(I);
+ if (DstVT.bitsLT(SrcVT))
+ return X86SelectTrunc(I);
+ unsigned Reg = getRegForValue(I->getOperand(0));
+ if (Reg == 0) return false;
+ updateValueMap(I, Reg);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
+ if (VT > MVT::i64)
+ return 0;
+
+ uint64_t Imm = CI->getZExtValue();
+ if (Imm == 0) {
+ unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
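+    // Illustrative note: MOV32r0 lowers to a dependency-breaking
+    // "xorl %reg, %reg" zero idiom; narrower zero values are then extracted
+    // as subregisters of it below instead of using an immediate move.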
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type");
+ case MVT::i1:
+ case MVT::i8:
+ return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
+ X86::sub_8bit);
+ case MVT::i16:
+ return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
+ X86::sub_16bit);
+ case MVT::i32:
+ return SrcReg;
+ case MVT::i64: {
+ unsigned ResultReg = createResultReg(&X86::GR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
+ .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
+ return ResultReg;
+ }
+ }
+ }
+
+ unsigned Opc = 0;
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type");
+ case MVT::i1: VT = MVT::i8; // fall-through
+ case MVT::i8: Opc = X86::MOV8ri; break;
+ case MVT::i16: Opc = X86::MOV16ri; break;
+ case MVT::i32: Opc = X86::MOV32ri; break;
+ case MVT::i64: {
+ if (isUInt<32>(Imm))
+ Opc = X86::MOV32ri;
+ else if (isInt<32>(Imm))
+ Opc = X86::MOV64ri32;
+ else
+ Opc = X86::MOV64ri;
+ break;
+ }
+ }
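+  // Illustrative example (not part of this patch): Imm = 0x80000000 passes
+  // isUInt<32> and is built with MOV32ri, with SUBREG_TO_REG below zeroing
+  // the upper half; Imm = -1 fails isUInt<32> but passes isInt<32>, so it
+  // uses the sign-extending MOV64ri32 instead.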
+ if (VT == MVT::i64 && Opc == X86::MOV32ri) {
+ unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
+ unsigned ResultReg = createResultReg(&X86::GR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
+ .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
+ return ResultReg;
+ }
+ return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
+}
+
+unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
+ if (CFP->isNullValue())
+ return fastMaterializeFloatZero(CFP);
+
+  // Can't handle code models other than Small and Large yet.
+ CodeModel::Model CM = TM.getCodeModel();
+ if (CM != CodeModel::Small && CM != CodeModel::Large)
+ return 0;
+
+ // Get opcode and regclass of the output for the given load instruction.
+ unsigned Opc = 0;
+ const TargetRegisterClass *RC = nullptr;
+ switch (VT.SimpleTy) {
+ default: return 0;
+ case MVT::f32:
+ if (X86ScalarSSEf32) {
+ Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
+ RC = &X86::FR32RegClass;
+ } else {
+ Opc = X86::LD_Fp32m;
+ RC = &X86::RFP32RegClass;
+ }
+ break;
+ case MVT::f64:
+ if (X86ScalarSSEf64) {
+ Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
+ RC = &X86::FR64RegClass;
+ } else {
+ Opc = X86::LD_Fp64m;
+ RC = &X86::RFP64RegClass;
+ }
+ break;
+ case MVT::f80:
+ // No f80 support yet.
+ return 0;
+ }
+
+ // MachineConstantPool wants an explicit alignment.
+ unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
+ if (Align == 0) {
+ // Alignment of vector types. FIXME!
+ Align = DL.getTypeAllocSize(CFP->getType());
+ }
+
+ // x86-32 PIC requires a PIC base register for constant pools.
+ unsigned PICBase = 0;
+ unsigned char OpFlag = 0;
+ if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic
+ OpFlag = X86II::MO_PIC_BASE_OFFSET;
+ PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
+ } else if (Subtarget->isPICStyleGOT()) {
+ OpFlag = X86II::MO_GOTOFF;
+ PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
+ } else if (Subtarget->isPICStyleRIPRel() &&
+ TM.getCodeModel() == CodeModel::Small) {
+ PICBase = X86::RIP;
+ }
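+  // For example (illustrative): with MO_GOTOFF selected above, the load
+  // emitted below becomes something like
+  //   movss .LCPI0_0@GOTOFF(%ebx), %xmm0
+  // addressing the constant pool entry relative to the GOT base register.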
+
+ // Create the load from the constant pool.
+ unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);
+ unsigned ResultReg = createResultReg(RC);
+
+ if (CM == CodeModel::Large) {
+ unsigned AddrReg = createResultReg(&X86::GR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
+ AddrReg)
+ .addConstantPoolIndex(CPI, 0, OpFlag);
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg);
+ addDirectMem(MIB, AddrReg);
+ MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
+ MachinePointerInfo::getConstantPool(), MachineMemOperand::MOLoad,
+ TM.getDataLayout()->getPointerSize(), Align);
+ MIB->addMemOperand(*FuncInfo.MF, MMO);
+ return ResultReg;
+ }
+
+ addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg),
+ CPI, PICBase, OpFlag);
+ return ResultReg;
+}
+
+unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
+ // Can't handle alternate code models yet.
+ if (TM.getCodeModel() != CodeModel::Small)
+ return 0;
+
+ // Materialize addresses with LEA/MOV instructions.
+ X86AddressMode AM;
+ if (X86SelectAddress(GV, AM)) {
+    // If the expression is just a basereg, then we're done; otherwise we need
+    // to emit an LEA.
+ if (AM.BaseType == X86AddressMode::RegBase &&
+ AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
+ return AM.Base.Reg;
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ if (TM.getRelocationModel() == Reloc::Static &&
+ TLI.getPointerTy() == MVT::i64) {
+      // The displacement could be more than 32 bits away, so we need to use
+      // an instruction with a 64-bit immediate.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
+ ResultReg)
+ .addGlobalAddress(GV);
+ } else {
+ unsigned Opc = TLI.getPointerTy() == MVT::i32
+ ? (Subtarget->isTarget64BitILP32()
+ ? X86::LEA64_32r : X86::LEA32r)
+ : X86::LEA64r;
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg), AM);
+ }
+ return ResultReg;
+ }
+ return 0;
+}
+
+unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
+ EVT CEVT = TLI.getValueType(C->getType(), true);
+
+ // Only handle simple types.
+ if (!CEVT.isSimple())
+ return 0;
+ MVT VT = CEVT.getSimpleVT();
+
+ if (const auto *CI = dyn_cast<ConstantInt>(C))
+ return X86MaterializeInt(CI, VT);
+ else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+ return X86MaterializeFP(CFP, VT);
+ else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+ return X86MaterializeGV(GV, VT);
+
+ return 0;
+}
+
+unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
+ // Fail on dynamic allocas. At this point, getRegForValue has already
+ // checked its CSE maps, so if we're here trying to handle a dynamic
+ // alloca, we're not going to succeed. X86SelectAddress has a
+ // check for dynamic allocas, because it's called directly from
+  // various places, but fastMaterializeAlloca also needs a check
+  // in order to avoid recursion between getRegForValue,
+  // X86SelectAddress, and fastMaterializeAlloca.
+ if (!FuncInfo.StaticAllocaMap.count(C))
+ return 0;
+ assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");
+
+ X86AddressMode AM;
+ if (!X86SelectAddress(C, AM))
+ return 0;
+ unsigned Opc = TLI.getPointerTy() == MVT::i32
+ ? (Subtarget->isTarget64BitILP32()
+ ? X86::LEA64_32r : X86::LEA32r)
+ : X86::LEA64r;
+ const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
+ unsigned ResultReg = createResultReg(RC);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg), AM);
+ return ResultReg;
+}
+
+unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
+ MVT VT;
+ if (!isTypeLegal(CF->getType(), VT))
+ return 0;
+
+ // Get opcode and regclass for the given zero.
+ unsigned Opc = 0;
+ const TargetRegisterClass *RC = nullptr;
+ switch (VT.SimpleTy) {
+ default: return 0;
+ case MVT::f32:
+ if (X86ScalarSSEf32) {
+ Opc = X86::FsFLD0SS;
+ RC = &X86::FR32RegClass;
+ } else {
+ Opc = X86::LD_Fp032;
+ RC = &X86::RFP32RegClass;
+ }
+ break;
+ case MVT::f64:
+ if (X86ScalarSSEf64) {
+ Opc = X86::FsFLD0SD;
+ RC = &X86::FR64RegClass;
+ } else {
+ Opc = X86::LD_Fp064;
+ RC = &X86::RFP64RegClass;
+ }
+ break;
+ case MVT::f80:
+ // No f80 support yet.
+ return 0;
+ }
+
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
+ return ResultReg;
+}
+
+
+bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) {
+ const Value *Ptr = LI->getPointerOperand();
+ X86AddressMode AM;
+ if (!X86SelectAddress(Ptr, AM))
+ return false;
+
+ const X86InstrInfo &XII = (const X86InstrInfo &)TII;
+
+ unsigned Size = DL.getTypeAllocSize(LI->getType());
+ unsigned Alignment = LI->getAlignment();
+
+ if (Alignment == 0) // Ensure that codegen never sees alignment 0
+ Alignment = DL.getABITypeAlignment(LI->getType());
+
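+  // Illustrative example (not part of this patch): if MI is "CMP32rr r1, r2"
+  // and r2 is the result of this load, foldMemoryOperandImpl below can
+  // rewrite it to "CMP32rm r1, <addr>", removing the standalone load.
+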
+ SmallVector<MachineOperand, 8> AddrOps;
+ AM.getFullAddress(AddrOps);
+
+ MachineInstr *Result =
+ XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps,
+ Size, Alignment, /*AllowCommute=*/true);
+ if (!Result)
+ return false;
+
+ Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
+ FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);
+ MI->eraseFromParent();
+ return true;
+}
+
+
+namespace llvm {
+ FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) {
+ return new X86FastISel(funcInfo, libInfo);
+ }
+}
-//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//\r
-//\r
-// The LLVM Compiler Infrastructure\r
-//\r
-// This file is distributed under the University of Illinois Open Source\r
-// License. See LICENSE.TXT for details.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-//\r
-// This file contains the X86 implementation of TargetFrameLowering class.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-\r
-#include "X86FrameLowering.h"\r
-#include "X86InstrBuilder.h"\r
-#include "X86InstrInfo.h"\r
-#include "X86MachineFunctionInfo.h"\r
-#include "X86Subtarget.h"\r
-#include "X86TargetMachine.h"\r
-#include "llvm/ADT/SmallSet.h"\r
-#include "llvm/CodeGen/MachineFrameInfo.h"\r
-#include "llvm/CodeGen/MachineFunction.h"\r
-#include "llvm/CodeGen/MachineInstrBuilder.h"\r
-#include "llvm/CodeGen/MachineModuleInfo.h"\r
-#include "llvm/CodeGen/MachineRegisterInfo.h"\r
-#include "llvm/IR/DataLayout.h"\r
-#include "llvm/IR/Function.h"\r
-#include "llvm/MC/MCAsmInfo.h"\r
-#include "llvm/MC/MCSymbol.h"\r
-#include "llvm/Support/CommandLine.h"\r
-#include "llvm/Target/TargetOptions.h"\r
-#include "llvm/Support/Debug.h"\r
-#include <cstdlib>\r
-\r
-using namespace llvm;\r
-\r
-// FIXME: completely move here.\r
-extern cl::opt<bool> ForceStackAlign;\r
-\r
-bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {\r
- return !MF.getFrameInfo()->hasVarSizedObjects() &&\r
- !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();\r
-}\r
-\r
-/// canSimplifyCallFramePseudos - If there is a reserved call frame, the\r
-/// call frame pseudos can be simplified. Having a FP, as in the default\r
-/// implementation, is not sufficient here since we can't always use it.\r
-/// Use a more nuanced condition.\r
-bool\r
-X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {\r
- const X86RegisterInfo *TRI = static_cast<const X86RegisterInfo *>\r
- (MF.getSubtarget().getRegisterInfo());\r
- return hasReservedCallFrame(MF) ||\r
- (hasFP(MF) && !TRI->needsStackRealignment(MF))\r
- || TRI->hasBasePointer(MF);\r
-}\r
-\r
-// needsFrameIndexResolution - Do we need to perform FI resolution for\r
-// this function. Normally, this is required only when the function\r
-// has any stack objects. However, FI resolution actually has another job,\r
-// not apparent from its name - it also lowers the call frame setup/destroy\r
-// pseudo-instructions that were not simplified earlier.\r
-// So, this is required for x86 functions that have push sequences even\r
-// when there are no stack objects.\r
-bool\r
-X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {\r
- return MF.getFrameInfo()->hasStackObjects() ||\r
- MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();\r
-}\r
-\r
-/// hasFP - Return true if the specified function should have a dedicated frame\r
-/// pointer register. This is true if the function has variable sized allocas\r
-/// or if frame pointer elimination is disabled.\r
-bool X86FrameLowering::hasFP(const MachineFunction &MF) const {\r
- const MachineFrameInfo *MFI = MF.getFrameInfo();\r
- const MachineModuleInfo &MMI = MF.getMMI();\r
- const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();\r
-\r
- return (MF.getTarget().Options.DisableFramePointerElim(MF) ||\r
- RegInfo->needsStackRealignment(MF) ||\r
- MFI->hasVarSizedObjects() ||\r
- MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||\r
- MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||\r
- MMI.callsUnwindInit() || MMI.callsEHReturn() ||\r
- MFI->hasStackMap() || MFI->hasPatchPoint());\r
-}\r
-\r
-static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {\r
- if (IsLP64) {\r
- if (isInt<8>(Imm))\r
- return X86::SUB64ri8;\r
- return X86::SUB64ri32;\r
- } else {\r
- if (isInt<8>(Imm))\r
- return X86::SUB32ri8;\r
- return X86::SUB32ri;\r
- }\r
-}\r
-\r
-static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {\r
- if (IsLP64) {\r
- if (isInt<8>(Imm))\r
- return X86::ADD64ri8;\r
- return X86::ADD64ri32;\r
- } else {\r
- if (isInt<8>(Imm))\r
- return X86::ADD32ri8;\r
- return X86::ADD32ri;\r
- }\r
-}\r
-\r
-static unsigned getSUBrrOpcode(unsigned isLP64) {\r
- return isLP64 ? X86::SUB64rr : X86::SUB32rr;\r
-}\r
-\r
-static unsigned getADDrrOpcode(unsigned isLP64) {\r
- return isLP64 ? X86::ADD64rr : X86::ADD32rr;\r
-}\r
-\r
-static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {\r
- if (IsLP64) {\r
- if (isInt<8>(Imm))\r
- return X86::AND64ri8;\r
- return X86::AND64ri32;\r
- }\r
- if (isInt<8>(Imm))\r
- return X86::AND32ri8;\r
- return X86::AND32ri;\r
-}\r
-\r
-static unsigned getLEArOpcode(unsigned IsLP64) {\r
- return IsLP64 ? X86::LEA64r : X86::LEA32r;\r
-}\r
-\r
-/// findDeadCallerSavedReg - Return a caller-saved register that isn't live\r
-/// when it reaches the "return" instruction. We can then pop a stack object\r
-/// to this register without worrying about clobbering it.\r
-static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator &MBBI,\r
- const TargetRegisterInfo &TRI,\r
- bool Is64Bit) {\r
- const MachineFunction *MF = MBB.getParent();\r
- const Function *F = MF->getFunction();\r
- if (!F || MF->getMMI().callsEHReturn())\r
- return 0;\r
-\r
- static const uint16_t CallerSavedRegs32Bit[] = {\r
- X86::EAX, X86::EDX, X86::ECX, 0\r
- };\r
-\r
- static const uint16_t CallerSavedRegs64Bit[] = {\r
- X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,\r
- X86::R8, X86::R9, X86::R10, X86::R11, 0\r
- };\r
-\r
- unsigned Opc = MBBI->getOpcode();\r
- switch (Opc) {\r
- default: return 0;\r
- case X86::RETL:\r
- case X86::RETQ:\r
- case X86::RETIL:\r
- case X86::RETIQ:\r
- case X86::TCRETURNdi:\r
- case X86::TCRETURNri:\r
- case X86::TCRETURNmi:\r
- case X86::TCRETURNdi64:\r
- case X86::TCRETURNri64:\r
- case X86::TCRETURNmi64:\r
- case X86::EH_RETURN:\r
- case X86::EH_RETURN64: {\r
- SmallSet<uint16_t, 8> Uses;\r
- for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {\r
- MachineOperand &MO = MBBI->getOperand(i);\r
- if (!MO.isReg() || MO.isDef())\r
- continue;\r
- unsigned Reg = MO.getReg();\r
- if (!Reg)\r
- continue;\r
- for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)\r
- Uses.insert(*AI);\r
- }\r
-\r
- const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;\r
- for (; *CS; ++CS)\r
- if (!Uses.count(*CS))\r
- return *CS;\r
- }\r
- }\r
-\r
- return 0;\r
-}\r
-\r
-static bool isEAXLiveIn(MachineFunction &MF) {\r
- for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),\r
- EE = MF.getRegInfo().livein_end(); II != EE; ++II) {\r
- unsigned Reg = II->first;\r
-\r
- if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||\r
- Reg == X86::AH || Reg == X86::AL)\r
- return true;\r
- }\r
-\r
- return false;\r
-}\r
-\r
-/// emitSPUpdate - Emit a series of instructions to increment / decrement the\r
-/// stack pointer by a constant value.\r
-static\r
-void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,\r
- unsigned StackPtr, int64_t NumBytes,\r
- bool Is64BitTarget, bool Is64BitStackPtr, bool UseLEA,\r
- const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {\r
- bool isSub = NumBytes < 0;\r
- uint64_t Offset = isSub ? -NumBytes : NumBytes;\r
- unsigned Opc;\r
- if (UseLEA)\r
- Opc = getLEArOpcode(Is64BitStackPtr);\r
- else\r
- Opc = isSub\r
- ? getSUBriOpcode(Is64BitStackPtr, Offset)\r
- : getADDriOpcode(Is64BitStackPtr, Offset);\r
-\r
- uint64_t Chunk = (1LL << 31) - 1;\r
- DebugLoc DL = MBB.findDebugLoc(MBBI);\r
-\r
- while (Offset) {\r
- if (Offset > Chunk) {\r
- // Rather than emit a long series of instructions for large offsets,\r
- // load the offset into a register and do one sub/add\r
- unsigned Reg = 0;\r
-\r
- if (isSub && !isEAXLiveIn(*MBB.getParent()))\r
- Reg = (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX);\r
- else\r
- Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);\r
-\r
- if (Reg) {\r
- Opc = Is64BitTarget ? X86::MOV64ri : X86::MOV32ri;\r
- BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)\r
- .addImm(Offset);\r
- Opc = isSub\r
- ? getSUBrrOpcode(Is64BitTarget)\r
- : getADDrrOpcode(Is64BitTarget);\r
- MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)\r
- .addReg(StackPtr)\r
- .addReg(Reg);\r
- MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.\r
- Offset = 0;\r
- continue;\r
- }\r
- }\r
-\r
- uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;\r
- if (ThisVal == (Is64BitTarget ? 8 : 4)) {\r
- // Use push / pop instead.\r
- unsigned Reg = isSub\r
- ? (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX)\r
- : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);\r
- if (Reg) {\r
- Opc = isSub\r
- ? (Is64BitTarget ? X86::PUSH64r : X86::PUSH32r)\r
- : (Is64BitTarget ? X86::POP64r : X86::POP32r);\r
- MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))\r
- .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));\r
- if (isSub)\r
- MI->setFlag(MachineInstr::FrameSetup);\r
- Offset -= ThisVal;\r
- continue;\r
- }\r
- }\r
-\r
- MachineInstr *MI = nullptr;\r
-\r
- if (UseLEA) {\r
- MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),\r
- StackPtr, false, isSub ? -ThisVal : ThisVal);\r
- } else {\r
- MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)\r
- .addReg(StackPtr)\r
- .addImm(ThisVal);\r
- MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.\r
- }\r
-\r
- if (isSub)\r
- MI->setFlag(MachineInstr::FrameSetup);\r
-\r
- Offset -= ThisVal;\r
- }\r
-}\r
-\r
-/// mergeSPUpdatesUp - Merge a stack-adjusting instruction found immediately\r
-/// above the given iterator into *NumBytes (if provided) and erase it.\r
-static\r
-void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,\r
- unsigned StackPtr, uint64_t *NumBytes = nullptr) {\r
- if (MBBI == MBB.begin()) return;\r
-\r
- MachineBasicBlock::iterator PI = std::prev(MBBI);\r
- unsigned Opc = PI->getOpcode();\r
- if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||\r
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||\r
- Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&\r
- PI->getOperand(0).getReg() == StackPtr) {\r
- if (NumBytes)\r
- *NumBytes += PI->getOperand(2).getImm();\r
- MBB.erase(PI);\r
- } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||\r
- Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&\r
- PI->getOperand(0).getReg() == StackPtr) {\r
- if (NumBytes)\r
- *NumBytes -= PI->getOperand(2).getImm();\r
- MBB.erase(PI);\r
- }\r
-}\r
-\r
-/// mergeSPUpdatesDown - Merge a stack-adjusting instruction found immediately\r
-/// below the given iterator into *NumBytes (if provided) and erase it.\r
-static\r
-void mergeSPUpdatesDown(MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator &MBBI,\r
- unsigned StackPtr, uint64_t *NumBytes = nullptr) {\r
- // FIXME: THIS ISN'T RUN!!!\r
- return;\r
-\r
- if (MBBI == MBB.end()) return;\r
-\r
- MachineBasicBlock::iterator NI = std::next(MBBI);\r
- if (NI == MBB.end()) return;\r
-\r
- unsigned Opc = NI->getOpcode();\r
- if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||\r
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&\r
- NI->getOperand(0).getReg() == StackPtr) {\r
- if (NumBytes)\r
- *NumBytes -= NI->getOperand(2).getImm();\r
- MBB.erase(NI);\r
- MBBI = NI;\r
- } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||\r
- Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&\r
- NI->getOperand(0).getReg() == StackPtr) {\r
- if (NumBytes)\r
- *NumBytes += NI->getOperand(2).getImm();\r
- MBB.erase(NI);\r
- MBBI = NI;\r
- }\r
-}\r
-\r
-/// mergeSPUpdates - Checks the instruction before/after the passed\r
-/// instruction. If it is an ADD/SUB/LEA of the stack pointer, it is deleted and\r
-/// the stack adjustment is returned as a positive value for ADD/LEA and a\r
-/// negative for SUB.\r
-static int mergeSPUpdates(MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator &MBBI, unsigned StackPtr,\r
- bool doMergeWithPrevious) {\r
- if ((doMergeWithPrevious && MBBI == MBB.begin()) ||\r
- (!doMergeWithPrevious && MBBI == MBB.end()))\r
- return 0;\r
-\r
- MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;\r
- MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr\r
- : std::next(MBBI);\r
- unsigned Opc = PI->getOpcode();\r
- int Offset = 0;\r
-\r
- if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||\r
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||\r
- Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&\r
- PI->getOperand(0).getReg() == StackPtr){\r
- Offset += PI->getOperand(2).getImm();\r
- MBB.erase(PI);\r
- if (!doMergeWithPrevious) MBBI = NI;\r
- } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||\r
- Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&\r
- PI->getOperand(0).getReg() == StackPtr) {\r
- Offset -= PI->getOperand(2).getImm();\r
- MBB.erase(PI);\r
- if (!doMergeWithPrevious) MBBI = NI;\r
- }\r
-\r
- return Offset;\r
-}\r
-\r
-void\r
-X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator MBBI,\r
- DebugLoc DL) const {\r
- MachineFunction &MF = *MBB.getParent();\r
- MachineFrameInfo *MFI = MF.getFrameInfo();\r
- MachineModuleInfo &MMI = MF.getMMI();\r
- const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
-\r
- // Add callee saved registers to move list.\r
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();\r
- if (CSI.empty()) return;\r
-\r
- // Calculate offsets.\r
- for (std::vector<CalleeSavedInfo>::const_iterator\r
- I = CSI.begin(), E = CSI.end(); I != E; ++I) {\r
- int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());\r
- unsigned Reg = I->getReg();\r
-\r
- unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);\r
- unsigned CFIIndex =\r
- MMI.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg,\r
- Offset));\r
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))\r
- .addCFIIndex(CFIIndex);\r
- }\r
-}\r
-\r
-/// usesTheStack - This function checks if any of the users of EFLAGS\r
-/// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has\r
-/// to use the stack, and if we don't adjust the stack we clobber the first\r
-/// frame index.\r
-/// See X86InstrInfo::copyPhysReg.\r
-static bool usesTheStack(const MachineFunction &MF) {\r
- const MachineRegisterInfo &MRI = MF.getRegInfo();\r
-\r
- for (MachineRegisterInfo::reg_instr_iterator\r
- ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();\r
- ri != re; ++ri)\r
- if (ri->isCopy())\r
- return true;\r
-\r
- return false;\r
-}\r
-\r
-void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,\r
- MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator MBBI,\r
- DebugLoc DL) {\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
- bool Is64Bit = STI.is64Bit();\r
- bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;\r
-\r
- unsigned CallOp;\r
- if (Is64Bit)\r
- CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;\r
- else\r
- CallOp = X86::CALLpcrel32;\r
-\r
- const char *Symbol;\r
- if (Is64Bit) {\r
- if (STI.isTargetCygMing()) {\r
- Symbol = "___chkstk_ms";\r
- } else {\r
- Symbol = "__chkstk";\r
- }\r
- } else if (STI.isTargetCygMing())\r
- Symbol = "_alloca";\r
- else\r
- Symbol = "_chkstk";\r
-\r
- MachineInstrBuilder CI;\r
-\r
- // All current stack probes take AX and SP as input, clobber flags, and\r
- // preserve all registers. x86_64 probes leave RSP unmodified.\r
- if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {\r
- // For the large code model, we have to call through a register. Use R11,\r
- // as it is scratch in all supported calling conventions.\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)\r
- .addExternalSymbol(Symbol);\r
- CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);\r
- } else {\r
- CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);\r
- }\r
-\r
- unsigned AX = Is64Bit ? X86::RAX : X86::EAX;\r
- unsigned SP = Is64Bit ? X86::RSP : X86::ESP;\r
- CI.addReg(AX, RegState::Implicit)\r
- .addReg(SP, RegState::Implicit)\r
- .addReg(AX, RegState::Define | RegState::Implicit)\r
- .addReg(SP, RegState::Define | RegState::Implicit)\r
- .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);\r
-\r
- if (Is64Bit) {\r
- // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp\r
-    // themselves. They also do not clobber %rax, so we can reuse it when\r
- // adjusting %rsp.\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)\r
- .addReg(X86::RSP)\r
- .addReg(X86::RAX);\r
- }\r
-}\r
-\r
-/// emitPrologue - Push callee-saved registers onto the stack, which\r
-/// automatically adjusts the stack pointer. Adjust the stack pointer to allocate\r
-/// space for local variables. Also emit labels used by the exception handler to\r
-/// generate the exception handling frames.\r
-\r
-/*\r
- Here's a gist of what gets emitted:\r
-\r
- ; Establish frame pointer, if needed\r
- [if needs FP]\r
- push %rbp\r
- .cfi_def_cfa_offset 16\r
- .cfi_offset %rbp, -16\r
- .seh_pushreg %rbp\r
- mov %rsp, %rbp\r
- .cfi_def_cfa_register %rbp\r
-\r
- ; Spill general-purpose registers\r
- [for all callee-saved GPRs]\r
- pushq %<reg>\r
- [if not needs FP]\r
- .cfi_def_cfa_offset (offset from RETADDR)\r
- .seh_pushreg %<reg>\r
-\r
- ; If the required stack alignment > default stack alignment\r
- ; rsp needs to be re-aligned. This creates a "re-alignment gap"\r
- ; of unknown size in the stack frame.\r
- [if stack needs re-alignment]\r
- and $MASK, %rsp\r
-\r
- ; Allocate space for locals\r
- [if target is Windows and allocated space > 4096 bytes]\r
- ; Windows needs special care for allocations larger\r
- ; than one page.\r
- mov $NNN, %rax\r
- call ___chkstk_ms/___chkstk\r
- sub %rax, %rsp\r
- [else]\r
- sub $NNN, %rsp\r
-\r
- [if needs FP]\r
- .seh_stackalloc (size of XMM spill slots)\r
- .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots\r
- [else]\r
- .seh_stackalloc NNN\r
-\r
- ; Spill XMMs\r
- ; Note that while only the Windows 64 ABI specifies XMMs as callee-preserved,\r
- ; they may get spilled on any platform, if the current function\r
- ; calls @llvm.eh.unwind.init\r
- [if needs FP]\r
- [for all callee-saved XMM registers]\r
- movaps %<xmm reg>, -MMM(%rbp)\r
- [for all callee-saved XMM registers]\r
- .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)\r
- ; i.e. the offset relative to (%rbp - SEHFrameOffset)\r
- [else]\r
- [for all callee-saved XMM registers]\r
- movaps %<xmm reg>, KKK(%rsp)\r
- [for all callee-saved XMM registers]\r
- .seh_savexmm %<xmm reg>, KKK\r
-\r
- .seh_endprologue\r
-\r
- [if needs base pointer]\r
- mov %rsp, %rbx\r
- [if needs to restore base pointer]\r
- mov %rsp, -MMM(%rbp)\r
-\r
- ; Emit CFI info\r
- [if needs FP]\r
- [for all callee-saved registers]\r
- .cfi_offset %<reg>, (offset from %rbp)\r
- [else]\r
- .cfi_def_cfa_offset (offset from RETADDR)\r
- [for all callee-saved registers]\r
- .cfi_offset %<reg>, (offset from %rsp)\r
-\r
- Notes:\r
- - .seh directives are emitted only for Windows 64 ABI\r
- - .cfi directives are emitted for all other ABIs\r
- - for 32-bit code, substitute %e?? registers for %r??\r
-*/\r
-\r
-void X86FrameLowering::emitPrologue(MachineFunction &MF) const {\r
- MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.\r
- MachineBasicBlock::iterator MBBI = MBB.begin();\r
- MachineFrameInfo *MFI = MF.getFrameInfo();\r
- const Function *Fn = MF.getFunction();\r
- const X86RegisterInfo *RegInfo =\r
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
- MachineModuleInfo &MMI = MF.getMMI();\r
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();\r
- uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.\r
- uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.\r
- bool HasFP = hasFP(MF);\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
- bool Is64Bit = STI.is64Bit();\r
- // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.\r
- const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();\r
- bool IsWin64 = STI.isTargetWin64();\r
- // Not necessarily synonymous with IsWin64.\r
- bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();\r
- bool NeedsWinEH = IsWinEH && Fn->needsUnwindTableEntry();\r
- bool NeedsDwarfCFI =\r
- !IsWinEH && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());\r
- bool UseLEA = STI.useLeaForSP();\r
- unsigned StackAlign = getStackAlignment();\r
- unsigned SlotSize = RegInfo->getSlotSize();\r
- unsigned FramePtr = RegInfo->getFrameRegister(MF);\r
- const unsigned MachineFramePtr = STI.isTarget64BitILP32() ?\r
- getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;\r
- unsigned StackPtr = RegInfo->getStackRegister();\r
- unsigned BasePtr = RegInfo->getBaseRegister();\r
- DebugLoc DL;\r
-\r
- // If we're forcing a stack realignment we can't rely on just the frame\r
- // info, we need to know the ABI stack alignment as well in case we\r
- // have a call out. Otherwise just make sure we have some alignment - we'll\r
- // go with the minimum SlotSize.\r
- if (ForceStackAlign) {\r
- if (MFI->hasCalls())\r
- MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;\r
- else if (MaxAlign < SlotSize)\r
- MaxAlign = SlotSize;\r
- }\r
-\r
- // Add RETADDR move area to callee saved frame size.\r
- int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();\r
- if (TailCallReturnAddrDelta < 0)\r
- X86FI->setCalleeSavedFrameSize(\r
- X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);\r
-\r
- bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());\r
-\r
-  // The default stack probe size is 4096 if the function has no\r
-  // "stack-probe-size" attribute.\r
- unsigned StackProbeSize = 4096;\r
- if (Fn->hasFnAttribute("stack-probe-size"))\r
- Fn->getFnAttribute("stack-probe-size")\r
- .getValueAsString()\r
- .getAsInteger(0, StackProbeSize);\r
-\r
- // If this is x86-64 and the Red Zone is not disabled, if we are a leaf\r
- // function, and use up to 128 bytes of stack space, don't have a frame\r
- // pointer, calls, or dynamic alloca then we do not need to adjust the\r
- // stack pointer (we fit in the Red Zone). We also check that we don't\r
- // push and pop from the stack.\r
- if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,\r
- Attribute::NoRedZone) &&\r
- !RegInfo->needsStackRealignment(MF) &&\r
- !MFI->hasVarSizedObjects() && // No dynamic alloca.\r
- !MFI->adjustsStack() && // No calls.\r
- !IsWin64 && // Win64 has no Red Zone\r
- !usesTheStack(MF) && // Don't push and pop.\r
- !MF.shouldSplitStack()) { // Regular stack\r
- uint64_t MinSize = X86FI->getCalleeSavedFrameSize();\r
- if (HasFP) MinSize += SlotSize;\r
- StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);\r
- MFI->setStackSize(StackSize);\r
- }\r
-\r
- // Insert stack pointer adjustment for later moving of return addr. Only\r
- // applies to tail call optimized functions where the callee argument stack\r
- // size is bigger than the callers.\r
- if (TailCallReturnAddrDelta < 0) {\r
- MachineInstr *MI =\r
- BuildMI(MBB, MBBI, DL,\r
- TII.get(getSUBriOpcode(Uses64BitFramePtr, -TailCallReturnAddrDelta)),\r
- StackPtr)\r
- .addReg(StackPtr)\r
- .addImm(-TailCallReturnAddrDelta)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.\r
- }\r
-\r
- // Mapping for machine moves:\r
- //\r
- // DST: VirtualFP AND\r
- // SRC: VirtualFP => DW_CFA_def_cfa_offset\r
- // ELSE => DW_CFA_def_cfa\r
- //\r
- // SRC: VirtualFP AND\r
- // DST: Register => DW_CFA_def_cfa_register\r
- //\r
- // ELSE\r
- // OFFSET < 0 => DW_CFA_offset_extended_sf\r
- // REG < 64 => DW_CFA_offset + Reg\r
- // ELSE => DW_CFA_offset_extended\r
-\r
- uint64_t NumBytes = 0;\r
- int stackGrowth = -SlotSize;\r
-\r
- if (HasFP) {\r
- // Calculate required stack adjustment.\r
- uint64_t FrameSize = StackSize - SlotSize;\r
- // If required, include space for extra hidden slot for stashing base pointer.\r
- if (X86FI->getRestoreBasePointer())\r
- FrameSize += SlotSize;\r
- if (RegInfo->needsStackRealignment(MF)) {\r
- // Callee-saved registers are pushed on stack before the stack\r
- // is realigned.\r
- FrameSize -= X86FI->getCalleeSavedFrameSize();\r
- NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;\r
- } else {\r
- NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();\r
- }\r
-\r
- // Get the offset of the stack slot for the EBP register, which is\r
- // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.\r
- // Update the frame offset adjustment.\r
- MFI->setOffsetAdjustment(-NumBytes);\r
-\r
- // Save EBP/RBP into the appropriate stack slot.\r
- BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))\r
- .addReg(MachineFramePtr, RegState::Kill)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
-\r
- if (NeedsDwarfCFI) {\r
- // Mark the place where EBP/RBP was saved.\r
- // Define the current CFA rule to use the provided offset.\r
- assert(StackSize);\r
- unsigned CFIIndex = MMI.addFrameInst(\r
- MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));\r
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))\r
- .addCFIIndex(CFIIndex);\r
-\r
- // Change the rule for the FramePtr to be an "offset" rule.\r
- unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);\r
- CFIIndex = MMI.addFrameInst(\r
- MCCFIInstruction::createOffset(nullptr,\r
- DwarfFramePtr, 2 * stackGrowth));\r
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))\r
- .addCFIIndex(CFIIndex);\r
- }\r
-\r
- if (NeedsWinEH) {\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))\r
- .addImm(FramePtr)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- }\r
-\r
- // Update EBP with the new base value.\r
- BuildMI(MBB, MBBI, DL,\r
- TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), FramePtr)\r
- .addReg(StackPtr)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
-\r
- if (NeedsDwarfCFI) {\r
- // Mark effective beginning of when frame pointer becomes valid.\r
- // Define the current CFA to use the EBP/RBP register.\r
- unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);\r
- unsigned CFIIndex = MMI.addFrameInst(\r
- MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));\r
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))\r
- .addCFIIndex(CFIIndex);\r
- }\r
-\r
- // Mark the FramePtr as live-in in every block.\r
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)\r
- I->addLiveIn(MachineFramePtr);\r
- } else {\r
- NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();\r
- }\r
-\r
- // Skip the callee-saved push instructions.\r
- bool PushedRegs = false;\r
- int StackOffset = 2 * stackGrowth;\r
-\r
- while (MBBI != MBB.end() &&\r
- (MBBI->getOpcode() == X86::PUSH32r ||\r
- MBBI->getOpcode() == X86::PUSH64r)) {\r
- PushedRegs = true;\r
- unsigned Reg = MBBI->getOperand(0).getReg();\r
- ++MBBI;\r
-\r
- if (!HasFP && NeedsDwarfCFI) {\r
- // Mark callee-saved push instruction.\r
- // Define the current CFA rule to use the provided offset.\r
- assert(StackSize);\r
- unsigned CFIIndex = MMI.addFrameInst(\r
- MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));\r
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))\r
- .addCFIIndex(CFIIndex);\r
- StackOffset += stackGrowth;\r
- }\r
-\r
- if (NeedsWinEH) {\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(\r
- MachineInstr::FrameSetup);\r
- }\r
- }\r
-\r
- // Realign stack after we pushed callee-saved registers (so that we'll be\r
- // able to calculate their offsets from the frame pointer).\r
- if (RegInfo->needsStackRealignment(MF)) {\r
- assert(HasFP && "There should be a frame pointer if stack is realigned.");\r
- uint64_t Val = -MaxAlign;\r
- MachineInstr *MI =\r
- BuildMI(MBB, MBBI, DL,\r
- TII.get(getANDriOpcode(Uses64BitFramePtr, Val)), StackPtr)\r
- .addReg(StackPtr)\r
- .addImm(Val)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
-\r
- // The EFLAGS implicit def is dead.\r
- MI->getOperand(3).setIsDead();\r
- }\r
-\r
-  // If there is a SUB32ri of ESP immediately before this instruction, merge\r
-  // the two. This can be the case when tail call elimination is enabled and\r
-  // the callee has more arguments than the caller.\r
- NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);\r
-\r
- // If there is an ADD32ri or SUB32ri of ESP immediately after this\r
- // instruction, merge the two instructions.\r
- mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);\r
-\r
- // Adjust stack pointer: ESP -= numbytes.\r
-\r
- // Windows and cygwin/mingw require a prologue helper routine when allocating\r
- // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw\r
- // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the\r
- // stack and adjust the stack pointer in one go. The 64-bit version of\r
- // __chkstk is only responsible for probing the stack. The 64-bit prologue is\r
- // responsible for adjusting the stack pointer. Touching the stack at 4K\r
- // increments is necessary to ensure that the guard pages used by the OS\r
- // virtual memory manager are allocated in correct sequence.\r
- if (NumBytes >= StackProbeSize && UseStackProbe) {\r
- // Check whether EAX is livein for this function.\r
- bool isEAXAlive = isEAXLiveIn(MF);\r
-\r
- if (isEAXAlive) {\r
-      // EAX is live-in here; that is only expected in 32-bit mode, so assert\r
-      // that we are not targeting x64.\r
- assert(!Is64Bit && "EAX is livein in x64 case!");\r
-\r
- // Save EAX\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))\r
- .addReg(X86::EAX, RegState::Kill)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- }\r
-\r
- if (Is64Bit) {\r
- // Handle the 64-bit Windows ABI case where we need to call __chkstk.\r
- // Function prologue is responsible for adjusting the stack pointer.\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)\r
- .addImm(NumBytes)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- } else {\r
- // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.\r
- // We'll also use 4 already allocated bytes for EAX.\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)\r
- .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- }\r
-\r
- // Save a pointer to the MI where we set AX.\r
- MachineBasicBlock::iterator SetRAX = MBBI;\r
- --SetRAX;\r
-\r
- // Call __chkstk, __chkstk_ms, or __alloca.\r
- emitStackProbeCall(MF, MBB, MBBI, DL);\r
-\r
- // Apply the frame setup flag to all inserted instrs.\r
- for (; SetRAX != MBBI; ++SetRAX)\r
- SetRAX->setFlag(MachineInstr::FrameSetup);\r
-\r
- if (isEAXAlive) {\r
- // Restore EAX\r
- MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),\r
- X86::EAX),\r
- StackPtr, false, NumBytes - 4);\r
- MI->setFlag(MachineInstr::FrameSetup);\r
- MBB.insert(MBBI, MI);\r
- }\r
- } else if (NumBytes) {\r
- emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, Uses64BitFramePtr,\r
- UseLEA, TII, *RegInfo);\r
- }\r
-\r
- int SEHFrameOffset = 0;\r
- if (NeedsWinEH) {\r
- if (HasFP) {\r
- // We need to set frame base offset low enough such that all saved\r
- // register offsets would be positive relative to it, but we can't\r
- // just use NumBytes, because .seh_setframe offset must be <=240.\r
- // So we pretend to have only allocated enough space to spill the\r
- // non-volatile registers.\r
-      // We don't care about the rest of the stack allocation, because the\r
-      // unwinder will restore SP to (BP - SEHFrameOffset).\r
- for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {\r
- int offset = MFI->getObjectOffset(Info.getFrameIdx());\r
- SEHFrameOffset = std::max(SEHFrameOffset, std::abs(offset));\r
- }\r
-    SEHFrameOffset += SEHFrameOffset % 16; // ensure alignment\r
-\r
- // This only needs to account for XMM spill slots, GPR slots\r
- // are covered by the .seh_pushreg's emitted above.\r
- unsigned Size = SEHFrameOffset - X86FI->getCalleeSavedFrameSize();\r
- if (Size) {\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))\r
- .addImm(Size)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- }\r
-\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))\r
- .addImm(FramePtr)\r
- .addImm(SEHFrameOffset)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- } else {\r
- // SP will be the base register for restoring XMMs\r
- if (NumBytes) {\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))\r
- .addImm(NumBytes)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- }\r
- }\r
- }\r
-\r
- // Skip the rest of register spilling code\r
- while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))\r
- ++MBBI;\r
-\r
- // Emit SEH info for non-GPRs\r
- if (NeedsWinEH) {\r
- for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {\r
- unsigned Reg = Info.getReg();\r
- if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))\r
- continue;\r
- assert(X86::FR64RegClass.contains(Reg) && "Unexpected register class");\r
-\r
- int Offset = getFrameIndexOffset(MF, Info.getFrameIdx());\r
- Offset += SEHFrameOffset;\r
-\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))\r
- .addImm(Reg)\r
- .addImm(Offset)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- }\r
-\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- }\r
-\r
- // If we need a base pointer, set it up here. It's whatever the value\r
- // of the stack pointer is at this point. Any variable size objects\r
- // will be allocated after this, so we can still use the base pointer\r
- // to reference locals.\r
- if (RegInfo->hasBasePointer(MF)) {\r
- // Update the base pointer with the current stack pointer.\r
- unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;\r
- BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)\r
- .addReg(StackPtr)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- if (X86FI->getRestoreBasePointer()) {\r
- // Stash value of base pointer. Saving RSP instead of EBP shortens dependence chain.\r
- unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;\r
- addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),\r
- FramePtr, true, X86FI->getRestoreBasePointerOffset())\r
- .addReg(StackPtr)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- }\r
- }\r
-\r
- if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {\r
- // Mark end of stack pointer adjustment.\r
- if (!HasFP && NumBytes) {\r
- // Define the current CFA rule to use the provided offset.\r
- assert(StackSize);\r
- unsigned CFIIndex = MMI.addFrameInst(\r
- MCCFIInstruction::createDefCfaOffset(nullptr,\r
- -StackSize + stackGrowth));\r
-\r
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))\r
- .addCFIIndex(CFIIndex);\r
- }\r
-\r
- // Emit DWARF info specifying the offsets of the callee-saved registers.\r
- if (PushedRegs)\r
- emitCalleeSavedFrameMoves(MBB, MBBI, DL);\r
- }\r
-}\r
-\r
-void X86FrameLowering::emitEpilogue(MachineFunction &MF,\r
- MachineBasicBlock &MBB) const {\r
- const MachineFrameInfo *MFI = MF.getFrameInfo();\r
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();\r
- const X86RegisterInfo *RegInfo =\r
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
- MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();\r
- assert(MBBI != MBB.end() && "Returning block has no instructions");\r
- unsigned RetOpcode = MBBI->getOpcode();\r
- DebugLoc DL = MBBI->getDebugLoc();\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
- bool Is64Bit = STI.is64Bit();\r
- // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.\r
- const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();\r
- const bool Is64BitILP32 = STI.isTarget64BitILP32();\r
- bool UseLEA = STI.useLeaForSP();\r
- unsigned StackAlign = getStackAlignment();\r
- unsigned SlotSize = RegInfo->getSlotSize();\r
- unsigned FramePtr = RegInfo->getFrameRegister(MF);\r
- unsigned MachineFramePtr = Is64BitILP32 ?\r
- getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;\r
- unsigned StackPtr = RegInfo->getStackRegister();\r
-\r
- bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();\r
- bool NeedsWinEH = IsWinEH && MF.getFunction()->needsUnwindTableEntry();\r
-\r
- switch (RetOpcode) {\r
- default:\r
- llvm_unreachable("Can only insert epilog into returning blocks");\r
- case X86::RETQ:\r
- case X86::RETL:\r
- case X86::RETIL:\r
- case X86::RETIQ:\r
- case X86::TCRETURNdi:\r
- case X86::TCRETURNri:\r
- case X86::TCRETURNmi:\r
- case X86::TCRETURNdi64:\r
- case X86::TCRETURNri64:\r
- case X86::TCRETURNmi64:\r
- case X86::EH_RETURN:\r
- case X86::EH_RETURN64:\r
- break; // These are ok\r
- }\r
-\r
- // Get the number of bytes to allocate from the FrameInfo.\r
- uint64_t StackSize = MFI->getStackSize();\r
- uint64_t MaxAlign = MFI->getMaxAlignment();\r
- unsigned CSSize = X86FI->getCalleeSavedFrameSize();\r
- uint64_t NumBytes = 0;\r
-\r
- // If we're forcing a stack realignment we can't rely on just the frame\r
- // info, we need to know the ABI stack alignment as well in case we\r
- // have a call out. Otherwise just make sure we have some alignment - we'll\r
- // go with the minimum.\r
- if (ForceStackAlign) {\r
- if (MFI->hasCalls())\r
- MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;\r
- else\r
- MaxAlign = MaxAlign ? MaxAlign : 4;\r
- }\r
-\r
- if (hasFP(MF)) {\r
- // Calculate required stack adjustment.\r
- uint64_t FrameSize = StackSize - SlotSize;\r
- if (RegInfo->needsStackRealignment(MF)) {\r
- // Callee-saved registers were pushed on stack before the stack\r
- // was realigned.\r
- FrameSize -= CSSize;\r
- NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;\r
- } else {\r
- NumBytes = FrameSize - CSSize;\r
- }\r
-\r
- // Pop EBP.\r
- BuildMI(MBB, MBBI, DL,\r
- TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr);\r
- } else {\r
- NumBytes = StackSize - CSSize;\r
- }\r
-\r
- // Skip the callee-saved pop instructions.\r
- while (MBBI != MBB.begin()) {\r
- MachineBasicBlock::iterator PI = std::prev(MBBI);\r
- unsigned Opc = PI->getOpcode();\r
-\r
- if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&\r
- !PI->isTerminator())\r
- break;\r
-\r
- --MBBI;\r
- }\r
- MachineBasicBlock::iterator FirstCSPop = MBBI;\r
-\r
- DL = MBBI->getDebugLoc();\r
-\r
- // If there is an ADD32ri or SUB32ri of ESP immediately before this\r
- // instruction, merge the two instructions.\r
- if (NumBytes || MFI->hasVarSizedObjects())\r
- mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);\r
-\r
-  // If dynamic alloca is used, then reset esp to point to the last callee-saved\r
-  // slot before popping them off! The same applies when the stack was\r
-  // realigned.\r
- if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {\r
- if (RegInfo->needsStackRealignment(MF))\r
- MBBI = FirstCSPop;\r
- if (CSSize != 0) {\r
- unsigned Opc = getLEArOpcode(Uses64BitFramePtr);\r
- addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),\r
- FramePtr, false, -CSSize);\r
- --MBBI;\r
- } else {\r
- unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);\r
- BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)\r
- .addReg(FramePtr);\r
- --MBBI;\r
- }\r
- } else if (NumBytes) {\r
- // Adjust stack pointer back: ESP += numbytes.\r
- emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, Uses64BitFramePtr, UseLEA,\r
- TII, *RegInfo);\r
- --MBBI;\r
- }\r
-\r
- // Windows unwinder will not invoke function's exception handler if IP is\r
- // either in prologue or in epilogue. This behavior causes a problem when a\r
- // call immediately precedes an epilogue, because the return address points\r
- // into the epilogue. To cope with that, we insert an epilogue marker here,\r
- // then replace it with a 'nop' if it ends up immediately after a CALL in the\r
- // final emitted code.\r
- if (NeedsWinEH)\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));\r
-\r
- // We're returning from function via eh_return.\r
- if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {\r
- MBBI = MBB.getLastNonDebugInstr();\r
- MachineOperand &DestAddr = MBBI->getOperand(0);\r
- assert(DestAddr.isReg() && "Offset should be in register!");\r
- BuildMI(MBB, MBBI, DL,\r
- TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),\r
- StackPtr).addReg(DestAddr.getReg());\r
- } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||\r
- RetOpcode == X86::TCRETURNmi ||\r
- RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||\r
- RetOpcode == X86::TCRETURNmi64) {\r
- bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;\r
- // Tail call return: adjust the stack pointer and jump to callee.\r
- MBBI = MBB.getLastNonDebugInstr();\r
- MachineOperand &JumpTarget = MBBI->getOperand(0);\r
- MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);\r
- assert(StackAdjust.isImm() && "Expecting immediate value.");\r
-\r
- // Adjust stack pointer.\r
- int StackAdj = StackAdjust.getImm();\r
- int MaxTCDelta = X86FI->getTCReturnAddrDelta();\r
- int Offset = 0;\r
- assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");\r
-\r
-    // Incorporate the retaddr area.\r
- Offset = StackAdj-MaxTCDelta;\r
- assert(Offset >= 0 && "Offset should never be negative");\r
-\r
- if (Offset) {\r
- // Check for possible merge with preceding ADD instruction.\r
- Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);\r
- emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, Uses64BitFramePtr,\r
- UseLEA, TII, *RegInfo);\r
- }\r
-\r
- // Jump to label or value in register.\r
- bool IsWin64 = STI.isTargetWin64();\r
- if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {\r
- unsigned Op = (RetOpcode == X86::TCRETURNdi)\r
- ? X86::TAILJMPd\r
- : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);\r
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));\r
- if (JumpTarget.isGlobal())\r
- MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),\r
- JumpTarget.getTargetFlags());\r
- else {\r
- assert(JumpTarget.isSymbol());\r
- MIB.addExternalSymbol(JumpTarget.getSymbolName(),\r
- JumpTarget.getTargetFlags());\r
- }\r
- } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {\r
- unsigned Op = (RetOpcode == X86::TCRETURNmi)\r
- ? X86::TAILJMPm\r
- : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);\r
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));\r
- for (unsigned i = 0; i != 5; ++i)\r
- MIB.addOperand(MBBI->getOperand(i));\r
- } else if (RetOpcode == X86::TCRETURNri64) {\r
- BuildMI(MBB, MBBI, DL,\r
- TII.get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))\r
- .addReg(JumpTarget.getReg(), RegState::Kill);\r
- } else {\r
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).\r
- addReg(JumpTarget.getReg(), RegState::Kill);\r
- }\r
-\r
- MachineInstr *NewMI = std::prev(MBBI);\r
- NewMI->copyImplicitOps(MF, MBBI);\r
-\r
- // Delete the pseudo instruction TCRETURN.\r
- MBB.erase(MBBI);\r
- } else if ((RetOpcode == X86::RETQ || RetOpcode == X86::RETL ||\r
- RetOpcode == X86::RETIQ || RetOpcode == X86::RETIL) &&\r
- (X86FI->getTCReturnAddrDelta() < 0)) {\r
- // Add the return addr area delta back since we are not tail calling.\r
- int delta = -1*X86FI->getTCReturnAddrDelta();\r
- MBBI = MBB.getLastNonDebugInstr();\r
-\r
- // Check for possible merge with preceding ADD instruction.\r
- delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);\r
- emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, Uses64BitFramePtr, UseLEA, TII,\r
- *RegInfo);\r
- }\r
-}\r
-\r
-int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,\r
- int FI) const {\r
- const X86RegisterInfo *RegInfo =\r
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());\r
- const MachineFrameInfo *MFI = MF.getFrameInfo();\r
- int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();\r
- uint64_t StackSize = MFI->getStackSize();\r
-\r
- if (RegInfo->hasBasePointer(MF)) {\r
- assert (hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");\r
- if (FI < 0) {\r
- // Skip the saved EBP.\r
- return Offset + RegInfo->getSlotSize();\r
- } else {\r
- assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);\r
- return Offset + StackSize;\r
- }\r
- } else if (RegInfo->needsStackRealignment(MF)) {\r
- if (FI < 0) {\r
- // Skip the saved EBP.\r
- return Offset + RegInfo->getSlotSize();\r
- } else {\r
- assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);\r
- return Offset + StackSize;\r
- }\r
- // FIXME: Support tail calls\r
- } else {\r
- if (!hasFP(MF))\r
- return Offset + StackSize;\r
-\r
- // Skip the saved EBP.\r
- Offset += RegInfo->getSlotSize();\r
-\r
- // Skip the RETADDR move area\r
- const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();\r
- int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();\r
- if (TailCallReturnAddrDelta < 0)\r
- Offset -= TailCallReturnAddrDelta;\r
- }\r
-\r
- return Offset;\r
-}\r
-\r
-int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,\r
- unsigned &FrameReg) const {\r
- const X86RegisterInfo *RegInfo =\r
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());\r
- // We can't calculate offset from frame pointer if the stack is realigned,\r
- // so enforce usage of stack/base pointer. The base pointer is used when we\r
- // have dynamic allocas in addition to dynamic realignment.\r
- if (RegInfo->hasBasePointer(MF))\r
- FrameReg = RegInfo->getBaseRegister();\r
- else if (RegInfo->needsStackRealignment(MF))\r
- FrameReg = RegInfo->getStackRegister();\r
- else\r
- FrameReg = RegInfo->getFrameRegister(MF);\r
- return getFrameIndexOffset(MF, FI);\r
-}\r
-\r
-// Simplified from getFrameIndexOffset keeping only StackPointer cases\r
-int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF, int FI) const {\r
- const MachineFrameInfo *MFI = MF.getFrameInfo();\r
- // Does not include any dynamic realign.\r
- const uint64_t StackSize = MFI->getStackSize();\r
- {\r
-#ifndef NDEBUG\r
- const X86RegisterInfo *RegInfo =\r
- static_cast<const X86RegisterInfo*>(MF.getSubtarget().getRegisterInfo());\r
- // Note: LLVM arranges the stack as:\r
- // Args > Saved RetPC (<--FP) > CSRs > dynamic alignment (<--BP)\r
- // > "Stack Slots" (<--SP)\r
- // We can always address StackSlots from RSP. We can usually (unless\r
- // needsStackRealignment) address CSRs from RSP, but sometimes need to\r
- // address them from RBP. FixedObjects can be placed anywhere in the stack\r
- // frame depending on their specific requirements (i.e. we can actually\r
- // refer to arguments to the function which are stored in the *callers*\r
- // frame). As a result, THE RESULT OF THIS CALL IS MEANINGLESS FOR CSRs\r
- // AND FixedObjects IFF needsStackRealignment or hasVarSizedObject.\r
-\r
- assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");\r
-\r
- // We don't handle tail calls, and shouldn't be seeing them\r
- // either.\r
- int TailCallReturnAddrDelta =\r
- MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();\r
- assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");\r
-#endif\r
- }\r
-\r
- // This is how the math works out:\r
- //\r
- // %rsp grows (i.e. gets lower) left to right. Each box below is\r
- // one word (eight bytes). Obj0 is the stack slot we're trying to\r
- // get to.\r
- //\r
- // ----------------------------------\r
- // | BP | Obj0 | Obj1 | ... | ObjN |\r
- // ----------------------------------\r
- // ^ ^ ^ ^\r
- // A B C E\r
- //\r
- // A is the incoming stack pointer.\r
- // (B - A) is the local area offset (-8 for x86-64) [1]\r
- // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]\r
- //\r
- // |(E - B)| is the StackSize (absolute value, positive). For a\r
-  // stack that grows down, this works out to be (B - E). [3]\r
- //\r
- // E is also the value of %rsp after stack has been set up, and we\r
- // want (C - E) -- the value we can add to %rsp to get to Obj0. Now\r
- // (C - E) == (C - A) - (B - A) + (B - E)\r
- // { Using [1], [2] and [3] above }\r
- // == getObjectOffset - LocalAreaOffset + StackSize\r
- //\r
-\r
- // Get the Offset from the StackPointer\r
- int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();\r
-\r
- return Offset + StackSize;\r
-}\r
-// Simplified from getFrameIndexReference keeping only StackPointer cases\r
-int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,\r
- unsigned &FrameReg) const {\r
- const X86RegisterInfo *RegInfo =\r
- static_cast<const X86RegisterInfo*>(MF.getSubtarget().getRegisterInfo());\r
-\r
- assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");\r
-\r
- FrameReg = RegInfo->getStackRegister();\r
- return getFrameIndexOffsetFromSP(MF, FI);\r
-}\r
-\r
-bool X86FrameLowering::assignCalleeSavedSpillSlots(\r
- MachineFunction &MF, const TargetRegisterInfo *TRI,\r
- std::vector<CalleeSavedInfo> &CSI) const {\r
- MachineFrameInfo *MFI = MF.getFrameInfo();\r
- const X86RegisterInfo *RegInfo =\r
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());\r
- unsigned SlotSize = RegInfo->getSlotSize();\r
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();\r
-\r
- unsigned CalleeSavedFrameSize = 0;\r
- int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();\r
-\r
- if (hasFP(MF)) {\r
-    // emitPrologue always spills the frame register first.\r
- SpillSlotOffset -= SlotSize;\r
- MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);\r
-\r
- // Since emitPrologue and emitEpilogue will handle spilling and restoring of\r
-    // the frame register, we can delete it from the CSI list and not have\r
-    // to worry about avoiding it later.\r
- unsigned FPReg = RegInfo->getFrameRegister(MF);\r
- for (unsigned i = 0; i < CSI.size(); ++i) {\r
- if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {\r
- CSI.erase(CSI.begin() + i);\r
- break;\r
- }\r
- }\r
- }\r
-\r
-  // Assign slots for GPRs. This increases the frame size.\r
- for (unsigned i = CSI.size(); i != 0; --i) {\r
- unsigned Reg = CSI[i - 1].getReg();\r
-\r
- if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))\r
- continue;\r
-\r
- SpillSlotOffset -= SlotSize;\r
- CalleeSavedFrameSize += SlotSize;\r
-\r
- int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);\r
- CSI[i - 1].setFrameIdx(SlotIndex);\r
- }\r
-\r
- X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);\r
-\r
- // Assign slots for XMMs.\r
- for (unsigned i = CSI.size(); i != 0; --i) {\r
- unsigned Reg = CSI[i - 1].getReg();\r
- if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))\r
- continue;\r
-\r
- const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);\r
- // ensure alignment\r
- SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();\r
- // spill into slot\r
- SpillSlotOffset -= RC->getSize();\r
- int SlotIndex =\r
- MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);\r
- CSI[i - 1].setFrameIdx(SlotIndex);\r
- MFI->ensureMaxAlignment(RC->getAlignment());\r
- }\r
-\r
- return true;\r
-}\r
-\r
-bool X86FrameLowering::spillCalleeSavedRegisters(\r
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,\r
- const std::vector<CalleeSavedInfo> &CSI,\r
- const TargetRegisterInfo *TRI) const {\r
- DebugLoc DL = MBB.findDebugLoc(MI);\r
-\r
- MachineFunction &MF = *MBB.getParent();\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
-\r
- // Push GPRs. It increases frame size.\r
- unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;\r
- for (unsigned i = CSI.size(); i != 0; --i) {\r
- unsigned Reg = CSI[i - 1].getReg();\r
-\r
- if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))\r
- continue;\r
- // Add the callee-saved register as live-in. It's killed at the spill.\r
- MBB.addLiveIn(Reg);\r
-\r
- BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)\r
- .setMIFlag(MachineInstr::FrameSetup);\r
- }\r
-\r
-  // Spill XMM registers. X86 has no push/pop instructions for XMMs,\r
-  // so spill them to the stack frame instead.\r
- for (unsigned i = CSI.size(); i != 0; --i) {\r
- unsigned Reg = CSI[i-1].getReg();\r
- if (X86::GR64RegClass.contains(Reg) ||\r
- X86::GR32RegClass.contains(Reg))\r
- continue;\r
- // Add the callee-saved register as live-in. It's killed at the spill.\r
- MBB.addLiveIn(Reg);\r
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);\r
-\r
- TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,\r
- TRI);\r
- --MI;\r
- MI->setFlag(MachineInstr::FrameSetup);\r
- ++MI;\r
- }\r
-\r
- return true;\r
-}\r
-\r
-bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator MI,\r
- const std::vector<CalleeSavedInfo> &CSI,\r
- const TargetRegisterInfo *TRI) const {\r
- if (CSI.empty())\r
- return false;\r
-\r
- DebugLoc DL = MBB.findDebugLoc(MI);\r
-\r
- MachineFunction &MF = *MBB.getParent();\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
-\r
- // Reload XMMs from stack frame.\r
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {\r
- unsigned Reg = CSI[i].getReg();\r
- if (X86::GR64RegClass.contains(Reg) ||\r
- X86::GR32RegClass.contains(Reg))\r
- continue;\r
-\r
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);\r
- TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);\r
- }\r
-\r
- // POP GPRs.\r
- unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;\r
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {\r
- unsigned Reg = CSI[i].getReg();\r
- if (!X86::GR64RegClass.contains(Reg) &&\r
- !X86::GR32RegClass.contains(Reg))\r
- continue;\r
-\r
- BuildMI(MBB, MI, DL, TII.get(Opc), Reg);\r
- }\r
- return true;\r
-}\r
-\r
-void\r
-X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,\r
- RegScavenger *RS) const {\r
- MachineFrameInfo *MFI = MF.getFrameInfo();\r
- const X86RegisterInfo *RegInfo =\r
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());\r
- unsigned SlotSize = RegInfo->getSlotSize();\r
-\r
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();\r
- int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();\r
-\r
- if (TailCallReturnAddrDelta < 0) {\r
- // create RETURNADDR area\r
- // arg\r
- // arg\r
- // RETADDR\r
- // { ...\r
- // RETADDR area\r
- // ...\r
- // }\r
- // [EBP]\r
- MFI->CreateFixedObject(-TailCallReturnAddrDelta,\r
- TailCallReturnAddrDelta - SlotSize, true);\r
- }\r
-\r
- // Spill the BasePtr if it's used.\r
- if (RegInfo->hasBasePointer(MF))\r
- MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());\r
-}\r
-\r
-static bool\r
-HasNestArgument(const MachineFunction *MF) {\r
- const Function *F = MF->getFunction();\r
- for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();\r
- I != E; I++) {\r
- if (I->hasNestAttr())\r
- return true;\r
- }\r
- return false;\r
-}\r
-\r
-/// GetScratchRegister - Get a temp register for performing work in the\r
-/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform\r
-/// and the properties of the function either one or two registers will be\r
-/// needed. Set primary to true for the first register, false for the second.\r
-static unsigned\r
-GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {\r
- CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();\r
-\r
- // Erlang stuff.\r
- if (CallingConvention == CallingConv::HiPE) {\r
- if (Is64Bit)\r
- return Primary ? X86::R14 : X86::R13;\r
- else\r
- return Primary ? X86::EBX : X86::EDI;\r
- }\r
-\r
- if (Is64Bit) {\r
- if (IsLP64)\r
- return Primary ? X86::R11 : X86::R12;\r
- else\r
- return Primary ? X86::R11D : X86::R12D;\r
- }\r
-\r
- bool IsNested = HasNestArgument(&MF);\r
-\r
- if (CallingConvention == CallingConv::X86_FastCall ||\r
- CallingConvention == CallingConv::Fast) {\r
- if (IsNested)\r
-      report_fatal_error("Segmented stacks do not support fastcall with "\r
-                         "nested functions.");\r
- return Primary ? X86::EAX : X86::ECX;\r
- }\r
- if (IsNested)\r
- return Primary ? X86::EDX : X86::EAX;\r
- return Primary ? X86::ECX : X86::EAX;\r
-}\r
-\r
-// The stack limit in the TCB is set to this many bytes above the actual stack\r
-// limit.\r
-static const uint64_t kSplitStackAvailable = 256;\r
-\r
-void\r
-X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {\r
- MachineBasicBlock &prologueMBB = MF.front();\r
- MachineFrameInfo *MFI = MF.getFrameInfo();\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
- uint64_t StackSize;\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
- bool Is64Bit = STI.is64Bit();\r
- const bool IsLP64 = STI.isTarget64BitLP64();\r
- unsigned TlsReg, TlsOffset;\r
- DebugLoc DL;\r
-\r
- unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);\r
- assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&\r
- "Scratch register is live-in");\r
-\r
- if (MF.getFunction()->isVarArg())\r
- report_fatal_error("Segmented stacks do not support vararg functions.");\r
- if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&\r
- !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&\r
- !STI.isTargetDragonFly())\r
- report_fatal_error("Segmented stacks not supported on this platform.");\r
-\r
-  // Eventually StackSize will be calculated by a link-time pass, which will\r
- // also decide whether checking code needs to be injected into this particular\r
- // prologue.\r
- StackSize = MFI->getStackSize();\r
-\r
- // Do not generate a prologue for functions with a stack of size zero\r
- if (StackSize == 0)\r
- return;\r
-\r
- MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();\r
- MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();\r
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();\r
- bool IsNested = false;\r
-\r
- // We need to know if the function has a nest argument only in 64 bit mode.\r
- if (Is64Bit)\r
- IsNested = HasNestArgument(&MF);\r
-\r
-  // The MOV R10, RAX needs to be in a different block, since the RET we emit\r
-  // in allocMBB needs to be the last (terminating) instruction.\r
-\r
- for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),\r
- e = prologueMBB.livein_end(); i != e; i++) {\r
- allocMBB->addLiveIn(*i);\r
- checkMBB->addLiveIn(*i);\r
- }\r
-\r
- if (IsNested)\r
- allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);\r
-\r
- MF.push_front(allocMBB);\r
- MF.push_front(checkMBB);\r
-\r
- // When the frame size is less than 256 we just compare the stack\r
- // boundary directly to the value of the stack pointer, per gcc.\r
- bool CompareStackPointer = StackSize < kSplitStackAvailable;\r
-\r
-  // Read the limit of the current stacklet from the stack_guard location.\r
- if (Is64Bit) {\r
- if (STI.isTargetLinux()) {\r
- TlsReg = X86::FS;\r
- TlsOffset = IsLP64 ? 0x70 : 0x40;\r
- } else if (STI.isTargetDarwin()) {\r
- TlsReg = X86::GS;\r
- TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.\r
- } else if (STI.isTargetWin64()) {\r
- TlsReg = X86::GS;\r
- TlsOffset = 0x28; // pvArbitrary, reserved for application use\r
- } else if (STI.isTargetFreeBSD()) {\r
- TlsReg = X86::FS;\r
- TlsOffset = 0x18;\r
- } else if (STI.isTargetDragonFly()) {\r
- TlsReg = X86::FS;\r
- TlsOffset = 0x20; // use tls_tcb.tcb_segstack\r
- } else {\r
- report_fatal_error("Segmented stacks not supported on this platform.");\r
- }\r
-\r
- if (CompareStackPointer)\r
- ScratchReg = IsLP64 ? X86::RSP : X86::ESP;\r
- else\r
- BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)\r
- .addImm(1).addReg(0).addImm(-StackSize).addReg(0);\r
-\r
- BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)\r
- .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);\r
- } else {\r
- if (STI.isTargetLinux()) {\r
- TlsReg = X86::GS;\r
- TlsOffset = 0x30;\r
- } else if (STI.isTargetDarwin()) {\r
- TlsReg = X86::GS;\r
- TlsOffset = 0x48 + 90*4;\r
- } else if (STI.isTargetWin32()) {\r
- TlsReg = X86::FS;\r
- TlsOffset = 0x14; // pvArbitrary, reserved for application use\r
- } else if (STI.isTargetDragonFly()) {\r
- TlsReg = X86::FS;\r
- TlsOffset = 0x10; // use tls_tcb.tcb_segstack\r
- } else if (STI.isTargetFreeBSD()) {\r
- report_fatal_error("Segmented stacks not supported on FreeBSD i386.");\r
- } else {\r
- report_fatal_error("Segmented stacks not supported on this platform.");\r
- }\r
-\r
- if (CompareStackPointer)\r
- ScratchReg = X86::ESP;\r
- else\r
- BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)\r
- .addImm(1).addReg(0).addImm(-StackSize).addReg(0);\r
-\r
- if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||\r
- STI.isTargetDragonFly()) {\r
- BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)\r
- .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);\r
- } else if (STI.isTargetDarwin()) {\r
-\r
- // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.\r
- unsigned ScratchReg2;\r
- bool SaveScratch2;\r
- if (CompareStackPointer) {\r
- // The primary scratch register is available for holding the TLS offset.\r
- ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);\r
- SaveScratch2 = false;\r
- } else {\r
- // Need to use a second register to hold the TLS offset\r
- ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);\r
-\r
- // Unfortunately, with fastcc the second scratch register may hold an\r
- // argument.\r
- SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);\r
- }\r
-\r
- // If Scratch2 is live-in then it needs to be saved.\r
- assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&\r
- "Scratch register is live-in and not saved");\r
-\r
- if (SaveScratch2)\r
- BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))\r
- .addReg(ScratchReg2, RegState::Kill);\r
-\r
- BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)\r
- .addImm(TlsOffset);\r
- BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))\r
- .addReg(ScratchReg)\r
- .addReg(ScratchReg2).addImm(1).addReg(0)\r
- .addImm(0)\r
- .addReg(TlsReg);\r
-\r
- if (SaveScratch2)\r
- BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);\r
- }\r
- }\r
-\r
- // This jump is taken if SP >= (Stacklet Limit + Stack Space required).\r
- // It jumps to normal execution of the function body.\r
- BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&prologueMBB);\r
-\r
- // On 32 bit we first push the arguments size and then the frame size. On 64\r
- // bit, we pass the stack frame size in r10 and the argument size in r11.\r
- if (Is64Bit) {\r
- // Functions with nested arguments use R10, so it needs to be saved across\r
-    // the call to __morestack.\r
-\r
- const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;\r
- const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;\r
- const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;\r
- const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;\r
- const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;\r
-\r
- if (IsNested)\r
- BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);\r
-\r
- BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)\r
- .addImm(StackSize);\r
- BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)\r
- .addImm(X86FI->getArgumentStackSize());\r
- MF.getRegInfo().setPhysRegUsed(Reg10);\r
- MF.getRegInfo().setPhysRegUsed(Reg11);\r
- } else {\r
- BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))\r
- .addImm(X86FI->getArgumentStackSize());\r
- BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))\r
- .addImm(StackSize);\r
- }\r
-\r
- // __morestack is in libgcc\r
- if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {\r
- // Under the large code model, we cannot assume that __morestack lives\r
- // within 2^31 bytes of the call site, so we cannot use pc-relative\r
- // addressing. We cannot perform the call via a temporary register,\r
- // as the rax register may be used to store the static chain, and all\r
- // other suitable registers may be either callee-save or used for\r
- // parameter passing. We cannot use the stack at this point either\r
- // because __morestack manipulates the stack directly.\r
- //\r
- // To avoid these issues, perform an indirect call via a read-only memory\r
- // location containing the address.\r
- //\r
- // This solution is not perfect, as it assumes that the .rodata section\r
- // is laid out within 2^31 bytes of each function body, but this seems\r
- // to be sufficient for JIT.\r
- BuildMI(allocMBB, DL, TII.get(X86::CALL64m))\r
- .addReg(X86::RIP)\r
- .addImm(0)\r
- .addReg(0)\r
- .addExternalSymbol("__morestack_addr")\r
- .addReg(0);\r
- MF.getMMI().setUsesMorestackAddr(true);\r
- } else {\r
- if (Is64Bit)\r
- BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))\r
- .addExternalSymbol("__morestack");\r
- else\r
- BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))\r
- .addExternalSymbol("__morestack");\r
- }\r
-\r
- if (IsNested)\r
- BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));\r
- else\r
- BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));\r
-\r
- allocMBB->addSuccessor(&prologueMBB);\r
-\r
- checkMBB->addSuccessor(allocMBB);\r
- checkMBB->addSuccessor(&prologueMBB);\r
-\r
-#ifdef XDEBUG\r
- MF.verify();\r
-#endif\r
-}\r
-\r
-/// Erlang programs may need a special prologue to handle the stack size they\r
-/// might need at runtime. That is because Erlang/OTP does not implement a C\r
-/// stack but uses a custom implementation of hybrid stack/heap architecture.\r
-/// (for more information see Eric Stenman's Ph.D. thesis:\r
-/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)\r
-///\r
-/// CheckStack:\r
-/// temp0 = sp - MaxStack\r
-/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart\r
-/// OldStart:\r
-/// ...\r
-/// IncStack:\r
-/// call inc_stack # doubles the stack space\r
-/// temp0 = sp - MaxStack\r
-/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart\r
-void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
- MachineFrameInfo *MFI = MF.getFrameInfo();\r
- const unsigned SlotSize =\r
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo())\r
- ->getSlotSize();\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
- const bool Is64Bit = STI.is64Bit();\r
- const bool IsLP64 = STI.isTarget64BitLP64();\r
- DebugLoc DL;\r
- // HiPE-specific values\r
- const unsigned HipeLeafWords = 24;\r
- const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;\r
- const unsigned Guaranteed = HipeLeafWords * SlotSize;\r
- unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?\r
- MF.getFunction()->arg_size() - CCRegisteredArgs : 0;\r
- unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;\r
-\r
- assert(STI.isTargetLinux() &&\r
- "HiPE prologue is only supported on Linux operating systems.");\r
-\r
- // Compute the largest caller's frame that is needed to fit the callees'\r
- // frames. This 'MaxStack' is computed from:\r
- //\r
- // a) the fixed frame size, which is the space needed for all spilled temps,\r
- // b) outgoing on-stack parameter areas, and\r
- // c) the minimum stack space this function needs to make available for the\r
- // functions it calls (a tunable ABI property).\r
- if (MFI->hasCalls()) {\r
- unsigned MoreStackForCalls = 0;\r
-\r
- for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();\r
- MBBI != MBBE; ++MBBI)\r
- for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();\r
- MI != ME; ++MI) {\r
- if (!MI->isCall())\r
- continue;\r
-\r
- // Get callee operand.\r
- const MachineOperand &MO = MI->getOperand(0);\r
-\r
- // Only take account of global function calls (no closures etc.).\r
- if (!MO.isGlobal())\r
- continue;\r
-\r
- const Function *F = dyn_cast<Function>(MO.getGlobal());\r
- if (!F)\r
- continue;\r
-\r
- // Do not update 'MaxStack' for primitive and built-in functions\r
-        // (encoded with names either starting with "erlang." or "bif_", or not\r
- // having a ".", such as a simple <Module>.<Function>.<Arity>, or an\r
- // "_", such as the BIF "suspend_0") as they are executed on another\r
- // stack.\r
- if (F->getName().find("erlang.") != StringRef::npos ||\r
- F->getName().find("bif_") != StringRef::npos ||\r
- F->getName().find_first_of("._") == StringRef::npos)\r
- continue;\r
-\r
- unsigned CalleeStkArity =\r
- F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;\r
- if (HipeLeafWords - 1 > CalleeStkArity)\r
- MoreStackForCalls = std::max(MoreStackForCalls,\r
- (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);\r
- }\r
- MaxStack += MoreStackForCalls;\r
- }\r
-\r
-  // If the stack frame needed is larger than the guaranteed amount, then\r
-  // runtime checks and calls to the "inc_stack_0" BIF should be inserted in\r
-  // the assembly prologue.\r
- if (MaxStack > Guaranteed) {\r
- MachineBasicBlock &prologueMBB = MF.front();\r
- MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();\r
- MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();\r
-\r
- for (MachineBasicBlock::livein_iterator I = prologueMBB.livein_begin(),\r
- E = prologueMBB.livein_end(); I != E; I++) {\r
- stackCheckMBB->addLiveIn(*I);\r
- incStackMBB->addLiveIn(*I);\r
- }\r
-\r
- MF.push_front(incStackMBB);\r
- MF.push_front(stackCheckMBB);\r
-\r
- unsigned ScratchReg, SPReg, PReg, SPLimitOffset;\r
- unsigned LEAop, CMPop, CALLop;\r
- if (Is64Bit) {\r
- SPReg = X86::RSP;\r
- PReg = X86::RBP;\r
- LEAop = X86::LEA64r;\r
- CMPop = X86::CMP64rm;\r
- CALLop = X86::CALL64pcrel32;\r
- SPLimitOffset = 0x90;\r
- } else {\r
- SPReg = X86::ESP;\r
- PReg = X86::EBP;\r
- LEAop = X86::LEA32r;\r
- CMPop = X86::CMP32rm;\r
- CALLop = X86::CALLpcrel32;\r
- SPLimitOffset = 0x4c;\r
- }\r
-\r
- ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);\r
- assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&\r
- "HiPE prologue scratch register is live-in");\r
-\r
- // Create new MBB for StackCheck:\r
- addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),\r
- SPReg, false, -MaxStack);\r
- // SPLimitOffset is in a fixed heap location (pointed by BP).\r
- addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))\r
- .addReg(ScratchReg), PReg, false, SPLimitOffset);\r
- BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&prologueMBB);\r
-\r
- // Create new MBB for IncStack:\r
- BuildMI(incStackMBB, DL, TII.get(CALLop)).\r
- addExternalSymbol("inc_stack_0");\r
- addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),\r
- SPReg, false, -MaxStack);\r
- addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))\r
- .addReg(ScratchReg), PReg, false, SPLimitOffset);\r
- BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);\r
-\r
- stackCheckMBB->addSuccessor(&prologueMBB, 99);\r
- stackCheckMBB->addSuccessor(incStackMBB, 1);\r
- incStackMBB->addSuccessor(&prologueMBB, 99);\r
- incStackMBB->addSuccessor(incStackMBB, 1);\r
- }\r
-#ifdef XDEBUG\r
- MF.verify();\r
-#endif\r
-}\r
-\r
-void X86FrameLowering::\r
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator I) const {\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
- const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(\r
- MF.getSubtarget().getRegisterInfo());\r
- unsigned StackPtr = RegInfo.getStackRegister();\r
- bool reserveCallFrame = hasReservedCallFrame(MF);\r
- int Opcode = I->getOpcode();\r
- bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
- bool IsLP64 = STI.isTarget64BitLP64();\r
- DebugLoc DL = I->getDebugLoc();\r
- uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;\r
- uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;\r
- I = MBB.erase(I);\r
-\r
- if (!reserveCallFrame) {\r
- // If the stack pointer can be changed after prologue, turn the\r
-    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the\r
-    // adjcallstackup instruction into 'add ESP, <amt>'.\r
- if (Amount == 0)\r
- return;\r
-\r
- // We need to keep the stack aligned properly. To do this, we round the\r
- // amount of space needed for the outgoing arguments up to the next\r
- // alignment boundary.\r
- unsigned StackAlign = MF.getTarget()\r
- .getSubtargetImpl()\r
- ->getFrameLowering()\r
- ->getStackAlignment();\r
- Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;\r
-\r
- MachineInstr *New = nullptr;\r
-\r
- // Factor out the amount that gets handled inside the sequence\r
- // (Pushes of argument for frame setup, callee pops for frame destroy)\r
- Amount -= InternalAmt;\r
-\r
- if (Amount) {\r
- if (Opcode == TII.getCallFrameSetupOpcode()) {\r
- New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)), StackPtr)\r
- .addReg(StackPtr).addImm(Amount);\r
- } else {\r
- assert(Opcode == TII.getCallFrameDestroyOpcode());\r
-\r
- unsigned Opc = getADDriOpcode(IsLP64, Amount);\r
- New = BuildMI(MF, DL, TII.get(Opc), StackPtr)\r
- .addReg(StackPtr).addImm(Amount);\r
- }\r
- }\r
-\r
- if (New) {\r
- // The EFLAGS implicit def is dead.\r
- New->getOperand(3).setIsDead();\r
-\r
- // Replace the pseudo instruction with a new instruction.\r
- MBB.insert(I, New);\r
- }\r
-\r
- return;\r
- }\r
-\r
- if (Opcode == TII.getCallFrameDestroyOpcode() && InternalAmt) {\r
- // If we are performing frame pointer elimination and if the callee pops\r
- // something off the stack pointer, add it back. We do this until we have\r
- // more advanced stack pointer tracking ability.\r
- unsigned Opc = getSUBriOpcode(IsLP64, InternalAmt);\r
- MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)\r
- .addReg(StackPtr).addImm(InternalAmt);\r
-\r
- // The EFLAGS implicit def is dead.\r
- New->getOperand(3).setIsDead();\r
-\r
- // We are not tracking the stack pointer adjustment by the callee, so make\r
-    // sure we restore the stack pointer immediately after the call; there may\r
- // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.\r
- MachineBasicBlock::iterator B = MBB.begin();\r
- while (I != B && !std::prev(I)->isCall())\r
- --I;\r
- MBB.insert(I, New);\r
- }\r
-}\r
-\r
+//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the X86 implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86FrameLowering.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
+#include "X86TargetMachine.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/Debug.h"
+#include <cstdlib>
+
+using namespace llvm;
+
+// FIXME: completely move here.
+extern cl::opt<bool> ForceStackAlign;
+
+bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ return !MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+/// hasFP - Return true if the specified function should have a dedicated frame
+/// pointer register. This is true if the function has variable sized allocas
+/// or if frame pointer elimination is disabled.
+bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineModuleInfo &MMI = MF.getMMI();
+ const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
+
+ return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ RegInfo->needsStackRealignment(MF) ||
+ MFI->hasVarSizedObjects() ||
+ MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
+ MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
+ MMI.callsUnwindInit() || MMI.callsEHReturn() ||
+ MFI->hasStackMap() || MFI->hasPatchPoint());
+}
+
+static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
+ if (IsLP64) {
+ if (isInt<8>(Imm))
+ return X86::SUB64ri8;
+ return X86::SUB64ri32;
+ } else {
+ if (isInt<8>(Imm))
+ return X86::SUB32ri8;
+ return X86::SUB32ri;
+ }
+}
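+
+// For example, getSUBriOpcode(/*IsLP64=*/true, 8) returns X86::SUB64ri8,
+// since 8 fits in a signed 8-bit immediate, while getSUBriOpcode(true, 4096)
+// returns X86::SUB64ri32; getADDriOpcode below follows the same pattern.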
+
+static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
+ if (IsLP64) {
+ if (isInt<8>(Imm))
+ return X86::ADD64ri8;
+ return X86::ADD64ri32;
+ } else {
+ if (isInt<8>(Imm))
+ return X86::ADD32ri8;
+ return X86::ADD32ri;
+ }
+}
+
+static unsigned getSUBrrOpcode(unsigned isLP64) {
+ return isLP64 ? X86::SUB64rr : X86::SUB32rr;
+}
+
+static unsigned getADDrrOpcode(unsigned isLP64) {
+ return isLP64 ? X86::ADD64rr : X86::ADD32rr;
+}
+
+static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
+ if (IsLP64) {
+ if (isInt<8>(Imm))
+ return X86::AND64ri8;
+ return X86::AND64ri32;
+ }
+ if (isInt<8>(Imm))
+ return X86::AND32ri8;
+ return X86::AND32ri;
+}
+
+static unsigned getPUSHiOpcode(bool IsLP64, MachineOperand MO) {
+ // We don't support LP64 for now.
+ assert(!IsLP64);
+
+ if (MO.isImm() && isInt<8>(MO.getImm()))
+ return X86::PUSH32i8;
+
+  return X86::PUSHi32;
+}
+
+static unsigned getLEArOpcode(unsigned IsLP64) {
+ return IsLP64 ? X86::LEA64r : X86::LEA32r;
+}
+
+/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
+/// when it reaches the "return" instruction. We can then pop a stack object
+/// to this register without worrying about clobbering it.
+static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ const TargetRegisterInfo &TRI,
+ bool Is64Bit) {
+ const MachineFunction *MF = MBB.getParent();
+ const Function *F = MF->getFunction();
+ if (!F || MF->getMMI().callsEHReturn())
+ return 0;
+
+ static const uint16_t CallerSavedRegs32Bit[] = {
+ X86::EAX, X86::EDX, X86::ECX, 0
+ };
+
+ static const uint16_t CallerSavedRegs64Bit[] = {
+ X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
+ X86::R8, X86::R9, X86::R10, X86::R11, 0
+ };
+
+ unsigned Opc = MBBI->getOpcode();
+ switch (Opc) {
+ default: return 0;
+ case X86::RETL:
+ case X86::RETQ:
+ case X86::RETIL:
+ case X86::RETIQ:
+ case X86::TCRETURNdi:
+ case X86::TCRETURNri:
+ case X86::TCRETURNmi:
+ case X86::TCRETURNdi64:
+ case X86::TCRETURNri64:
+ case X86::TCRETURNmi64:
+ case X86::EH_RETURN:
+ case X86::EH_RETURN64: {
+ SmallSet<uint16_t, 8> Uses;
+ for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MBBI->getOperand(i);
+ if (!MO.isReg() || MO.isDef())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
+ Uses.insert(*AI);
+ }
+
+ const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
+ for (; *CS; ++CS)
+ if (!Uses.count(*CS))
+ return *CS;
+ }
+ }
+
+ return 0;
+}
+
+static bool isEAXLiveIn(MachineFunction &MF) {
+ for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
+ EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
+ unsigned Reg = II->first;
+
+ if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
+ Reg == X86::AH || Reg == X86::AL)
+ return true;
+ }
+
+ return false;
+}
+
+/// emitSPUpdate - Emit a series of instructions to increment / decrement the
+/// stack pointer by a constant value.
+static
+void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ unsigned StackPtr, int64_t NumBytes,
+ bool Is64BitTarget, bool Is64BitStackPtr, bool UseLEA,
+ const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
+ bool isSub = NumBytes < 0;
+ uint64_t Offset = isSub ? -NumBytes : NumBytes;
+ unsigned Opc;
+ if (UseLEA)
+ Opc = getLEArOpcode(Is64BitStackPtr);
+ else
+ Opc = isSub
+ ? getSUBriOpcode(Is64BitStackPtr, Offset)
+ : getADDriOpcode(Is64BitStackPtr, Offset);
+
+ uint64_t Chunk = (1LL << 31) - 1;
+ DebugLoc DL = MBB.findDebugLoc(MBBI);
+
+ while (Offset) {
+ if (Offset > Chunk) {
+ // Rather than emit a long series of instructions for large offsets,
+ // load the offset into a register and do one sub/add
+ unsigned Reg = 0;
+
+ if (isSub && !isEAXLiveIn(*MBB.getParent()))
+ Reg = (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX);
+ else
+ Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);
+
+ if (Reg) {
+ Opc = Is64BitTarget ? X86::MOV64ri : X86::MOV32ri;
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
+ .addImm(Offset);
+ Opc = isSub
+ ? getSUBrrOpcode(Is64BitTarget)
+ : getADDrrOpcode(Is64BitTarget);
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr)
+ .addReg(Reg);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ Offset = 0;
+ continue;
+ }
+ }
+
+ uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
+ if (ThisVal == (Is64BitTarget ? 8 : 4)) {
+ // Use push / pop instead.
+ unsigned Reg = isSub
+ ? (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX)
+ : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);
+ if (Reg) {
+ Opc = isSub
+ ? (Is64BitTarget ? X86::PUSH64r : X86::PUSH32r)
+ : (Is64BitTarget ? X86::POP64r : X86::POP32r);
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
+ .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
+ if (isSub)
+ MI->setFlag(MachineInstr::FrameSetup);
+ Offset -= ThisVal;
+ continue;
+ }
+ }
+
+ MachineInstr *MI = nullptr;
+
+ if (UseLEA) {
+ MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
+ StackPtr, false, isSub ? -ThisVal : ThisVal);
+ } else {
+ MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr)
+ .addImm(ThisVal);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ }
+
+ if (isSub)
+ MI->setFlag(MachineInstr::FrameSetup);
+
+ Offset -= ThisVal;
+ }
+}
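+
+// An illustrative walk-through (hypothetical values): for NumBytes == -8 on a
+// 64-bit target, ThisVal == 8 matches the slot size, so a single
+// "pushq %rax" is emitted (the pushed value is irrelevant) instead of
+// "subq $8, %rsp"; for NumBytes == -40 this falls through to a single
+// "subq $40, %rsp" (or an LEA when UseLEA is set).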
+
+/// mergeSPUpdatesUp - If the instruction above MBBI adjusts the stack
+/// pointer, fold its adjustment into *NumBytes and erase it.
+static
+void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ unsigned StackPtr, uint64_t *NumBytes = nullptr) {
+ if (MBBI == MBB.begin()) return;
+
+ MachineBasicBlock::iterator PI = std::prev(MBBI);
+ unsigned Opc = PI->getOpcode();
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
+ Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes += PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes -= PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ }
+}
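+
+// Example (hypothetical): if the epilogue is about to emit
+// "addq $NumBytes, %rsp" and the instruction just above MBBI is
+// "addq $16, %rsp", mergeSPUpdatesUp erases that instruction and adds 16 to
+// *NumBytes, so one combined adjustment can be emitted instead.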
+
+/// mergeSPUpdatesDown - If the instruction below MBBI adjusts the stack
+/// pointer, fold its adjustment into *NumBytes and erase it.
+static
+void mergeSPUpdatesDown(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ unsigned StackPtr, uint64_t *NumBytes = nullptr) {
+ // FIXME: THIS ISN'T RUN!!!
+ return;
+
+ if (MBBI == MBB.end()) return;
+
+ MachineBasicBlock::iterator NI = std::next(MBBI);
+ if (NI == MBB.end()) return;
+
+ unsigned Opc = NI->getOpcode();
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
+ NI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes -= NI->getOperand(2).getImm();
+ MBB.erase(NI);
+ MBBI = NI;
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ NI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes += NI->getOperand(2).getImm();
+ MBB.erase(NI);
+ MBBI = NI;
+ }
+}
+
+/// mergeSPUpdates - Check the instruction before/after the passed
+/// instruction. If it is an ADD/SUB/LEA of the stack pointer, it is deleted
+/// and the stack adjustment is returned as a positive value for ADD/LEA and
+/// a negative value for SUB.
+static int mergeSPUpdates(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI, unsigned StackPtr,
+ bool doMergeWithPrevious) {
+ if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
+ (!doMergeWithPrevious && MBBI == MBB.end()))
+ return 0;
+
+ MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
+ MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
+ : std::next(MBBI);
+ unsigned Opc = PI->getOpcode();
+ int Offset = 0;
+
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
+ Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+ PI->getOperand(0).getReg() == StackPtr){
+ Offset += PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ if (!doMergeWithPrevious) MBBI = NI;
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ Offset -= PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ if (!doMergeWithPrevious) MBBI = NI;
+ }
+
+ return Offset;
+}
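+
+// Example (hypothetical): with doMergeWithPrevious == true and a
+// "subq $32, %rsp" directly above MBBI, mergeSPUpdates erases the SUB and
+// returns -32; an ADD or LEA of the stack pointer would yield +32 instead.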
+
+void
+X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) const {
+ MachineFunction &MF = *MBB.getParent();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+
+ // Add callee saved registers to move list.
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ if (CSI.empty()) return;
+
+ // Calculate offsets.
+ for (std::vector<CalleeSavedInfo>::const_iterator
+ I = CSI.begin(), E = CSI.end(); I != E; ++I) {
+ int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
+ unsigned Reg = I->getReg();
+
+ unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
+ unsigned CFIIndex =
+ MMI.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg,
+ Offset));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+}
+
+/// usesTheStack - Check whether any user of EFLAGS copies it. We know that
+/// the code that lowers a COPY of EFLAGS has to use the stack, and if we
+/// don't adjust the stack we clobber the first frame index.
+/// See X86InstrInfo::copyPhysReg.
+static bool usesTheStack(const MachineFunction &MF) {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ for (MachineRegisterInfo::reg_instr_iterator
+ ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
+ ri != re; ++ri)
+ if (ri->isCopy())
+ return true;
+
+ return false;
+}
+
+void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ bool Is64Bit = STI.is64Bit();
+ bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
+
+ unsigned CallOp;
+ if (Is64Bit)
+ CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
+ else
+ CallOp = X86::CALLpcrel32;
+
+ const char *Symbol;
+ if (Is64Bit) {
+ if (STI.isTargetCygMing()) {
+ Symbol = "___chkstk_ms";
+ } else {
+ Symbol = "__chkstk";
+ }
+ } else if (STI.isTargetCygMing())
+ Symbol = "_alloca";
+ else
+ Symbol = "_chkstk";
+
+ MachineInstrBuilder CI;
+
+ // All current stack probes take AX and SP as input, clobber flags, and
+ // preserve all registers. x86_64 probes leave RSP unmodified.
+ if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
+ // For the large code model, we have to call through a register. Use R11,
+ // as it is scratch in all supported calling conventions.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
+ .addExternalSymbol(Symbol);
+ CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
+ } else {
+ CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
+ }
+
+ unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
+ unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
+ CI.addReg(AX, RegState::Implicit)
+ .addReg(SP, RegState::Implicit)
+ .addReg(AX, RegState::Define | RegState::Implicit)
+ .addReg(SP, RegState::Define | RegState::Implicit)
+ .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
+
+ if (Is64Bit) {
+ // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
+    // themselves. They also do not clobber %rax, so we can reuse it when
+ // adjusting %rsp.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
+ .addReg(X86::RSP)
+ .addReg(X86::RAX);
+ }
+}
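+
+// For a 64-bit large-code-model function on cygwin/mingw, for instance, the
+// call above comes out roughly as:
+//   movabsq $___chkstk_ms, %r11
+//   callq   *%r11
+//   subq    %rax, %rsp    ; the probe leaves the actual allocation to us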
+
+/// emitPrologue - Push callee-saved registers onto the stack, which
+/// automatically adjusts the stack pointer. Adjust the stack pointer to allocate
+/// space for local variables. Also emit labels used by the exception handler to
+/// generate the exception handling frames.
+
+/*
+ Here's a gist of what gets emitted:
+
+ ; Establish frame pointer, if needed
+ [if needs FP]
+ push %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+     .seh_pushreg %rbp
+ mov %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+
+ ; Spill general-purpose registers
+ [for all callee-saved GPRs]
+ pushq %<reg>
+ [if not needs FP]
+ .cfi_def_cfa_offset (offset from RETADDR)
+ .seh_pushreg %<reg>
+
+ ; If the required stack alignment > default stack alignment
+ ; rsp needs to be re-aligned. This creates a "re-alignment gap"
+ ; of unknown size in the stack frame.
+ [if stack needs re-alignment]
+ and $MASK, %rsp
+
+ ; Allocate space for locals
+ [if target is Windows and allocated space > 4096 bytes]
+ ; Windows needs special care for allocations larger
+ ; than one page.
+ mov $NNN, %rax
+ call ___chkstk_ms/___chkstk
+ sub %rax, %rsp
+ [else]
+ sub $NNN, %rsp
+
+ [if needs FP]
+ .seh_stackalloc (size of XMM spill slots)
+ .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
+ [else]
+ .seh_stackalloc NNN
+
+ ; Spill XMMs
+  ; Note that, while only the Windows 64 ABI specifies XMMs as callee-preserved,
+ ; they may get spilled on any platform, if the current function
+ ; calls @llvm.eh.unwind.init
+ [if needs FP]
+ [for all callee-saved XMM registers]
+ movaps %<xmm reg>, -MMM(%rbp)
+ [for all callee-saved XMM registers]
+ .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
+ ; i.e. the offset relative to (%rbp - SEHFrameOffset)
+ [else]
+ [for all callee-saved XMM registers]
+ movaps %<xmm reg>, KKK(%rsp)
+ [for all callee-saved XMM registers]
+ .seh_savexmm %<xmm reg>, KKK
+
+ .seh_endprologue
+
+ [if needs base pointer]
+ mov %rsp, %rbx
+ [if needs to restore base pointer]
+ mov %rsp, -MMM(%rbp)
+
+ ; Emit CFI info
+ [if needs FP]
+ [for all callee-saved registers]
+ .cfi_offset %<reg>, (offset from %rbp)
+ [else]
+ .cfi_def_cfa_offset (offset from RETADDR)
+ [for all callee-saved registers]
+ .cfi_offset %<reg>, (offset from %rsp)
+
+ Notes:
+ - .seh directives are emitted only for Windows 64 ABI
+ - .cfi directives are emitted for all other ABIs
+ - for 32-bit code, substitute %e?? registers for %r??
+*/
+
+void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const Function *Fn = MF.getFunction();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
+ uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
+ bool HasFP = hasFP(MF);
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ bool Is64Bit = STI.is64Bit();
+ // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
+ const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
+ bool IsWin64 = STI.isTargetWin64();
+ // Not necessarily synonymous with IsWin64.
+ bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+ bool NeedsWinEH = IsWinEH && Fn->needsUnwindTableEntry();
+ bool NeedsDwarfCFI =
+ !IsWinEH && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
+ bool UseLEA = STI.useLeaForSP();
+ unsigned StackAlign = getStackAlignment();
+ unsigned SlotSize = RegInfo->getSlotSize();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ const unsigned MachineFramePtr = STI.isTarget64BitILP32() ?
+ getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
+ unsigned StackPtr = RegInfo->getStackRegister();
+ unsigned BasePtr = RegInfo->getBaseRegister();
+ DebugLoc DL;
+
+ // If we're forcing a stack realignment we can't rely on just the frame
+  // info; we need to know the ABI stack alignment as well in case we
+ // have a call out. Otherwise just make sure we have some alignment - we'll
+ // go with the minimum SlotSize.
+ if (ForceStackAlign) {
+ if (MFI->hasCalls())
+ MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
+ else if (MaxAlign < SlotSize)
+ MaxAlign = SlotSize;
+ }
+
+ // Add RETADDR move area to callee saved frame size.
+ int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+ if (TailCallReturnAddrDelta < 0)
+ X86FI->setCalleeSavedFrameSize(
+ X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
+
+ bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());
+
+  // The default stack probe size is 4096 if the function has no
+  // "stack-probe-size" attribute.
+ unsigned StackProbeSize = 4096;
+ if (Fn->hasFnAttribute("stack-probe-size"))
+ Fn->getFnAttribute("stack-probe-size")
+ .getValueAsString()
+ .getAsInteger(0, StackProbeSize);
+
+  // If this is x86-64, the Red Zone is not disabled, we are a leaf function,
+  // we use no more than 128 bytes of stack space, and we don't have a frame
+  // pointer, calls, or dynamic allocas, then we do not need to adjust the
+ // stack pointer (we fit in the Red Zone). We also check that we don't
+ // push and pop from the stack.
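+  // As a hypothetical illustration (numbers not taken from this patch): a
+  // leaf function with StackSize == 160 and no callee-saved spills keeps its
+  // first 128 bytes in the Red Zone, so only 32 bytes are allocated below.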
+ if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoRedZone) &&
+ !RegInfo->needsStackRealignment(MF) &&
+ !MFI->hasVarSizedObjects() && // No dynamic alloca.
+ !MFI->adjustsStack() && // No calls.
+ !IsWin64 && // Win64 has no Red Zone
+ !usesTheStack(MF) && // Don't push and pop.
+ !MF.shouldSplitStack()) { // Regular stack
+ uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
+ if (HasFP) MinSize += SlotSize;
+ StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
+ MFI->setStackSize(StackSize);
+ }
+
+  // Insert stack pointer adjustment for later moving of return addr.  Only
+  // applies to tail call optimized functions where the callee argument stack
+  // size is bigger than the caller's.
+ if (TailCallReturnAddrDelta < 0) {
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL,
+ TII.get(getSUBriOpcode(Uses64BitFramePtr, -TailCallReturnAddrDelta)),
+ StackPtr)
+ .addReg(StackPtr)
+ .addImm(-TailCallReturnAddrDelta)
+ .setMIFlag(MachineInstr::FrameSetup);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ }
+
+ // Mapping for machine moves:
+ //
+ // DST: VirtualFP AND
+ // SRC: VirtualFP => DW_CFA_def_cfa_offset
+ // ELSE => DW_CFA_def_cfa
+ //
+ // SRC: VirtualFP AND
+ // DST: Register => DW_CFA_def_cfa_register
+ //
+ // ELSE
+ // OFFSET < 0 => DW_CFA_offset_extended_sf
+ // REG < 64 => DW_CFA_offset + Reg
+ // ELSE => DW_CFA_offset_extended
+
+ uint64_t NumBytes = 0;
+ int stackGrowth = -SlotSize;
+
+ if (HasFP) {
+ // Calculate required stack adjustment.
+ uint64_t FrameSize = StackSize - SlotSize;
+    // If required, include space for an extra hidden slot for stashing the base pointer.
+ if (X86FI->getRestoreBasePointer())
+ FrameSize += SlotSize;
+ if (RegInfo->needsStackRealignment(MF)) {
+ // Callee-saved registers are pushed on stack before the stack
+ // is realigned.
+ FrameSize -= X86FI->getCalleeSavedFrameSize();
+ NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
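+      // (This rounds FrameSize up to the next multiple of MaxAlign; e.g. a
+      // hypothetical 40-byte frame with MaxAlign == 32 reserves 64 bytes.)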
+ } else {
+ NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
+ }
+
+ // Get the offset of the stack slot for the EBP register, which is
+ // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
+ // Update the frame offset adjustment.
+ MFI->setOffsetAdjustment(-NumBytes);
+
+ // Save EBP/RBP into the appropriate stack slot.
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(MachineFramePtr, RegState::Kill)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ if (NeedsDwarfCFI) {
+ // Mark the place where EBP/RBP was saved.
+ // Define the current CFA rule to use the provided offset.
+ assert(StackSize);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Change the rule for the FramePtr to be an "offset" rule.
+ unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr,
+ DwarfFramePtr, 2 * stackGrowth));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+
+ if (NeedsWinEH) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
+ .addImm(FramePtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ // Update EBP with the new base value.
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), FramePtr)
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ if (NeedsDwarfCFI) {
+ // Mark effective beginning of when frame pointer becomes valid.
+ // Define the current CFA to use the EBP/RBP register.
+ unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+
+ // Mark the FramePtr as live-in in every block.
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
+ I->addLiveIn(MachineFramePtr);
+ } else {
+ NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
+ }
+
+ // Skip the callee-saved push instructions.
+ bool PushedRegs = false;
+ int StackOffset = 2 * stackGrowth;
+
+ while (MBBI != MBB.end() &&
+ (MBBI->getOpcode() == X86::PUSH32r ||
+ MBBI->getOpcode() == X86::PUSH64r)) {
+ PushedRegs = true;
+ unsigned Reg = MBBI->getOperand(0).getReg();
+ ++MBBI;
+
+ if (!HasFP && NeedsDwarfCFI) {
+ // Mark callee-saved push instruction.
+ // Define the current CFA rule to use the provided offset.
+ assert(StackSize);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ StackOffset += stackGrowth;
+ }
+
+ if (NeedsWinEH) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
+ MachineInstr::FrameSetup);
+ }
+ }
+
+ // Realign stack after we pushed callee-saved registers (so that we'll be
+ // able to calculate their offsets from the frame pointer).
+ if (RegInfo->needsStackRealignment(MF)) {
+ assert(HasFP && "There should be a frame pointer if stack is realigned.");
+ uint64_t Val = -MaxAlign;
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL,
+ TII.get(getANDriOpcode(Uses64BitFramePtr, Val)), StackPtr)
+ .addReg(StackPtr)
+ .addImm(Val)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ }
+
+  // If there is a SUB32ri of ESP immediately before this instruction, merge
+  // the two. This can be the case when tail call elimination is enabled and
+  // the callee has more arguments than the caller.
+ NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
+
+ // If there is an ADD32ri or SUB32ri of ESP immediately after this
+ // instruction, merge the two instructions.
+ mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);
+
+ // Adjust stack pointer: ESP -= numbytes.
+
+ // Windows and cygwin/mingw require a prologue helper routine when allocating
+ // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
+ // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
+ // stack and adjust the stack pointer in one go. The 64-bit version of
+ // __chkstk is only responsible for probing the stack. The 64-bit prologue is
+ // responsible for adjusting the stack pointer. Touching the stack at 4K
+ // increments is necessary to ensure that the guard pages used by the OS
+  // virtual memory manager are allocated in the correct sequence.
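+  // (For illustration: a 12K allocation therefore touches the stack at 4K,
+  // 8K, and 12K below the old stack pointer, so the OS can commit one guard
+  // page at a time.)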
+ if (NumBytes >= StackProbeSize && UseStackProbe) {
+ // Check whether EAX is livein for this function.
+ bool isEAXAlive = isEAXLiveIn(MF);
+
+ if (isEAXAlive) {
+      // Saving EAX is only implemented for 32-bit code; the 64-bit path
+      // below clobbers RAX without saving it, so assert that here.
+ assert(!Is64Bit && "EAX is livein in x64 case!");
+
+ // Save EAX
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
+ .addReg(X86::EAX, RegState::Kill)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ if (Is64Bit) {
+ // Handle the 64-bit Windows ABI case where we need to call __chkstk.
+ // Function prologue is responsible for adjusting the stack pointer.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
+ .addImm(NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+ } else {
+ // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
+ // We'll also use 4 already allocated bytes for EAX.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
+ .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+    // Save a pointer to the MI where we set EAX/RAX.
+ MachineBasicBlock::iterator SetRAX = MBBI;
+ --SetRAX;
+
+ // Call __chkstk, __chkstk_ms, or __alloca.
+ emitStackProbeCall(MF, MBB, MBBI, DL);
+
+ // Apply the frame setup flag to all inserted instrs.
+ for (; SetRAX != MBBI; ++SetRAX)
+ SetRAX->setFlag(MachineInstr::FrameSetup);
+
+ if (isEAXAlive) {
+ // Restore EAX
+ MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
+ X86::EAX),
+ StackPtr, false, NumBytes - 4);
+ MI->setFlag(MachineInstr::FrameSetup);
+ MBB.insert(MBBI, MI);
+ }
+ } else if (NumBytes) {
+ emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, Uses64BitFramePtr,
+ UseLEA, TII, *RegInfo);
+ }
+
+ int SEHFrameOffset = 0;
+ if (NeedsWinEH) {
+ if (HasFP) {
+      // We need to set the frame base offset low enough that all saved
+      // register offsets are positive relative to it, but we can't just use
+      // NumBytes, because the .seh_setframe offset must be <= 240.
+      // So we pretend to have allocated only enough space to spill the
+      // non-volatile registers.
+      // We don't care about the rest of the stack allocation, because the
+      // unwinder will restore SP to (BP - SEHFrameOffset).
+ for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
+ int offset = MFI->getObjectOffset(Info.getFrameIdx());
+ SEHFrameOffset = std::max(SEHFrameOffset, std::abs(offset));
+ }
+      SEHFrameOffset += SEHFrameOffset % 16; // round up to a multiple of 16
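+      // (Offsets here are slot-sized multiples of 8, so adding the remainder
+      // rounds up to the next multiple of 16; e.g. 40 becomes 48.)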
+
+      // This only needs to account for XMM spill slots; GPR slots are
+      // covered by the .seh_pushreg's emitted above.
+ unsigned Size = SEHFrameOffset - X86FI->getCalleeSavedFrameSize();
+ if (Size) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
+ .addImm(Size)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
+ .addImm(FramePtr)
+ .addImm(SEHFrameOffset)
+ .setMIFlag(MachineInstr::FrameSetup);
+ } else {
+ // SP will be the base register for restoring XMMs
+ if (NumBytes) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
+ .addImm(NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+ }
+ }
+
+  // Skip the rest of the register-spilling code
+ while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
+ ++MBBI;
+
+ // Emit SEH info for non-GPRs
+ if (NeedsWinEH) {
+ for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
+ unsigned Reg = Info.getReg();
+ if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
+ continue;
+ assert(X86::FR64RegClass.contains(Reg) && "Unexpected register class");
+
+ int Offset = getFrameIndexOffset(MF, Info.getFrameIdx());
+ Offset += SEHFrameOffset;
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
+ .addImm(Reg)
+ .addImm(Offset)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ // If we need a base pointer, set it up here. It's whatever the value
+ // of the stack pointer is at this point. Any variable size objects
+ // will be allocated after this, so we can still use the base pointer
+ // to reference locals.
+ if (RegInfo->hasBasePointer(MF)) {
+ // Update the base pointer with the current stack pointer.
+ unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ if (X86FI->getRestoreBasePointer()) {
+      // Stash the value of the base pointer. Saving RSP instead of EBP
+      // shortens the dependence chain.
+ unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
+ FramePtr, true, X86FI->getRestoreBasePointerOffset())
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+ }
+
+ if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
+ // Mark end of stack pointer adjustment.
+ if (!HasFP && NumBytes) {
+ // Define the current CFA rule to use the provided offset.
+ assert(StackSize);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr,
+ -StackSize + stackGrowth));
+
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+
+ // Emit DWARF info specifying the offsets of the callee-saved registers.
+ if (PushedRegs)
+ emitCalleeSavedFrameMoves(MBB, MBBI, DL);
+ }
+}
+
+void X86FrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI != MBB.end() && "Returning block has no instructions");
+ unsigned RetOpcode = MBBI->getOpcode();
+ DebugLoc DL = MBBI->getDebugLoc();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ bool Is64Bit = STI.is64Bit();
+  // Standard x86-64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
+ const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
+ const bool Is64BitILP32 = STI.isTarget64BitILP32();
+ bool UseLEA = STI.useLeaForSP();
+ unsigned StackAlign = getStackAlignment();
+ unsigned SlotSize = RegInfo->getSlotSize();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ unsigned MachineFramePtr = Is64BitILP32 ?
+ getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
+ unsigned StackPtr = RegInfo->getStackRegister();
+
+ bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+ bool NeedsWinEH = IsWinEH && MF.getFunction()->needsUnwindTableEntry();
+
+ switch (RetOpcode) {
+ default:
+ llvm_unreachable("Can only insert epilog into returning blocks");
+ case X86::RETQ:
+ case X86::RETL:
+ case X86::RETIL:
+ case X86::RETIQ:
+ case X86::TCRETURNdi:
+ case X86::TCRETURNri:
+ case X86::TCRETURNmi:
+ case X86::TCRETURNdi64:
+ case X86::TCRETURNri64:
+ case X86::TCRETURNmi64:
+ case X86::EH_RETURN:
+ case X86::EH_RETURN64:
+ break; // These are ok
+ }
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ uint64_t StackSize = MFI->getStackSize();
+ uint64_t MaxAlign = MFI->getMaxAlignment();
+ unsigned CSSize = X86FI->getCalleeSavedFrameSize();
+ uint64_t NumBytes = 0;
+
+ // If we're forcing a stack realignment we can't rely on just the frame
+ // info, we need to know the ABI stack alignment as well in case we
+ // have a call out. Otherwise just make sure we have some alignment - we'll
+ // go with the minimum.
+ if (ForceStackAlign) {
+ if (MFI->hasCalls())
+ MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
+ else
+ MaxAlign = MaxAlign ? MaxAlign : 4;
+ }
+
+ if (hasFP(MF)) {
+ // Calculate required stack adjustment.
+ uint64_t FrameSize = StackSize - SlotSize;
+ if (RegInfo->needsStackRealignment(MF)) {
+ // Callee-saved registers were pushed on stack before the stack
+ // was realigned.
+ FrameSize -= CSSize;
+ NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
+ } else {
+ NumBytes = FrameSize - CSSize;
+ }
+
+ // Pop EBP.
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr);
+ } else {
+ NumBytes = StackSize - CSSize;
+ }
+
+ // Skip the callee-saved pop instructions.
+ while (MBBI != MBB.begin()) {
+ MachineBasicBlock::iterator PI = std::prev(MBBI);
+ unsigned Opc = PI->getOpcode();
+
+ if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
+ !PI->isTerminator())
+ break;
+
+ --MBBI;
+ }
+ MachineBasicBlock::iterator FirstCSPop = MBBI;
+
+ DL = MBBI->getDebugLoc();
+
+ // If there is an ADD32ri or SUB32ri of ESP immediately before this
+ // instruction, merge the two instructions.
+ if (NumBytes || MFI->hasVarSizedObjects())
+ mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
+
+  // If dynamic alloca is used, then reset ESP to point to the last
+  // callee-saved slot before popping the registers off. The same applies
+  // when the stack was realigned.
+ if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
+ if (RegInfo->needsStackRealignment(MF))
+ MBBI = FirstCSPop;
+ if (CSSize != 0) {
+ unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
+ FramePtr, false, -CSSize);
+ --MBBI;
+ } else {
+ unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(FramePtr);
+ --MBBI;
+ }
+ } else if (NumBytes) {
+ // Adjust stack pointer back: ESP += numbytes.
+ emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, Uses64BitFramePtr, UseLEA,
+ TII, *RegInfo);
+ --MBBI;
+ }
+
+  // The Windows unwinder will not invoke a function's exception handler if
+  // the IP is either in the prologue or in the epilogue. This behavior causes
+  // a problem when a call immediately precedes an epilogue, because the
+  // return address points into the epilogue. To cope with that, we insert an
+  // epilogue marker here, then replace it with a 'nop' if it ends up
+  // immediately after a CALL in the final emitted code.
+ if (NeedsWinEH)
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
+
+  // We're returning from the function via eh_return.
+ if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &DestAddr = MBBI->getOperand(0);
+ assert(DestAddr.isReg() && "Offset should be in register!");
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
+ StackPtr).addReg(DestAddr.getReg());
+ } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
+ RetOpcode == X86::TCRETURNmi ||
+ RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
+ RetOpcode == X86::TCRETURNmi64) {
+ bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
+ // Tail call return: adjust the stack pointer and jump to callee.
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
+ assert(StackAdjust.isImm() && "Expecting immediate value.");
+
+ // Adjust stack pointer.
+ int StackAdj = StackAdjust.getImm();
+ int MaxTCDelta = X86FI->getTCReturnAddrDelta();
+ int Offset = 0;
+ assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
+
+    // Incorporate the retaddr area.
+ Offset = StackAdj-MaxTCDelta;
+ assert(Offset >= 0 && "Offset should never be negative");
+
+ if (Offset) {
+ // Check for possible merge with preceding ADD instruction.
+ Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
+ emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, Uses64BitFramePtr,
+ UseLEA, TII, *RegInfo);
+ }
+
+ // Jump to label or value in register.
+ bool IsWin64 = STI.isTargetWin64();
+ if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
+ unsigned Op = (RetOpcode == X86::TCRETURNdi)
+ ? X86::TAILJMPd
+ : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));
+ if (JumpTarget.isGlobal())
+ MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
+ JumpTarget.getTargetFlags());
+ else {
+ assert(JumpTarget.isSymbol());
+ MIB.addExternalSymbol(JumpTarget.getSymbolName(),
+ JumpTarget.getTargetFlags());
+ }
+ } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
+ unsigned Op = (RetOpcode == X86::TCRETURNmi)
+ ? X86::TAILJMPm
+ : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));
+ for (unsigned i = 0; i != 5; ++i)
+ MIB.addOperand(MBBI->getOperand(i));
+ } else if (RetOpcode == X86::TCRETURNri64) {
+ BuildMI(MBB, MBBI, DL,
+ TII.get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
+ .addReg(JumpTarget.getReg(), RegState::Kill);
+ } else {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
+ }
+
+ MachineInstr *NewMI = std::prev(MBBI);
+ NewMI->copyImplicitOps(MF, MBBI);
+
+ // Delete the pseudo instruction TCRETURN.
+ MBB.erase(MBBI);
+ } else if ((RetOpcode == X86::RETQ || RetOpcode == X86::RETL ||
+ RetOpcode == X86::RETIQ || RetOpcode == X86::RETIL) &&
+ (X86FI->getTCReturnAddrDelta() < 0)) {
+ // Add the return addr area delta back since we are not tail calling.
+ int delta = -1*X86FI->getTCReturnAddrDelta();
+ MBBI = MBB.getLastNonDebugInstr();
+
+ // Check for possible merge with preceding ADD instruction.
+ delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
+ emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, Uses64BitFramePtr, UseLEA, TII,
+ *RegInfo);
+ }
+}
+
+int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const {
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
+ uint64_t StackSize = MFI->getStackSize();
+
+ if (RegInfo->hasBasePointer(MF)) {
+ assert (hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
+ if (FI < 0) {
+ // Skip the saved EBP.
+ return Offset + RegInfo->getSlotSize();
+ } else {
+ assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
+ return Offset + StackSize;
+ }
+ } else if (RegInfo->needsStackRealignment(MF)) {
+ if (FI < 0) {
+ // Skip the saved EBP.
+ return Offset + RegInfo->getSlotSize();
+ } else {
+ assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
+ return Offset + StackSize;
+ }
+ // FIXME: Support tail calls
+ } else {
+ if (!hasFP(MF))
+ return Offset + StackSize;
+
+ // Skip the saved EBP.
+ Offset += RegInfo->getSlotSize();
+
+ // Skip the RETADDR move area
+ const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+ if (TailCallReturnAddrDelta < 0)
+ Offset -= TailCallReturnAddrDelta;
+ }
+
+ return Offset;
+}
+
+int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const {
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ // We can't calculate offset from frame pointer if the stack is realigned,
+ // so enforce usage of stack/base pointer. The base pointer is used when we
+ // have dynamic allocas in addition to dynamic realignment.
+ if (RegInfo->hasBasePointer(MF))
+ FrameReg = RegInfo->getBaseRegister();
+ else if (RegInfo->needsStackRealignment(MF))
+ FrameReg = RegInfo->getStackRegister();
+ else
+ FrameReg = RegInfo->getFrameRegister(MF);
+ return getFrameIndexOffset(MF, FI);
+}
+
+// Simplified from getFrameIndexOffset keeping only StackPointer cases
+int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF, int FI) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ // Does not include any dynamic realign.
+ const uint64_t StackSize = MFI->getStackSize();
+ {
+#ifndef NDEBUG
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(MF.getSubtarget().getRegisterInfo());
+ // Note: LLVM arranges the stack as:
+ // Args > Saved RetPC (<--FP) > CSRs > dynamic alignment (<--BP)
+ // > "Stack Slots" (<--SP)
+ // We can always address StackSlots from RSP. We can usually (unless
+ // needsStackRealignment) address CSRs from RSP, but sometimes need to
+ // address them from RBP. FixedObjects can be placed anywhere in the stack
+ // frame depending on their specific requirements (i.e. we can actually
+  // refer to arguments to the function which are stored in the *caller's*
+  // frame). As a result, THE RESULT OF THIS CALL IS MEANINGLESS FOR CSRs
+  // AND FixedObjects IFF needsStackRealignment or hasVarSizedObjects.
+
+ assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");
+
+ // We don't handle tail calls, and shouldn't be seeing them
+ // either.
+ int TailCallReturnAddrDelta =
+ MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
+ assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
+#endif
+ }
+
+ // This is how the math works out:
+ //
+ // %rsp grows (i.e. gets lower) left to right. Each box below is
+ // one word (eight bytes). Obj0 is the stack slot we're trying to
+ // get to.
+ //
+ // ----------------------------------
+ // | BP | Obj0 | Obj1 | ... | ObjN |
+ // ----------------------------------
+ // ^ ^ ^ ^
+ // A B C E
+ //
+ // A is the incoming stack pointer.
+ // (B - A) is the local area offset (-8 for x86-64) [1]
+ // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
+ //
+ // |(E - B)| is the StackSize (absolute value, positive). For a
+  // stack that grows down, this works out to be (B - E). [3]
+ //
+ // E is also the value of %rsp after stack has been set up, and we
+ // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
+ // (C - E) == (C - A) - (B - A) + (B - E)
+ // { Using [1], [2] and [3] above }
+ // == getObjectOffset - LocalAreaOffset + StackSize
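+  // Plugging in illustrative numbers: getObjectOffset == -24, a local area
+  // offset of -8, and StackSize == 32 give Offset == -16 + 32 == 16, i.e.
+  // Obj0 lives at 16(%rsp).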
+ //
+
+ // Get the Offset from the StackPointer
+ int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
+
+ return Offset + StackSize;
+}
+// Simplified from getFrameIndexReference keeping only StackPointer cases
+int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const {
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(MF.getSubtarget().getRegisterInfo());
+
+ assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");
+
+ FrameReg = RegInfo->getStackRegister();
+ return getFrameIndexOffsetFromSP(MF, FI);
+}
+
+bool X86FrameLowering::assignCalleeSavedSpillSlots(
+ MachineFunction &MF, const TargetRegisterInfo *TRI,
+ std::vector<CalleeSavedInfo> &CSI) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ unsigned SlotSize = RegInfo->getSlotSize();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+
+ unsigned CalleeSavedFrameSize = 0;
+ int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
+
+ if (hasFP(MF)) {
+    // emitPrologue always spills the frame register first.
+ SpillSlotOffset -= SlotSize;
+ MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+
+    // Since emitPrologue and emitEpilogue will handle spilling and restoring
+    // of the frame register, we can delete it from the CSI list and not have
+    // to worry about avoiding it later.
+ unsigned FPReg = RegInfo->getFrameRegister(MF);
+ for (unsigned i = 0; i < CSI.size(); ++i) {
+ if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
+ CSI.erase(CSI.begin() + i);
+ break;
+ }
+ }
+ }
+
+ // Assign slots for GPRs. It increases frame size.
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i - 1].getReg();
+
+ if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
+ continue;
+
+ SpillSlotOffset -= SlotSize;
+ CalleeSavedFrameSize += SlotSize;
+
+ int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+ CSI[i - 1].setFrameIdx(SlotIndex);
+ }
+
+ X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
+
+ // Assign slots for XMMs.
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i - 1].getReg();
+ if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
+ continue;
+
+ const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
+ // ensure alignment
+ SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
+ // spill into slot
+ SpillSlotOffset -= RC->getSize();
+ int SlotIndex =
+ MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
+ CSI[i - 1].setFrameIdx(SlotIndex);
+ MFI->ensureMaxAlignment(RC->getAlignment());
+ }
+
+ return true;
+}
+
+bool X86FrameLowering::spillCalleeSavedRegisters(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL = MBB.findDebugLoc(MI);
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+
+ // Push GPRs. It increases frame size.
+ unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i - 1].getReg();
+
+ if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
+ continue;
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(Reg);
+
+ BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+  // Spill XMM regs. X86 has no push/pop instructions for XMM registers, so
+  // spill them to the stack frame instead.
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i-1].getReg();
+ if (X86::GR64RegClass.contains(Reg) ||
+ X86::GR32RegClass.contains(Reg))
+ continue;
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(Reg);
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
+ TRI);
+ --MI;
+ MI->setFlag(MachineInstr::FrameSetup);
+ ++MI;
+ }
+
+ return true;
+}
+
+bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ DebugLoc DL = MBB.findDebugLoc(MI);
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+
+ // Reload XMMs from stack frame.
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (X86::GR64RegClass.contains(Reg) ||
+ X86::GR32RegClass.contains(Reg))
+ continue;
+
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
+ }
+
+ // POP GPRs.
+ unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (!X86::GR64RegClass.contains(Reg) &&
+ !X86::GR32RegClass.contains(Reg))
+ continue;
+
+ BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
+ }
+ return true;
+}
+
+void
+X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ unsigned SlotSize = RegInfo->getSlotSize();
+
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+
+ if (TailCallReturnAddrDelta < 0) {
+ // create RETURNADDR area
+ // arg
+ // arg
+ // RETADDR
+ // { ...
+ // RETADDR area
+ // ...
+ // }
+ // [EBP]
+ MFI->CreateFixedObject(-TailCallReturnAddrDelta,
+ TailCallReturnAddrDelta - SlotSize, true);
+ }
+
+ // Spill the BasePtr if it's used.
+ if (RegInfo->hasBasePointer(MF))
+ MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
+}
+
+static bool
+HasNestArgument(const MachineFunction *MF) {
+ const Function *F = MF->getFunction();
+ for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
+ I != E; I++) {
+ if (I->hasNestAttr())
+ return true;
+ }
+ return false;
+}
+
+/// GetScratchRegister - Get a temp register for performing work in the
+/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
+/// and the properties of the function either one or two registers will be
+/// needed. Set primary to true for the first register, false for the second.
+static unsigned
+GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
+ CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
+
+ // Erlang stuff.
+ if (CallingConvention == CallingConv::HiPE) {
+ if (Is64Bit)
+ return Primary ? X86::R14 : X86::R13;
+ else
+ return Primary ? X86::EBX : X86::EDI;
+ }
+
+ if (Is64Bit) {
+ if (IsLP64)
+ return Primary ? X86::R11 : X86::R12;
+ else
+ return Primary ? X86::R11D : X86::R12D;
+ }
+
+ bool IsNested = HasNestArgument(&MF);
+
+ if (CallingConvention == CallingConv::X86_FastCall ||
+ CallingConvention == CallingConv::Fast) {
+ if (IsNested)
+ report_fatal_error("Segmented stacks does not support fastcall with "
+ "nested function.");
+ return Primary ? X86::EAX : X86::ECX;
+ }
+ if (IsNested)
+ return Primary ? X86::EDX : X86::EAX;
+ return Primary ? X86::ECX : X86::EAX;
+}
+
+// The stack limit in the TCB is set to this many bytes above the actual stack
+// limit.
+static const uint64_t kSplitStackAvailable = 256;
+
+void
+X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
+ MachineBasicBlock &prologueMBB = MF.front();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ uint64_t StackSize;
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ bool Is64Bit = STI.is64Bit();
+ const bool IsLP64 = STI.isTarget64BitLP64();
+ unsigned TlsReg, TlsOffset;
+ DebugLoc DL;
+
+ unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
+ assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
+ "Scratch register is live-in");
+
+ if (MF.getFunction()->isVarArg())
+ report_fatal_error("Segmented stacks do not support vararg functions.");
+ if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
+ !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
+ !STI.isTargetDragonFly())
+ report_fatal_error("Segmented stacks not supported on this platform.");
+
+  // Eventually StackSize will be calculated by a link-time pass, which will
+  // also decide whether checking code needs to be injected into this
+  // particular prologue.
+ StackSize = MFI->getStackSize();
+
+ // Do not generate a prologue for functions with a stack of size zero
+ if (StackSize == 0)
+ return;
+
+ MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
+ MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ bool IsNested = false;
+
+ // We need to know if the function has a nest argument only in 64 bit mode.
+ if (Is64Bit)
+ IsNested = HasNestArgument(&MF);
+
+  // The MOV R10, RAX needs to be in a different block, since the RET we emit
+  // in allocMBB needs to be the last (terminating) instruction.
+
+ for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
+ e = prologueMBB.livein_end(); i != e; i++) {
+ allocMBB->addLiveIn(*i);
+ checkMBB->addLiveIn(*i);
+ }
+
+ if (IsNested)
+ allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
+
+ MF.push_front(allocMBB);
+ MF.push_front(checkMBB);
+
+  // When the frame size is less than 256 bytes, we just compare the stack
+  // boundary directly to the value of the stack pointer, per gcc.
+ bool CompareStackPointer = StackSize < kSplitStackAvailable;
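+  // (So for a hypothetical 200-byte frame, the "LEA -StackSize(%rsp)" below
+  // is skipped and %rsp itself is compared against the TLS stack guard.)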
+
+  // Read the limit of the current stacklet from the stack_guard location.
+ if (Is64Bit) {
+ if (STI.isTargetLinux()) {
+ TlsReg = X86::FS;
+ TlsOffset = IsLP64 ? 0x70 : 0x40;
+ } else if (STI.isTargetDarwin()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
+ } else if (STI.isTargetWin64()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x28; // pvArbitrary, reserved for application use
+ } else if (STI.isTargetFreeBSD()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x18;
+ } else if (STI.isTargetDragonFly()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x20; // use tls_tcb.tcb_segstack
+ } else {
+ report_fatal_error("Segmented stacks not supported on this platform.");
+ }
+
+ if (CompareStackPointer)
+ ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
+ else
+ BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
+ .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
+
+ BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
+ .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
+ } else {
+ if (STI.isTargetLinux()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x30;
+ } else if (STI.isTargetDarwin()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x48 + 90*4;
+ } else if (STI.isTargetWin32()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x14; // pvArbitrary, reserved for application use
+ } else if (STI.isTargetDragonFly()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x10; // use tls_tcb.tcb_segstack
+ } else if (STI.isTargetFreeBSD()) {
+ report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
+ } else {
+ report_fatal_error("Segmented stacks not supported on this platform.");
+ }
+
+ if (CompareStackPointer)
+ ScratchReg = X86::ESP;
+ else
+ BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
+ .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
+
+ if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
+ STI.isTargetDragonFly()) {
+ BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
+ .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
+ } else if (STI.isTargetDarwin()) {
+
+      // TlsOffset doesn't fit into a mod r/m byte, so we need an extra register.
+ unsigned ScratchReg2;
+ bool SaveScratch2;
+ if (CompareStackPointer) {
+ // The primary scratch register is available for holding the TLS offset.
+ ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
+ SaveScratch2 = false;
+ } else {
+ // Need to use a second register to hold the TLS offset
+ ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
+
+ // Unfortunately, with fastcc the second scratch register may hold an
+ // argument.
+ SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
+ }
+
+ // If Scratch2 is live-in then it needs to be saved.
+ assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
+ "Scratch register is live-in and not saved");
+
+ if (SaveScratch2)
+ BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
+ .addReg(ScratchReg2, RegState::Kill);
+
+ BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
+ .addImm(TlsOffset);
+ BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
+ .addReg(ScratchReg)
+ .addReg(ScratchReg2).addImm(1).addReg(0)
+ .addImm(0)
+ .addReg(TlsReg);
+
+ if (SaveScratch2)
+ BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
+ }
+ }
+
+ // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
+ // It jumps to normal execution of the function body.
+ BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&prologueMBB);
+
+  // On 32-bit we first push the argument size and then the frame size. On
+  // 64-bit, we pass the stack frame size in r10 and the argument size in r11.
+ if (Is64Bit) {
+ // Functions with nested arguments use R10, so it needs to be saved across
+ // the call to _morestack
+
+ const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
+ const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
+ const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
+ const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
+ const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
+
+ if (IsNested)
+ BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
+
+ BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
+ .addImm(StackSize);
+ BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
+ .addImm(X86FI->getArgumentStackSize());
+ MF.getRegInfo().setPhysRegUsed(Reg10);
+ MF.getRegInfo().setPhysRegUsed(Reg11);
+ } else {
+ BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
+ .addImm(X86FI->getArgumentStackSize());
+ BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
+ .addImm(StackSize);
+ }
+
+ // __morestack is in libgcc
+ if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
+ // Under the large code model, we cannot assume that __morestack lives
+ // within 2^31 bytes of the call site, so we cannot use pc-relative
+ // addressing. We cannot perform the call via a temporary register,
+ // as the rax register may be used to store the static chain, and all
+ // other suitable registers may be either callee-save or used for
+ // parameter passing. We cannot use the stack at this point either
+ // because __morestack manipulates the stack directly.
+ //
+ // To avoid these issues, perform an indirect call via a read-only memory
+ // location containing the address.
+ //
+ // This solution is not perfect, as it assumes that the .rodata section
+ // is laid out within 2^31 bytes of each function body, but this seems
+ // to be sufficient for JIT.
+ BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
+ .addReg(X86::RIP)
+ .addImm(0)
+ .addReg(0)
+ .addExternalSymbol("__morestack_addr")
+ .addReg(0);
+ MF.getMMI().setUsesMorestackAddr(true);
+ } else {
+ if (Is64Bit)
+ BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
+ .addExternalSymbol("__morestack");
+ else
+ BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
+ .addExternalSymbol("__morestack");
+ }
+
+ if (IsNested)
+ BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
+ else
+ BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
+
+ allocMBB->addSuccessor(&prologueMBB);
+
+ checkMBB->addSuccessor(allocMBB);
+ checkMBB->addSuccessor(&prologueMBB);
+
+#ifdef XDEBUG
+ MF.verify();
+#endif
+}
+
+/// Erlang programs may need a special prologue to handle the stack size they
+/// might need at runtime. That is because Erlang/OTP does not implement a C
+/// stack but uses a custom hybrid stack/heap architecture.
+/// (for more information see Eric Stenman's Ph.D. thesis:
+/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
+///
+/// CheckStack:
+/// temp0 = sp - MaxStack
+/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
+/// OldStart:
+/// ...
+/// IncStack:
+/// call inc_stack # doubles the stack space
+/// temp0 = sp - MaxStack
+/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
+void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const unsigned SlotSize =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo())
+ ->getSlotSize();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ const bool Is64Bit = STI.is64Bit();
+ const bool IsLP64 = STI.isTarget64BitLP64();
+ DebugLoc DL;
+ // HiPE-specific values
+ const unsigned HipeLeafWords = 24;
+ const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
+ const unsigned Guaranteed = HipeLeafWords * SlotSize;
+ unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
+ MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
+ unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;
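+  // (Illustrative numbers: a 64-bit function taking 8 arguments has
+  // CallerStkArity == 2, since 6 of them travel in registers.)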
+
+ assert(STI.isTargetLinux() &&
+ "HiPE prologue is only supported on Linux operating systems.");
+
+ // Compute the largest caller's frame that is needed to fit the callees'
+ // frames. This 'MaxStack' is computed from:
+ //
+ // a) the fixed frame size, which is the space needed for all spilled temps,
+ // b) outgoing on-stack parameter areas, and
+ // c) the minimum stack space this function needs to make available for the
+ // functions it calls (a tunable ABI property).
+ if (MFI->hasCalls()) {
+ unsigned MoreStackForCalls = 0;
+
+ for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
+ MBBI != MBBE; ++MBBI)
+ for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
+ MI != ME; ++MI) {
+ if (!MI->isCall())
+ continue;
+
+ // Get callee operand.
+ const MachineOperand &MO = MI->getOperand(0);
+
+ // Only take account of global function calls (no closures etc.).
+ if (!MO.isGlobal())
+ continue;
+
+ const Function *F = dyn_cast<Function>(MO.getGlobal());
+ if (!F)
+ continue;
+
+        // Do not update 'MaxStack' for primitive and built-in functions,
+        // i.e. names containing "erlang." or "bif_", or names containing
+        // neither a "." (as in a regular <Module>.<Function>.<Arity>) nor an
+        // "_" (as in the BIF "suspend_0"); those are executed on another
+        // stack.
+ if (F->getName().find("erlang.") != StringRef::npos ||
+ F->getName().find("bif_") != StringRef::npos ||
+ F->getName().find_first_of("._") == StringRef::npos)
+ continue;
+
+ unsigned CalleeStkArity =
+ F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
+ if (HipeLeafWords - 1 > CalleeStkArity)
+ MoreStackForCalls = std::max(MoreStackForCalls,
+ (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
+ }
+ MaxStack += MoreStackForCalls;
+ }
+
+  // If the needed stack frame is larger than the guaranteed size, then runtime
+  // checks and calls to the "inc_stack_0" BIF are inserted into the prologue.
+ if (MaxStack > Guaranteed) {
+ MachineBasicBlock &prologueMBB = MF.front();
+ MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
+ MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
+
+ for (MachineBasicBlock::livein_iterator I = prologueMBB.livein_begin(),
+ E = prologueMBB.livein_end(); I != E; I++) {
+ stackCheckMBB->addLiveIn(*I);
+ incStackMBB->addLiveIn(*I);
+ }
+
+ MF.push_front(incStackMBB);
+ MF.push_front(stackCheckMBB);
+
+ unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
+ unsigned LEAop, CMPop, CALLop;
+ if (Is64Bit) {
+ SPReg = X86::RSP;
+ PReg = X86::RBP;
+ LEAop = X86::LEA64r;
+ CMPop = X86::CMP64rm;
+ CALLop = X86::CALL64pcrel32;
+ SPLimitOffset = 0x90;
+ } else {
+ SPReg = X86::ESP;
+ PReg = X86::EBP;
+ LEAop = X86::LEA32r;
+ CMPop = X86::CMP32rm;
+ CALLop = X86::CALLpcrel32;
+ SPLimitOffset = 0x4c;
+ }
+
+ ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
+ assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
+ "HiPE prologue scratch register is live-in");
+
+ // Create new MBB for StackCheck:
+ addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
+ SPReg, false, -MaxStack);
+ // SPLimitOffset is in a fixed heap location (pointed by BP).
+ addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
+ .addReg(ScratchReg), PReg, false, SPLimitOffset);
+ BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&prologueMBB);
+
+ // Create new MBB for IncStack:
+ BuildMI(incStackMBB, DL, TII.get(CALLop)).
+ addExternalSymbol("inc_stack_0");
+ addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
+ SPReg, false, -MaxStack);
+ addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
+ .addReg(ScratchReg), PReg, false, SPLimitOffset);
+ BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);
+
+ stackCheckMBB->addSuccessor(&prologueMBB, 99);
+ stackCheckMBB->addSuccessor(incStackMBB, 1);
+ incStackMBB->addSuccessor(&prologueMBB, 99);
+ incStackMBB->addSuccessor(incStackMBB, 1);
+ }
+#ifdef XDEBUG
+ MF.verify();
+#endif
+}
+
+bool X86FrameLowering::
+convertArgMovsToPushes(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, uint64_t Amount) const {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
+ MF.getSubtarget().getRegisterInfo());
+ unsigned StackPtr = RegInfo.getStackRegister();
+
+  // Scan the call setup sequence for the pattern we're looking for.
+  // We only handle a simple case now - a sequence of MOV32mi or MOV32mr
+  // instructions that store a sequence of 32-bit values onto the stack, with
+  // no gaps.
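+  // As an illustrative sketch of the transformation (hypothetical callee
+  // _foo, 8 bytes of arguments):
+  //   subl $8, %esp
+  //   movl %eax, 4(%esp)        pushl %eax
+  //   movl $42, (%esp)    =>    pushl $42
+  //   calll _foo                calll _foo
+  // The movs are replaced in reverse displacement order, and no SUB is
+  // emitted at all when the pushes cover the whole adjustment.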
+ std::map<int64_t, MachineBasicBlock::iterator> MovMap;
+ do {
+ int Opcode = I->getOpcode();
+ if (Opcode != X86::MOV32mi && Opcode != X86::MOV32mr)
+ break;
+
+ // We only want movs of the form:
+    // movl imm/r32, k(%esp)
+    // If we run into something else, bail.
+ // Note that AddrBaseReg may, counterintuitively, not be a register...
+ if (!I->getOperand(X86::AddrBaseReg).isReg() ||
+ (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||
+ !I->getOperand(X86::AddrScaleAmt).isImm() ||
+ (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||
+ (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||
+ (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||
+ !I->getOperand(X86::AddrDisp).isImm())
+ return false;
+
+ int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();
+
+ // We don't want to consider the unaligned case.
+ if (StackDisp % 4)
+ return false;
+
+ // If the same stack slot is being filled twice, something's fishy.
+ if (!MovMap.insert(std::pair<int64_t, MachineInstr*>(StackDisp, I)).second)
+ return false;
+
+ ++I;
+ } while (I != MBB.end());
+
+ // We now expect the end of the sequence - a call and a stack adjust.
+ if (I == MBB.end())
+ return false;
+ if (!I->isCall())
+ return false;
+ MachineBasicBlock::iterator Call = I;
+ if ((++I)->getOpcode() != TII.getCallFrameDestroyOpcode())
+ return false;
+
+ // Now, go through the map, and see that we don't have any gaps,
+ // but only a series of 32-bit MOVs.
+ // Since std::map provides ordered iteration, the original order
+ // of the MOVs doesn't matter.
+ int64_t ExpectedDist = 0;
+ for (auto MMI = MovMap.begin(), MME = MovMap.end(); MMI != MME;
+ ++MMI, ExpectedDist += 4)
+ if (MMI->first != ExpectedDist)
+ return false;
+
+ // Ok, everything looks fine. Do the transformation.
+ DebugLoc DL = I->getDebugLoc();
+
+ // It's possible the original stack adjustment amount was larger than
+ // that done by the pushes. If so, we still need a SUB.
+ Amount -= ExpectedDist;
+ if (Amount) {
+ MachineInstr* Sub = BuildMI(MBB, Call, DL,
+ TII.get(getSUBriOpcode(false, Amount)), StackPtr)
+ .addReg(StackPtr).addImm(Amount);
+ Sub->getOperand(3).setIsDead();
+ }
+
+  // Now, iterate through the map in reverse order, and replace the movs
+  // with pushes. MOVmi/MOVmr doesn't have any defs, so there's no need to
+  // replace uses.
+ for (auto MMI = MovMap.rbegin(), MME = MovMap.rend(); MMI != MME; ++MMI) {
+ MachineBasicBlock::iterator MOV = MMI->second;
+ MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);
+
+ // Replace MOVmr with PUSH32r, and MOVmi with PUSHi of appropriate size
+ int PushOpcode = X86::PUSH32r;
+ if (MOV->getOpcode() == X86::MOV32mi)
+ PushOpcode = getPUSHiOpcode(false, PushOp);
+
+ BuildMI(MBB, Call, DL, TII.get(PushOpcode)).addOperand(PushOp);
+ MBB.erase(MOV);
+ }
+
+ return true;
+}
+
+void X86FrameLowering::
+eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
+ MF.getSubtarget().getRegisterInfo());
+ unsigned StackPtr = RegInfo.getStackRegister();
+ bool reserveCallFrame = hasReservedCallFrame(MF);
+ int Opcode = I->getOpcode();
+ bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ bool IsLP64 = STI.isTarget64BitLP64();
+ DebugLoc DL = I->getDebugLoc();
+ uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
+ uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
+ I = MBB.erase(I);
+
+ if (!reserveCallFrame) {
+ // If the stack pointer can be changed after prologue, turn the
+ // adjcallstackup instruction into a 'sub ESP, <amt>' and the
+ // adjcallstackdown instruction into 'add ESP, <amt>'
+ if (Amount == 0)
+ return;
+
+ // We need to keep the stack aligned properly. To do this, we round the
+ // amount of space needed for the outgoing arguments up to the next
+ // alignment boundary.
+ unsigned StackAlign = MF.getTarget()
+ .getSubtargetImpl()
+ ->getFrameLowering()
+ ->getStackAlignment();
+ Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
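+    // (e.g. a hypothetical 20-byte argument area with StackAlign == 16 is
+    // rounded up to 32 bytes)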
+
+ MachineInstr *New = nullptr;
+ if (Opcode == TII.getCallFrameSetupOpcode()) {
+ // Try to convert movs to the stack into pushes.
+ // We currently only look for a pattern that appears in 32-bit
+ // calling conventions.
+ if (!IsLP64 && convertArgMovsToPushes(MF, MBB, I, Amount))
+ return;
+
+ New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)),
+ StackPtr)
+ .addReg(StackPtr)
+ .addImm(Amount);
+ } else {
+ assert(Opcode == TII.getCallFrameDestroyOpcode());
+
+ // Factor out the amount the callee already popped.
+ Amount -= CalleeAmt;
+
+ if (Amount) {
+ unsigned Opc = getADDriOpcode(IsLP64, Amount);
+ New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr).addImm(Amount);
+ }
+ }
+
+ if (New) {
+ // The EFLAGS implicit def is dead.
+ New->getOperand(3).setIsDead();
+
+ // Replace the pseudo instruction with a new instruction.
+ MBB.insert(I, New);
+ }
+
+ return;
+ }
+
+ if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
+ // If we are performing frame pointer elimination and if the callee pops
+ // something off the stack pointer, add it back. We do this until we have
+ // more advanced stack pointer tracking ability.
+ unsigned Opc = getSUBriOpcode(IsLP64, CalleeAmt);
+ MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr).addImm(CalleeAmt);
+
+ // The EFLAGS implicit def is dead.
+ New->getOperand(3).setIsDead();
+
+    // We are not tracking the stack pointer adjustment by the callee, so make
+    // sure we restore the stack pointer immediately after the call; there may
+    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
+ MachineBasicBlock::iterator B = MBB.begin();
+ while (I != B && !std::prev(I)->isCall())
+ --I;
+ MBB.insert(I, New);
+ }
+}
+
-//===-- X86TargetFrameLowering.h - Define frame lowering for X86 -*- C++ -*-==//\r
-//\r
-// The LLVM Compiler Infrastructure\r
-//\r
-// This file is distributed under the University of Illinois Open Source\r
-// License. See LICENSE.TXT for details.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-//\r
-// This class implements X86-specific bits of TargetFrameLowering class.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-\r
-#ifndef LLVM_LIB_TARGET_X86_X86FRAMELOWERING_H\r
-#define LLVM_LIB_TARGET_X86_X86FRAMELOWERING_H\r
-\r
-#include "llvm/Target/TargetFrameLowering.h"\r
-\r
-namespace llvm {\r
-\r
-class MCSymbol;\r
-class X86TargetMachine;\r
-class X86Subtarget;\r
-\r
-class X86FrameLowering : public TargetFrameLowering {\r
-public:\r
- explicit X86FrameLowering(StackDirection D, unsigned StackAl, int LAO)\r
- : TargetFrameLowering(StackGrowsDown, StackAl, LAO) {}\r
-\r
- /// Emit a call to the target's stack probe function. This is required for all\r
- /// large stack allocations on Windows. The caller is required to materialize\r
- /// the number of bytes to probe in RAX/EAX.\r
- static void emitStackProbeCall(MachineFunction &MF, MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator MBBI, DebugLoc DL);\r
-\r
- void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator MBBI,\r
- DebugLoc DL) const;\r
-\r
- /// emitProlog/emitEpilog - These methods insert prolog and epilog code into\r
- /// the function.\r
- void emitPrologue(MachineFunction &MF) const override;\r
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;\r
-\r
- void adjustForSegmentedStacks(MachineFunction &MF) const override;\r
-\r
- void adjustForHiPEPrologue(MachineFunction &MF) const override;\r
-\r
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,\r
- RegScavenger *RS = nullptr) const override;\r
-\r
- bool\r
- assignCalleeSavedSpillSlots(MachineFunction &MF,\r
- const TargetRegisterInfo *TRI,\r
- std::vector<CalleeSavedInfo> &CSI) const override;\r
-\r
- bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator MI,\r
- const std::vector<CalleeSavedInfo> &CSI,\r
- const TargetRegisterInfo *TRI) const override;\r
-\r
- bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator MI,\r
- const std::vector<CalleeSavedInfo> &CSI,\r
- const TargetRegisterInfo *TRI) const override;\r
-\r
- bool hasFP(const MachineFunction &MF) const override;\r
- bool hasReservedCallFrame(const MachineFunction &MF) const override;\r
- bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override;\r
- bool needsFrameIndexResolution(const MachineFunction &MF) const override;\r
-\r
- int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;\r
- int getFrameIndexReference(const MachineFunction &MF, int FI,\r
- unsigned &FrameReg) const override;\r
-\r
- int getFrameIndexOffsetFromSP(const MachineFunction &MF, int FI) const;\r
- int getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,\r
- unsigned &FrameReg) const override;\r
-\r
- void eliminateCallFramePseudoInstr(MachineFunction &MF,\r
- MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator MI) const override;\r
-\r
-private:\r
- /// convertArgMovsToPushes - This method tries to convert a call sequence\r
- /// that uses sub and mov instructions to put arguments onto the stack\r
- /// into a series of pushes.\r
- /// Returns true if the transformation succeeded, false if not.\r
- bool convertArgMovsToPushes(MachineFunction &MF, \r
- MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator I, \r
- uint64_t Amount) const;\r
-};\r
-\r
-} // End llvm namespace\r
-\r
-#endif\r
+//===-- X86TargetFrameLowering.h - Define frame lowering for X86 -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements the X86-specific bits of the TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_X86_X86FRAMELOWERING_H
+#define LLVM_LIB_TARGET_X86_X86FRAMELOWERING_H
+
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+
+class MCSymbol;
+class X86TargetMachine;
+class X86Subtarget;
+
+class X86FrameLowering : public TargetFrameLowering {
+public:
+ explicit X86FrameLowering(StackDirection D, unsigned StackAl, int LAO)
+ : TargetFrameLowering(StackGrowsDown, StackAl, LAO) {}
+
+ /// Emit a call to the target's stack probe function. This is required for all
+ /// large stack allocations on Windows. The caller is required to materialize
+ /// the number of bytes to probe in RAX/EAX.
+ static void emitStackProbeCall(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL);
+
+ void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) const;
+
+ /// emitPrologue/emitEpilogue - These methods insert prologue and epilogue
+ /// code into the function.
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+ void adjustForSegmentedStacks(MachineFunction &MF) const override;
+
+ void adjustForHiPEPrologue(MachineFunction &MF) const override;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS = nullptr) const override;
+
+ bool
+ assignCalleeSavedSpillSlots(MachineFunction &MF,
+ const TargetRegisterInfo *TRI,
+ std::vector<CalleeSavedInfo> &CSI) const override;
+
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
+
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
+
+ bool hasFP(const MachineFunction &MF) const override;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
+ int getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const override;
+
+ int getFrameIndexOffsetFromSP(const MachineFunction &MF, int FI) const;
+ int getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const override;
+
+ void eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
+
+private:
+ /// convertArgMovsToPushes - This method tries to convert a call sequence
+ /// that uses sub and mov instructions to put arguments onto the stack
+ /// into a series of pushes.
+ /// Returns true if the transformation succeeded, false if not.
+ bool convertArgMovsToPushes(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ uint64_t Amount) const;
+};
+
+} // End llvm namespace
+
+#endif
-//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//\r
-//\r
-// The LLVM Compiler Infrastructure\r
-//\r
-// This file is distributed under the University of Illinois Open Source\r
-// License. See LICENSE.TXT for details.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-//\r
-// This file describes the various pseudo instructions used by the compiler,\r
-// as well as Pat patterns used during instruction selection.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Pattern Matching Support\r
-\r
-def GetLo32XForm : SDNodeXForm<imm, [{\r
- // Transformation function: get the low 32 bits.\r
- return getI32Imm((unsigned)N->getZExtValue());\r
-}]>;\r
-\r
-def GetLo8XForm : SDNodeXForm<imm, [{\r
- // Transformation function: get the low 8 bits.\r
- return getI8Imm((uint8_t)N->getZExtValue());\r
-}]>;\r
-\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Random Pseudo Instructions.\r
-\r
-// PIC base construction. This expands to code that looks like this:\r
-// call $next_inst\r
-//     popl %destreg\r
-let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP] in\r
- def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),\r
- "", []>;\r
-\r
-\r
-// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into\r
-// a stack adjustment and the codegen must know that they may modify the stack\r
-// pointer before prolog-epilog rewriting occurs.\r
-// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become\r
-// sub / add which can clobber EFLAGS.\r
-let Defs = [ESP, EFLAGS], Uses = [ESP] in {\r
-def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),\r
- "#ADJCALLSTACKDOWN",\r
- []>,\r
- Requires<[NotLP64]>;\r
-def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),\r
- "#ADJCALLSTACKUP",\r
- [(X86callseq_end timm:$amt1, timm:$amt2)]>,\r
- Requires<[NotLP64]>;\r
-}\r
-def : Pat<(X86callseq_start timm:$amt1),\r
- (ADJCALLSTACKDOWN32 i32imm:$amt1, 0)>, Requires<[NotLP64]>;\r
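-\r
-// For illustration: with a 12-byte argument area and no reserved call frame,\r
-// these pseudos are typically rewritten into plain stack-pointer arithmetic\r
-// (a sketch; the exact sequence depends on the frame layout):\r
-//     subl $12, %esp        // from ADJCALLSTACKDOWN32 12, 0\r
-//     ...store arguments and make the call...\r
-//     addl $12, %esp        // from ADJCALLSTACKUP32 12, 0\r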
-\r
-\r
-// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into\r
-// a stack adjustment and the codegen must know that they may modify the stack\r
-// pointer before prolog-epilog rewriting occurs.\r
-// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become\r
-// sub / add which can clobber EFLAGS.\r
-let Defs = [RSP, EFLAGS], Uses = [RSP] in {\r
-def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),\r
- "#ADJCALLSTACKDOWN",\r
- []>,\r
- Requires<[IsLP64]>;\r
-def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),\r
- "#ADJCALLSTACKUP",\r
- [(X86callseq_end timm:$amt1, timm:$amt2)]>,\r
- Requires<[IsLP64]>;\r
-}\r
-def : Pat<(X86callseq_start timm:$amt1),\r
- (ADJCALLSTACKDOWN64 i32imm:$amt1, 0)>, Requires<[IsLP64]>;\r
-\r
-\r
-// x86-64 va_start lowering magic.\r
-let usesCustomInserter = 1, Defs = [EFLAGS] in {\r
-def VASTART_SAVE_XMM_REGS : I<0, Pseudo,\r
- (outs),\r
- (ins GR8:$al,\r
- i64imm:$regsavefi, i64imm:$offset,\r
- variable_ops),\r
- "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",\r
- [(X86vastart_save_xmm_regs GR8:$al,\r
- imm:$regsavefi,\r
- imm:$offset),\r
- (implicit EFLAGS)]>;\r
-\r
-// The VAARG_64 pseudo-instruction takes the address of the va_list,\r
-// and places the address of the next argument into a register.\r
-let Defs = [EFLAGS] in\r
-def VAARG_64 : I<0, Pseudo,\r
- (outs GR64:$dst),\r
- (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),\r
- "#VAARG_64 $dst, $ap, $size, $mode, $align",\r
- [(set GR64:$dst,\r
- (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),\r
- (implicit EFLAGS)]>;\r
-\r
-// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows\r
-// targets. These calls are needed to probe the stack when allocating more than\r
-// 4K bytes in one go. Touching the stack at 4K increments is necessary to\r
-// ensure that the guard pages used by the OS virtual memory manager are\r
-// allocated in the correct sequence.\r
-// The main point of having a separate instruction is the extra unmodelled\r
-// effects (compared to ordinary calls), such as the stack pointer change.\r
-\r
-let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in\r
- def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),\r
- "# dynamic stack allocation",\r
- [(X86WinAlloca)]>;\r
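-\r
-// For illustration, allocating EAX bytes dynamically becomes, roughly:\r
-//     movl $bytes, %eax\r
-//     calll _chkstk         // probe name and SP handling vary by environment\r
-// where the probe touches each 4K page so guard pages are faulted in order.\r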
-\r
-// When using segmented stacks these are lowered into instructions which first\r
-// check if the current stacklet has enough free memory. If it does, memory is\r
-// allocated by bumping the stack pointer. Otherwise memory is allocated from\r
-// the heap.\r
-\r
-let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in\r
-def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),\r
- "# variable sized alloca for segmented stacks",\r
- [(set GR32:$dst,\r
- (X86SegAlloca GR32:$size))]>,\r
- Requires<[NotLP64]>;\r
-\r
-let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in\r
-def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),\r
- "# variable sized alloca for segmented stacks",\r
- [(set GR64:$dst,\r
- (X86SegAlloca GR64:$size))]>,\r
- Requires<[In64BitMode]>;\r
-}\r
-\r
-// The MSVC runtime contains an _ftol2 routine for converting floating-point\r
-// to integer values. It has a strange calling convention: the input is\r
-// popped from the x87 stack, and the return value is given in EDX:EAX. ECX is\r
-// used as a temporary register. No other registers (aside from flags) are\r
-// touched.\r
-// Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80\r
-// variant is unnecessary.\r
-\r
-let Defs = [EAX, EDX, ECX, EFLAGS], FPForm = SpecialFP in {\r
- def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),\r
- "# win32 fptoui",\r
- [(X86WinFTOL RFP32:$src)]>,\r
- Requires<[Not64BitMode]>;\r
-\r
- def WIN_FTOL_64 : I<0, Pseudo, (outs), (ins RFP64:$src),\r
- "# win32 fptoui",\r
- [(X86WinFTOL RFP64:$src)]>,\r
- Requires<[Not64BitMode]>;\r
-}\r
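-\r
-// For illustration, a conversion through _ftol2 looks roughly like:\r
-//     flds x                // push the operand; the callee pops it\r
-//     calll __ftol2         // result in EDX:EAX; ECX used as a temporary\r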
-\r
-//===----------------------------------------------------------------------===//\r
-// EH Pseudo Instructions\r
-//\r
-let SchedRW = [WriteSystem] in {\r
-let isTerminator = 1, isReturn = 1, isBarrier = 1,\r
- hasCtrlDep = 1, isCodeGenOnly = 1 in {\r
-def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),\r
- "ret\t#eh_return, addr: $addr",\r
- [(X86ehret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;\r
-\r
-}\r
-\r
-let isTerminator = 1, isReturn = 1, isBarrier = 1,\r
- hasCtrlDep = 1, isCodeGenOnly = 1 in {\r
-def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),\r
- "ret\t#eh_return, addr: $addr",\r
- [(X86ehret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;\r
-\r
-}\r
-\r
-let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,\r
- usesCustomInserter = 1 in {\r
- def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),\r
- "#EH_SJLJ_SETJMP32",\r
- [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,\r
- Requires<[Not64BitMode]>;\r
- def EH_SjLj_SetJmp64 : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),\r
- "#EH_SJLJ_SETJMP64",\r
- [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,\r
- Requires<[In64BitMode]>;\r
- let isTerminator = 1 in {\r
- def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),\r
- "#EH_SJLJ_LONGJMP32",\r
- [(X86eh_sjlj_longjmp addr:$buf)]>,\r
- Requires<[Not64BitMode]>;\r
- def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),\r
- "#EH_SJLJ_LONGJMP64",\r
- [(X86eh_sjlj_longjmp addr:$buf)]>,\r
- Requires<[In64BitMode]>;\r
- }\r
-}\r
-} // SchedRW\r
-\r
-let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {\r
- def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),\r
- "#EH_SjLj_Setup\t$dst", []>;\r
-}\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Pseudo instructions used by unwind info.\r
-//\r
-let isPseudo = 1 in {\r
- def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),\r
- "#SEH_PushReg $reg", []>;\r
- def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),\r
- "#SEH_SaveReg $reg, $dst", []>;\r
- def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),\r
- "#SEH_SaveXMM $reg, $dst", []>;\r
- def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),\r
- "#SEH_StackAlloc $size", []>;\r
- def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),\r
- "#SEH_SetFrame $reg, $offset", []>;\r
- def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),\r
- "#SEH_PushFrame $mode", []>;\r
- def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),\r
- "#SEH_EndPrologue", []>;\r
- def SEH_Epilogue : I<0, Pseudo, (outs), (ins),\r
- "#SEH_Epilogue", []>;\r
-}\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Pseudo instructions used by segmented stacks.\r
-//\r
-\r
-// This is lowered into a RET instruction by MCInstLower. We need\r
-// this so that we don't have to have a MachineBasicBlock which ends\r
-// with a RET and also has successors.\r
-let isPseudo = 1 in {\r
-def MORESTACK_RET: I<0, Pseudo, (outs), (ins),\r
- "", []>;\r
-\r
-// This instruction is lowered to a RET followed by a MOV. The two\r
-// instructions are not generated on a higher level since then the\r
-// verifier sees a MachineBasicBlock ending with a non-terminator.\r
-def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),\r
- "", []>;\r
-}\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Alias Instructions\r
-//===----------------------------------------------------------------------===//\r
-\r
-// Alias instruction mapping movr0 to xor.\r
-// FIXME: remove when we can teach regalloc that xor reg, reg is ok.\r
-let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,\r
- isPseudo = 1 in\r
-def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",\r
- [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;\r
-\r
-// Other widths can also make use of the 32-bit xor, which may have a smaller\r
-// encoding and avoid partial register updates.\r
-def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;\r
-def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;\r
-def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {\r
- let AddedComplexity = 20;\r
-}\r
-\r
-// Materialize an i64 constant where the top 32 bits are zero. This could\r
-// theoretically use MOV32ri with a SUBREG_TO_REG to represent the\r
-// zero-extension; however, that would make it more difficult to rematerialize.\r
-let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,\r
- isCodeGenOnly = 1, hasSideEffects = 0 in\r
-def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src),\r
- "", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>;\r
-\r
-// This 64-bit pseudo-move can be used both for a 64-bit constant that is\r
-// actually the zero-extension of a 32-bit constant and for labels in the\r
-// x86-64 small code model.\r
-def mov64imm32 : ComplexPattern<i64, 1, "SelectMOV64Imm32", [imm, X86Wrapper]>;\r
-\r
-let AddedComplexity = 1 in\r
-def : Pat<(i64 mov64imm32:$src),\r
- (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>;\r
-\r
-// Use sbb to materialize carry bit.\r
-let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in {\r
-// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.\r
-// However, Pat<> can't replicate the destination reg into the inputs of the\r
-// result.\r
-def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "",\r
- [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;\r
-def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",\r
- [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;\r
-def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",\r
- [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;\r
-def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",\r
- [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;\r
-} // Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU]\r
-\r
-\r
-def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),\r
- (SETB_C16r)>;\r
-def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),\r
- (SETB_C32r)>;\r
-def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),\r
- (SETB_C64r)>;\r
-\r
-def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),\r
- (SETB_C16r)>;\r
-def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),\r
- (SETB_C32r)>;\r
-def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),\r
- (SETB_C64r)>;\r
-\r
-// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and\r
-// will be eliminated and that the sbb can be extended up to a wider type. When\r
-// this happens, it is great. However, if we are left with an 8-bit sbb and an\r
-// and, we might as well just match it as a setb.\r
-def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),\r
- (SETBr)>;\r
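-\r
-// For illustration, the canonical widened form versus the 8-bit fallback:\r
-//     sbbl %eax, %eax       // EAX = CF ? -1 : 0\r
-//     andl $1, %eax         // EAX = CF ? 1 : 0\r
-// versus the single instruction matched above:\r
-//     setb %al\r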
-\r
-// (add OP, SETB) -> (adc OP, 0)\r
-def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),\r
- (ADC8ri GR8:$op, 0)>;\r
-def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),\r
- (ADC32ri8 GR32:$op, 0)>;\r
-def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),\r
- (ADC64ri8 GR64:$op, 0)>;\r
-\r
-// (sub OP, SETB) -> (sbb OP, 0)\r
-def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),\r
- (SBB8ri GR8:$op, 0)>;\r
-def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),\r
- (SBB32ri8 GR32:$op, 0)>;\r
-def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),\r
- (SBB64ri8 GR64:$op, 0)>;\r
-\r
-// (sub OP, SETCC_CARRY) -> (adc OP, 0)\r
-def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),\r
- (ADC8ri GR8:$op, 0)>;\r
-def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),\r
- (ADC32ri8 GR32:$op, 0)>;\r
-def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),\r
- (ADC64ri8 GR64:$op, 0)>;\r
-\r
-//===----------------------------------------------------------------------===//\r
-// String Pseudo Instructions\r
-//\r
-let SchedRW = [WriteMicrocoded] in {\r
-let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {\r
-def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",\r
- [(X86rep_movs i8)], IIC_REP_MOVS>, REP,\r
- Requires<[Not64BitMode]>;\r
-def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",\r
- [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,\r
- Requires<[Not64BitMode]>;\r
-def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",\r
- [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,\r
- Requires<[Not64BitMode]>;\r
-}\r
-\r
-let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {\r
-def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",\r
- [(X86rep_movs i8)], IIC_REP_MOVS>, REP,\r
- Requires<[In64BitMode]>;\r
-def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",\r
- [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,\r
- Requires<[In64BitMode]>;\r
-def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",\r
- [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,\r
- Requires<[In64BitMode]>;\r
-def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",\r
- [(X86rep_movs i64)], IIC_REP_MOVS>, REP,\r
- Requires<[In64BitMode]>;\r
-}\r
-\r
-// FIXME: Should use "(X86rep_stos AL)" as the pattern.\r
-let Defs = [ECX,EDI], isCodeGenOnly = 1 in {\r
- let Uses = [AL,ECX,EDI] in\r
- def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",\r
- [(X86rep_stos i8)], IIC_REP_STOS>, REP,\r
- Requires<[Not64BitMode]>;\r
- let Uses = [AX,ECX,EDI] in\r
- def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",\r
- [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,\r
- Requires<[Not64BitMode]>;\r
- let Uses = [EAX,ECX,EDI] in\r
- def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",\r
- [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,\r
- Requires<[Not64BitMode]>;\r
-}\r
-\r
-let Defs = [RCX,RDI], isCodeGenOnly = 1 in {\r
- let Uses = [AL,RCX,RDI] in\r
- def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",\r
- [(X86rep_stos i8)], IIC_REP_STOS>, REP,\r
- Requires<[In64BitMode]>;\r
- let Uses = [AX,RCX,RDI] in\r
- def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",\r
- [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,\r
- Requires<[In64BitMode]>;\r
- let Uses = [RAX,RCX,RDI] in\r
- def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",\r
- [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,\r
- Requires<[In64BitMode]>;\r
-\r
- let Uses = [RAX,RCX,RDI] in\r
- def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",\r
- [(X86rep_stos i64)], IIC_REP_STOS>, REP,\r
- Requires<[In64BitMode]>;\r
-}\r
-} // SchedRW\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Thread Local Storage Instructions\r
-//\r
-\r
-// ELF TLS Support\r
-// All calls clobber the non-callee saved registers. ESP is marked as\r
-// a use to prevent stack-pointer assignments that appear immediately\r
-// before calls from potentially appearing dead.\r
-let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,\r
- ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,\r
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,\r
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,\r
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],\r
- Uses = [ESP] in {\r
-def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),\r
- "# TLS_addr32",\r
- [(X86tlsaddr tls32addr:$sym)]>,\r
- Requires<[Not64BitMode]>;\r
-def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),\r
- "# TLS_base_addr32",\r
- [(X86tlsbaseaddr tls32baseaddr:$sym)]>,\r
- Requires<[Not64BitMode]>;\r
-}\r
-\r
-// All calls clobber the non-callee saved registers. RSP is marked as\r
-// a use to prevent stack-pointer assignments that appear immediately\r
-// before calls from potentially appearing dead.\r
-let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,\r
- FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,\r
- ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,\r
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,\r
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,\r
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],\r
- Uses = [RSP] in {\r
-def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),\r
- "# TLS_addr64",\r
- [(X86tlsaddr tls64addr:$sym)]>,\r
- Requires<[In64BitMode]>;\r
-def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),\r
- "# TLS_base_addr64",\r
- [(X86tlsbaseaddr tls64baseaddr:$sym)]>,\r
- Requires<[In64BitMode]>;\r
-}\r
-\r
-// Darwin TLS Support\r
-// For i386, the address of the thunk is passed on the stack; on return, the\r
-// address of the variable is in %eax. %ecx is trashed during the function\r
-// call. All other registers are preserved.\r
-let Defs = [EAX, ECX, EFLAGS],\r
- Uses = [ESP],\r
- usesCustomInserter = 1 in\r
-def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),\r
- "# TLSCall_32",\r
- [(X86TLSCall addr:$sym)]>,\r
- Requires<[Not64BitMode]>;\r
-\r
-// For x86_64, the address of the thunk is passed in %rdi, on return\r
-// the address of the variable is in %rax. All other registers are preserved.\r
-let Defs = [RAX, EFLAGS],\r
- Uses = [RSP, RDI],\r
- usesCustomInserter = 1 in\r
-def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),\r
- "# TLSCall_64",\r
- [(X86TLSCall addr:$sym)]>,\r
- Requires<[In64BitMode]>;\r
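-\r
-// For illustration, on x86-64 this lowers to roughly:\r
-//     movq _var@TLVP(%rip), %rdi   // address of the TLV descriptor\r
-//     callq *(%rdi)                // returns &var in %rax\r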
-\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Conditional Move Pseudo Instructions\r
-\r
-// X86 doesn't have 8-bit conditional moves. Use a customInserter to\r
-// emit control flow. An alternative to this is to mark i8 SELECT as Promote,\r
-// however that requires promoting the operands, and can induce additional\r
-// i8 register pressure.\r
-let usesCustomInserter = 1, Uses = [EFLAGS] in {\r
-def CMOV_GR8 : I<0, Pseudo,\r
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),\r
- "#CMOV_GR8 PSEUDO!",\r
- [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,\r
- imm:$cond, EFLAGS))]>;\r
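-\r
-// Conceptually, the custom inserter expands this pseudo into a branch diamond\r
-// joined by a PHI (a sketch; which edge carries $src1 vs. $src2 depends on\r
-// the condition encoding):\r
-//     jCC  .Lcopy           // branch on the EFLAGS condition\r
-//     movb %src2b, %dstb\r
-//     jmp  .Ldone\r
-//   .Lcopy:\r
-//     movb %src1b, %dstb\r
-//   .Ldone:\r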
-\r
-let Predicates = [NoCMov] in {\r
-def CMOV_GR32 : I<0, Pseudo,\r
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),\r
- "#CMOV_GR32* PSEUDO!",\r
- [(set GR32:$dst,\r
- (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;\r
-def CMOV_GR16 : I<0, Pseudo,\r
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),\r
- "#CMOV_GR16* PSEUDO!",\r
- [(set GR16:$dst,\r
- (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;\r
-} // Predicates = [NoCMov]\r
-\r
-// fcmov doesn't handle all possible EFLAGS; provide a fallback if there is\r
-// no SSE1.\r
-let Predicates = [FPStackf32] in\r
-def CMOV_RFP32 : I<0, Pseudo,\r
- (outs RFP32:$dst),\r
- (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),\r
- "#CMOV_RFP32 PSEUDO!",\r
- [(set RFP32:$dst,\r
- (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,\r
- EFLAGS))]>;\r
-// fcmov doesn't handle all possible EFLAGS; provide a fallback if there is\r
-// no SSE2.\r
-let Predicates = [FPStackf64] in\r
-def CMOV_RFP64 : I<0, Pseudo,\r
- (outs RFP64:$dst),\r
- (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),\r
- "#CMOV_RFP64 PSEUDO!",\r
- [(set RFP64:$dst,\r
- (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,\r
- EFLAGS))]>;\r
-def CMOV_RFP80 : I<0, Pseudo,\r
- (outs RFP80:$dst),\r
- (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),\r
- "#CMOV_RFP80 PSEUDO!",\r
- [(set RFP80:$dst,\r
- (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,\r
- EFLAGS))]>;\r
-} // usesCustomInserter = 1, Uses = [EFLAGS]\r
-\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Normal-Instructions-With-Lock-Prefix Pseudo Instructions\r
-//===----------------------------------------------------------------------===//\r
-\r
-// FIXME: Use normal instructions and add lock prefix dynamically.\r
-\r
-// Memory barriers\r
-\r
-// TODO: Get this to fold the constant into the instruction.\r
-let isCodeGenOnly = 1, Defs = [EFLAGS] in\r
-def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),\r
- "or{l}\t{$zero, $dst|$dst, $zero}",\r
- [], IIC_ALU_MEM>, Requires<[Not64BitMode]>, LOCK,\r
- Sched<[WriteALULd, WriteRMW]>;\r
-\r
-let hasSideEffects = 1 in\r
-def Int_MemBarrier : I<0, Pseudo, (outs), (ins),\r
- "#MEMBARRIER",\r
- [(X86MemBarrier)]>, Sched<[WriteLoad]>;\r
-\r
-// RegOpc corresponds to the mr version of the instruction\r
-// ImmOpc corresponds to the mi version of the instruction\r
-// ImmOpc8 corresponds to the mi8 version of the instruction\r
-// ImmMod corresponds to the instruction format of the mi and mi8 versions\r
-multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,\r
- Format ImmMod, string mnemonic> {\r
-let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,\r
- SchedRW = [WriteALULd, WriteRMW] in {\r
-\r
-def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},\r
- RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },\r
- MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),\r
- !strconcat(mnemonic, "{b}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_NONMEM>, LOCK;\r
-def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},\r
- RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },\r
- MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),\r
- !strconcat(mnemonic, "{w}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_NONMEM>, OpSize16, LOCK;\r
-def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},\r
- RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },\r
- MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),\r
- !strconcat(mnemonic, "{l}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_NONMEM>, OpSize32, LOCK;\r
-def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},\r
- RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },\r
- MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),\r
- !strconcat(mnemonic, "{q}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_NONMEM>, LOCK;\r
-\r
-def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},\r
- ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },\r
- ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),\r
- !strconcat(mnemonic, "{b}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_MEM>, LOCK;\r
-\r
-def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},\r
- ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },\r
- ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),\r
- !strconcat(mnemonic, "{w}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_MEM>, OpSize16, LOCK;\r
-\r
-def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},\r
- ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },\r
- ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),\r
- !strconcat(mnemonic, "{l}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_MEM>, OpSize32, LOCK;\r
-\r
-def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},\r
- ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },\r
- ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),\r
- !strconcat(mnemonic, "{q}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_MEM>, LOCK;\r
-\r
-def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},\r
- ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },\r
- ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),\r
- !strconcat(mnemonic, "{w}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_MEM>, OpSize16, LOCK;\r
-def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},\r
- ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },\r
- ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),\r
- !strconcat(mnemonic, "{l}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_MEM>, OpSize32, LOCK;\r
-def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},\r
- ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },\r
- ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),\r
- !strconcat(mnemonic, "{q}\t",\r
- "{$src2, $dst|$dst, $src2}"),\r
- [], IIC_ALU_MEM>, LOCK;\r
-\r
-}\r
-\r
-}\r
-\r
-defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;\r
-defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;\r
-defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;\r
-defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;\r
-defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;\r
-\r
-// Optimized codegen when the non-memory output is not used.\r
-multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,\r
- string mnemonic> {\r
-let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,\r
- SchedRW = [WriteALULd, WriteRMW] in {\r
-\r
-def NAME#8m : I<Opc8, Form, (outs), (ins i8mem :$dst),\r
- !strconcat(mnemonic, "{b}\t$dst"),\r
- [], IIC_UNARY_MEM>, LOCK;\r
-def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),\r
- !strconcat(mnemonic, "{w}\t$dst"),\r
- [], IIC_UNARY_MEM>, OpSize16, LOCK;\r
-def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),\r
- !strconcat(mnemonic, "{l}\t$dst"),\r
- [], IIC_UNARY_MEM>, OpSize32, LOCK;\r
-def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),\r
- !strconcat(mnemonic, "{q}\t$dst"),\r
- [], IIC_UNARY_MEM>, LOCK;\r
-}\r
-}\r
-\r
-defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;\r
-defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;\r
-\r
-// Atomic compare and swap.\r
-multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,\r
- SDPatternOperator frag, X86MemOperand x86memop,\r
- InstrItinClass itin> {\r
-let isCodeGenOnly = 1 in {\r
- def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),\r
- !strconcat(mnemonic, "\t$ptr"),\r
- [(frag addr:$ptr)], itin>, TB, LOCK;\r
-}\r
-}\r
-\r
-multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,\r
- string mnemonic, SDPatternOperator frag,\r
- InstrItinClass itin8, InstrItinClass itin> {\r
-let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in {\r
- let Defs = [AL, EFLAGS], Uses = [AL] in\r
- def NAME#8 : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),\r
- !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),\r
- [(frag addr:$ptr, GR8:$swap, 1)], itin8>, TB, LOCK;\r
- let Defs = [AX, EFLAGS], Uses = [AX] in\r
- def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),\r
- !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),\r
- [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize16, LOCK;\r
- let Defs = [EAX, EFLAGS], Uses = [EAX] in\r
- def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),\r
- !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),\r
- [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, OpSize32, LOCK;\r
- let Defs = [RAX, EFLAGS], Uses = [RAX] in\r
- def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),\r
- !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),\r
- [(frag addr:$ptr, GR64:$swap, 8)], itin>, TB, LOCK;\r
-}\r
-}\r
-\r
-let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],\r
- SchedRW = [WriteALULd, WriteRMW] in {\r
-defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",\r
- X86cas8, i64mem,\r
- IIC_CMPX_LOCK_8B>;\r
-}\r
-\r
-let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],\r
- Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {\r
-defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",\r
- X86cas16, i128mem,\r
- IIC_CMPX_LOCK_16B>, REX_W;\r
-}\r
-\r
-defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",\r
- X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;\r
-\r
-// Atomic exchange and add\r
-multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,\r
- string frag,\r
- InstrItinClass itin8, InstrItinClass itin> {\r
- let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,\r
- SchedRW = [WriteALULd, WriteRMW] in {\r
- def NAME#8 : I<opc8, MRMSrcMem, (outs GR8:$dst),\r
- (ins GR8:$val, i8mem:$ptr),\r
- !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),\r
- [(set GR8:$dst,\r
- (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],\r
- itin8>;\r
- def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),\r
- (ins GR16:$val, i16mem:$ptr),\r
- !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),\r
- [(set\r
- GR16:$dst,\r
- (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],\r
- itin>, OpSize16;\r
- def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),\r
- (ins GR32:$val, i32mem:$ptr),\r
- !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),\r
- [(set\r
- GR32:$dst,\r
- (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],\r
- itin>, OpSize32;\r
- def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),\r
- (ins GR64:$val, i64mem:$ptr),\r
- !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),\r
- [(set\r
- GR64:$dst,\r
- (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],\r
- itin>;\r
- }\r
-}\r
-\r
-defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",\r
- IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>,\r
- TB, LOCK;\r
-\r
-/* The following multiclass tries to make sure that in code like\r
- *    x.store (immediate op x.load(acquire), release)\r
- * an operation directly on memory is generated instead of wasting a register.\r
- * It is not automatic, as atomic_store/load are only lowered to MOV\r
- * instructions extremely late, to prevent them from being accidentally\r
- * reordered in the backend (see the RELEASE_MOV* / ACQUIRE_MOV*\r
- * pseudo-instructions below).\r
- */\r
-multiclass RELEASE_BINOP_MI<string op> {\r
- def NAME#8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),\r
- "#RELEASE_BINOP PSEUDO!",\r
- [(atomic_store_8 addr:$dst, (!cast<PatFrag>(op)\r
- (atomic_load_8 addr:$dst), (i8 imm:$src)))]>;\r
- // NAME#16 is not generated as 16-bit arithmetic instructions are considered\r
- // costly and avoided as far as possible by this backend anyway\r
- def NAME#32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),\r
- "#RELEASE_BINOP PSEUDO!",\r
- [(atomic_store_32 addr:$dst, (!cast<PatFrag>(op)\r
- (atomic_load_32 addr:$dst), (i32 imm:$src)))]>;\r
- def NAME#64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),\r
- "#RELEASE_BINOP PSEUDO!",\r
- [(atomic_store_64 addr:$dst, (!cast<PatFrag>(op)\r
- (atomic_load_64 addr:$dst), (i64immSExt32:$src)))]>;\r
-}\r
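-\r
-// For illustration of the intent: for x.store(x.load(acquire) + 5, release)\r
-// on an i32, the multiclass above aims to produce a single memory-destination\r
-// instruction, roughly:\r
-//     addl $5, (%rdi)\r
-// instead of a separate load, register add, and store.\r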
-defm RELEASE_ADD : RELEASE_BINOP_MI<"add">;\r
-defm RELEASE_AND : RELEASE_BINOP_MI<"and">;\r
-defm RELEASE_OR : RELEASE_BINOP_MI<"or">;\r
-defm RELEASE_XOR : RELEASE_BINOP_MI<"xor">;\r
-// Note: we don't deal with sub, because subtractions of constants are\r
-// optimized into additions before this code can run.\r
-\r
-multiclass RELEASE_UNOP<dag dag8, dag dag16, dag dag32, dag dag64> {\r
- def NAME#8m : I<0, Pseudo, (outs), (ins i8mem:$dst),\r
- "#RELEASE_UNOP PSEUDO!",\r
- [(atomic_store_8 addr:$dst, dag8)]>;\r
- def NAME#16m : I<0, Pseudo, (outs), (ins i16mem:$dst),\r
- "#RELEASE_UNOP PSEUDO!",\r
- [(atomic_store_16 addr:$dst, dag16)]>;\r
- def NAME#32m : I<0, Pseudo, (outs), (ins i32mem:$dst),\r
- "#RELEASE_UNOP PSEUDO!",\r
- [(atomic_store_32 addr:$dst, dag32)]>;\r
- def NAME#64m : I<0, Pseudo, (outs), (ins i64mem:$dst),\r
- "#RELEASE_UNOP PSEUDO!",\r
- [(atomic_store_64 addr:$dst, dag64)]>;\r
-}\r
-\r
-defm RELEASE_INC : RELEASE_UNOP<\r
- (add (atomic_load_8 addr:$dst), (i8 1)),\r
- (add (atomic_load_16 addr:$dst), (i16 1)),\r
- (add (atomic_load_32 addr:$dst), (i32 1)),\r
- (add (atomic_load_64 addr:$dst), (i64 1))>, Requires<[NotSlowIncDec]>;\r
-defm RELEASE_DEC : RELEASE_UNOP<\r
- (add (atomic_load_8 addr:$dst), (i8 -1)),\r
- (add (atomic_load_16 addr:$dst), (i16 -1)),\r
- (add (atomic_load_32 addr:$dst), (i32 -1)),\r
- (add (atomic_load_64 addr:$dst), (i64 -1))>, Requires<[NotSlowIncDec]>;\r
-/*\r
-TODO: These don't work because the type inference of TableGen fails.\r
-TODO: find a way to fix it.\r
-defm RELEASE_NEG : RELEASE_UNOP<\r
- (ineg (atomic_load_8 addr:$dst)),\r
- (ineg (atomic_load_16 addr:$dst)),\r
- (ineg (atomic_load_32 addr:$dst)),\r
- (ineg (atomic_load_64 addr:$dst))>;\r
-defm RELEASE_NOT : RELEASE_UNOP<\r
- (not (atomic_load_8 addr:$dst)),\r
- (not (atomic_load_16 addr:$dst)),\r
- (not (atomic_load_32 addr:$dst)),\r
- (not (atomic_load_64 addr:$dst))>;\r
-*/\r
-\r
-def RELEASE_MOV8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),\r
-                       "#RELEASE_MOV PSEUDO!",\r
-                       [(atomic_store_8 addr:$dst, (i8 imm:$src))]>;\r
-def RELEASE_MOV16mi : I<0, Pseudo, (outs), (ins i16mem:$dst, i16imm:$src),\r
-                       "#RELEASE_MOV PSEUDO!",\r
-                       [(atomic_store_16 addr:$dst, (i16 imm:$src))]>;\r
-def RELEASE_MOV32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),\r
-                       "#RELEASE_MOV PSEUDO!",\r
-                       [(atomic_store_32 addr:$dst, (i32 imm:$src))]>;\r
-def RELEASE_MOV64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),\r
-                       "#RELEASE_MOV PSEUDO!",\r
-                       [(atomic_store_64 addr:$dst, i64immSExt32:$src)]>;\r
-\r
-def RELEASE_MOV8mr : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),\r
- "#RELEASE_MOV PSEUDO!",\r
- [(atomic_store_8 addr:$dst, GR8 :$src)]>;\r
-def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),\r
- "#RELEASE_MOV PSEUDO!",\r
- [(atomic_store_16 addr:$dst, GR16:$src)]>;\r
-def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),\r
- "#RELEASE_MOV PSEUDO!",\r
- [(atomic_store_32 addr:$dst, GR32:$src)]>;\r
-def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),\r
- "#RELEASE_MOV PSEUDO!",\r
- [(atomic_store_64 addr:$dst, GR64:$src)]>;\r
-\r
-def ACQUIRE_MOV8rm : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),\r
- "#ACQUIRE_MOV PSEUDO!",\r
- [(set GR8:$dst, (atomic_load_8 addr:$src))]>;\r
-def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),\r
- "#ACQUIRE_MOV PSEUDO!",\r
- [(set GR16:$dst, (atomic_load_16 addr:$src))]>;\r
-def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),\r
- "#ACQUIRE_MOV PSEUDO!",\r
- [(set GR32:$dst, (atomic_load_32 addr:$src))]>;\r
-def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),\r
- "#ACQUIRE_MOV PSEUDO!",\r
- [(set GR64:$dst, (atomic_load_64 addr:$src))]>;\r
-//===----------------------------------------------------------------------===//\r
-// Conditional Move Pseudo Instructions.\r
-//===----------------------------------------------------------------------===//\r
-\r
-// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after\r
-// instruction selection into a branch sequence.\r
-let Uses = [EFLAGS], usesCustomInserter = 1 in {\r
- def CMOV_FR32 : I<0, Pseudo,\r
- (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),\r
- "#CMOV_FR32 PSEUDO!",\r
- [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,\r
- EFLAGS))]>;\r
- def CMOV_FR64 : I<0, Pseudo,\r
- (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),\r
- "#CMOV_FR64 PSEUDO!",\r
- [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,\r
- EFLAGS))]>;\r
- def CMOV_V4F32 : I<0, Pseudo,\r
- (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),\r
- "#CMOV_V4F32 PSEUDO!",\r
- [(set VR128:$dst,\r
- (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,\r
- EFLAGS)))]>;\r
- def CMOV_V2F64 : I<0, Pseudo,\r
- (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),\r
- "#CMOV_V2F64 PSEUDO!",\r
- [(set VR128:$dst,\r
- (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,\r
- EFLAGS)))]>;\r
- def CMOV_V2I64 : I<0, Pseudo,\r
- (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),\r
- "#CMOV_V2I64 PSEUDO!",\r
- [(set VR128:$dst,\r
- (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,\r
- EFLAGS)))]>;\r
- def CMOV_V8F32 : I<0, Pseudo,\r
- (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),\r
- "#CMOV_V8F32 PSEUDO!",\r
- [(set VR256:$dst,\r
- (v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond,\r
- EFLAGS)))]>;\r
- def CMOV_V4F64 : I<0, Pseudo,\r
- (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),\r
- "#CMOV_V4F64 PSEUDO!",\r
- [(set VR256:$dst,\r
- (v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond,\r
- EFLAGS)))]>;\r
- def CMOV_V4I64 : I<0, Pseudo,\r
- (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),\r
- "#CMOV_V4I64 PSEUDO!",\r
- [(set VR256:$dst,\r
- (v4i64 (X86cmov VR256:$t, VR256:$f, imm:$cond,\r
- EFLAGS)))]>;\r
- def CMOV_V8I64 : I<0, Pseudo,\r
- (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),\r
- "#CMOV_V8I64 PSEUDO!",\r
- [(set VR512:$dst,\r
- (v8i64 (X86cmov VR512:$t, VR512:$f, imm:$cond,\r
- EFLAGS)))]>;\r
- def CMOV_V8F64 : I<0, Pseudo,\r
- (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),\r
- "#CMOV_V8F64 PSEUDO!",\r
- [(set VR512:$dst,\r
- (v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond,\r
- EFLAGS)))]>;\r
- def CMOV_V16F32 : I<0, Pseudo,\r
- (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),\r
- "#CMOV_V16F32 PSEUDO!",\r
- [(set VR512:$dst,\r
- (v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond,\r
- EFLAGS)))]>;\r
-}\r
-\r
-\r
-//===----------------------------------------------------------------------===//\r
-// DAG Pattern Matching Rules\r
-//===----------------------------------------------------------------------===//\r
-\r
-// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable\r
-def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;\r
-def : Pat<(i32 (X86Wrapper tjumptable :$dst)), (MOV32ri tjumptable :$dst)>;\r
-def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;\r
-def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;\r
-def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;\r
-def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;\r
-\r
-def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),\r
- (ADD32ri GR32:$src1, tconstpool:$src2)>;\r
-def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),\r
- (ADD32ri GR32:$src1, tjumptable:$src2)>;\r
-def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),\r
- (ADD32ri GR32:$src1, tglobaladdr:$src2)>;\r
-def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),\r
- (ADD32ri GR32:$src1, texternalsym:$src2)>;\r
-def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),\r
- (ADD32ri GR32:$src1, tblockaddress:$src2)>;\r
-\r
-def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),\r
- (MOV32mi addr:$dst, tglobaladdr:$src)>;\r
-def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),\r
- (MOV32mi addr:$dst, texternalsym:$src)>;\r
-def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),\r
- (MOV32mi addr:$dst, tblockaddress:$src)>;\r
-\r
-// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable, when not in the\r
-// small code model, should use 'movabs'. FIXME: This is really a hack; the\r
-// 'movabs' predicate should handle this sort of thing.\r
-def : Pat<(i64 (X86Wrapper tconstpool :$dst)),\r
- (MOV64ri tconstpool :$dst)>, Requires<[FarData]>;\r
-def : Pat<(i64 (X86Wrapper tjumptable :$dst)),\r
- (MOV64ri tjumptable :$dst)>, Requires<[FarData]>;\r
-def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),\r
- (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;\r
-def : Pat<(i64 (X86Wrapper texternalsym:$dst)),\r
- (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;\r
-def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),\r
- (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;\r
-\r
-// In kernel code model, we can get the address of a label\r
-// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of\r
-// the MOV64ri32 should accept these.\r
-def : Pat<(i64 (X86Wrapper tconstpool :$dst)),\r
- (MOV64ri32 tconstpool :$dst)>, Requires<[KernelCode]>;\r
-def : Pat<(i64 (X86Wrapper tjumptable :$dst)),\r
- (MOV64ri32 tjumptable :$dst)>, Requires<[KernelCode]>;\r
-def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),\r
- (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;\r
-def : Pat<(i64 (X86Wrapper texternalsym:$dst)),\r
- (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;\r
-def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),\r
- (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;\r
-\r
-// If we are in the small code model and -static mode, it is safe to store\r
-// global addresses directly as immediates. FIXME: This is really a hack; the\r
-// 'imm' predicate for MOV64mi32 should handle this sort of thing.\r
-def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),\r
- (MOV64mi32 addr:$dst, tconstpool:$src)>,\r
- Requires<[NearData, IsStatic]>;\r
-def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),\r
- (MOV64mi32 addr:$dst, tjumptable:$src)>,\r
- Requires<[NearData, IsStatic]>;\r
-def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),\r
- (MOV64mi32 addr:$dst, tglobaladdr:$src)>,\r
- Requires<[NearData, IsStatic]>;\r
-def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),\r
- (MOV64mi32 addr:$dst, texternalsym:$src)>,\r
- Requires<[NearData, IsStatic]>;\r
-def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),\r
- (MOV64mi32 addr:$dst, tblockaddress:$src)>,\r
- Requires<[NearData, IsStatic]>;\r
-\r
-def : Pat<(i32 (X86RecoverFrameAlloc texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;\r
-def : Pat<(i64 (X86RecoverFrameAlloc texternalsym:$dst)), (MOV64ri texternalsym:$dst)>;\r
-\r
-// Calls\r
-\r
-// tls has some funny stuff here...\r
-// This corresponds to movabs $foo@tpoff, %rax\r
-def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),\r
- (MOV64ri32 tglobaltlsaddr :$dst)>;\r
-// This corresponds to add $foo@tpoff, %rax\r
-def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),\r
- (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;\r
-\r
-\r
-// Direct PC relative function call for small code model. 32-bit displacement\r
-// sign extended to 64-bit.\r
-def : Pat<(X86call (i64 tglobaladdr:$dst)),\r
- (CALL64pcrel32 tglobaladdr:$dst)>;\r
-def : Pat<(X86call (i64 texternalsym:$dst)),\r
- (CALL64pcrel32 texternalsym:$dst)>;\r
-\r
-// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they\r
-// can never use callee-saved registers. That is the purpose of the GR64_TC\r
-// register classes.\r
-//\r
-// The only volatile register that is never used by the calling convention is\r
-// %r11. This happens when calling a vararg function with 6 arguments.\r
-//\r
-// Match an X86tcret that uses less than 7 volatile registers.\r
-def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),\r
- (X86tcret node:$ptr, node:$off), [{\r
- // X86tcret args: (*chain, ptr, imm, regs..., glue)\r
- unsigned NumRegs = 0;\r
- for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)\r
- if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)\r
- return false;\r
- return true;\r
-}]>;\r
-\r
-def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),\r
- (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,\r
- Requires<[Not64BitMode]>;\r
-\r
-// FIXME: This is disabled for 32-bit PIC mode because the global base\r
-// register which is part of the address mode may be assigned a\r
-// callee-saved register.\r
-def : Pat<(X86tcret (load addr:$dst), imm:$off),\r
- (TCRETURNmi addr:$dst, imm:$off)>,\r
- Requires<[Not64BitMode, IsNotPIC]>;\r
-\r
-def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),\r
- (TCRETURNdi tglobaladdr:$dst, imm:$off)>,\r
- Requires<[NotLP64]>;\r
-\r
-def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),\r
- (TCRETURNdi texternalsym:$dst, imm:$off)>,\r
- Requires<[NotLP64]>;\r
-\r
-def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),\r
- (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,\r
- Requires<[In64BitMode]>;\r
-\r
-// Don't fold loads into X86tcret requiring more than 6 regs.\r
-// There wouldn't be enough scratch registers for base+index.\r
-def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),\r
- (TCRETURNmi64 addr:$dst, imm:$off)>,\r
- Requires<[In64BitMode]>;\r
-\r
-def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),\r
- (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,\r
- Requires<[IsLP64]>;\r
-\r
-def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),\r
- (TCRETURNdi64 texternalsym:$dst, imm:$off)>,\r
- Requires<[IsLP64]>;\r
-\r
-// Normal calls, with various flavors of addresses.\r
-def : Pat<(X86call (i32 tglobaladdr:$dst)),\r
- (CALLpcrel32 tglobaladdr:$dst)>;\r
-def : Pat<(X86call (i32 texternalsym:$dst)),\r
- (CALLpcrel32 texternalsym:$dst)>;\r
-def : Pat<(X86call (i32 imm:$dst)),\r
- (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;\r
-\r
-// Comparisons.\r
-\r
-// TEST R,R is smaller than CMP R,0\r
-def : Pat<(X86cmp GR8:$src1, 0),\r
- (TEST8rr GR8:$src1, GR8:$src1)>;\r
-def : Pat<(X86cmp GR16:$src1, 0),\r
- (TEST16rr GR16:$src1, GR16:$src1)>;\r
-def : Pat<(X86cmp GR32:$src1, 0),\r
- (TEST32rr GR32:$src1, GR32:$src1)>;\r
-def : Pat<(X86cmp GR64:$src1, 0),\r
- (TEST64rr GR64:$src1, GR64:$src1)>;\r
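-\r
-// For illustration: testl %eax, %eax encodes in 2 bytes, while cmpl $0, %eax\r
-// needs at least 3, so these patterns always pick the test form.\r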
-\r
-// Conditional moves with folded loads with operands swapped and conditions\r
-// inverted.\r
-multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,\r
- Instruction Inst64> {\r
- let Predicates = [HasCMov] in {\r
- def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),\r
- (Inst16 GR16:$src2, addr:$src1)>;\r
- def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),\r
- (Inst32 GR32:$src2, addr:$src1)>;\r
- def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),\r
- (Inst64 GR64:$src2, addr:$src1)>;\r
- }\r
-}\r
-\r
-defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;\r
-defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;\r
-defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;\r
-defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;\r
-defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;\r
-defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;\r
-defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;\r
-defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;\r
-defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;\r
-defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;\r
-defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;\r
-defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;\r
-defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;\r
-defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;\r
-defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;\r
-defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;\r
-\r
-// zextload bool -> zextload byte\r
-def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;\r
-def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;\r
-def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;\r
-def : Pat<(zextloadi64i1 addr:$src),\r
- (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;\r
-\r
-// extload bool -> extload byte\r
-// When extloading from 16-bit and smaller memory locations into 64-bit\r
-// registers, use zero-extending loads so that the entire 64-bit register is\r
-// defined, avoiding partial-register updates.\r
-\r
-def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;\r
-def : Pat<(extloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;\r
-def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;\r
-def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>;\r
-def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;\r
-def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;\r
-\r
-// For other extloads, use subregs, since the high contents of the register are\r
-// defined after an extload.\r
-def : Pat<(extloadi64i1 addr:$src),\r
- (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;\r
-def : Pat<(extloadi64i8 addr:$src),\r
- (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;\r
-def : Pat<(extloadi64i16 addr:$src),\r
- (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;\r
-def : Pat<(extloadi64i32 addr:$src),\r
- (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;\r
-\r
-// anyext. Define these to do an explicit zero-extend to\r
-// avoid partial-register updates.\r
-def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG\r
- (MOVZX32rr8 GR8 :$src), sub_16bit)>;\r
-def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;\r
-\r
-// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.\r
-def : Pat<(i32 (anyext GR16:$src)),\r
- (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;\r
-\r
-def : Pat<(i64 (anyext GR8 :$src)),\r
- (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;\r
-def : Pat<(i64 (anyext GR16:$src)),\r
- (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;\r
-def : Pat<(i64 (anyext GR32:$src)),\r
- (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;\r
-\r
-\r
-// Any instruction that defines a 32-bit result zeroes the high half of the\r
-// 64-bit register. The exceptions: Truncate can be lowered to EXTRACT_SUBREG,\r
-// CopyFromReg may be copying from a truncate, AssertSext merely passes a\r
-// value through, and x86's cmov doesn't do anything if the condition is\r
-// false. Any other 32-bit operation will zero-extend up to 64 bits.\r
-def def32 : PatLeaf<(i32 GR32:$src), [{\r
- return N->getOpcode() != ISD::TRUNCATE &&\r
- N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&\r
- N->getOpcode() != ISD::CopyFromReg &&\r
- N->getOpcode() != ISD::AssertSext &&\r
- N->getOpcode() != X86ISD::CMOV;\r
-}]>;\r
-\r
-// In the case of a 32-bit def that is known to implicitly zero-extend,\r
-// we can use a SUBREG_TO_REG.\r
-def : Pat<(i64 (zext def32:$src)),\r
- (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Pattern match OR as ADD\r
-//===----------------------------------------------------------------------===//\r
-\r
-// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be\r
-// 3-addressified into an LEA instruction to avoid copies. However, we also\r
-// want to finally emit these instructions as an or at the end of the code\r
-// generator to make the generated code easier to read. To do this, we select\r
-// into "disjoint bits" pseudo ops.\r
-\r
-// Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.\r
-def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{\r
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))\r
- return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());\r
-\r
- APInt KnownZero0, KnownOne0;\r
- CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);\r
- APInt KnownZero1, KnownOne1;\r
- CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);\r
- return (~KnownZero0 & ~KnownZero1) == 0;\r
-}]>;\r
-\r
-\r
-// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.\r
-// Try this before the selecting to OR.\r
-let AddedComplexity = 5, SchedRW = [WriteALU] in {\r
-\r
-let isConvertibleToThreeAddress = 1,\r
- Constraints = "$src1 = $dst", Defs = [EFLAGS] in {\r
-let isCommutable = 1 in {\r
-def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),\r
- "", // orw/addw REG, REG\r
- [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;\r
-def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),\r
- "", // orl/addl REG, REG\r
- [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;\r
-def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),\r
- "", // orq/addq REG, REG\r
- [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;\r
-} // isCommutable\r
-\r
-// NOTE: These are order-specific; we want the ri8 forms to be listed\r
-// first so that they are slightly preferred over the ri forms.\r
-\r
-def ADD16ri8_DB : I<0, Pseudo,\r
- (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),\r
- "", // orw/addw REG, imm8\r
- [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;\r
-def ADD16ri_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),\r
- "", // orw/addw REG, imm\r
- [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;\r
-\r
-def ADD32ri8_DB : I<0, Pseudo,\r
- (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),\r
- "", // orl/addl REG, imm8\r
- [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;\r
-def ADD32ri_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),\r
- "", // orl/addl REG, imm\r
- [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;\r
-\r
-\r
-def ADD64ri8_DB : I<0, Pseudo,\r
- (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),\r
- "", // orq/addq REG, imm8\r
- [(set GR64:$dst, (or_is_add GR64:$src1,\r
- i64immSExt8:$src2))]>;\r
-def ADD64ri32_DB : I<0, Pseudo,\r
- (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),\r
- "", // orq/addq REG, imm\r
- [(set GR64:$dst, (or_is_add GR64:$src1,\r
- i64immSExt32:$src2))]>;\r
-}\r
-} // AddedComplexity, SchedRW\r
-\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Some peepholes\r
-//===----------------------------------------------------------------------===//\r
-\r
-// Odd encoding trick: -128 fits into an 8-bit immediate field while\r
-// +128 doesn't, so in this special case use a sub instead of an add.\r
-def : Pat<(add GR16:$src1, 128),\r
- (SUB16ri8 GR16:$src1, -128)>;\r
-def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),\r
- (SUB16mi8 addr:$dst, -128)>;\r
-\r
-def : Pat<(add GR32:$src1, 128),\r
- (SUB32ri8 GR32:$src1, -128)>;\r
-def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),\r
- (SUB32mi8 addr:$dst, -128)>;\r
-\r
-def : Pat<(add GR64:$src1, 128),\r
- (SUB64ri8 GR64:$src1, -128)>;\r
-def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),\r
- (SUB64mi8 addr:$dst, -128)>;\r
-\r
-// The same trick applies for 32-bit immediate fields in 64-bit\r
-// instructions.\r
-def : Pat<(add GR64:$src1, 0x0000000080000000),\r
- (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;\r
-def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),\r
- (SUB64mi32 addr:$dst, 0xffffffff80000000)>;\r
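
The arithmetic behind this peephole, as a small C++ sketch: +128 does not
fit in a sign-extended 8-bit immediate but -128 does, and under
two's-complement wraparound subtracting -128 adds 128.

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 12345;
      // add $128 needs a wide immediate; sub $-128 fits in imm8.
      assert(x + 128 == x - (uint32_t)-128);
      return 0;
    }
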
-\r
-// To avoid needing to materialize an immediate in a register, use a 32-bit and\r
-// with implicit zero-extension instead of a 64-bit and if the immediate has at\r
-// least 32 bits of leading zeros. If in addition the last 32 bits can be\r
-// represented with a sign extension of a 8 bit constant, use that.\r
-\r
-def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),\r
- (SUBREG_TO_REG\r
- (i64 0),\r
- (AND32ri8\r
- (EXTRACT_SUBREG GR64:$src, sub_32bit),\r
- (i32 (GetLo8XForm imm:$imm))),\r
- sub_32bit)>;\r
-\r
-def : Pat<(and GR64:$src, i64immZExt32:$imm),\r
- (SUBREG_TO_REG\r
- (i64 0),\r
- (AND32ri\r
- (EXTRACT_SUBREG GR64:$src, sub_32bit),\r
- (i32 (GetLo32XForm imm:$imm))),\r
- sub_32bit)>;\r
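
Why the 32-bit and suffices here, sketched in C++ under the stated
assumption that the immediate has at least 32 leading zero bits: the
masked result can have no bits above 31 set, and the 32-bit operation's
implicit zero-extension supplies exactly those zeros.

    #include <cstdint>

    // mask is assumed to have at least 32 leading zero bits.
    uint64_t and_via_32bit(uint64_t x, uint32_t mask) {
      return (uint32_t)x & mask;  // implicit zext replaces a 64-bit immediate
    }
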
-\r
-\r
-// r & (2^16-1) ==> movz\r
-def : Pat<(and GR32:$src1, 0xffff),\r
- (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;\r
-// r & (2^8-1) ==> movz\r
-def : Pat<(and GR32:$src1, 0xff),\r
- (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,\r
- GR32_ABCD)),\r
- sub_8bit))>,\r
- Requires<[Not64BitMode]>;\r
-// r & (2^8-1) ==> movz\r
-def : Pat<(and GR16:$src1, 0xff),\r
- (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG\r
- (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),\r
- sub_16bit)>,\r
- Requires<[Not64BitMode]>;\r
-\r
-// r & (2^32-1) ==> movz\r
-def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),\r
- (SUBREG_TO_REG (i64 0),\r
- (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),\r
- sub_32bit)>;\r
-// r & (2^16-1) ==> movz\r
-def : Pat<(and GR64:$src, 0xffff),\r
- (SUBREG_TO_REG (i64 0),\r
- (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),\r
- sub_32bit)>;\r
-// r & (2^8-1) ==> movz\r
-def : Pat<(and GR64:$src, 0xff),\r
- (SUBREG_TO_REG (i64 0),\r
- (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),\r
- sub_32bit)>;\r
-// r & (2^8-1) ==> movz\r
-def : Pat<(and GR32:$src1, 0xff),\r
- (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,\r
- Requires<[In64BitMode]>;\r
-// r & (2^8-1) ==> movz\r
-def : Pat<(and GR16:$src1, 0xff),\r
- (EXTRACT_SUBREG (MOVZX32rr8 (i8\r
- (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,\r
- Requires<[In64BitMode]>;\r
-\r
-\r
-// sext_inreg patterns\r
-def : Pat<(sext_inreg GR32:$src, i16),\r
- (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;\r
-def : Pat<(sext_inreg GR32:$src, i8),\r
- (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,\r
- GR32_ABCD)),\r
- sub_8bit))>,\r
- Requires<[Not64BitMode]>;\r
-\r
-def : Pat<(sext_inreg GR16:$src, i8),\r
- (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG\r
- (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),\r
- sub_16bit)>,\r
- Requires<[Not64BitMode]>;\r
-\r
-def : Pat<(sext_inreg GR64:$src, i32),\r
- (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;\r
-def : Pat<(sext_inreg GR64:$src, i16),\r
- (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;\r
-def : Pat<(sext_inreg GR64:$src, i8),\r
- (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;\r
-def : Pat<(sext_inreg GR32:$src, i8),\r
- (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,\r
- Requires<[In64BitMode]>;\r
-def : Pat<(sext_inreg GR16:$src, i8),\r
- (EXTRACT_SUBREG (MOVSX32rr8\r
- (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,\r
- Requires<[In64BitMode]>;\r
-\r
-// sext, sext_load, zext, zext_load\r
-def: Pat<(i16 (sext GR8:$src)),\r
- (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;\r
-def: Pat<(sextloadi16i8 addr:$src),\r
- (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;\r
-def: Pat<(i16 (zext GR8:$src)),\r
- (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;\r
-def: Pat<(zextloadi16i8 addr:$src),\r
- (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;\r
-\r
-// trunc patterns\r
-def : Pat<(i16 (trunc GR32:$src)),\r
- (EXTRACT_SUBREG GR32:$src, sub_16bit)>;\r
-def : Pat<(i8 (trunc GR32:$src)),\r
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),\r
- sub_8bit)>,\r
- Requires<[Not64BitMode]>;\r
-def : Pat<(i8 (trunc GR16:$src)),\r
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),\r
- sub_8bit)>,\r
- Requires<[Not64BitMode]>;\r
-def : Pat<(i32 (trunc GR64:$src)),\r
- (EXTRACT_SUBREG GR64:$src, sub_32bit)>;\r
-def : Pat<(i16 (trunc GR64:$src)),\r
- (EXTRACT_SUBREG GR64:$src, sub_16bit)>;\r
-def : Pat<(i8 (trunc GR64:$src)),\r
- (EXTRACT_SUBREG GR64:$src, sub_8bit)>;\r
-def : Pat<(i8 (trunc GR32:$src)),\r
- (EXTRACT_SUBREG GR32:$src, sub_8bit)>,\r
- Requires<[In64BitMode]>;\r
-def : Pat<(i8 (trunc GR16:$src)),\r
- (EXTRACT_SUBREG GR16:$src, sub_8bit)>,\r
- Requires<[In64BitMode]>;\r
-\r
-// h-register tricks\r
-def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),\r
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),\r
- sub_8bit_hi)>,\r
- Requires<[Not64BitMode]>;\r
-def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),\r
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),\r
- sub_8bit_hi)>,\r
- Requires<[Not64BitMode]>;\r
-def : Pat<(srl GR16:$src, (i8 8)),\r
- (EXTRACT_SUBREG\r
- (MOVZX32rr8\r
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),\r
- sub_8bit_hi)),\r
- sub_16bit)>,\r
- Requires<[Not64BitMode]>;\r
-def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),\r
- (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,\r
- GR16_ABCD)),\r
- sub_8bit_hi))>,\r
- Requires<[Not64BitMode]>;\r
-def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),\r
- (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,\r
- GR16_ABCD)),\r
- sub_8bit_hi))>,\r
- Requires<[Not64BitMode]>;\r
-def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),\r
- (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,\r
- GR32_ABCD)),\r
- sub_8bit_hi))>,\r
- Requires<[Not64BitMode]>;\r
-def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),\r
- (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,\r
- GR32_ABCD)),\r
- sub_8bit_hi))>,\r
- Requires<[Not64BitMode]>;\r
-\r
-// h-register tricks.\r
-// For now, be conservative on x86-64 and use an h-register extract only if the\r
-// value is immediately zero-extended or stored, which are somewhat common\r
-// cases. This uses a bunch of code to prevent a register requiring a REX prefix\r
-// from being allocated in the same instruction as the h register, as there's\r
-// currently no way to describe this requirement to the register allocator.\r
-\r
-// h-register extract and zero-extend.\r
-def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),\r
- (SUBREG_TO_REG\r
- (i64 0),\r
- (MOVZX32_NOREXrr8\r
- (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),\r
- sub_8bit_hi)),\r
- sub_32bit)>;\r
-def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),\r
- (MOVZX32_NOREXrr8\r
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),\r
- sub_8bit_hi))>,\r
- Requires<[In64BitMode]>;\r
-def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),\r
- (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,\r
- GR32_ABCD)),\r
- sub_8bit_hi))>,\r
- Requires<[In64BitMode]>;\r
-def : Pat<(srl GR16:$src, (i8 8)),\r
- (EXTRACT_SUBREG\r
- (MOVZX32_NOREXrr8\r
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),\r
- sub_8bit_hi)),\r
- sub_16bit)>,\r
- Requires<[In64BitMode]>;\r
-def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),\r
- (MOVZX32_NOREXrr8\r
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),\r
- sub_8bit_hi))>,\r
- Requires<[In64BitMode]>;\r
-def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),\r
- (MOVZX32_NOREXrr8\r
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),\r
- sub_8bit_hi))>,\r
- Requires<[In64BitMode]>;\r
-def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),\r
- (SUBREG_TO_REG\r
- (i64 0),\r
- (MOVZX32_NOREXrr8\r
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),\r
- sub_8bit_hi)),\r
- sub_32bit)>;\r
-def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),\r
- (SUBREG_TO_REG\r
- (i64 0),\r
- (MOVZX32_NOREXrr8\r
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),\r
- sub_8bit_hi)),\r
- sub_32bit)>;\r
-\r
-// h-register extract and store.\r
-def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),\r
- (MOV8mr_NOREX\r
- addr:$dst,\r
- (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),\r
- sub_8bit_hi))>;\r
-def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),\r
- (MOV8mr_NOREX\r
- addr:$dst,\r
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),\r
- sub_8bit_hi))>,\r
- Requires<[In64BitMode]>;\r
-def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),\r
- (MOV8mr_NOREX\r
- addr:$dst,\r
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),\r
- sub_8bit_hi))>,\r
- Requires<[In64BitMode]>;\r
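
What these extract-and-store patterns fetch, in plain C++ (illustrative
only): byte 1 of the source, which is exactly the h-register and therefore
needs neither a shift nor a REX-encodable destination.

    #include <cstdint>

    uint8_t second_byte(uint32_t x) {
      return (uint8_t)(x >> 8);  // trunc(srl x, 8): the AH/BH/CH/DH byte
    }
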
-\r
-\r
-// (shl x, 1) ==> (add x, x)\r
-// Note that if x is undef (immediate or otherwise), we could theoretically\r
-// end up with the two uses of x getting different values, producing a result\r
-// where the least significant bit is not 0. However, the probability of this\r
-// happening is considered low enough that this is officially not a\r
-// "real problem".\r
-def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;\r
-def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;\r
-def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;\r
-def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;\r
-\r
-// Helper immediates that check whether a mask preserves the significant shift bits.\r
-def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;\r
-def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;\r
-\r
-// Shift amount is implicitly masked.\r
-multiclass MaskedShiftAmountPats<SDNode frag, string name> {\r
- // (shift x (and y, 31)) ==> (shift x, y)\r
- def : Pat<(frag GR8:$src1, (and CL, immShift32)),\r
- (!cast<Instruction>(name # "8rCL") GR8:$src1)>;\r
- def : Pat<(frag GR16:$src1, (and CL, immShift32)),\r
- (!cast<Instruction>(name # "16rCL") GR16:$src1)>;\r
- def : Pat<(frag GR32:$src1, (and CL, immShift32)),\r
- (!cast<Instruction>(name # "32rCL") GR32:$src1)>;\r
- def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),\r
- (!cast<Instruction>(name # "8mCL") addr:$dst)>;\r
- def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),\r
- (!cast<Instruction>(name # "16mCL") addr:$dst)>;\r
- def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),\r
- (!cast<Instruction>(name # "32mCL") addr:$dst)>;\r
-\r
- // (shift x (and y, 63)) ==> (shift x, y)\r
- def : Pat<(frag GR64:$src1, (and CL, immShift64)),\r
- (!cast<Instruction>(name # "64rCL") GR64:$src1)>;\r
- def : Pat<(store (frag (loadi64 addr:$dst), (and CL, 63)), addr:$dst),\r
- (!cast<Instruction>(name # "64mCL") addr:$dst)>;\r
-}\r
-\r
-defm : MaskedShiftAmountPats<shl, "SHL">;\r
-defm : MaskedShiftAmountPats<srl, "SHR">;\r
-defm : MaskedShiftAmountPats<sra, "SAR">;\r
-defm : MaskedShiftAmountPats<rotl, "ROL">;\r
-defm : MaskedShiftAmountPats<rotr, "ROR">;\r
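
The hardware fact these patterns exploit, sketched in C++: x86 shifts and
rotates already mask the CL count to 5 bits (6 bits for 64-bit operands),
so an explicit and that keeps those low bits intact is redundant.

    #include <cstdint>

    uint32_t shl32(uint32_t x, uint8_t cl) {
      return x << (cl & 31);  // what "shll %cl, r32" computes in hardware
    }

    uint64_t shl64(uint64_t x, uint8_t cl) {
      return x << (cl & 63);  // 64-bit shifts mask the count to 6 bits
    }
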
-\r
-// (anyext (setcc_carry)) -> (setcc_carry)\r
-def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),\r
- (SETB_C16r)>;\r
-def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),\r
- (SETB_C32r)>;\r
-def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),\r
- (SETB_C32r)>;\r
-\r
-//===----------------------------------------------------------------------===//\r
-// EFLAGS-defining Patterns\r
-//===----------------------------------------------------------------------===//\r
-\r
-// add reg, reg\r
-def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr GR8 :$src1, GR8 :$src2)>;\r
-def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;\r
-def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;\r
-\r
-// add reg, mem\r
-def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),\r
- (ADD8rm GR8:$src1, addr:$src2)>;\r
-def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),\r
- (ADD16rm GR16:$src1, addr:$src2)>;\r
-def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),\r
- (ADD32rm GR32:$src1, addr:$src2)>;\r
-\r
-// add reg, imm\r
-def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8:$src1 , imm:$src2)>;\r
-def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;\r
-def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;\r
-def : Pat<(add GR16:$src1, i16immSExt8:$src2),\r
- (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;\r
-def : Pat<(add GR32:$src1, i32immSExt8:$src2),\r
- (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;\r
-\r
-// sub reg, reg\r
-def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>;\r
-def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;\r
-def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;\r
-\r
-// sub reg, mem\r
-def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),\r
- (SUB8rm GR8:$src1, addr:$src2)>;\r
-def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),\r
- (SUB16rm GR16:$src1, addr:$src2)>;\r
-def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),\r
- (SUB32rm GR32:$src1, addr:$src2)>;\r
-\r
-// sub reg, imm\r
-def : Pat<(sub GR8:$src1, imm:$src2),\r
- (SUB8ri GR8:$src1, imm:$src2)>;\r
-def : Pat<(sub GR16:$src1, imm:$src2),\r
- (SUB16ri GR16:$src1, imm:$src2)>;\r
-def : Pat<(sub GR32:$src1, imm:$src2),\r
- (SUB32ri GR32:$src1, imm:$src2)>;\r
-def : Pat<(sub GR16:$src1, i16immSExt8:$src2),\r
- (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;\r
-def : Pat<(sub GR32:$src1, i32immSExt8:$src2),\r
- (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;\r
-\r
-// sub 0, reg\r
-def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r GR8 :$src)>;\r
-def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;\r
-def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;\r
-def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;\r
-\r
-// mul reg, reg\r
-def : Pat<(mul GR16:$src1, GR16:$src2),\r
- (IMUL16rr GR16:$src1, GR16:$src2)>;\r
-def : Pat<(mul GR32:$src1, GR32:$src2),\r
- (IMUL32rr GR32:$src1, GR32:$src2)>;\r
-\r
-// mul reg, mem\r
-def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),\r
- (IMUL16rm GR16:$src1, addr:$src2)>;\r
-def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),\r
- (IMUL32rm GR32:$src1, addr:$src2)>;\r
-\r
-// mul reg, imm\r
-def : Pat<(mul GR16:$src1, imm:$src2),\r
- (IMUL16rri GR16:$src1, imm:$src2)>;\r
-def : Pat<(mul GR32:$src1, imm:$src2),\r
- (IMUL32rri GR32:$src1, imm:$src2)>;\r
-def : Pat<(mul GR16:$src1, i16immSExt8:$src2),\r
- (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;\r
-def : Pat<(mul GR32:$src1, i32immSExt8:$src2),\r
- (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;\r
-\r
-// reg = mul mem, imm\r
-def : Pat<(mul (loadi16 addr:$src1), imm:$src2),\r
- (IMUL16rmi addr:$src1, imm:$src2)>;\r
-def : Pat<(mul (loadi32 addr:$src1), imm:$src2),\r
- (IMUL32rmi addr:$src1, imm:$src2)>;\r
-def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),\r
- (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;\r
-def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),\r
- (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;\r
-\r
-// Patterns for nodes that do not produce flags, for instructions that do.\r
-\r
-// addition\r
-def : Pat<(add GR64:$src1, GR64:$src2),\r
- (ADD64rr GR64:$src1, GR64:$src2)>;\r
-def : Pat<(add GR64:$src1, i64immSExt8:$src2),\r
- (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;\r
-def : Pat<(add GR64:$src1, i64immSExt32:$src2),\r
- (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;\r
-def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),\r
- (ADD64rm GR64:$src1, addr:$src2)>;\r
-\r
-// subtraction\r
-def : Pat<(sub GR64:$src1, GR64:$src2),\r
- (SUB64rr GR64:$src1, GR64:$src2)>;\r
-def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),\r
- (SUB64rm GR64:$src1, addr:$src2)>;\r
-def : Pat<(sub GR64:$src1, i64immSExt8:$src2),\r
- (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;\r
-def : Pat<(sub GR64:$src1, i64immSExt32:$src2),\r
- (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;\r
-\r
-// Multiply\r
-def : Pat<(mul GR64:$src1, GR64:$src2),\r
- (IMUL64rr GR64:$src1, GR64:$src2)>;\r
-def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),\r
- (IMUL64rm GR64:$src1, addr:$src2)>;\r
-def : Pat<(mul GR64:$src1, i64immSExt8:$src2),\r
- (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;\r
-def : Pat<(mul GR64:$src1, i64immSExt32:$src2),\r
- (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;\r
-def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),\r
- (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;\r
-def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),\r
- (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;\r
-\r
-// Increment/Decrement reg.\r
-// Do not use INC/DEC when they are slow on the target.\r
-let Predicates = [NotSlowIncDec] in {\r
- def : Pat<(add GR8:$src, 1), (INC8r GR8:$src)>;\r
- def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>;\r
- def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>;\r
- def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;\r
- def : Pat<(add GR8:$src, -1), (DEC8r GR8:$src)>;\r
- def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;\r
- def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;\r
- def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;\r
-}\r
-\r
-// or reg/reg.\r
-def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;\r
-def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;\r
-def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;\r
-def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;\r
-\r
-// or reg/mem\r
-def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),\r
- (OR8rm GR8:$src1, addr:$src2)>;\r
-def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),\r
- (OR16rm GR16:$src1, addr:$src2)>;\r
-def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),\r
- (OR32rm GR32:$src1, addr:$src2)>;\r
-def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),\r
- (OR64rm GR64:$src1, addr:$src2)>;\r
-\r
-// or reg/imm\r
-def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri GR8 :$src1, imm:$src2)>;\r
-def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;\r
-def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;\r
-def : Pat<(or GR16:$src1, i16immSExt8:$src2),\r
- (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;\r
-def : Pat<(or GR32:$src1, i32immSExt8:$src2),\r
- (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;\r
-def : Pat<(or GR64:$src1, i64immSExt8:$src2),\r
- (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;\r
-def : Pat<(or GR64:$src1, i64immSExt32:$src2),\r
- (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;\r
-\r
-// xor reg/reg\r
-def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr GR8 :$src1, GR8 :$src2)>;\r
-def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;\r
-def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;\r
-def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;\r
-\r
-// xor reg/mem\r
-def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),\r
- (XOR8rm GR8:$src1, addr:$src2)>;\r
-def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),\r
- (XOR16rm GR16:$src1, addr:$src2)>;\r
-def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),\r
- (XOR32rm GR32:$src1, addr:$src2)>;\r
-def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),\r
- (XOR64rm GR64:$src1, addr:$src2)>;\r
-\r
-// xor reg/imm\r
-def : Pat<(xor GR8:$src1, imm:$src2),\r
- (XOR8ri GR8:$src1, imm:$src2)>;\r
-def : Pat<(xor GR16:$src1, imm:$src2),\r
- (XOR16ri GR16:$src1, imm:$src2)>;\r
-def : Pat<(xor GR32:$src1, imm:$src2),\r
- (XOR32ri GR32:$src1, imm:$src2)>;\r
-def : Pat<(xor GR16:$src1, i16immSExt8:$src2),\r
- (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;\r
-def : Pat<(xor GR32:$src1, i32immSExt8:$src2),\r
- (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;\r
-def : Pat<(xor GR64:$src1, i64immSExt8:$src2),\r
- (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;\r
-def : Pat<(xor GR64:$src1, i64immSExt32:$src2),\r
- (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;\r
-\r
-// and reg/reg\r
-def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr GR8 :$src1, GR8 :$src2)>;\r
-def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;\r
-def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;\r
-def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;\r
-\r
-// and reg/mem\r
-def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),\r
- (AND8rm GR8:$src1, addr:$src2)>;\r
-def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),\r
- (AND16rm GR16:$src1, addr:$src2)>;\r
-def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),\r
- (AND32rm GR32:$src1, addr:$src2)>;\r
-def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),\r
- (AND64rm GR64:$src1, addr:$src2)>;\r
-\r
-// and reg/imm\r
-def : Pat<(and GR8:$src1, imm:$src2),\r
- (AND8ri GR8:$src1, imm:$src2)>;\r
-def : Pat<(and GR16:$src1, imm:$src2),\r
- (AND16ri GR16:$src1, imm:$src2)>;\r
-def : Pat<(and GR32:$src1, imm:$src2),\r
- (AND32ri GR32:$src1, imm:$src2)>;\r
-def : Pat<(and GR16:$src1, i16immSExt8:$src2),\r
- (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;\r
-def : Pat<(and GR32:$src1, i32immSExt8:$src2),\r
- (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;\r
-def : Pat<(and GR64:$src1, i64immSExt8:$src2),\r
- (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;\r
-def : Pat<(and GR64:$src1, i64immSExt32:$src2),\r
- (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;\r
-\r
-// Bit scan instruction patterns to match explicit zero-undef behavior.\r
-def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;\r
-def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;\r
-def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;\r
-def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;\r
-def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;\r
-def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;\r
-\r
-// When HasMOVBE is enabled, it is possible to get a non-legalized\r
-// register-register 16-bit bswap. This maps it to a ROL instruction.\r
-let Predicates = [HasMOVBE] in {\r
- def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;\r
-}\r
+//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the various pseudo instructions used by the compiler,
+// as well as Pat patterns used during instruction selection.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Pattern Matching Support
+
+def GetLo32XForm : SDNodeXForm<imm, [{
+ // Transformation function: get the low 32 bits.
+ return getI32Imm((unsigned)N->getZExtValue());
+}]>;
+
+def GetLo8XForm : SDNodeXForm<imm, [{
+ // Transformation function: get the low 8 bits.
+ return getI8Imm((uint8_t)N->getZExtValue());
+}]>;
+
+
+//===----------------------------------------------------------------------===//
+// Random Pseudo Instructions.
+
+// PIC base construction. This expands to code that looks like this:
+// call $next_inst
+// popl %destreg
+let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP] in
+ def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
+ "", []>;
+
+
+// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
+// a stack adjustment and the codegen must know that they may modify the stack
+// pointer before prolog-epilog rewriting occurs.
+// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
+// sub / add which can clobber EFLAGS.
+let Defs = [ESP, EFLAGS], Uses = [ESP] in {
+def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
+ "#ADJCALLSTACKDOWN",
+ [(X86callseq_start timm:$amt)]>,
+ Requires<[NotLP64]>;
+def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "#ADJCALLSTACKUP",
+ [(X86callseq_end timm:$amt1, timm:$amt2)]>,
+ Requires<[NotLP64]>;
+}
+
+// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
+// a stack adjustment and the codegen must know that they may modify the stack
+// pointer before prolog-epilog rewriting occurs.
+// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
+// sub / add which can clobber EFLAGS.
+let Defs = [RSP, EFLAGS], Uses = [RSP] in {
+def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
+ "#ADJCALLSTACKDOWN",
+ [(X86callseq_start timm:$amt)]>,
+ Requires<[IsLP64]>;
+def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "#ADJCALLSTACKUP",
+ [(X86callseq_end timm:$amt1, timm:$amt2)]>,
+ Requires<[IsLP64]>;
+}
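
A hedged C++ sketch (not LLVM API) of the net effect once the pair is
expanded: the two pseudos bracket a call, and the stack-pointer
adjustment cancels out.

    #include <cassert>

    int main() {
      long sp = 0x1000;      // stack pointer entering the call sequence
      const long amt = 16;   // ADJCALLSTACKDOWN 16  ->  sub %esp, 16
      sp -= amt;             // make room for outgoing arguments
      // ... arguments stored, call emitted ...
      sp += amt;             // ADJCALLSTACKUP 16, 0  ->  add %esp, 16
      assert(sp == 0x1000);  // the pair is balanced
      return 0;
    }
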
+
+// x86-64 va_start lowering magic.
+let usesCustomInserter = 1, Defs = [EFLAGS] in {
+def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
+ (outs),
+ (ins GR8:$al,
+ i64imm:$regsavefi, i64imm:$offset,
+ variable_ops),
+ "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
+ [(X86vastart_save_xmm_regs GR8:$al,
+ imm:$regsavefi,
+ imm:$offset),
+ (implicit EFLAGS)]>;
+
+// The VAARG_64 pseudo-instruction takes the address of the va_list,
+// and places the address of the next argument into a register.
+let Defs = [EFLAGS] in
+def VAARG_64 : I<0, Pseudo,
+ (outs GR64:$dst),
+ (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
+ "#VAARG_64 $dst, $ap, $size, $mode, $align",
+ [(set GR64:$dst,
+ (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
+ (implicit EFLAGS)]>;
+
+// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
+// targets. These calls are needed to probe the stack when allocating more
+// than 4K bytes in one go. Touching the stack at 4K increments is necessary
+// to ensure that the guard pages used by the OS virtual memory manager are
+// allocated in the correct sequence.
+// The main point of having a separate instruction is the extra unmodelled
+// effects it carries (compared to ordinary calls), such as the stack-pointer
+// change.
+
+let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
+ def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
+ "# dynamic stack allocation",
+ [(X86WinAlloca)]>;
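
A deliberately simplified C++ model of the probing idea; the real _chkstk
routine is assembly operating on the actual stack pointer, so treat this
purely as a sketch of the page-touch order.

    #include <cstddef>

    void probe(volatile char *sp, size_t bytes) {
      const size_t kPage = 4096;
      while (bytes > kPage) {
        sp -= kPage;
        *sp = 0;        // touch each page so guard pages fault in sequence
        bytes -= kPage;
      }
      sp -= bytes;      // final partial adjustment needs no extra probe
    }
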
+
+// When using segmented stacks these are lowered into instructions which first
+// check if the current stacklet has enough free memory. If it does, memory is
+// allocated by bumping the stack pointer. Otherwise memory is allocated from
+// the heap.
+
+let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
+def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
+ "# variable sized alloca for segmented stacks",
+ [(set GR32:$dst,
+ (X86SegAlloca GR32:$size))]>,
+ Requires<[NotLP64]>;
+
+let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
+def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
+ "# variable sized alloca for segmented stacks",
+ [(set GR64:$dst,
+ (X86SegAlloca GR64:$size))]>,
+ Requires<[In64BitMode]>;
+}
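
A sketch of the stacklet check these pseudos expand to, with a
hypothetical runtime hook named more_stack standing in for the real
slow-path allocation:

    extern "C" char *more_stack(unsigned long size);  // hypothetical slow path

    char *seg_alloca(char *sp, char *limit, unsigned long size) {
      if (sp - size >= limit)
        return sp - size;        // fast path: bump the stack pointer
      return more_stack(size);   // slow path: allocate from the heap
    }
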
+
+// The MSVC runtime contains an _ftol2 routine for converting floating-point
+// to integer values. It has a strange calling convention: the input is
+// popped from the x87 stack, and the return value is given in EDX:EAX. ECX is
+// used as a temporary register. No other registers (aside from flags) are
+// touched.
+// Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80
+// variant is unnecessary.
+
+let Defs = [EAX, EDX, ECX, EFLAGS], FPForm = SpecialFP in {
+ def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),
+ "# win32 fptoui",
+ [(X86WinFTOL RFP32:$src)]>,
+ Requires<[Not64BitMode]>;
+
+ def WIN_FTOL_64 : I<0, Pseudo, (outs), (ins RFP64:$src),
+ "# win32 fptoui",
+ [(X86WinFTOL RFP64:$src)]>,
+ Requires<[Not64BitMode]>;
+}
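
How a caller reassembles the 64-bit result from the EDX:EAX pair, as a
small illustration of the convention described above (not the lowering
code itself):

    #include <cstdint>

    uint64_t from_edx_eax(uint32_t edx, uint32_t eax) {
      return ((uint64_t)edx << 32) | eax;  // high half : low half
    }
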
+
+//===----------------------------------------------------------------------===//
+// EH Pseudo Instructions
+//
+let SchedRW = [WriteSystem] in {
+let isTerminator = 1, isReturn = 1, isBarrier = 1,
+ hasCtrlDep = 1, isCodeGenOnly = 1 in {
+def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
+ "ret\t#eh_return, addr: $addr",
+ [(X86ehret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
+
+}
+
+let isTerminator = 1, isReturn = 1, isBarrier = 1,
+ hasCtrlDep = 1, isCodeGenOnly = 1 in {
+def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
+ "ret\t#eh_return, addr: $addr",
+ [(X86ehret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
+
+}
+
+let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
+ usesCustomInserter = 1 in {
+ def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
+ "#EH_SJLJ_SETJMP32",
+ [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
+ Requires<[Not64BitMode]>;
+ def EH_SjLj_SetJmp64 : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
+ "#EH_SJLJ_SETJMP64",
+ [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
+ Requires<[In64BitMode]>;
+ let isTerminator = 1 in {
+ def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
+ "#EH_SJLJ_LONGJMP32",
+ [(X86eh_sjlj_longjmp addr:$buf)]>,
+ Requires<[Not64BitMode]>;
+ def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
+ "#EH_SJLJ_LONGJMP64",
+ [(X86eh_sjlj_longjmp addr:$buf)]>,
+ Requires<[In64BitMode]>;
+ }
+}
+} // SchedRW
+
+let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
+ def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
+ "#EH_SjLj_Setup\t$dst", []>;
+}
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions used by unwind info.
+//
+let isPseudo = 1 in {
+ def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
+ "#SEH_PushReg $reg", []>;
+ def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
+ "#SEH_SaveReg $reg, $dst", []>;
+ def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
+ "#SEH_SaveXMM $reg, $dst", []>;
+ def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
+ "#SEH_StackAlloc $size", []>;
+ def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
+ "#SEH_SetFrame $reg, $offset", []>;
+ def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
+ "#SEH_PushFrame $mode", []>;
+ def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
+ "#SEH_EndPrologue", []>;
+ def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
+ "#SEH_Epilogue", []>;
+}
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions used by segmented stacks.
+//
+
+// This is lowered into a RET instruction by MCInstLower. We need
+// this so that we don't have to have a MachineBasicBlock which ends
+// with a RET and also has successors.
+let isPseudo = 1 in {
+def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
+ "", []>;
+
+// This instruction is lowered to a RET followed by a MOV. The two
+// instructions are not generated on a higher level since then the
+// verifier sees a MachineBasicBlock ending with a non-terminator.
+def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
+ "", []>;
+}
+
+//===----------------------------------------------------------------------===//
+// Alias Instructions
+//===----------------------------------------------------------------------===//
+
+// Alias instruction mapping movr0 to xor.
+// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
+let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
+ isPseudo = 1 in
+def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
+ [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
+
+// Other widths can also make use of the 32-bit xor, which may have a smaller
+// encoding and avoid partial register updates.
+def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
+def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
+def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
+ let AddedComplexity = 20;
+}
+
+// Materialize an i64 constant whose top 32 bits are zero. This could
+// theoretically use MOV32ri with a SUBREG_TO_REG to represent the
+// zero-extension; however, that would make it more difficult to
+// rematerialize.
+let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
+ isCodeGenOnly = 1, hasSideEffects = 0 in
+def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src),
+ "", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>;
+
+// This 64-bit pseudo-move can be used for both a 64-bit constant that is
+// actually the zero-extension of a 32-bit constant, and for labels in the
+// x86-64 small code model.
+def mov64imm32 : ComplexPattern<i64, 1, "SelectMOV64Imm32", [imm, X86Wrapper]>;
+
+let AddedComplexity = 1 in
+def : Pat<(i64 mov64imm32:$src),
+ (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>;
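
The eligibility test this relies on, sketched in C++: a 64-bit constant
qualifies exactly when its top 32 bits are zero, because the 32-bit mov
zero-extends for free.

    #include <cstdint>

    bool fitsInMov32(uint64_t imm) {
      return (imm >> 32) == 0;  // movl materializes it; upper half is zeroed
    }
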
+
+// Use sbb to materialize carry bit.
+let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in {
+// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
+// However, Pat<> can't replicate the destination reg into the inputs of the
+// result.
+def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "",
+ [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",
+ [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",
+ [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",
+ [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+} // Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU]
+
+
+def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C16r)>;
+def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C32r)>;
+def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C64r)>;
+
+def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C16r)>;
+def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C32r)>;
+def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C64r)>;
+
+// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" in the hope that the and
+// will be eliminated and that the sbb can be extended up to a wider type. When
+// that happens, it is a clear win. However, if we are left with an 8-bit sbb
+// and an and, we might as well just match it as a setb.
+def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
+ (SETBr)>;
+
+// (add OP, SETB) -> (adc OP, 0)
+def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
+ (ADC8ri GR8:$op, 0)>;
+def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
+ (ADC32ri8 GR32:$op, 0)>;
+def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
+ (ADC64ri8 GR64:$op, 0)>;
+
+// (sub OP, SETB) -> (sbb OP, 0)
+def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
+ (SBB8ri GR8:$op, 0)>;
+def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
+ (SBB32ri8 GR32:$op, 0)>;
+def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
+ (SBB64ri8 GR64:$op, 0)>;
+
+// (sub OP, SETCC_CARRY) -> (adc OP, 0)
+def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
+ (ADC8ri GR8:$op, 0)>;
+def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
+ (ADC32ri8 GR32:$op, 0)>;
+def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
+ (ADC64ri8 GR64:$op, 0)>;
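
The identities behind the setb/sbb/adc patterns, checked in a small C++
sketch: with the carry flag set, sbb reg,reg produces all ones; masking
with 1 recovers the carry, and adding that masked value to an operand is
exactly what adc op, 0 computes.

    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned cf = 0; cf <= 1; ++cf) {
        uint32_t sbb = cf ? 0xFFFFFFFFu : 0u;  // sbb reg,reg result
        assert((sbb & 1) == cf);               // setb == (and (sbb r,r), 1)
        uint32_t op = 41;
        assert(op + (sbb & 1) == op + cf);     // (add op, setb) == (adc op, 0)
      }
      return 0;
    }
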
+
+//===----------------------------------------------------------------------===//
+// String Pseudo Instructions
+//
+let SchedRW = [WriteMicrocoded] in {
+let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
+def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
+ [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
+ Requires<[Not64BitMode]>;
+def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
+ [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
+ Requires<[Not64BitMode]>;
+def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
+ [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
+ Requires<[Not64BitMode]>;
+}
+
+let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
+def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
+ [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
+ Requires<[In64BitMode]>;
+def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
+ [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
+ Requires<[In64BitMode]>;
+def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
+ [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
+ Requires<[In64BitMode]>;
+def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
+ [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
+ Requires<[In64BitMode]>;
+}
+
+// FIXME: Should use "(X86rep_stos AL)" as the pattern.
+let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
+ let Uses = [AL,ECX,EDI] in
+ def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
+ [(X86rep_stos i8)], IIC_REP_STOS>, REP,
+ Requires<[Not64BitMode]>;
+ let Uses = [AX,ECX,EDI] in
+ def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
+ [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
+ Requires<[Not64BitMode]>;
+ let Uses = [EAX,ECX,EDI] in
+ def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
+ [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
+ Requires<[Not64BitMode]>;
+}
+
+let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
+ let Uses = [AL,RCX,RDI] in
+ def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
+ [(X86rep_stos i8)], IIC_REP_STOS>, REP,
+ Requires<[In64BitMode]>;
+ let Uses = [AX,RCX,RDI] in
+ def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
+ [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
+ Requires<[In64BitMode]>;
+ let Uses = [RAX,RCX,RDI] in
+ def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
+ [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
+ Requires<[In64BitMode]>;
+
+ let Uses = [RAX,RCX,RDI] in
+ def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
+ [(X86rep_stos i64)], IIC_REP_STOS>, REP,
+ Requires<[In64BitMode]>;
+}
+} // SchedRW
+
+//===----------------------------------------------------------------------===//
+// Thread Local Storage Instructions
+//
+
+// ELF TLS Support
+// All calls clobber the non-callee saved registers. ESP is marked as
+// a use to prevent stack-pointer assignments that appear immediately
+// before calls from potentially appearing dead.
+let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
+ ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [ESP] in {
+def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
+ "# TLS_addr32",
+ [(X86tlsaddr tls32addr:$sym)]>,
+ Requires<[Not64BitMode]>;
+def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
+ "# TLS_base_addr32",
+ [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
+ Requires<[Not64BitMode]>;
+}
+
+// All calls clobber the non-callee saved registers. RSP is marked as
+// a use to prevent stack-pointer assignments that appear immediately
+// before calls from potentially appearing dead.
+let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
+ FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
+ ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [RSP] in {
+def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
+ "# TLS_addr64",
+ [(X86tlsaddr tls64addr:$sym)]>,
+ Requires<[In64BitMode]>;
+def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
+ "# TLS_base_addr64",
+ [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
+ Requires<[In64BitMode]>;
+}
+
+// Darwin TLS Support
+// For i386, the address of the thunk is passed on the stack; on return, the
+// address of the variable is in %eax. %ecx is trashed during the function
+// call. All other registers are preserved.
+let Defs = [EAX, ECX, EFLAGS],
+ Uses = [ESP],
+ usesCustomInserter = 1 in
+def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
+ "# TLSCall_32",
+ [(X86TLSCall addr:$sym)]>,
+ Requires<[Not64BitMode]>;
+
+// For x86_64, the address of the thunk is passed in %rdi, on return
+// the address of the variable is in %rax. All other registers are preserved.
+let Defs = [RAX, EFLAGS],
+ Uses = [RSP, RDI],
+ usesCustomInserter = 1 in
+def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
+ "# TLSCall_64",
+ [(X86TLSCall addr:$sym)]>,
+ Requires<[In64BitMode]>;
+
+
+//===----------------------------------------------------------------------===//
+// Conditional Move Pseudo Instructions
+
+// X86 doesn't have 8-bit conditional moves. Use a custom inserter to
+// emit control flow. An alternative to this is to mark i8 SELECT as Promote;
+// however, that requires promoting the operands and can induce additional
+// i8 register pressure.
+let usesCustomInserter = 1, Uses = [EFLAGS] in {
+def CMOV_GR8 : I<0, Pseudo,
+ (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
+ "#CMOV_GR8 PSEUDO!",
+ [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
+ imm:$cond, EFLAGS))]>;
+
+let Predicates = [NoCMov] in {
+def CMOV_GR32 : I<0, Pseudo,
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
+ "#CMOV_GR32* PSEUDO!",
+ [(set GR32:$dst,
+ (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_GR16 : I<0, Pseudo,
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
+ "#CMOV_GR16* PSEUDO!",
+ [(set GR16:$dst,
+ (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
+} // Predicates = [NoCMov]
+
+// fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
+// SSE1.
+let Predicates = [FPStackf32] in
+def CMOV_RFP32 : I<0, Pseudo,
+ (outs RFP32:$dst),
+ (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
+ "#CMOV_RFP32 PSEUDO!",
+ [(set RFP32:$dst,
+ (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
+ EFLAGS))]>;
+// fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
+// SSE2.
+let Predicates = [FPStackf64] in
+def CMOV_RFP64 : I<0, Pseudo,
+ (outs RFP64:$dst),
+ (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
+ "#CMOV_RFP64 PSEUDO!",
+ [(set RFP64:$dst,
+ (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
+ EFLAGS))]>;
+def CMOV_RFP80 : I<0, Pseudo,
+ (outs RFP80:$dst),
+ (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
+ "#CMOV_RFP80 PSEUDO!",
+ [(set RFP80:$dst,
+ (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
+ EFLAGS))]>;
+} // usesCustomInserter = 1, Uses = [EFLAGS]
+
+
+//===----------------------------------------------------------------------===//
+// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
+//===----------------------------------------------------------------------===//
+
+// FIXME: Use normal instructions and add lock prefix dynamically.
+
+// Memory barriers
+
+// TODO: Get this to fold the constant into the instruction.
+let isCodeGenOnly = 1, Defs = [EFLAGS] in
+def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
+ "or{l}\t{$zero, $dst|$dst, $zero}",
+ [], IIC_ALU_MEM>, Requires<[Not64BitMode]>, LOCK,
+ Sched<[WriteALULd, WriteRMW]>;
+
+let hasSideEffects = 1 in
+def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
+ "#MEMBARRIER",
+ [(X86MemBarrier)]>, Sched<[WriteLoad]>;
+
+// RegOpc corresponds to the mr version of the instruction
+// ImmOpc corresponds to the mi version of the instruction
+// ImmOpc8 corresponds to the mi8 version of the instruction
+// ImmMod corresponds to the instruction format of the mi and mi8 versions
+multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
+ Format ImmMod, string mnemonic> {
+let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
+ SchedRW = [WriteALULd, WriteRMW] in {
+
+def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
+ RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
+ MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
+ !strconcat(mnemonic, "{b}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_NONMEM>, LOCK;
+def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
+ RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
+ MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
+ !strconcat(mnemonic, "{w}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_NONMEM>, OpSize16, LOCK;
+def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
+ RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
+ MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
+ !strconcat(mnemonic, "{l}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_NONMEM>, OpSize32, LOCK;
+def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
+ RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
+ MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
+ !strconcat(mnemonic, "{q}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_NONMEM>, LOCK;
+
+def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
+ ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
+ ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
+ !strconcat(mnemonic, "{b}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_MEM>, LOCK;
+
+def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
+ ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
+ ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
+ !strconcat(mnemonic, "{w}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_MEM>, OpSize16, LOCK;
+
+def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
+ ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
+ ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
+ !strconcat(mnemonic, "{l}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_MEM>, OpSize32, LOCK;
+
+def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
+ ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
+ ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
+ !strconcat(mnemonic, "{q}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_MEM>, LOCK;
+
+def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
+ ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
+ ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
+ !strconcat(mnemonic, "{w}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_MEM>, OpSize16, LOCK;
+def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
+ ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
+ ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
+ !strconcat(mnemonic, "{l}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_MEM>, OpSize32, LOCK;
+def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
+ ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
+ ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
+ !strconcat(mnemonic, "{q}\t",
+ "{$src2, $dst|$dst, $src2}"),
+ [], IIC_ALU_MEM>, LOCK;
+
+}
+
+}
+
+defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
+defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
+defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
+defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
+defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
+
+// Optimized codegen when the non-memory output is not used.
+multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
+ string mnemonic> {
+let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
+ SchedRW = [WriteALULd, WriteRMW] in {
+
+def NAME#8m : I<Opc8, Form, (outs), (ins i8mem :$dst),
+ !strconcat(mnemonic, "{b}\t$dst"),
+ [], IIC_UNARY_MEM>, LOCK;
+def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
+ !strconcat(mnemonic, "{w}\t$dst"),
+ [], IIC_UNARY_MEM>, OpSize16, LOCK;
+def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
+ !strconcat(mnemonic, "{l}\t$dst"),
+ [], IIC_UNARY_MEM>, OpSize32, LOCK;
+def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
+ !strconcat(mnemonic, "{q}\t$dst"),
+ [], IIC_UNARY_MEM>, LOCK;
+}
+}
+
+defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
+defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;
+
+// Atomic compare and swap.
+multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
+ SDPatternOperator frag, X86MemOperand x86memop,
+ InstrItinClass itin> {
+let isCodeGenOnly = 1 in {
+ def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
+ !strconcat(mnemonic, "\t$ptr"),
+ [(frag addr:$ptr)], itin>, TB, LOCK;
+}
+}
+
+multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
+ string mnemonic, SDPatternOperator frag,
+ InstrItinClass itin8, InstrItinClass itin> {
+let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in {
+ let Defs = [AL, EFLAGS], Uses = [AL] in
+ def NAME#8 : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
+ !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
+ [(frag addr:$ptr, GR8:$swap, 1)], itin8>, TB, LOCK;
+ let Defs = [AX, EFLAGS], Uses = [AX] in
+ def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
+ !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
+ [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize16, LOCK;
+ let Defs = [EAX, EFLAGS], Uses = [EAX] in
+ def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
+ !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
+ [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, OpSize32, LOCK;
+ let Defs = [RAX, EFLAGS], Uses = [RAX] in
+ def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
+ !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
+ [(frag addr:$ptr, GR64:$swap, 8)], itin>, TB, LOCK;
+}
+}
+
+let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
+ SchedRW = [WriteALULd, WriteRMW] in {
+defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
+ X86cas8, i64mem,
+ IIC_CMPX_LOCK_8B>;
+}
+
+let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
+ Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {
+defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
+ X86cas16, i128mem,
+ IIC_CMPX_LOCK_16B>, REX_W;
+}
+
+defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
+ X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;
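
The operation cmpxchg implements, phrased with std::atomic (an
illustration of the semantics, not of the TableGen lowering): the
accumulator register plays the role of expected, receiving the old
memory value on failure.

    #include <atomic>

    bool cas(std::atomic<int> &mem, int &expected, int desired) {
      return mem.compare_exchange_strong(expected, desired);  // lock cmpxchg
    }
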
+
+// Atomic exchange and add
+multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
+ string frag,
+ InstrItinClass itin8, InstrItinClass itin> {
+ let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
+ SchedRW = [WriteALULd, WriteRMW] in {
+ def NAME#8 : I<opc8, MRMSrcMem, (outs GR8:$dst),
+ (ins GR8:$val, i8mem:$ptr),
+ !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
+ [(set GR8:$dst,
+ (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
+ itin8>;
+ def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
+ (ins GR16:$val, i16mem:$ptr),
+ !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
+ [(set
+ GR16:$dst,
+ (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
+ itin>, OpSize16;
+ def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$val, i32mem:$ptr),
+ !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
+ [(set
+ GR32:$dst,
+ (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
+ itin>, OpSize32;
+ def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
+ (ins GR64:$val, i64mem:$ptr),
+ !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
+ [(set
+ GR64:$dst,
+ (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
+ itin>;
+ }
+}
+
+defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
+ IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>,
+ TB, LOCK;
+
+/* The following multiclass tries to make sure that in code like
+ *     x.store (immediate op x.load(acquire), release)
+ * an operation directly on memory is generated instead of wasting a register.
+ * It is not automatic, as atomic_store/load are only lowered to MOV
+ * instructions extremely late, to prevent them from being accidentally
+ * reordered in the backend (see the RELEASE_MOV* / ACQUIRE_MOV*
+ * pseudo-instructions below).
+ */
+multiclass RELEASE_BINOP_MI<string op> {
+ def NAME#8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
+ "#RELEASE_BINOP PSEUDO!",
+ [(atomic_store_8 addr:$dst, (!cast<PatFrag>(op)
+ (atomic_load_8 addr:$dst), (i8 imm:$src)))]>;
+ // NAME#16 is not generated, as 16-bit arithmetic instructions are
+ // considered costly and are avoided as far as possible by this backend.
+ def NAME#32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
+ "#RELEASE_BINOP PSEUDO!",
+ [(atomic_store_32 addr:$dst, (!cast<PatFrag>(op)
+ (atomic_load_32 addr:$dst), (i32 imm:$src)))]>;
+ def NAME#64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
+ "#RELEASE_BINOP PSEUDO!",
+ [(atomic_store_64 addr:$dst, (!cast<PatFrag>(op)
+ (atomic_load_64 addr:$dst), (i64immSExt32:$src)))]>;
+}
+defm RELEASE_ADD : RELEASE_BINOP_MI<"add">;
+defm RELEASE_AND : RELEASE_BINOP_MI<"and">;
+defm RELEASE_OR : RELEASE_BINOP_MI<"or">;
+defm RELEASE_XOR : RELEASE_BINOP_MI<"xor">;
+// Note: we don't deal with sub, because subtractions of constants are
+// optimized into additions before this code can run.
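
The source-level pattern these pseudos target, shown with std::atomic
(illustrative; the actual matching happens on the SelectionDAG): a release
store of an immediate operation applied to an acquire load of the same
address should fold into a single memory-operand instruction.

    #include <atomic>

    void bump(std::atomic<int> &x) {
      x.store(x.load(std::memory_order_acquire) + 5,
              std::memory_order_release);  // ideally folds to: addl $5, (mem)
    }
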
+
+multiclass RELEASE_UNOP<dag dag8, dag dag16, dag dag32, dag dag64> {
+ def NAME#8m : I<0, Pseudo, (outs), (ins i8mem:$dst),
+ "#RELEASE_UNOP PSEUDO!",
+ [(atomic_store_8 addr:$dst, dag8)]>;
+ def NAME#16m : I<0, Pseudo, (outs), (ins i16mem:$dst),
+ "#RELEASE_UNOP PSEUDO!",
+ [(atomic_store_16 addr:$dst, dag16)]>;
+ def NAME#32m : I<0, Pseudo, (outs), (ins i32mem:$dst),
+ "#RELEASE_UNOP PSEUDO!",
+ [(atomic_store_32 addr:$dst, dag32)]>;
+ def NAME#64m : I<0, Pseudo, (outs), (ins i64mem:$dst),
+ "#RELEASE_UNOP PSEUDO!",
+ [(atomic_store_64 addr:$dst, dag64)]>;
+}
+
+defm RELEASE_INC : RELEASE_UNOP<
+ (add (atomic_load_8 addr:$dst), (i8 1)),
+ (add (atomic_load_16 addr:$dst), (i16 1)),
+ (add (atomic_load_32 addr:$dst), (i32 1)),
+ (add (atomic_load_64 addr:$dst), (i64 1))>, Requires<[NotSlowIncDec]>;
+defm RELEASE_DEC : RELEASE_UNOP<
+ (add (atomic_load_8 addr:$dst), (i8 -1)),
+ (add (atomic_load_16 addr:$dst), (i16 -1)),
+ (add (atomic_load_32 addr:$dst), (i32 -1)),
+ (add (atomic_load_64 addr:$dst), (i64 -1))>, Requires<[NotSlowIncDec]>;
+/*
+TODO: These don't work because the type inference of TableGen fails.
+TODO: find a way to fix it.
+defm RELEASE_NEG : RELEASE_UNOP<
+ (ineg (atomic_load_8 addr:$dst)),
+ (ineg (atomic_load_16 addr:$dst)),
+ (ineg (atomic_load_32 addr:$dst)),
+ (ineg (atomic_load_64 addr:$dst))>;
+defm RELEASE_NOT : RELEASE_UNOP<
+ (not (atomic_load_8 addr:$dst)),
+ (not (atomic_load_16 addr:$dst)),
+ (not (atomic_load_32 addr:$dst)),
+ (not (atomic_load_64 addr:$dst))>;
+*/
+
+def RELEASE_MOV8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
+ "#RELEASE_MOV PSEUDO!",
+ [(atomic_store_8 addr:$dst, (i8 imm:$src))]>;
+def RELEASE_MOV16mi : I<0, Pseudo, (outs), (ins i16mem:$dst, i16imm:$src),
+ "#RELEASE_MOV PSEUDO!",
+ [(atomic_store_16 addr:$dst, (i16 imm:$src))]>;
+def RELEASE_MOV32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
+ "#RELEASE_MOV PSEUDO!",
+ [(atomic_store_32 addr:$dst, (i32 imm:$src))]>;
+def RELEASE_MOV64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
+ "#RELEASE_MOV PSEUDO!",
+ [(atomic_store_64 addr:$dst, i64immSExt32:$src)]>;
+
+def RELEASE_MOV8mr : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
+ "#RELEASE_MOV PSEUDO!",
+ [(atomic_store_8 addr:$dst, GR8 :$src)]>;
+def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
+ "#RELEASE_MOV PSEUDO!",
+ [(atomic_store_16 addr:$dst, GR16:$src)]>;
+def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
+ "#RELEASE_MOV PSEUDO!",
+ [(atomic_store_32 addr:$dst, GR32:$src)]>;
+def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
+ "#RELEASE_MOV PSEUDO!",
+ [(atomic_store_64 addr:$dst, GR64:$src)]>;
+
+def ACQUIRE_MOV8rm : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
+ "#ACQUIRE_MOV PSEUDO!",
+ [(set GR8:$dst, (atomic_load_8 addr:$src))]>;
+def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
+ "#ACQUIRE_MOV PSEUDO!",
+ [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
+def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
+ "#ACQUIRE_MOV PSEUDO!",
+ [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
+def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
+ "#ACQUIRE_MOV PSEUDO!",
+ [(set GR64:$dst, (atomic_load_64 addr:$src))]>;
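+
+// Note that on x86 every aligned load already has acquire semantics and every
+// aligned store has release semantics, so after expansion these pseudos become
+// plain movs; they exist only to keep the atomic ordering opaque until very
+// late, as explained above.
+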
+//===----------------------------------------------------------------------===//
+// Conditional Move Pseudo Instructions.
+//===----------------------------------------------------------------------===//
+
+// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
+// instruction selection into a branch sequence.
+let Uses = [EFLAGS], usesCustomInserter = 1 in {
+ def CMOV_FR32 : I<0, Pseudo,
+ (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
+ "#CMOV_FR32 PSEUDO!",
+ [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
+ EFLAGS))]>;
+ def CMOV_FR64 : I<0, Pseudo,
+ (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
+ "#CMOV_FR64 PSEUDO!",
+ [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
+ EFLAGS))]>;
+ def CMOV_V4F32 : I<0, Pseudo,
+ (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
+ "#CMOV_V4F32 PSEUDO!",
+ [(set VR128:$dst,
+ (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V2F64 : I<0, Pseudo,
+ (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
+ "#CMOV_V2F64 PSEUDO!",
+ [(set VR128:$dst,
+ (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V2I64 : I<0, Pseudo,
+ (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
+ "#CMOV_V2I64 PSEUDO!",
+ [(set VR128:$dst,
+ (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V8F32 : I<0, Pseudo,
+ (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
+ "#CMOV_V8F32 PSEUDO!",
+ [(set VR256:$dst,
+ (v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V4F64 : I<0, Pseudo,
+ (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
+ "#CMOV_V4F64 PSEUDO!",
+ [(set VR256:$dst,
+ (v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V4I64 : I<0, Pseudo,
+ (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
+ "#CMOV_V4I64 PSEUDO!",
+ [(set VR256:$dst,
+ (v4i64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V8I64 : I<0, Pseudo,
+ (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
+ "#CMOV_V8I64 PSEUDO!",
+ [(set VR512:$dst,
+ (v8i64 (X86cmov VR512:$t, VR512:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V8F64 : I<0, Pseudo,
+ (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
+ "#CMOV_V8F64 PSEUDO!",
+ [(set VR512:$dst,
+ (v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V16F32 : I<0, Pseudo,
+ (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
+ "#CMOV_V16F32 PSEUDO!",
+ [(set VR512:$dst,
+ (v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond,
+ EFLAGS)))]>;
+}
+
+
+//===----------------------------------------------------------------------===//
+// DAG Pattern Matching Rules
+//===----------------------------------------------------------------------===//
+
+// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable
+def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;
+def : Pat<(i32 (X86Wrapper tjumptable :$dst)), (MOV32ri tjumptable :$dst)>;
+def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
+def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
+def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
+def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;
+
+def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
+ (ADD32ri GR32:$src1, tconstpool:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
+ (ADD32ri GR32:$src1, tjumptable:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
+ (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
+ (ADD32ri GR32:$src1, texternalsym:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
+ (ADD32ri GR32:$src1, tblockaddress:$src2)>;
+
+def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
+ (MOV32mi addr:$dst, tglobaladdr:$src)>;
+def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
+ (MOV32mi addr:$dst, texternalsym:$src)>;
+def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
+ (MOV32mi addr:$dst, tblockaddress:$src)>;
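+
+// For instance, (i32 (X86Wrapper tglobaladdr:$g)) selects to
+//   movl $g, %reg
+// i.e. the symbolic address is folded straight into the immediate field.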
+
+// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable, when not in the
+// small code model, should use 'movabs'. FIXME: This is really a hack; the
+// 'movabs' predicate should handle this sort of thing.
+def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
+ (MOV64ri tconstpool :$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
+ (MOV64ri tjumptable :$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
+ (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
+ (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
+ (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;
+
+// In the kernel code model, we can get the address of a label
+// into a register with 'movq'. FIXME: This is a hack; the 'imm' predicate of
+// MOV64ri32 should accept these.
+def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
+ (MOV64ri32 tconstpool :$dst)>, Requires<[KernelCode]>;
+def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
+ (MOV64ri32 tjumptable :$dst)>, Requires<[KernelCode]>;
+def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
+ (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
+def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
+ (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
+def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
+ (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;
+
+// In the small code model with -static, it is safe to store global addresses
+// directly as immediates. FIXME: This is really a hack; the 'imm' predicate
+// for MOV64mi32 should handle this sort of thing.
+def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tconstpool:$src)>,
+ Requires<[NearData, IsStatic]>;
+def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tjumptable:$src)>,
+ Requires<[NearData, IsStatic]>;
+def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
+ Requires<[NearData, IsStatic]>;
+def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, texternalsym:$src)>,
+ Requires<[NearData, IsStatic]>;
+def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tblockaddress:$src)>,
+ Requires<[NearData, IsStatic]>;
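+
+// A rough sketch of the three cases above, for a hypothetical global @g:
+//   far data (FarData):            movabsq $g, %rax   (full 64-bit immediate)
+//   kernel code model:             movq $g, %rax      (sign-extended imm32)
+//   small + static (store form):   movq $g, (%rdi)    (MOV64mi32)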
+
+def : Pat<(i32 (X86RecoverFrameAlloc texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
+def : Pat<(i64 (X86RecoverFrameAlloc texternalsym:$dst)), (MOV64ri texternalsym:$dst)>;
+
+// Calls
+
+// TLS addresses get some special handling here...
+// This corresponds to movabs $foo@tpoff, %rax
+def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
+ (MOV64ri32 tglobaltlsaddr :$dst)>;
+// This corresponds to add $foo@tpoff, %rax
+def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
+ (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
+
+
+// Direct PC relative function call for small code model. 32-bit displacement
+// sign extended to 64-bit.
+def : Pat<(X86call (i64 tglobaladdr:$dst)),
+ (CALL64pcrel32 tglobaladdr:$dst)>;
+def : Pat<(X86call (i64 texternalsym:$dst)),
+ (CALL64pcrel32 texternalsym:$dst)>;
+
+// Tailcall stuff. The TCRETURN instructions execute after the epilogue, so
+// they can never use callee-saved registers. That is the purpose of the
+// GR64_TC register classes.
+//
+// In the worst case, calling a vararg function with 6 arguments, the only
+// volatile register never used by the calling convention is %r11.
+//
+// Match an X86tcret that uses fewer than 7 volatile registers.
+def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
+ (X86tcret node:$ptr, node:$off), [{
+ // X86tcret args: (*chain, ptr, imm, regs..., glue)
+ unsigned NumRegs = 0;
+ for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
+ if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
+ return false;
+ return true;
+}]>;
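+
+// For example, a vararg call with 6 integer arguments uses RDI, RSI, RDX,
+// RCX, R8 and R9, plus AL for the vector count; per the note above, only
+// %r11 then remains to hold the call target, leaving no scratch registers
+// for a base+index folded load.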
+
+def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
+ (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
+ Requires<[Not64BitMode]>;
+
+// FIXME: This is disabled for 32-bit PIC mode because the global base
+// register, which is part of the address mode, may be assigned a
+// callee-saved register.
+def : Pat<(X86tcret (load addr:$dst), imm:$off),
+ (TCRETURNmi addr:$dst, imm:$off)>,
+ Requires<[Not64BitMode, IsNotPIC]>;
+
+def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
+ (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
+ Requires<[NotLP64]>;
+
+def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
+ (TCRETURNdi texternalsym:$dst, imm:$off)>,
+ Requires<[NotLP64]>;
+
+def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
+ (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
+
+// Don't fold loads into an X86tcret requiring more than 6 regs, as there
+// wouldn't be enough scratch registers for base+index addressing.
+def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
+ (TCRETURNmi64 addr:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
+
+def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
+ (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
+ Requires<[IsLP64]>;
+
+def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
+ (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
+ Requires<[IsLP64]>;
+
+// Normal calls, with various flavors of addresses.
+def : Pat<(X86call (i32 tglobaladdr:$dst)),
+ (CALLpcrel32 tglobaladdr:$dst)>;
+def : Pat<(X86call (i32 texternalsym:$dst)),
+ (CALLpcrel32 texternalsym:$dst)>;
+def : Pat<(X86call (i32 imm:$dst)),
+ (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;
+
+// Comparisons.
+
+// TEST R,R is smaller than CMP R,0
+def : Pat<(X86cmp GR8:$src1, 0),
+ (TEST8rr GR8:$src1, GR8:$src1)>;
+def : Pat<(X86cmp GR16:$src1, 0),
+ (TEST16rr GR16:$src1, GR16:$src1)>;
+def : Pat<(X86cmp GR32:$src1, 0),
+ (TEST32rr GR32:$src1, GR32:$src1)>;
+def : Pat<(X86cmp GR64:$src1, 0),
+ (TEST64rr GR64:$src1, GR64:$src1)>;
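+
+// For example, "testl %eax, %eax" is 2 bytes while "cmpl $0, %eax" takes 3
+// (opcode + ModRM + imm8), and both set ZF/SF/PF identically for a
+// compare-with-zero.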
+
+// Conditional moves with folded loads: the operands are swapped and the
+// condition is inverted so that the load can be folded.
+multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
+ Instruction Inst64> {
+ let Predicates = [HasCMov] in {
+ def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
+ (Inst16 GR16:$src2, addr:$src1)>;
+ def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
+ (Inst32 GR32:$src2, addr:$src1)>;
+ def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
+ (Inst64 GR64:$src2, addr:$src1)>;
+ }
+}
+
+defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
+defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
+defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
+defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
+defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
+defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
+defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
+defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
+defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
+defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
+defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
+defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
+defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
+defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
+defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
+defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;
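+
+// A worked example: in (X86cmov (loadi32 addr), GR32:$r, X86_COND_B) the
+// load is the value chosen when CF is clear, but CMOVcc can only fold a load
+// as its condition-true source. Inverting the condition and swapping the
+// operands yields CMOVAE32rm, i.e. "cmovael mem, %r", the same select.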
+
+// zextload bool -> zextload byte
+def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
+def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
+def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
+def : Pat<(zextloadi64i1 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
+
+// extload bool -> extload byte
+// When extloading from 16-bit and smaller memory locations into 64-bit
+// registers, use zero-extending loads so that the entire 64-bit register is
+// defined, avoiding partial-register updates.
+
+def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
+def : Pat<(extloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
+def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
+def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>;
+def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
+def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
+
+// For extloads to 64-bit registers, use SUBREG_TO_REG of a zero-extending
+// 32-bit load, so the high half of the register is well defined (zero).
+def : Pat<(extloadi64i1 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
+def : Pat<(extloadi64i8 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
+def : Pat<(extloadi64i16 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
+def : Pat<(extloadi64i32 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
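+
+// For example, an extload of i8 into a 64-bit register is emitted as
+//   movzbl (%reg), %eax
+// which defines all of %rax (a 32-bit def zeroes bits 63:32), whereas a plain
+//   movb (%reg), %al
+// would merge into the old %rax value and risk a partial-register stall.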
+
+// anyext. Define these to do an explicit zero-extend to
+// avoid partial-register updates.
+def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
+ (MOVZX32rr8 GR8 :$src), sub_16bit)>;
+def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
+
+// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
+def : Pat<(i32 (anyext GR16:$src)),
+ (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;
+
+def : Pat<(i64 (anyext GR8 :$src)),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
+def : Pat<(i64 (anyext GR16:$src)),
+ (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
+def : Pat<(i64 (anyext GR32:$src)),
+ (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
+
+
+// Any instruction that defines a 32-bit result implicitly zero-extends into
+// the high half of the 64-bit register, with a few exceptions: Truncate may
+// be lowered to EXTRACT_SUBREG. CopyFromReg may be copying from a truncate.
+// AssertSext makes no promise about the high bits. And x86's cmov doesn't do
+// anything if the condition is false. Any other 32-bit operation will
+// zero-extend up to 64 bits.
+def def32 : PatLeaf<(i32 GR32:$src), [{
+ return N->getOpcode() != ISD::TRUNCATE &&
+ N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
+ N->getOpcode() != ISD::CopyFromReg &&
+ N->getOpcode() != ISD::AssertSext &&
+ N->getOpcode() != X86ISD::CMOV;
+}]>;
+
+// In the case of a 32-bit def that is known to implicitly zero-extend,
+// we can use a SUBREG_TO_REG.
+def : Pat<(i64 (zext def32:$src)),
+ (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
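+
+// For example, if $src was defined by an addl, bits 63:32 are already zero,
+// so the (zext i32 -> i64) costs no instruction at all: SUBREG_TO_REG merely
+// re-labels the value as the low half of a 64-bit register.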
+
+//===----------------------------------------------------------------------===//
+// Pattern match OR as ADD
+//===----------------------------------------------------------------------===//
+
+// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
+// 3-addressified into an LEA instruction to avoid copies. However, we also
+// want to finally emit these instructions as an or at the end of the code
+// generator to make the generated code easier to read. To do this, we select
+// into "disjoint bits" pseudo ops.
+
+// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
+def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+ return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
+
+ APInt KnownZero0, KnownOne0;
+ CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
+ APInt KnownZero1, KnownOne1;
+ CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
+ return (~KnownZero0 & ~KnownZero1) == 0;
+}]>;
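+
+// A worked example: if $lhs is (shl x, 2), its low two bits are known zero,
+// so (or $lhs, 3) equals (add $lhs, 3), which can later be emitted as
+//   leal 3(,%reg,4), %dst
+// rather than a two-address OR that may force an extra copy.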
+
+
+// (or x1, x2) -> (add x1, x2) if the two operands are known not to share bits.
+// Try this before selecting to OR.
+let AddedComplexity = 5, SchedRW = [WriteALU] in {
+
+let isConvertibleToThreeAddress = 1,
+ Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
+let isCommutable = 1 in {
+def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "", // orw/addw REG, REG
+ [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
+def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "", // orl/addl REG, REG
+ [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
+def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "", // orq/addq REG, REG
+ [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
+} // isCommutable
+
+// NOTE: These are order-specific; we want the ri8 forms to be listed first
+// so that they are slightly preferred over the ri forms.
+
+def ADD16ri8_DB : I<0, Pseudo,
+ (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
+ "", // orw/addw REG, imm8
+ [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
+def ADD16ri_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ "", // orw/addw REG, imm
+ [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;
+
+def ADD32ri8_DB : I<0, Pseudo,
+ (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
+ "", // orl/addl REG, imm8
+ [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
+def ADD32ri_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ "", // orl/addl REG, imm
+ [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;
+
+
+def ADD64ri8_DB : I<0, Pseudo,
+ (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+ "", // orq/addq REG, imm8
+ [(set GR64:$dst, (or_is_add GR64:$src1,
+ i64immSExt8:$src2))]>;
+def ADD64ri32_DB : I<0, Pseudo,
+ (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "", // orq/addq REG, imm
+ [(set GR64:$dst, (or_is_add GR64:$src1,
+ i64immSExt32:$src2))]>;
+}
+} // AddedComplexity, SchedRW
+
+
+//===----------------------------------------------------------------------===//
+// Some peepholes
+//===----------------------------------------------------------------------===//
+
+// Odd encoding trick: -128 fits into an 8-bit immediate field while
+// +128 doesn't, so in this special case use a sub instead of an add.
+def : Pat<(add GR16:$src1, 128),
+ (SUB16ri8 GR16:$src1, -128)>;
+def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
+ (SUB16mi8 addr:$dst, -128)>;
+
+def : Pat<(add GR32:$src1, 128),
+ (SUB32ri8 GR32:$src1, -128)>;
+def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
+ (SUB32mi8 addr:$dst, -128)>;
+
+def : Pat<(add GR64:$src1, 128),
+ (SUB64ri8 GR64:$src1, -128)>;
+def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
+ (SUB64mi8 addr:$dst, -128)>;
+
+// The same trick applies for 32-bit immediate fields in 64-bit
+// instructions.
+def : Pat<(add GR64:$src1, 0x0000000080000000),
+ (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
+def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
+ (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
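+
+// For example (byte counts for a non-%eax register):
+//   addl $128, %ecx     ; 81 /0 id, 6 bytes
+//   subl $-128, %ecx    ; 83 /5 ib, 3 bytes
+// and in the 64-bit case the sub form avoids having to materialize 2^31 in
+// a register with movabs first.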
+
+// To avoid needing to materialize an immediate in a register, use a 32-bit
+// AND with implicit zero-extension instead of a 64-bit AND if the immediate
+// has at least 32 bits of leading zeros. If, in addition, the last 32 bits
+// can be represented with a sign extension of an 8-bit constant, use that.
+
+def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
+ (SUBREG_TO_REG
+ (i64 0),
+ (AND32ri8
+ (EXTRACT_SUBREG GR64:$src, sub_32bit),
+ (i32 (GetLo8XForm imm:$imm))),
+ sub_32bit)>;
+
+def : Pat<(and GR64:$src, i64immZExt32:$imm),
+ (SUBREG_TO_REG
+ (i64 0),
+ (AND32ri
+ (EXTRACT_SUBREG GR64:$src, sub_32bit),
+ (i32 (GetLo32XForm imm:$imm))),
+ sub_32bit)>;
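+
+// For example, (and GR64:$x, 0x12345678) selects to an andl on the low half:
+//   andl $0x12345678, %eax
+// The 32-bit def implicitly clears bits 63:32, which matches the masked
+// result because the top 32 bits of the immediate are zero anyway.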
+
+
+// r & (2^16-1) ==> movz
+def : Pat<(and GR32:$src1, 0xffff),
+ (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR32:$src1, 0xff),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
+ GR32_ABCD)),
+ sub_8bit))>,
+ Requires<[Not64BitMode]>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR16:$src1, 0xff),
+ (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
+ (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
+ sub_16bit)>,
+ Requires<[Not64BitMode]>;
+
+// r & (2^32-1) ==> movz
+def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
+ (SUBREG_TO_REG (i64 0),
+ (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
+ sub_32bit)>;
+// r & (2^16-1) ==> movz
+def : Pat<(and GR64:$src, 0xffff),
+ (SUBREG_TO_REG (i64 0),
+ (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
+ sub_32bit)>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR64:$src, 0xff),
+ (SUBREG_TO_REG (i64 0),
+ (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
+ sub_32bit)>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR32:$src1, 0xff),
+ (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
+ Requires<[In64BitMode]>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR16:$src1, 0xff),
+ (EXTRACT_SUBREG (MOVZX32rr8 (i8
+ (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
+ Requires<[In64BitMode]>;
+
+
+// sext_inreg patterns
+def : Pat<(sext_inreg GR32:$src, i16),
+ (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
+def : Pat<(sext_inreg GR32:$src, i8),
+ (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit))>,
+ Requires<[Not64BitMode]>;
+
+def : Pat<(sext_inreg GR16:$src, i8),
+ (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
+ (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
+ sub_16bit)>,
+ Requires<[Not64BitMode]>;
+
+def : Pat<(sext_inreg GR64:$src, i32),
+ (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
+def : Pat<(sext_inreg GR64:$src, i16),
+ (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
+def : Pat<(sext_inreg GR64:$src, i8),
+ (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
+def : Pat<(sext_inreg GR32:$src, i8),
+ (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
+ Requires<[In64BitMode]>;
+def : Pat<(sext_inreg GR16:$src, i8),
+ (EXTRACT_SUBREG (MOVSX32rr8
+ (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
+ Requires<[In64BitMode]>;
+
+// sext, sext_load, zext, zext_load
+def: Pat<(i16 (sext GR8:$src)),
+ (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
+def: Pat<(sextloadi16i8 addr:$src),
+ (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
+def: Pat<(i16 (zext GR8:$src)),
+ (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
+def: Pat<(zextloadi16i8 addr:$src),
+ (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
+
+// trunc patterns
+def : Pat<(i16 (trunc GR32:$src)),
+ (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
+def : Pat<(i8 (trunc GR32:$src)),
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit)>,
+ Requires<[Not64BitMode]>;
+def : Pat<(i8 (trunc GR16:$src)),
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit)>,
+ Requires<[Not64BitMode]>;
+def : Pat<(i32 (trunc GR64:$src)),
+ (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
+def : Pat<(i16 (trunc GR64:$src)),
+ (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
+def : Pat<(i8 (trunc GR64:$src)),
+ (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
+def : Pat<(i8 (trunc GR32:$src)),
+ (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
+ Requires<[In64BitMode]>;
+def : Pat<(i8 (trunc GR16:$src)),
+ (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
+ Requires<[In64BitMode]>;
+
+// h-register tricks
+def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)>,
+ Requires<[Not64BitMode]>;
+def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit_hi)>,
+ Requires<[Not64BitMode]>;
+def : Pat<(srl GR16:$src, (i8 8)),
+ (EXTRACT_SUBREG
+ (MOVZX32rr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
+ sub_16bit)>,
+ Requires<[Not64BitMode]>;
+def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
+ GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[Not64BitMode]>;
+def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
+ GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[Not64BitMode]>;
+def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[Not64BitMode]>;
+def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[Not64BitMode]>;
+
+// h-register tricks.
+// For now, be conservative on x86-64 and use an h-register extract only if
+// the value is immediately zero-extended or stored, which are somewhat common
+// cases. The patterns below prevent a register requiring a REX prefix from
+// being allocated in the same instruction as the h-register, since there is
+// currently no way to describe this requirement to the register allocator.
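+// For example, "movzbl %ah, %r8d" cannot be encoded: once a REX prefix is
+// present, the encodings that normally mean AH/BH/CH/DH instead select
+// SPL/BPL/SIL/DIL. The _NOREX instructions below restrict their operands so
+// the allocator can never produce such a combination.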
+
+// h-register extract and zero-extend.
+def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
+ (SUBREG_TO_REG
+ (i64 0),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
+ sub_8bit_hi)),
+ sub_32bit)>;
+def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
+ (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(srl GR16:$src, (i8 8)),
+ (EXTRACT_SUBREG
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
+ sub_16bit)>,
+ Requires<[In64BitMode]>;
+def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
+ (SUBREG_TO_REG
+ (i64 0),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
+ sub_32bit)>;
+def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
+ (SUBREG_TO_REG
+ (i64 0),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
+ sub_32bit)>;
+
+// h-register extract and store.
+def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
+ (MOV8mr_NOREX
+ addr:$dst,
+ (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
+ sub_8bit_hi))>;
+def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
+ (MOV8mr_NOREX
+ addr:$dst,
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
+ (MOV8mr_NOREX
+ addr:$dst,
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+
+
+// (shl x, 1) ==> (add x, x)
+// Note that if x is undef (immediate or otherwise), we could theoretically
+// end up with the two uses of x getting different values, producing a result
+// where the least significant bit is not 0. However, the probability of this
+// happening is considered low enough that this is officially not a
+// "real problem".
+def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
+def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
+def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
+def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
+
+// Helper immediates that check that a mask leaves the significant shift bits
+// unchanged (the low 5 bits for 32-bit shifts, the low 6 for 64-bit).
+def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;
+def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;
+
+// Shift amount is implicitly masked.
+multiclass MaskedShiftAmountPats<SDNode frag, string name> {
+ // (shift x (and y, 31)) ==> (shift x, y)
+ def : Pat<(frag GR8:$src1, (and CL, immShift32)),
+ (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
+ def : Pat<(frag GR16:$src1, (and CL, immShift32)),
+ (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
+ def : Pat<(frag GR32:$src1, (and CL, immShift32)),
+ (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
+ def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
+ (!cast<Instruction>(name # "8mCL") addr:$dst)>;
+ def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
+ (!cast<Instruction>(name # "16mCL") addr:$dst)>;
+ def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
+ (!cast<Instruction>(name # "32mCL") addr:$dst)>;
+
+ // (shift x (and y, 63)) ==> (shift x, y)
+ def : Pat<(frag GR64:$src1, (and CL, immShift64)),
+ (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
+ def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
+ (!cast<Instruction>(name # "64mCL") addr:$dst)>;
+}
+
+defm : MaskedShiftAmountPats<shl, "SHL">;
+defm : MaskedShiftAmountPats<srl, "SHR">;
+defm : MaskedShiftAmountPats<sra, "SAR">;
+defm : MaskedShiftAmountPats<rotl, "ROL">;
+defm : MaskedShiftAmountPats<rotr, "ROR">;
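+
+// For example, (shl GR32:$x, (and CL, 31)) selects to a plain
+//   shll %cl, %reg
+// because the hardware already masks 32-bit shift amounts to 5 bits
+// (6 bits for 64-bit shifts), making the explicit AND redundant.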
+
+// (anyext (setcc_carry)) -> (setcc_carry)
+def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C16r)>;
+def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C32r)>;
+def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C32r)>;
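+// (SETB_C32r expands to "sbbl %reg, %reg", which replicates CF into every
+// bit of the register, so the value is already correct at any wider width
+// and the anyext folds away.)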
+
+//===----------------------------------------------------------------------===//
+// EFLAGS-defining Patterns
+//===----------------------------------------------------------------------===//
+
+// add reg, reg
+def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
+
+// add reg, mem
+def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
+ (ADD8rm GR8:$src1, addr:$src2)>;
+def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
+ (ADD16rm GR16:$src1, addr:$src2)>;
+def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
+ (ADD32rm GR32:$src1, addr:$src2)>;
+
+// add reg, imm
+def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8:$src1 , imm:$src2)>;
+def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
+def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
+def : Pat<(add GR16:$src1, i16immSExt8:$src2),
+ (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(add GR32:$src1, i32immSExt8:$src2),
+ (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
+
+// sub reg, reg
+def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
+
+// sub reg, mem
+def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
+ (SUB8rm GR8:$src1, addr:$src2)>;
+def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
+ (SUB16rm GR16:$src1, addr:$src2)>;
+def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
+ (SUB32rm GR32:$src1, addr:$src2)>;
+
+// sub reg, imm
+def : Pat<(sub GR8:$src1, imm:$src2),
+ (SUB8ri GR8:$src1, imm:$src2)>;
+def : Pat<(sub GR16:$src1, imm:$src2),
+ (SUB16ri GR16:$src1, imm:$src2)>;
+def : Pat<(sub GR32:$src1, imm:$src2),
+ (SUB32ri GR32:$src1, imm:$src2)>;
+def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
+ (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
+ (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
+
+// sub 0, reg
+def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r GR8 :$src)>;
+def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
+def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
+def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
+
+// mul reg, reg
+def : Pat<(mul GR16:$src1, GR16:$src2),
+ (IMUL16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(mul GR32:$src1, GR32:$src2),
+ (IMUL32rr GR32:$src1, GR32:$src2)>;
+
+// mul reg, mem
+def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
+ (IMUL16rm GR16:$src1, addr:$src2)>;
+def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
+ (IMUL32rm GR32:$src1, addr:$src2)>;
+
+// mul reg, imm
+def : Pat<(mul GR16:$src1, imm:$src2),
+ (IMUL16rri GR16:$src1, imm:$src2)>;
+def : Pat<(mul GR32:$src1, imm:$src2),
+ (IMUL32rri GR32:$src1, imm:$src2)>;
+def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
+ (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
+ (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
+
+// reg = mul mem, imm
+def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
+ (IMUL16rmi addr:$src1, imm:$src2)>;
+def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
+ (IMUL32rmi addr:$src1, imm:$src2)>;
+def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
+ (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
+def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
+ (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
+
+// Patterns for nodes that do not produce flags, for instructions that do.
+
+// addition
+def : Pat<(add GR64:$src1, GR64:$src2),
+ (ADD64rr GR64:$src1, GR64:$src2)>;
+def : Pat<(add GR64:$src1, i64immSExt8:$src2),
+ (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(add GR64:$src1, i64immSExt32:$src2),
+ (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
+def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
+ (ADD64rm GR64:$src1, addr:$src2)>;
+
+// subtraction
+def : Pat<(sub GR64:$src1, GR64:$src2),
+ (SUB64rr GR64:$src1, GR64:$src2)>;
+def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
+ (SUB64rm GR64:$src1, addr:$src2)>;
+def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
+ (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
+ (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// Multiply
+def : Pat<(mul GR64:$src1, GR64:$src2),
+ (IMUL64rr GR64:$src1, GR64:$src2)>;
+def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
+ (IMUL64rm GR64:$src1, addr:$src2)>;
+def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
+ (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
+ (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
+def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
+ (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
+def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
+ (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
+
+// Increment/Decrement reg.
+// Do not select INC/DEC when they are slow.
+let Predicates = [NotSlowIncDec] in {
+ def : Pat<(add GR8:$src, 1), (INC8r GR8:$src)>;
+ def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>;
+ def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>;
+ def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;
+ def : Pat<(add GR8:$src, -1), (DEC8r GR8:$src)>;
+ def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
+ def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
+ def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
+}
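+
+// (On such processors INC/DEC leave CF untouched, so they partially update
+// EFLAGS and create a false dependence on the previous flags value;
+// add/sub $1 avoid that at the cost of a slightly larger encoding.)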
+
+// or reg/reg.
+def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
+def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;
+
+// or reg/mem
+def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
+ (OR8rm GR8:$src1, addr:$src2)>;
+def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
+ (OR16rm GR16:$src1, addr:$src2)>;
+def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
+ (OR32rm GR32:$src1, addr:$src2)>;
+def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
+ (OR64rm GR64:$src1, addr:$src2)>;
+
+// or reg/imm
+def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri GR8 :$src1, imm:$src2)>;
+def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
+def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
+def : Pat<(or GR16:$src1, i16immSExt8:$src2),
+ (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(or GR32:$src1, i32immSExt8:$src2),
+ (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
+def : Pat<(or GR64:$src1, i64immSExt8:$src2),
+ (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(or GR64:$src1, i64immSExt32:$src2),
+ (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// xor reg/reg
+def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
+def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;
+
+// xor reg/mem
+def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
+ (XOR8rm GR8:$src1, addr:$src2)>;
+def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
+ (XOR16rm GR16:$src1, addr:$src2)>;
+def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
+ (XOR32rm GR32:$src1, addr:$src2)>;
+def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
+ (XOR64rm GR64:$src1, addr:$src2)>;
+
+// xor reg/imm
+def : Pat<(xor GR8:$src1, imm:$src2),
+ (XOR8ri GR8:$src1, imm:$src2)>;
+def : Pat<(xor GR16:$src1, imm:$src2),
+ (XOR16ri GR16:$src1, imm:$src2)>;
+def : Pat<(xor GR32:$src1, imm:$src2),
+ (XOR32ri GR32:$src1, imm:$src2)>;
+def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
+ (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
+ (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
+def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
+ (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
+ (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// and reg/reg
+def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
+def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;
+
+// and reg/mem
+def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
+ (AND8rm GR8:$src1, addr:$src2)>;
+def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
+ (AND16rm GR16:$src1, addr:$src2)>;
+def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
+ (AND32rm GR32:$src1, addr:$src2)>;
+def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
+ (AND64rm GR64:$src1, addr:$src2)>;
+
+// and reg/imm
+def : Pat<(and GR8:$src1, imm:$src2),
+ (AND8ri GR8:$src1, imm:$src2)>;
+def : Pat<(and GR16:$src1, imm:$src2),
+ (AND16ri GR16:$src1, imm:$src2)>;
+def : Pat<(and GR32:$src1, imm:$src2),
+ (AND32ri GR32:$src1, imm:$src2)>;
+def : Pat<(and GR16:$src1, i16immSExt8:$src2),
+ (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(and GR32:$src1, i32immSExt8:$src2),
+ (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
+def : Pat<(and GR64:$src1, i64immSExt8:$src2),
+ (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(and GR64:$src1, i64immSExt32:$src2),
+ (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// Bit scan instruction patterns to match explicit zero-undef behavior.
+def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
+def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
+def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
+def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
+def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
+def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
+
+// When HasMOVBE is enabled it is possible to get a non-legalized
+// register-register 16-bit bswap. Map it to a ROL instruction.
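+// For example, bswap on the i16 value 0xABCD yields 0xCDAB, which is exactly
+//   rolw $8, %ax
+// (BSWAP itself is only defined for 32- and 64-bit registers).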
+let Predicates = [HasMOVBE] in {
+ def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
+}
return false;
}
-int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
- const MachineFunction *MF = MI->getParent()->getParent();
- const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
-
- if (MI->getOpcode() == getCallFrameSetupOpcode() ||
- MI->getOpcode() == getCallFrameDestroyOpcode()) {
- unsigned StackAlign = TFI->getStackAlignment();
- int SPAdj = (MI->getOperand(0).getImm() + StackAlign - 1) / StackAlign *
- StackAlign;
-
- SPAdj -= MI->getOperand(1).getImm();
-
- if (MI->getOpcode() == getCallFrameSetupOpcode())
- return SPAdj;
- else
- return -SPAdj;
- }
-
- // To know whether a call adjusts the stack, we need information
- // that is bound to the following ADJCALLSTACKUP pseudo.
- // Look for the next ADJCALLSTACKUP that follows the call.
- if (MI->isCall()) {
- const MachineBasicBlock* MBB = MI->getParent();
- auto I = ++MachineBasicBlock::const_iterator(MI);
- for (auto E = MBB->end(); I != E; ++I) {
- if (I->getOpcode() == getCallFrameDestroyOpcode() ||
- I->isCall())
- break;
- }
-
- // If we could not find a frame destroy opcode, then it has already
- // been simplified, so we don't care.
- if (I->getOpcode() != getCallFrameDestroyOpcode())
- return 0;
-
- return -(I->getOperand(1).getImm());
- }
-
- // Currently handle only PUSHes we can reasonably expect to see
- // in call sequences
- switch (MI->getOpcode()) {
- default:
- return 0;
- case X86::PUSH32i8:
- case X86::PUSH32r:
- case X86::PUSH32rmm:
- case X86::PUSH32rmr:
- case X86::PUSHi32:
- return 4;
- }
-}
-
/// isFrameOperand - Return true and the FrameIndex if the specified
/// operand and follow operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
///
const X86RegisterInfo &getRegisterInfo() const { return RI; }
- /// getSPAdjust - This returns the stack pointer adjustment made by
- /// this instruction. For x86, we need to handle more complex call
- /// sequences involving PUSHes.
- int getSPAdjust(const MachineInstr *MI) const override;
-
/// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
/// extension instruction. That is, it's like a copy where it's legal for the
/// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
unsigned ArgumentStackSize;
/// NumLocalDynamics - Number of local-dynamic TLS accesses.
unsigned NumLocalDynamics;
- /// HasPushSequences - Keeps track of whether this function uses sequences
- /// of pushes to pass function parameters.
- bool HasPushSequences;
private:
/// ForwardedMustTailRegParms - A list of virtual and physical registers
VarArgsGPOffset(0),
VarArgsFPOffset(0),
ArgumentStackSize(0),
- NumLocalDynamics(0),
- HasPushSequences(false) {}
+ NumLocalDynamics(0) {}
explicit X86MachineFunctionInfo(MachineFunction &MF)
: ForceFramePointer(false),
VarArgsGPOffset(0),
VarArgsFPOffset(0),
ArgumentStackSize(0),
- NumLocalDynamics(0),
- HasPushSequences(false) {}
+ NumLocalDynamics(0) {}
bool getForceFramePointer() const { return ForceFramePointer;}
void setForceFramePointer(bool forceFP) { ForceFramePointer = forceFP; }
- bool getHasPushSequences() const { return HasPushSequences; }
- void setHasPushSequences(bool HasPush) { HasPushSequences = HasPush; }
-
bool getRestoreBasePointer() const { return RestoreBasePointerOffset!=0; }
void setRestoreBasePointer(const MachineFunction *MF);
int getRestoreBasePointerOffset() const {return RestoreBasePointerOffset; }
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS) const {
+ assert(SPAdj == 0 && "Unexpected");
+
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
} else
FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);
- if (BasePtr == StackPtr)
- FIOffset += SPAdj;
-
// The frame index format for stackmaps and patchpoints is different from the
// X86 format. It only has a FI and an offset.
if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
void addIRPasses() override;
bool addInstSelector() override;
bool addILPOpts() override;
- void addPreRegAlloc() override;
void addPostRegAlloc() override;
void addPreEmitPass() override;
};
return true;
}
-void X86PassConfig::addPreRegAlloc() {
- addPass(createX86CallFrameOptimization());
-}
-
void X86PassConfig::addPostRegAlloc() {
addPass(createX86FloatingPointStackifierPass());
}
to label %invoke.cont unwind label %lpad
; Uses end as sret param.
-; CHECK: pushl %[[end]]
+; CHECK: movl %[[end]], (%esp)
; CHECK: calll _plus
invoke.cont:
; RUN: llc < %s -mtriple=i686-windows | FileCheck %s -check-prefix=NORMAL
-; RUN: llc < %s -mtriple=x86_64-windows | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=i686-windows -force-align-stack -stack-alignment=32 | FileCheck %s -check-prefix=ALIGNED
-
declare void @good(i32 %a, i32 %b, i32 %c, i32 %d)
declare void @inreg(i32 %a, i32 inreg %b, i32 %c, i32 %d)
; Here, we should have a reserved frame, so we don't expect pushes
-; NORMAL-LABEL: test1:
+; NORMAL-LABEL: test1
; NORMAL: subl $16, %esp
; NORMAL-NEXT: movl $4, 12(%esp)
; NORMAL-NEXT: movl $3, 8(%esp)
; NORMAL-NEXT: movl $2, 4(%esp)
; NORMAL-NEXT: movl $1, (%esp)
; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
define void @test1() {
entry:
call void @good(i32 1, i32 2, i32 3, i32 4)
ret void
}
-; We're optimizing for code size, so we should get pushes for x86,
-; even though there is a reserved call frame.
-; Make sure we don't touch x86-64
-; NORMAL-LABEL: test1b:
-; NORMAL-NOT: subl {{.*}} %esp
-; NORMAL: pushl $4
-; NORMAL-NEXT: pushl $3
-; NORMAL-NEXT: pushl $2
-; NORMAL-NEXT: pushl $1
-; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
-; X64-LABEL: test1b:
-; X64: movl $1, %ecx
-; X64-NEXT: movl $2, %edx
-; X64-NEXT: movl $3, %r8d
-; X64-NEXT: movl $4, %r9d
-; X64-NEXT: callq good
-define void @test1b() optsize {
-entry:
- call void @good(i32 1, i32 2, i32 3, i32 4)
- ret void
-}
-
-; Same as above, but for minsize
-; NORMAL-LABEL: test1c:
-; NORMAL-NOT: subl {{.*}} %esp
-; NORMAL: pushl $4
-; NORMAL-NEXT: pushl $3
-; NORMAL-NEXT: pushl $2
-; NORMAL-NEXT: pushl $1
-; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
-define void @test1c() minsize {
-entry:
- call void @good(i32 1, i32 2, i32 3, i32 4)
- ret void
-}
-
-; If we have a reserved frame, we should have pushes
-; NORMAL-LABEL: test2:
+; Here, we expect a sequence of 4 immediate pushes
+; NORMAL-LABEL: test2
; NORMAL-NOT: subl {{.*}} %esp
; NORMAL: pushl $4
; NORMAL-NEXT: pushl $3
; Again, we expect a sequence of 4 immediate pushes
; Checks that we generate the right pushes for >8bit immediates
-; NORMAL-LABEL: test2b:
+; NORMAL-LABEL: test2b
; NORMAL-NOT: subl {{.*}} %esp
; NORMAL: pushl $4096
; NORMAL-NEXT: pushl $3072
; NORMAL-NEXT: pushl $2048
; NORMAL-NEXT: pushl $1024
; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
-define void @test2b() optsize {
+define void @test2b(i32 %k) {
entry:
+ %a = alloca i32, i32 %k
call void @good(i32 1024, i32 2048, i32 3072, i32 4096)
ret void
}
; The first push should push a register
-; NORMAL-LABEL: test3:
+; NORMAL-LABEL: test3
; NORMAL-NOT: subl {{.*}} %esp
; NORMAL: pushl $4
; NORMAL-NEXT: pushl $3
; NORMAL-NEXT: pushl $2
; NORMAL-NEXT: pushl %e{{..}}
; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
-define void @test3(i32 %k) optsize {
+define void @test3(i32 %k) {
entry:
+ %a = alloca i32, i32 %k
call void @good(i32 %k, i32 2, i32 3, i32 4)
ret void
}
; We don't support weird calling conventions
-; NORMAL-LABEL: test4:
+; NORMAL-LABEL: test4
; NORMAL: subl $12, %esp
; NORMAL-NEXT: movl $4, 8(%esp)
; NORMAL-NEXT: movl $3, 4(%esp)
; NORMAL-NEXT: movl $1, (%esp)
; NORMAL-NEXT: movl $2, %eax
; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $12, %esp
-define void @test4() optsize {
+define void @test4(i32 %k) {
entry:
+ %a = alloca i32, i32 %k
call void @inreg(i32 1, i32 2, i32 3, i32 4)
ret void
}
-; When there is no reserved call frame, check that additional alignment
-; is added when the pushes don't add up to the required alignment.
-; ALIGNED-LABEL: test5:
+; Check that additional alignment is added when the pushes
+; don't add up to the required alignment.
+; ALIGNED-LABEL: test5
; ALIGNED: subl $16, %esp
; ALIGNED-NEXT: pushl $4
; ALIGNED-NEXT: pushl $3
; Check that pushing the addresses of globals (Or generally, things that
; aren't exactly immediates) isn't broken.
; Fixes PR21878.
-; NORMAL-LABEL: test6:
+; NORMAL-LABEL: test6
; NORMAL: pushl $_ext
; NORMAL-NEXT: call
declare void @f(i8*)
alloca i32
ret void
}
-
-; Check that we fold simple cases into the push
-; NORMAL-LABEL: test7:
-; NORMAL-NOT: subl {{.*}} %esp
-; NORMAL: movl 4(%esp), [[EAX:%e..]]
-; NORMAL-NEXT: pushl $4
-; NORMAL-NEXT: pushl ([[EAX]])
-; NORMAL-NEXT: pushl $2
-; NORMAL-NEXT: pushl $1
-; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
-define void @test7(i32* %ptr) optsize {
-entry:
- %val = load i32* %ptr
- call void @good(i32 1, i32 2, i32 %val, i32 4)
- ret void
-}
-
-; But we don't want to fold stack-relative loads into the push,
-; because the offset will be wrong
-; NORMAL-LABEL: test8:
-; NORMAL-NOT: subl {{.*}} %esp
-; NORMAL: movl 4(%esp), [[EAX:%e..]]
-; NORMAL-NEXT: pushl $4
-; NORMAL-NEXT: pushl [[EAX]]
-; NORMAL-NEXT: pushl $2
-; NORMAL-NEXT: pushl $1
-; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
-define void @test8(i32* %ptr) optsize {
-entry:
- %val = ptrtoint i32* %ptr to i32
- call void @good(i32 1, i32 2, i32 %val, i32 4)
- ret void
-}
-
-; If one function is using push instructions, and the other isn't
-; (because it has frame-index references), then we must resolve
-; these references correctly.
-; NORMAL-LABEL: test9:
-; NORMAL-NOT: leal (%esp),
-; NORMAL: pushl $4
-; NORMAL-NEXT: pushl $3
-; NORMAL-NEXT: pushl $2
-; NORMAL-NEXT: pushl $1
-; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
-; NORMAL-NEXT: subl $16, %esp
-; NORMAL-NEXT: leal 16(%esp), [[EAX:%e..]]
-; NORMAL-NEXT: movl [[EAX]], 12(%esp)
-; NORMAL-NEXT: movl $7, 8(%esp)
-; NORMAL-NEXT: movl $6, 4(%esp)
-; NORMAL-NEXT: movl $5, (%esp)
-; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
-define void @test9() optsize {
-entry:
- %p = alloca i32, align 4
- call void @good(i32 1, i32 2, i32 3, i32 4)
- %0 = ptrtoint i32* %p to i32
- call void @good(i32 5, i32 6, i32 7, i32 %0)
- ret void
-}
-
-; We can end up with an indirect call which gets reloaded on the spot.
-; Make sure we reference the correct stack slot - we spill into (%esp)
-; and reload from 16(%esp) due to the pushes.
-; NORMAL-LABEL: test10:
-; NORMAL: movl $_good, [[ALLOC:.*]]
-; NORMAL-NEXT: movl [[ALLOC]], [[EAX:%e..]]
-; NORMAL-NEXT: movl [[EAX]], (%esp) # 4-byte Spill
-; NORMAL: nop
-; NORMAL: pushl $4
-; NORMAL-NEXT: pushl $3
-; NORMAL-NEXT: pushl $2
-; NORMAL-NEXT: pushl $1
-; NORMAL-NEXT: calll *16(%esp)
-; NORMAL-NEXT: addl $16, %esp
-define void @test10() optsize {
- %stack_fptr = alloca void (i32, i32, i32, i32)*
- store void (i32, i32, i32, i32)* @good, void (i32, i32, i32, i32)** %stack_fptr
- %good_ptr = load volatile void (i32, i32, i32, i32)** %stack_fptr
- call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di}"()
- call void (i32, i32, i32, i32)* %good_ptr(i32 1, i32 2, i32 3, i32 4)
- ret void
-}
-
-; We can't fold the load from the global into the push because of
-; interference from the store
-; NORMAL-LABEL: test11:
-; NORMAL: movl _the_global, [[EAX:%e..]]
-; NORMAL-NEXT: movl $42, _the_global
-; NORMAL-NEXT: pushl $4
-; NORMAL-NEXT: pushl $3
-; NORMAL-NEXT: pushl $2
-; NORMAL-NEXT: pushl [[EAX]]
-; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
-@the_global = external global i32
-define void @test11() optsize {
- %myload = load i32* @the_global
- store i32 42, i32* @the_global
- call void @good(i32 %myload, i32 2, i32 3, i32 4)
- ret void
-}