#include "X86.h"
#include "X86InstrBuilder.h"
-#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
bool X86ScalarSSEf32;
public:
- explicit X86FastISel(MachineFunction &mf,
- MachineModuleInfo *mmi,
- DwarfWriter *dw,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am
-#ifndef NDEBUG
- , SmallSet<Instruction*, 8> &cil
-#endif
- )
- : FastISel(mf, mmi, dw, vm, bm, am
-#ifndef NDEBUG
- , cil
-#endif
- ) {
+ explicit X86FastISel(FunctionLoweringInfo &funcInfo) : FastISel(funcInfo) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
X86ScalarSSEf64 = Subtarget->hasSSE2();
X86ScalarSSEf32 = Subtarget->hasSSE1();
}
- virtual bool TargetSelectInstruction(Instruction *I);
+ virtual bool TargetSelectInstruction(const Instruction *I);
+ /// TryToFoldLoad - The specified machine instr operand is a vreg, and that
+ /// vreg is being provided by the specified load instruction. If possible,
+  /// try to fold the load as an operand to the instruction, returning true on
+  /// success.
+ virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI);
+
#include "X86GenFastISel.inc"
private:
- bool X86FastEmitCompare(Value *LHS, Value *RHS, EVT VT);
+ bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);
bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);
- bool X86FastEmitStore(EVT VT, Value *Val,
+ bool X86FastEmitStore(EVT VT, const Value *Val,
const X86AddressMode &AM);
bool X86FastEmitStore(EVT VT, unsigned Val,
const X86AddressMode &AM);
bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
unsigned &ResultReg);
- bool X86SelectAddress(Value *V, X86AddressMode &AM);
- bool X86SelectCallAddress(Value *V, X86AddressMode &AM);
+ bool X86SelectAddress(const Value *V, X86AddressMode &AM);
+ bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
- bool X86SelectLoad(Instruction *I);
+ bool X86SelectLoad(const Instruction *I);
- bool X86SelectStore(Instruction *I);
+ bool X86SelectStore(const Instruction *I);
- bool X86SelectCmp(Instruction *I);
+ bool X86SelectRet(const Instruction *I);
- bool X86SelectZExt(Instruction *I);
+ bool X86SelectCmp(const Instruction *I);
- bool X86SelectBranch(Instruction *I);
+ bool X86SelectZExt(const Instruction *I);
- bool X86SelectShift(Instruction *I);
+ bool X86SelectBranch(const Instruction *I);
- bool X86SelectSelect(Instruction *I);
+ bool X86SelectShift(const Instruction *I);
- bool X86SelectTrunc(Instruction *I);
-
- bool X86SelectFPExt(Instruction *I);
- bool X86SelectFPTrunc(Instruction *I);
+ bool X86SelectSelect(const Instruction *I);
- bool X86SelectExtractValue(Instruction *I);
+ bool X86SelectTrunc(const Instruction *I);
+
+ bool X86SelectFPExt(const Instruction *I);
+ bool X86SelectFPTrunc(const Instruction *I);
- bool X86VisitIntrinsicCall(IntrinsicInst &I);
- bool X86SelectCall(Instruction *I);
+ bool X86SelectExtractValue(const Instruction *I);
- CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);
+ bool X86VisitIntrinsicCall(const IntrinsicInst &I);
+ bool X86SelectCall(const Instruction *I);
  const X86InstrInfo *getInstrInfo() const {
    return getTargetMachine()->getInstrInfo();
  }
  const X86TargetMachine *getTargetMachine() const {
    return static_cast<const X86TargetMachine *>(&TM);
  }
- unsigned TargetMaterializeConstant(Constant *C);
+ unsigned TargetMaterializeConstant(const Constant *C);
- unsigned TargetMaterializeAlloca(AllocaInst *C);
+ unsigned TargetMaterializeAlloca(const AllocaInst *C);
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.
(VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
}
- bool isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1 = false);
+ bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false);
};
} // end anonymous namespace.
-bool X86FastISel::isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1) {
- VT = TLI.getValueType(Ty, /*HandleUnknown=*/true);
- if (VT == MVT::Other || !VT.isSimple())
+bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) {
+ EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true);
+ if (evt == MVT::Other || !evt.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
-
+
+ VT = evt.getSimpleVT();
// For now, require SSE/SSE2 for performing floating-point operations,
// since x87 requires additional work.
if (VT == MVT::f64 && !X86ScalarSSEf64)
#include "X86GenCallingConv.inc"
-/// CCAssignFnForCall - Selects the correct CCAssignFn for a given calling
-/// convention.
-CCAssignFn *X86FastISel::CCAssignFnForCall(unsigned CC, bool isTaillCall) {
- if (Subtarget->is64Bit()) {
- if (Subtarget->isTargetWin64())
- return CC_X86_Win64_C;
- else
- return CC_X86_64_C;
- }
-
- if (CC == CallingConv::X86_FastCall)
- return CC_X86_32_FastCall;
- else if (CC == CallingConv::Fast)
- return CC_X86_32_FastCC;
- else
- return CC_X86_32_C;
-}
-
/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
}
ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc), ResultReg), AM);
return true;
}
case MVT::i1: {
// Mask out all but lowest bit.
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
- BuildMI(MBB, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
Val = AndResult;
}
break;
}
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc)), AM).addReg(Val);
return true;
}
-bool X86FastISel::X86FastEmitStore(EVT VT, Value *Val,
+bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
const X86AddressMode &AM) {
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Val))
Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
// If this is a store of a simple constant, fold the constant into the store.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
unsigned Opc = 0;
bool Signed = true;
switch (VT.getSimpleVT().SimpleTy) {
}
if (Opc) {
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
- .addImm(Signed ? CI->getSExtValue() :
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc)), AM)
+ .addImm(Signed ? (uint64_t) CI->getSExtValue() :
CI->getZExtValue());
return true;
}
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
unsigned Src, EVT SrcVT,
unsigned &ResultReg) {
- unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
+ unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
+ Src, /*TODO: Kill=*/false);
if (RR != 0) {
ResultReg = RR;
/// X86SelectAddress - Attempt to fill in an address from the given value.
///
-bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
- User *U = NULL;
+bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
+ const User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
- if (Instruction *I = dyn_cast<Instruction>(V)) {
- Opcode = I->getOpcode();
- U = I;
- } else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
+ if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ // Don't walk into other basic blocks; it's possible we haven't
+ // visited them yet, so the instructions may not yet be assigned
+ // virtual registers.
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
Opcode = C->getOpcode();
U = C;
}
+ if (const PointerType *Ty = dyn_cast<PointerType>(V->getType()))
+ if (Ty->getAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
switch (Opcode) {
default: break;
case Instruction::BitCast:
case Instruction::Alloca: {
// Do static allocas.
const AllocaInst *A = cast<AllocaInst>(V);
- DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
- if (SI != StaticAllocaMap.end()) {
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(A);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
AM.BaseType = X86AddressMode::FrameIndexBase;
AM.Base.FrameIndex = SI->second;
return true;
case Instruction::Add: {
// Adds of constants are common and easy enough.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
// They have to fit in the 32-bit signed displacement field though.
- if (isInt32(Disp)) {
+ if (isInt<32>(Disp)) {
AM.Disp = (uint32_t)Disp;
return X86SelectAddress(U->getOperand(0), AM);
}
}
case Instruction::GetElementPtr: {
+ X86AddressMode SavedAM = AM;
+
// Pattern-match simple GEPs.
uint64_t Disp = (int32_t)AM.Disp;
unsigned IndexReg = AM.IndexReg;
gep_type_iterator GTI = gep_type_begin(U);
// Iterate through the indices, folding what we can. Constants can be
// folded, and one dynamic index can be handled, if the scale is supported.
- for (User::op_iterator i = U->op_begin() + 1, e = U->op_end();
+ for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
i != e; ++i, ++GTI) {
- Value *Op = *i;
+ const Value *Op = *i;
if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
const StructLayout *SL = TD.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
Disp += SL->getElementOffset(Idx);
} else {
uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
- // Constant-offset addressing.
- Disp += CI->getSExtValue() * S;
- } else if (IndexReg == 0 &&
- (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
- (S == 1 || S == 2 || S == 4 || S == 8)) {
- // Scaled-index addressing.
- Scale = S;
- IndexReg = getRegForGEPIndex(Op);
- if (IndexReg == 0)
- return false;
- } else
- // Unsupported.
- goto unsupported_gep;
+ SmallVector<const Value *, 4> Worklist;
+ Worklist.push_back(Op);
+ do {
+ Op = Worklist.pop_back_val();
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ Disp += CI->getSExtValue() * S;
+ } else if (isa<AddOperator>(Op) &&
+ isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
+ // An add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ Disp += CI->getSExtValue() * S;
+ // Add the other operand back to the work list.
+ Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
+ } else if (IndexReg == 0 &&
+ (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
+ (S == 1 || S == 2 || S == 4 || S == 8)) {
+ // Scaled-index addressing.
+ Scale = S;
+ IndexReg = getRegForGEPIndex(Op).first;
+ if (IndexReg == 0)
+ return false;
+ } else
+ // Unsupported.
+ goto unsupported_gep;
+ } while (!Worklist.empty());
}
}
// Check for displacement overflow.
- if (!isInt32(Disp))
+ if (!isInt<32>(Disp))
break;
// Ok, the GEP indices were covered by constant-offset and scaled-index
// addressing. Update the address state and move on to examining the base.
AM.IndexReg = IndexReg;
AM.Scale = Scale;
AM.Disp = (uint32_t)Disp;
- return X86SelectAddress(U->getOperand(0), AM);
+ if (X86SelectAddress(U->getOperand(0), AM))
+ return true;
+
+ // If we couldn't merge the sub value into this addr mode, revert back to
+ // our address and just match the value instead of completely failing.
+ AM = SavedAM;
+ break;
unsupported_gep:
// Ok, the GEP indices weren't all covered.
break;
}
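For illustration only (not part of the patch): the worklist above lets a GEP index that is itself an add-with-constant contribute both a scaled index register and extra displacement. A hypothetical sketch of the resulting address mode, where BaseReg and IdxReg stand in for whatever virtual registers X86SelectAddress and getRegForGEPIndex produced:

  // Hypothetical IR (names illustrative):
  //   %sum = add i32 %i, 3
  //   %p   = getelementptr i32* %base, i32 %sum
  // With an element size S of 4, the loop folds the +3 into the displacement
  // and keeps %i as the scaled index, roughly:
  X86AddressMode ExampleAM;
  ExampleAM.Base.Reg = BaseReg;  // vreg for %base (assumed)
  ExampleAM.IndexReg = IdxReg;   // vreg for %i (assumed)
  ExampleAM.Scale    = 4;        // S
  ExampleAM.Disp     = 3 * 4;    // constant index component, scaled by S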
// Handle constant address.
- if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Can't handle alternate code models yet.
if (TM.getCodeModel() != CodeModel::Small)
return false;
return false;
// Can't handle TLS yet.
- if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
if (GVar->isThreadLocal())
return false;
// If this reference is relative to the pic base, set it now.
if (isGlobalRelativeToPICBase(GVFlags)) {
// FIXME: How do we know Base.Reg is free??
- AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(&MF);
+ AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
}
// Unless the ABI requires an extra load, return a direct reference to
StubAM.GV = GV;
StubAM.GVOpFlags = GVFlags;
+ // Prepare for inserting code in the local-value area.
+ SavePoint SaveInsertPt = enterLocalValueArea();
+
if (TLI.getPointerTy() == MVT::i64) {
Opc = X86::MOV64rm;
RC = X86::GR64RegisterClass;
}
LoadReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM);
-
+ MachineInstrBuilder LoadMI =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
+ addFullAddress(LoadMI, StubAM);
+
+ // Ok, back to normal mode.
+ leaveLocalValueArea(SaveInsertPt);
+
// Prevent loading GV stub multiple times in same MBB.
LocalValueMap[V] = LoadReg;
}
/// X86SelectCallAddress - Attempt to fill in an address from the given value.
///
-bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
- User *U = NULL;
+bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
+ const User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
- if (Instruction *I = dyn_cast<Instruction>(V)) {
+ if (const Instruction *I = dyn_cast<Instruction>(V)) {
Opcode = I->getOpcode();
U = I;
- } else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
Opcode = C->getOpcode();
U = C;
}
}
// Handle constant address.
- if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Can't handle alternate code models yet.
if (TM.getCodeModel() != CodeModel::Small)
return false;
return false;
// Can't handle TLS or DLLImport.
- if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
if (GVar->isThreadLocal() || GVar->hasDLLImportLinkage())
return false;
/// X86SelectStore - Select and emit code to implement store instructions.
-bool X86FastISel::X86SelectStore(Instruction* I) {
- EVT VT;
+bool X86FastISel::X86SelectStore(const Instruction *I) {
+ MVT VT;
if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true))
return false;
return X86FastEmitStore(VT, I->getOperand(0), AM);
}
+/// X86SelectRet - Select and emit code to implement ret instructions.
+bool X86FastISel::X86SelectRet(const Instruction *I) {
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+ const Function &F = *I->getParent()->getParent();
+
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ CallingConv::ID CC = F.getCallingConv();
+ if (CC != CallingConv::C &&
+ CC != CallingConv::Fast &&
+ CC != CallingConv::X86_FastCall)
+ return false;
+
+ if (Subtarget->isTargetWin64())
+ return false;
+
+ // Don't handle popping bytes on return for now.
+ if (FuncInfo.MF->getInfo<X86MachineFunctionInfo>()
+ ->getBytesToPopOnReturn() != 0)
+    return false;
+
+ // fastcc with -tailcallopt is intended to provide a guaranteed
+ // tail call optimization. Fastisel doesn't know how to do that.
+ if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
+ return false;
+
+ // Let SDISel handle vararg functions.
+ if (F.isVarArg())
+ return false;
+
+ if (Ret->getNumOperands() > 0) {
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+ Outs, TLI);
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ValLocs;
+ CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
+ CCInfo.AnalyzeReturn(Outs, RetCC_X86);
+
+ const Value *RV = Ret->getOperand(0);
+ unsigned Reg = getRegForValue(RV);
+ if (Reg == 0)
+ return false;
+
+ // Only handle a single return value for now.
+ if (ValLocs.size() != 1)
+ return false;
+
+ CCValAssign &VA = ValLocs[0];
+
+ // Don't bother handling odd stuff for now.
+ if (VA.getLocInfo() != CCValAssign::Full)
+ return false;
+ // Only handle register returns for now.
+ if (!VA.isRegLoc())
+ return false;
+ // TODO: For now, don't try to handle cases where getLocInfo()
+ // says Full but the types don't match.
+ if (TLI.getValueType(RV->getType()) != VA.getValVT())
+ return false;
+
+ // The calling-convention tables for x87 returns don't tell
+ // the whole story.
+ if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1)
+ return false;
+
+ // Make the copy.
+ unsigned SrcReg = Reg + VA.getValNo();
+ unsigned DstReg = VA.getLocReg();
+ const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
+ // Avoid a cross-class copy. This is very unlikely.
+ if (!SrcRC->contains(DstReg))
+ return false;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ DstReg).addReg(SrcReg);
+
+ // Mark the register as live out of the function.
+ MRI.addLiveOut(VA.getLocReg());
+ }
+
+ // Now emit the RET.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
+ return true;
+}
+
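A minimal sketch (illustrative, not part of the patch) of what X86SelectRet emits for 'ret i32 %x' under the C calling convention, assuming SrcReg already holds %x and RetCC_X86 assigned the value to EAX:

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
          X86::EAX).addReg(SrcReg);   // copy the return value into EAX
  MRI.addLiveOut(X86::EAX);           // keep the physreg live out of the function
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));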
/// X86SelectLoad - Select and emit code to implement load instructions.
///
-bool X86FastISel::X86SelectLoad(Instruction *I) {
- EVT VT;
+bool X86FastISel::X86SelectLoad(const Instruction *I) {
+ MVT VT;
if (!isTypeLegal(I->getType(), VT, /*AllowI1=*/true))
return false;
return false;
}
-static unsigned X86ChooseCmpOpcode(EVT VT) {
+static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
switch (VT.getSimpleVT().SimpleTy) {
default: return 0;
case MVT::i8: return X86::CMP8rr;
case MVT::i16: return X86::CMP16rr;
case MVT::i32: return X86::CMP32rr;
case MVT::i64: return X86::CMP64rr;
- case MVT::f32: return X86::UCOMISSrr;
- case MVT::f64: return X86::UCOMISDrr;
+ case MVT::f32: return Subtarget->hasSSE1() ? X86::UCOMISSrr : 0;
+ case MVT::f64: return Subtarget->hasSSE2() ? X86::UCOMISDrr : 0;
}
}
/// X86ChooseCmpImmediateOpcode - If we have a comparison with RHS as the RHS
/// of the comparison, return an opcode that works for the compare (e.g.
/// CMP32ri) otherwise return 0.
-static unsigned X86ChooseCmpImmediateOpcode(EVT VT, ConstantInt *RHSC) {
+static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
switch (VT.getSimpleVT().SimpleTy) {
// Otherwise, we can't fold the immediate into this comparison.
default: return 0;
}
}
-bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, EVT VT) {
+bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
+ EVT VT) {
unsigned Op0Reg = getRegForValue(Op0);
if (Op0Reg == 0) return false;
// We have two options: compare with register or immediate. If the RHS of
// the compare is an immediate that we can fold into this compare, use
// CMPri, otherwise use CMPrr.
- if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
+ if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
- BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
- .addImm(Op1C->getSExtValue());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
+ .addReg(Op0Reg)
+ .addImm(Op1C->getSExtValue());
return true;
}
}
- unsigned CompareOpc = X86ChooseCmpOpcode(VT);
+ unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
if (CompareOpc == 0) return false;
unsigned Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false;
- BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
+ .addReg(Op0Reg)
+ .addReg(Op1Reg);
return true;
}
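A small usage sketch (illustrative, not part of the patch): for 'icmp eq i32 %x, 42', X86ChooseCmpImmediateOpcode yields CMP32ri, so the immediate path above emits the equivalent of the following, assuming Op0Reg holds %x:

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::CMP32ri))
    .addReg(Op0Reg)
    .addImm(42);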
-bool X86FastISel::X86SelectCmp(Instruction *I) {
- CmpInst *CI = cast<CmpInst>(I);
+bool X86FastISel::X86SelectCmp(const Instruction *I) {
+ const CmpInst *CI = cast<CmpInst>(I);
- EVT VT;
+ MVT VT;
if (!isTypeLegal(I->getOperand(0)->getType(), VT))
return false;
unsigned EReg = createResultReg(&X86::GR8RegClass);
unsigned NPReg = createResultReg(&X86::GR8RegClass);
- BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
- BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
- BuildMI(MBB, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::SETNPr), NPReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
UpdateValueMap(I, ResultReg);
return true;
unsigned NEReg = createResultReg(&X86::GR8RegClass);
unsigned PReg = createResultReg(&X86::GR8RegClass);
- BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
- BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
- BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::SETNEr), NEReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::SETPr), PReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::OR8rr), ResultReg)
+ .addReg(PReg).addReg(NEReg);
UpdateValueMap(I, ResultReg);
return true;
}
return false;
}
- Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
+ const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
if (SwapArgs)
std::swap(Op0, Op1);
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
- BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
UpdateValueMap(I, ResultReg);
return true;
}
-bool X86FastISel::X86SelectZExt(Instruction *I) {
+bool X86FastISel::X86SelectZExt(const Instruction *I) {
// Handle zero-extension from i1 to i8, which is common.
- if (I->getType() == Type::getInt8Ty(I->getContext()) &&
- I->getOperand(0)->getType() == Type::getInt1Ty(I->getContext())) {
+ if (I->getType()->isIntegerTy(8) &&
+ I->getOperand(0)->getType()->isIntegerTy(1)) {
unsigned ResultReg = getRegForValue(I->getOperand(0));
if (ResultReg == 0) return false;
// Set the high bits to zero.
- ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg);
+ ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
if (ResultReg == 0) return false;
UpdateValueMap(I, ResultReg);
return true;
}
-bool X86FastISel::X86SelectBranch(Instruction *I) {
+bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Unconditional branches are selected by tablegen-generated code.
// Handle a conditional branch.
- BranchInst *BI = cast<BranchInst>(I);
- MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
- MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];
-
- // Fold the common case of a conditional branch with a comparison.
- if (CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
- if (CI->hasOneUse()) {
+ const BranchInst *BI = cast<BranchInst>(I);
+ MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
+ MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
+
+ // Fold the common case of a conditional branch with a comparison
+ // in the same block (values defined on other blocks may not have
+ // initialized registers).
+ if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
+ if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
EVT VT = TLI.getValueType(CI->getOperand(0)->getType());
// Try to take advantage of fallthrough opportunities.
CmpInst::Predicate Predicate = CI->getPredicate();
- if (MBB->isLayoutSuccessor(TrueMBB)) {
+ if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
std::swap(TrueMBB, FalseMBB);
Predicate = CmpInst::getInversePredicate(Predicate);
}
std::swap(TrueMBB, FalseMBB);
Predicate = CmpInst::FCMP_UNE;
// FALL THROUGH
- case CmpInst::FCMP_UNE: SwapArgs = false; BranchOpc = X86::JNE; break;
- case CmpInst::FCMP_OGT: SwapArgs = false; BranchOpc = X86::JA; break;
- case CmpInst::FCMP_OGE: SwapArgs = false; BranchOpc = X86::JAE; break;
- case CmpInst::FCMP_OLT: SwapArgs = true; BranchOpc = X86::JA; break;
- case CmpInst::FCMP_OLE: SwapArgs = true; BranchOpc = X86::JAE; break;
- case CmpInst::FCMP_ONE: SwapArgs = false; BranchOpc = X86::JNE; break;
- case CmpInst::FCMP_ORD: SwapArgs = false; BranchOpc = X86::JNP; break;
- case CmpInst::FCMP_UNO: SwapArgs = false; BranchOpc = X86::JP; break;
- case CmpInst::FCMP_UEQ: SwapArgs = false; BranchOpc = X86::JE; break;
- case CmpInst::FCMP_UGT: SwapArgs = true; BranchOpc = X86::JB; break;
- case CmpInst::FCMP_UGE: SwapArgs = true; BranchOpc = X86::JBE; break;
- case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB; break;
- case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE; break;
+ case CmpInst::FCMP_UNE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
+ case CmpInst::FCMP_OGT: SwapArgs = false; BranchOpc = X86::JA_4; break;
+ case CmpInst::FCMP_OGE: SwapArgs = false; BranchOpc = X86::JAE_4; break;
+ case CmpInst::FCMP_OLT: SwapArgs = true; BranchOpc = X86::JA_4; break;
+ case CmpInst::FCMP_OLE: SwapArgs = true; BranchOpc = X86::JAE_4; break;
+ case CmpInst::FCMP_ONE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
+ case CmpInst::FCMP_ORD: SwapArgs = false; BranchOpc = X86::JNP_4; break;
+ case CmpInst::FCMP_UNO: SwapArgs = false; BranchOpc = X86::JP_4; break;
+ case CmpInst::FCMP_UEQ: SwapArgs = false; BranchOpc = X86::JE_4; break;
+ case CmpInst::FCMP_UGT: SwapArgs = true; BranchOpc = X86::JB_4; break;
+ case CmpInst::FCMP_UGE: SwapArgs = true; BranchOpc = X86::JBE_4; break;
+ case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB_4; break;
+ case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE_4; break;
- case CmpInst::ICMP_EQ: SwapArgs = false; BranchOpc = X86::JE; break;
- case CmpInst::ICMP_NE: SwapArgs = false; BranchOpc = X86::JNE; break;
- case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA; break;
- case CmpInst::ICMP_UGE: SwapArgs = false; BranchOpc = X86::JAE; break;
- case CmpInst::ICMP_ULT: SwapArgs = false; BranchOpc = X86::JB; break;
- case CmpInst::ICMP_ULE: SwapArgs = false; BranchOpc = X86::JBE; break;
- case CmpInst::ICMP_SGT: SwapArgs = false; BranchOpc = X86::JG; break;
- case CmpInst::ICMP_SGE: SwapArgs = false; BranchOpc = X86::JGE; break;
- case CmpInst::ICMP_SLT: SwapArgs = false; BranchOpc = X86::JL; break;
- case CmpInst::ICMP_SLE: SwapArgs = false; BranchOpc = X86::JLE; break;
+ case CmpInst::ICMP_EQ: SwapArgs = false; BranchOpc = X86::JE_4; break;
+ case CmpInst::ICMP_NE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
+ case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA_4; break;
+ case CmpInst::ICMP_UGE: SwapArgs = false; BranchOpc = X86::JAE_4; break;
+ case CmpInst::ICMP_ULT: SwapArgs = false; BranchOpc = X86::JB_4; break;
+ case CmpInst::ICMP_ULE: SwapArgs = false; BranchOpc = X86::JBE_4; break;
+ case CmpInst::ICMP_SGT: SwapArgs = false; BranchOpc = X86::JG_4; break;
+ case CmpInst::ICMP_SGE: SwapArgs = false; BranchOpc = X86::JGE_4; break;
+ case CmpInst::ICMP_SLT: SwapArgs = false; BranchOpc = X86::JL_4; break;
+ case CmpInst::ICMP_SLE: SwapArgs = false; BranchOpc = X86::JLE_4; break;
default:
return false;
}
- Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
+ const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
if (SwapArgs)
std::swap(Op0, Op1);
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
- BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
+ .addMBB(TrueMBB);
if (Predicate == CmpInst::FCMP_UNE) {
// X86 requires a second branch to handle UNE (and OEQ,
// which is mapped to UNE above).
- BuildMI(MBB, DL, TII.get(X86::JP)).addMBB(TrueMBB);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))
+ .addMBB(TrueMBB);
}
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
+ FastEmitBranch(FalseMBB, DL);
+ FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
  } else if (ExtractValueInst *EI =
               dyn_cast<ExtractValueInst>(BI->getCondition())) {
// looking for the SETO/SETB instruction. If an instruction modifies the
// EFLAGS register before we reach the SETO/SETB instruction, then we can't
// convert the branch into a JO/JB instruction.
- if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(EI->getAggregateOperand())){
+ if (const IntrinsicInst *CI =
+ dyn_cast<IntrinsicInst>(EI->getAggregateOperand())){
if (CI->getIntrinsicID() == Intrinsic::sadd_with_overflow ||
CI->getIntrinsicID() == Intrinsic::uadd_with_overflow) {
const MachineInstr *SetMI = 0;
- unsigned Reg = lookUpRegForValue(EI);
+ unsigned Reg = getRegForValue(EI);
for (MachineBasicBlock::const_reverse_iterator
- RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
+ RI = FuncInfo.MBB->rbegin(), RE = FuncInfo.MBB->rend();
+ RI != RE; ++RI) {
const MachineInstr &MI = *RI;
- if (MI.modifiesRegister(Reg)) {
- unsigned Src, Dst, SrcSR, DstSR;
-
- if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) {
- Reg = Src;
+ if (MI.definesRegister(Reg)) {
+ if (MI.isCopy()) {
+ Reg = MI.getOperand(1).getReg();
continue;
}
unsigned OpCode = SetMI->getOpcode();
if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
- BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ? X86::JO : X86::JB))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(OpCode == X86::SETOr ? X86::JO_4 : X86::JB_4))
.addMBB(TrueMBB);
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
+ FastEmitBranch(FalseMBB, DL);
+ FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
}
unsigned OpReg = getRegForValue(BI->getCondition());
if (OpReg == 0) return false;
- BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
- BuildMI(MBB, DL, TII.get(X86::JNE)).addMBB(TrueMBB);
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
+ .addReg(OpReg).addReg(OpReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
+ .addMBB(TrueMBB);
+ FastEmitBranch(FalseMBB, DL);
+ FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
-bool X86FastISel::X86SelectShift(Instruction *I) {
+bool X86FastISel::X86SelectShift(const Instruction *I) {
unsigned CReg = 0, OpReg = 0, OpImm = 0;
const TargetRegisterClass *RC = NULL;
- if (I->getType() == Type::getInt8Ty(I->getContext())) {
+ if (I->getType()->isIntegerTy(8)) {
CReg = X86::CL;
RC = &X86::GR8RegClass;
switch (I->getOpcode()) {
case Instruction::Shl: OpReg = X86::SHL8rCL; OpImm = X86::SHL8ri; break;
default: return false;
}
- } else if (I->getType() == Type::getInt16Ty(I->getContext())) {
+ } else if (I->getType()->isIntegerTy(16)) {
CReg = X86::CX;
RC = &X86::GR16RegClass;
switch (I->getOpcode()) {
case Instruction::Shl: OpReg = X86::SHL16rCL; OpImm = X86::SHL16ri; break;
default: return false;
}
- } else if (I->getType() == Type::getInt32Ty(I->getContext())) {
+ } else if (I->getType()->isIntegerTy(32)) {
CReg = X86::ECX;
RC = &X86::GR32RegClass;
switch (I->getOpcode()) {
case Instruction::Shl: OpReg = X86::SHL32rCL; OpImm = X86::SHL32ri; break;
default: return false;
}
- } else if (I->getType() == Type::getInt64Ty(I->getContext())) {
+ } else if (I->getType()->isIntegerTy(64)) {
CReg = X86::RCX;
RC = &X86::GR64RegClass;
switch (I->getOpcode()) {
return false;
}
- EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
- if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
+ MVT VT;
+ if (!isTypeLegal(I->getType(), VT))
return false;
unsigned Op0Reg = getRegForValue(I->getOperand(0));
if (Op0Reg == 0) return false;
// Fold immediate in shl(x,3).
- if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, DL, TII.get(OpImm),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
UpdateValueMap(I, ResultReg);
return true;
unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
- TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ CReg).addReg(Op1Reg);
// The shift instruction uses X86::CL. If we defined a super-register
- // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
- // we're doing here.
+ // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
if (CReg != X86::CL)
- BuildMI(MBB, DL, TII.get(TargetInstrInfo::EXTRACT_SUBREG), X86::CL)
- .addReg(CReg).addImm(X86::SUBREG_8BIT);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::KILL), X86::CL)
+ .addReg(CReg, RegState::Kill);
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
+ .addReg(Op0Reg);
UpdateValueMap(I, ResultReg);
return true;
}
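To make the variable-count path above concrete, a minimal sketch (not part of the patch) for 'shl i32 %x, %n', assuming Op0Reg holds %x and Op1Reg holds %n:

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
          X86::ECX).addReg(Op1Reg);                   // shift count into ECX
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::KILL),
          X86::CL).addReg(X86::ECX, RegState::Kill);  // the shift only reads CL
  unsigned ShiftReg = createResultReg(&X86::GR32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SHL32rCL),
          ShiftReg).addReg(Op0Reg);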
-bool X86FastISel::X86SelectSelect(Instruction *I) {
- EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
- if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
+bool X86FastISel::X86SelectSelect(const Instruction *I) {
+ MVT VT;
+ if (!isTypeLegal(I->getType(), VT))
return false;
+ // We only use cmov here, if we don't have a cmov instruction bail.
+ if (!Subtarget->hasCMov()) return false;
+
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
- if (VT.getSimpleVT() == MVT::i16) {
+ if (VT == MVT::i16) {
Opc = X86::CMOVE16rr;
RC = &X86::GR16RegClass;
- } else if (VT.getSimpleVT() == MVT::i32) {
+ } else if (VT == MVT::i32) {
Opc = X86::CMOVE32rr;
RC = &X86::GR32RegClass;
- } else if (VT.getSimpleVT() == MVT::i64) {
+ } else if (VT == MVT::i64) {
Opc = X86::CMOVE64rr;
RC = &X86::GR64RegClass;
} else {
unsigned Op2Reg = getRegForValue(I->getOperand(2));
if (Op2Reg == 0) return false;
- BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
+ .addReg(Op0Reg).addReg(Op0Reg);
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ .addReg(Op1Reg).addReg(Op2Reg);
UpdateValueMap(I, ResultReg);
return true;
}
-bool X86FastISel::X86SelectFPExt(Instruction *I) {
+bool X86FastISel::X86SelectFPExt(const Instruction *I) {
// fpext from float to double.
if (Subtarget->hasSSE2() &&
- I->getType() == Type::getDoubleTy(I->getContext())) {
- Value *V = I->getOperand(0);
- if (V->getType() == Type::getFloatTy(I->getContext())) {
+ I->getType()->isDoubleTy()) {
+ const Value *V = I->getOperand(0);
+ if (V->getType()->isFloatTy()) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
- BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::CVTSS2SDrr), ResultReg)
+ .addReg(OpReg);
UpdateValueMap(I, ResultReg);
return true;
}
return false;
}
-bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
+bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
if (Subtarget->hasSSE2()) {
- if (I->getType() == Type::getFloatTy(I->getContext())) {
- Value *V = I->getOperand(0);
- if (V->getType() == Type::getDoubleTy(I->getContext())) {
+ if (I->getType()->isFloatTy()) {
+ const Value *V = I->getOperand(0);
+ if (V->getType()->isDoubleTy()) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
- BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::CVTSD2SSrr), ResultReg)
+ .addReg(OpReg);
UpdateValueMap(I, ResultReg);
return true;
}
return false;
}
-bool X86FastISel::X86SelectTrunc(Instruction *I) {
+bool X86FastISel::X86SelectTrunc(const Instruction *I) {
if (Subtarget->is64Bit())
// All other cases should be handled by the tblgen generated code.
return false;
return false;
// First issue a copy to GR16_ABCD or GR32_ABCD.
- unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr;
const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
unsigned CopyReg = createResultReg(CopyRC);
- BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ CopyReg).addReg(InputReg);
// Then issue an extract_subreg.
unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
- CopyReg, X86::SUBREG_8BIT);
+ CopyReg, /*Kill=*/true,
+ X86::sub_8bit);
if (!ResultReg)
return false;
return true;
}
-bool X86FastISel::X86SelectExtractValue(Instruction *I) {
- ExtractValueInst *EI = cast<ExtractValueInst>(I);
- Value *Agg = EI->getAggregateOperand();
+bool X86FastISel::X86SelectExtractValue(const Instruction *I) {
+ const ExtractValueInst *EI = cast<ExtractValueInst>(I);
+ const Value *Agg = EI->getAggregateOperand();
- if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Agg)) {
+ if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Agg)) {
switch (CI->getIntrinsicID()) {
default: break;
case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow:
+ case Intrinsic::uadd_with_overflow: {
// Cheat a little. We know that the registers for "add" and "seto" are
// allocated sequentially. However, we only keep track of the register
// for "add" in the value map. Use extractvalue's index to get the
// correct register for "seto".
- UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
+ unsigned OpReg = getRegForValue(Agg);
+ if (OpReg == 0)
+ return false;
+ UpdateValueMap(I, OpReg + *EI->idx_begin());
return true;
}
+ }
}
return false;
}
-bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
+bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
// FIXME: Handle more intrinsics.
switch (I.getIntrinsicID()) {
default: return false;
+ case Intrinsic::stackprotector: {
+    // Emit code to store the stack guard onto the stack.
+ EVT PtrTy = TLI.getPointerTy();
+
+ const Value *Op1 = I.getArgOperand(0); // The guard's value.
+ const AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
+
+ // Grab the frame index.
+ X86AddressMode AM;
+ if (!X86SelectAddress(Slot, AM)) return false;
+
+ if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
+
+ return true;
+ }
+ case Intrinsic::objectsize: {
+ ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
+ const Type *Ty = I.getCalledFunction()->getReturnType();
+
+ assert(CI && "Non-constant type in Intrinsic::objectsize?");
+
+ MVT VT;
+ if (!isTypeLegal(Ty, VT))
+ return false;
+
+ unsigned OpC = 0;
+ if (VT == MVT::i32)
+ OpC = X86::MOV32ri;
+ else if (VT == MVT::i64)
+ OpC = X86::MOV64ri;
+ else
+ return false;
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg).
+ addImm(CI->isZero() ? -1ULL : 0);
+ UpdateValueMap(&I, ResultReg);
+ return true;
+ }
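  // Illustrative example (not part of the patch): for a 32-bit query
  //   %n = call i32 @llvm.objectsize.i32(i8* %p, i1 false)
  // CI->isZero() is true (the i1 'min' flag is false), so the case above
  // emits 'MOV32ri ResultReg, -1' -- the "size unknown" answer -- while an
  // 'i1 true' flag would materialize 0 instead.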
+ case Intrinsic::dbg_declare: {
+ const DbgDeclareInst *DI = cast<DbgDeclareInst>(&I);
+ X86AddressMode AM;
+ assert(DI->getAddress() && "Null address should be checked earlier!");
+ if (!X86SelectAddress(DI->getAddress(), AM))
+ return false;
+ const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
+ // FIXME may need to add RegState::Debug to any registers produced,
+ // although ESP/EBP should be the only ones at the moment.
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).
+ addImm(0).addMetadata(DI->getVariable());
+ return true;
+ }
+ case Intrinsic::trap: {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP));
+ return true;
+ }
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow: {
// Replace "add with overflow" intrinsics with an "add" instruction followed
const Type *RetTy =
cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
- EVT VT;
+ MVT VT;
if (!isTypeLegal(RetTy, VT))
return false;
- Value *Op1 = I.getOperand(1);
- Value *Op2 = I.getOperand(2);
+ const Value *Op1 = I.getArgOperand(0);
+ const Value *Op2 = I.getArgOperand(1);
unsigned Reg1 = getRegForValue(Op1);
unsigned Reg2 = getRegForValue(Op2);
return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
+ .addReg(Reg1).addReg(Reg2);
unsigned DestReg1 = UpdateValueMap(&I, ResultReg);
// If the add with overflow is an intra-block value then we just want to
unsigned Opc = X86::SETBr;
if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
Opc = X86::SETOr;
- BuildMI(MBB, DL, TII.get(Opc), ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
return true;
}
}
}
-bool X86FastISel::X86SelectCall(Instruction *I) {
- CallInst *CI = cast<CallInst>(I);
- Value *Callee = I->getOperand(0);
+bool X86FastISel::X86SelectCall(const Instruction *I) {
+ const CallInst *CI = cast<CallInst>(I);
+ const Value *Callee = CI->getCalledValue();
// Can't handle inline asm yet.
if (isa<InlineAsm>(Callee))
return false;
// Handle intrinsic calls.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
return X86VisitIntrinsicCall(*II);
// Handle only C and fastcc calling conventions for now.
- CallSite CS(CI);
- unsigned CC = CS.getCallingConv();
+ ImmutableCallSite CS(CI);
+ CallingConv::ID CC = CS.getCallingConv();
if (CC != CallingConv::C &&
CC != CallingConv::Fast &&
CC != CallingConv::X86_FastCall)
return false;
- // On X86, -tailcallopt changes the fastcc ABI. FastISel doesn't
- // handle this for now.
- if (CC == CallingConv::Fast && PerformTailCallOpt)
+ // fastcc with -tailcallopt is intended to provide a guaranteed
+ // tail call optimization. Fastisel doesn't know how to do that.
+ if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
return false;
// Let SDISel handle vararg functions.
if (FTy->isVarArg())
return false;
+ // Fast-isel doesn't know about callee-pop yet.
+ if (Subtarget->IsCalleePop(FTy->isVarArg(), CC))
+ return false;
+
// Handle *simple* calls for now.
const Type *RetTy = CS.getType();
- EVT RetVT;
- if (RetTy == Type::getVoidTy(I->getContext()))
+ MVT RetVT;
+ if (RetTy->isVoidTy())
RetVT = MVT::isVoid;
else if (!isTypeLegal(RetTy, RetVT, true))
return false;
if (!X86SelectCallAddress(Callee, CalleeAM))
return false;
unsigned CalleeOp = 0;
- GlobalValue *GV = 0;
+ const GlobalValue *GV = 0;
if (CalleeAM.GV != 0) {
GV = CalleeAM.GV;
} else if (CalleeAM.Base.Reg != 0) {
}
// Deal with call operands first.
- SmallVector<Value*, 8> ArgVals;
+ SmallVector<const Value *, 8> ArgVals;
SmallVector<unsigned, 8> Args;
- SmallVector<EVT, 8> ArgVTs;
+ SmallVector<MVT, 8> ArgVTs;
SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
Args.reserve(CS.arg_size());
ArgVals.reserve(CS.arg_size());
ArgVTs.reserve(CS.arg_size());
ArgFlags.reserve(CS.arg_size());
- for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
+ for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
unsigned Arg = getRegForValue(*i);
if (Arg == 0)
return false;
const Type *ArgTy = (*i)->getType();
- EVT ArgVT;
+ MVT ArgVT;
if (!isTypeLegal(ArgTy, ArgVT))
return false;
unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext());
- CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));
+
+ // Allocate shadow area for Win64
+ if (Subtarget->isTargetWin64()) {
+ CCInfo.AllocateStack(32, 8);
+ }
+
+ CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
// Issue CALLSEQ_START
unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
- BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
+ .addImm(NumBytes);
// Process argument: walk the register/memloc assignments, inserting
// copies / loads.
break;
}
case CCValAssign::AExt: {
+ // We don't handle MMX parameters yet.
+ if (VA.getLocVT().isVector() && VA.getLocVT().getSizeInBits() == 128)
+ return false;
bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
Arg, ArgVT, Arg);
if (!Emitted)
break;
}
case CCValAssign::BCvt: {
- unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT().getSimpleVT(),
- ISD::BIT_CONVERT, Arg);
+ unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(),
+ ISD::BIT_CONVERT, Arg, /*TODO: Kill=*/false);
assert(BC != 0 && "Failed to emit a bitcast!");
Arg = BC;
ArgVT = VA.getLocVT();
}
if (VA.isRegLoc()) {
- TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
- bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
- Arg, RC, RC);
- assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
- Emitted = true;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ VA.getLocReg()).addReg(Arg);
RegArgs.push_back(VA.getLocReg());
} else {
unsigned LocMemOffset = VA.getLocMemOffset();
X86AddressMode AM;
AM.Base.Reg = StackPtr;
AM.Disp = LocMemOffset;
- Value *ArgVal = ArgVals[VA.getValNo()];
+ const Value *ArgVal = ArgVals[VA.getValNo()];
// If this is a really simple value, emit this with the Value* version of
// X86FastEmitStore. If it isn't simple, we don't want to do this, as it
// ELF / PIC requires GOT in the EBX register before function calls via PLT
// GOT pointer.
if (Subtarget->isPICStyleGOT()) {
- TargetRegisterClass *RC = X86::GR32RegisterClass;
- unsigned Base = getInstrInfo()->getGlobalBaseReg(&MF);
- bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC);
- assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
- Emitted = true;
+ unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ X86::EBX).addReg(Base);
}
// Issue the call.
MachineInstrBuilder MIB;
if (CalleeOp) {
// Register-indirect call.
- unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r;
- MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp);
+ unsigned CallOpc;
+ if (Subtarget->isTargetWin64())
+ CallOpc = X86::WINCALL64r;
+ else if (Subtarget->is64Bit())
+ CallOpc = X86::CALL64r;
+ else
+ CallOpc = X86::CALL32r;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
+ .addReg(CalleeOp);
} else {
// Direct call.
assert(GV && "Not a direct call");
- unsigned CallOpc =
- Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
+ unsigned CallOpc;
+ if (Subtarget->isTargetWin64())
+ CallOpc = X86::WINCALL64pcrel32;
+ else if (Subtarget->is64Bit())
+ CallOpc = X86::CALL64pcrel32;
+ else
+ CallOpc = X86::CALLpcrel32;
// See if we need any target-specific flags on the GV operand.
unsigned char OpFlags = 0;
}
- MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
+ .addGlobalAddress(GV, 0, OpFlags);
}
// Add an implicit use GOT pointer in EBX.
// Issue CALLSEQ_END
unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
- BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
+ .addImm(NumBytes).addImm(0);
// Now handle call return value (if any).
- if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
+ SmallVector<unsigned, 4> UsedRegs;
+ if (RetVT != MVT::isVoid) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);
assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
EVT CopyVT = RVLocs[0].getValVT();
TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
- TargetRegisterClass *SrcRC = DstRC;
// If this is a call to a function that returns an fp value on the x87 fp
// stack, but where we prefer to use the value in xmm registers, copy it
RVLocs[0].getLocReg() == X86::ST1) &&
isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
CopyVT = MVT::f80;
- SrcRC = X86::RSTRegisterClass;
DstRC = X86::RFP80RegisterClass;
}
unsigned ResultReg = createResultReg(DstRC);
- bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- RVLocs[0].getLocReg(), DstRC, SrcRC);
- assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
- Emitted = true;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(RVLocs[0].getLocReg());
+ UsedRegs.push_back(RVLocs[0].getLocReg());
+
if (CopyVT != RVLocs[0].getValVT()) {
// Round the F80 the right size, which also moves to the appropriate xmm
// register. This is accomplished by storing the F80 value in memory and
EVT ResVT = RVLocs[0].getValVT();
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned MemSize = ResVT.getSizeInBits()/8;
- int FI = MFI.CreateStackObject(MemSize, MemSize);
- addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
+ int FI = MFI.CreateStackObject(MemSize, MemSize, false);
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc)), FI)
+ .addReg(ResultReg);
DstRC = ResVT == MVT::f32
? X86::FR32RegisterClass : X86::FR64RegisterClass;
Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
ResultReg = createResultReg(DstRC);
- addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg), FI);
}
if (AndToI1) {
// Mask out all but lowest bit for some call which produces an i1.
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
- BuildMI(MBB, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
ResultReg = AndResult;
}
UpdateValueMap(I, ResultReg);
}
+ // Set all unused physreg defs as dead.
+ static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
+
return true;
}
bool
-X86FastISel::TargetSelectInstruction(Instruction *I) {
+X86FastISel::TargetSelectInstruction(const Instruction *I) {
switch (I->getOpcode()) {
default: break;
case Instruction::Load:
return X86SelectLoad(I);
case Instruction::Store:
return X86SelectStore(I);
+ case Instruction::Ret:
+ return X86SelectRet(I);
case Instruction::ICmp:
case Instruction::FCmp:
return X86SelectCmp(I);
return false;
}
-unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
- EVT VT;
+unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
+ MVT VT;
if (!isTypeLegal(C->getType(), VT))
return false;
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
- switch (VT.getSimpleVT().SimpleTy) {
+ switch (VT.SimpleTy) {
default: return false;
case MVT::i8:
Opc = X86::MOV8rm;
else
Opc = X86::LEA64r;
unsigned ResultReg = createResultReg(RC);
- addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg), AM);
return ResultReg;
}
return 0;
unsigned char OpFlag = 0;
if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic
OpFlag = X86II::MO_PIC_BASE_OFFSET;
- PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
+ PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
} else if (Subtarget->isPICStyleGOT()) {
OpFlag = X86II::MO_GOTOFF;
- PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
+ PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
} else if (Subtarget->isPICStyleRIPRel() &&
TM.getCodeModel() == CodeModel::Small) {
PICBase = X86::RIP;
// Create the load from the constant pool.
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
unsigned ResultReg = createResultReg(RC);
- addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg),
+ addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg),
MCPOffset, PICBase, OpFlag);
return ResultReg;
}
-unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
+unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
// Fail on dynamic allocas. At this point, getRegForValue has already
// checked its CSE maps, so if we're here trying to handle a dynamic
// alloca, we're not going to succeed. X86SelectAddress has a
// various places, but TargetMaterializeAlloca also needs a check
// in order to avoid recursion between getRegForValue,
  // X86SelectAddress, and TargetMaterializeAlloca.
- if (!StaticAllocaMap.count(C))
+ if (!FuncInfo.StaticAllocaMap.count(C))
return 0;
X86AddressMode AM;
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
- addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg), AM);
return ResultReg;
}
+/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
+/// vreg is being provided by the specified load instruction. If possible,
+/// try to fold the load as an operand to the instruction, returning true on
+/// success.
+bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) {
+ X86AddressMode AM;
+ if (!X86SelectAddress(LI->getOperand(0), AM))
+ return false;
+
+ X86InstrInfo &XII = (X86InstrInfo&)TII;
+
+ unsigned Size = TD.getTypeAllocSize(LI->getType());
+ unsigned Alignment = LI->getAlignment();
+
+ SmallVector<MachineOperand, 8> AddrOps;
+ AM.getFullAddress(AddrOps);
+
+ MachineInstr *Result =
+ XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment);
+ if (Result == 0) return false;
+
+ MI->getParent()->insert(MI, Result);
+ MI->eraseFromParent();
+ return true;
+}
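To make the effect of TryToFoldLoad concrete (illustrative only, not part of the patch): when a single-use load feeds an instruction that was already selected in register form, a successful fold swaps that instruction for its memory form and the load never has to be materialized. Roughly, for:

  //   %v = load i32* %p
  //   %s = add i32 %x, %v
  // before folding the add is selected as
  //   %vreg_s = ADD32rr %vreg_x, %vreg_v
  // and after foldMemoryOperandImpl succeeds it is replaced in place by
  //   %vreg_s = ADD32rm %vreg_x, <address of %p>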
+
+
namespace llvm {
- llvm::FastISel *X86::createFastISel(MachineFunction &mf,
- MachineModuleInfo *mmi,
- DwarfWriter *dw,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am
-#ifndef NDEBUG
- , SmallSet<Instruction*, 8> &cil
-#endif
- ) {
- return new X86FastISel(mf, mmi, dw, vm, bm, am
-#ifndef NDEBUG
- , cil
-#endif
- );
+ llvm::FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo) {
+ return new X86FastISel(funcInfo);
}
}