#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
+#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
static cl::opt<bool>
DisableMMX("disable-mmx", cl::Hidden, cl::desc("Disable use of MMX"));
// Forward declarations.
-static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
+static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
SDValue V2);
+static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
+ switch (TM.getSubtarget<X86Subtarget>().TargetType) {
+ default: llvm_unreachable("unknown subtarget type");
+ case X86Subtarget::isDarwin:
+ return new TargetLoweringObjectFileMachO();
+ case X86Subtarget::isELF:
+ return new TargetLoweringObjectFileELF();
+ case X86Subtarget::isMingw:
+ case X86Subtarget::isCygwin:
+ case X86Subtarget::isWindows:
+ return new TargetLoweringObjectFileCOFF();
+ }
+}
+
X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
- : TargetLowering(TM) {
+ : TargetLowering(TM, createTLOF(TM)) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
X86ScalarSSEf64 = Subtarget->hasSSE2();
X86ScalarSSEf32 = Subtarget->hasSSE1();
setShiftAmountType(MVT::i8);
setBooleanContents(ZeroOrOneBooleanContent);
setSchedulingPreference(SchedulingForRegPressure);
- setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
setStackPointerRegisterToSaveRestore(X86StackPtr);
if (Subtarget->isTargetDarwin()) {
setOperationAction(ISD::SELECT , MVT::i64 , Custom);
setOperationAction(ISD::SETCC , MVT::i64 , Custom);
}
- // X86 ret instruction may pop stack.
- setOperationAction(ISD::RET , MVT::Other, Custom);
setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
// Darwin ABI issue.
setOperationAction(ISD::SELECT, MVT::v4i16, Promote);
setOperationAction(ISD::SELECT, MVT::v2i32, Promote);
setOperationAction(ISD::SELECT, MVT::v1i64, Custom);
+ setOperationAction(ISD::VSETCC, MVT::v8i8, Custom);
+ setOperationAction(ISD::VSETCC, MVT::v4i16, Custom);
+ setOperationAction(ISD::VSETCC, MVT::v2i32, Custom);
}
if (!UseSoftFloat && Subtarget->hasSSE1()) {
// Custom lower build_vector, vector_shuffle, and extract_vector_elt.
for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
- MVT VT = (MVT::SimpleValueType)i;
+ EVT VT = (MVT::SimpleValueType)i;
// Do not attempt to custom lower non-power-of-2 vectors
if (!isPowerOf2_32(VT.getVectorNumElements()))
continue;
// Do not attempt to custom lower non-128-bit vectors
if (!VT.is128BitVector())
continue;
- setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::BUILD_VECTOR,
+ VT.getSimpleVT().SimpleTy, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE,
+ VT.getSimpleVT().SimpleTy, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT,
+ VT.getSimpleVT().SimpleTy, Custom);
}
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
// Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) {
- MVT VT = (MVT::SimpleValueType)i;
+ MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
+ EVT VT = SVT;
// Do not attempt to promote non-128-bit vectors
if (!VT.is128BitVector()) {
continue;
}
- setOperationAction(ISD::AND, VT, Promote);
- AddPromotedToType (ISD::AND, VT, MVT::v2i64);
- setOperationAction(ISD::OR, VT, Promote);
- AddPromotedToType (ISD::OR, VT, MVT::v2i64);
- setOperationAction(ISD::XOR, VT, Promote);
- AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
- setOperationAction(ISD::LOAD, VT, Promote);
- AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
- setOperationAction(ISD::SELECT, VT, Promote);
- AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
+ setOperationAction(ISD::AND, SVT, Promote);
+ AddPromotedToType (ISD::AND, SVT, MVT::v2i64);
+ setOperationAction(ISD::OR, SVT, Promote);
+ AddPromotedToType (ISD::OR, SVT, MVT::v2i64);
+ setOperationAction(ISD::XOR, SVT, Promote);
+ AddPromotedToType (ISD::XOR, SVT, MVT::v2i64);
+ setOperationAction(ISD::LOAD, SVT, Promote);
+ AddPromotedToType (ISD::LOAD, SVT, MVT::v2i64);
+ setOperationAction(ISD::SELECT, SVT, Promote);
+ AddPromotedToType (ISD::SELECT, SVT, MVT::v2i64);
}
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// Custom lower build_vector, vector_shuffle, and extract_vector_elt.
// This includes 256-bit vectors
for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v4i64; ++i) {
- MVT VT = (MVT::SimpleValueType)i;
+ EVT VT = (MVT::SimpleValueType)i;
// Do not attempt to custom lower non-power-of-2 vectors
if (!isPowerOf2_32(VT.getVectorNumElements()))
// Promote v32i8, v16i16, v8i32 load, select, and, or, xor to v4i64.
// Including 256-bit vectors
for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v4i64; i++) {
- MVT VT = (MVT::SimpleValueType)i;
+ EVT VT = (MVT::SimpleValueType)i;
if (!VT.is256BitVector()) {
continue;
}
-MVT X86TargetLowering::getSetCCResultType(MVT VT) const {
+MVT::SimpleValueType X86TargetLowering::getSetCCResultType(EVT VT) const {
return MVT::i8;
}
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
/// determining it.
-MVT
+EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
bool isSrcConst, bool isSrcStr,
SelectionDAG &DAG) const {
SelectionDAG &DAG) const {
if (usesGlobalOffsetTable())
return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy());
- if (!Subtarget->isPICStyleRIPRel())
+ if (!Subtarget->is64Bit())
// This doesn't have DebugLoc associated with it, but is not really the
// same as a Register.
return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc::getUnknownLoc(),
#include "X86GenCallingConv.inc"
-/// LowerRET - Lower an ISD::RET node.
-SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
- assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");
+SDValue
+X86TargetLowering::LowerReturn(SDValue Chain,
+ unsigned CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ DebugLoc dl, SelectionDAG &DAG) {
SmallVector<CCValAssign, 16> RVLocs;
- unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
- bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
- CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
- CCInfo.AnalyzeReturn(Op.getNode(), RetCC_X86);
+ CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
+ RVLocs, *DAG.getContext());
+ CCInfo.AnalyzeReturn(Outs, RetCC_X86);
// If this is the first return lowered for this function, add the regs to the
// liveout set for the function.
if (RVLocs[i].isRegLoc())
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
}
- SDValue Chain = Op.getOperand(0);
-
- // Handle tail call return.
- Chain = GetPossiblePreceedingTailCall(Chain, X86ISD::TAILCALL);
- if (Chain.getOpcode() == X86ISD::TAILCALL) {
- SDValue TailCall = Chain;
- SDValue TargetAddress = TailCall.getOperand(1);
- SDValue StackAdjustment = TailCall.getOperand(2);
- assert(((TargetAddress.getOpcode() == ISD::Register &&
- (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::EAX ||
- cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R11)) ||
- TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
- TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
- "Expecting an global address, external symbol, or register");
- assert(StackAdjustment.getOpcode() == ISD::Constant &&
- "Expecting a const value");
-
- SmallVector<SDValue,8> Operands;
- Operands.push_back(Chain.getOperand(0));
- Operands.push_back(TargetAddress);
- Operands.push_back(StackAdjustment);
- // Copy registers used by the call. Last operand is a flag so it is not
- // copied.
- for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
- Operands.push_back(Chain.getOperand(i));
- }
- return DAG.getNode(X86ISD::TC_RETURN, dl, MVT::Other, &Operands[0],
- Operands.size());
- }
- // Regular return.
SDValue Flag;
SmallVector<SDValue, 6> RetOps;
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- SDValue ValToCopy = Op.getOperand(i*2+1);
+ SDValue ValToCopy = Outs[i].Val;
// Returns in ST0/ST1 are handled specially: these are pushed as operands to
// the RET instruction and handled by the FP Stackifier.
// 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
// which is returned in RAX / RDX.
if (Subtarget->is64Bit()) {
- MVT ValVT = ValToCopy.getValueType();
+ EVT ValVT = ValToCopy.getValueType();
if (ValVT.isVector() && ValVT.getSizeInBits() == 64) {
ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy);
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1)
MVT::Other, &RetOps[0], RetOps.size());
}
+/// LowerCallResult - Lower the result values of a call into the
+/// appropriate copies out of appropriate physical registers.
+///
+SDValue
+X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
+ unsigned CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) {
-/// LowerCallResult - Lower the result values of an ISD::CALL into the
-/// appropriate copies out of appropriate physical registers. This assumes that
-/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
-/// being lowered. The returns a SDNode with the same number of values as the
-/// ISD::CALL.
-SDNode *X86TargetLowering::
-LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
- unsigned CallingConv, SelectionDAG &DAG) {
-
- DebugLoc dl = TheCall->getDebugLoc();
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
- bool isVarArg = TheCall->isVarArg();
bool Is64Bit = Subtarget->is64Bit();
- CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
- CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);
-
- SmallVector<SDValue, 8> ResultVals;
+ CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
+ RVLocs, *DAG.getContext());
+ CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
- MVT CopyVT = VA.getValVT();
+ EVT CopyVT = VA.getValVT();
// If this is x86-64, and we disabled SSE, we can't return FP values
if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
- ((Is64Bit || TheCall->isInreg()) && !Subtarget->hasSSE1())) {
- cerr << "SSE register return with SSE disabled\n";
- exit(1);
+ ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
+ llvm_report_error("SSE register return with SSE disabled");
}
// If this is a call to a function that returns an fp value on the floating
MVT::v2i64, InFlag).getValue(1);
Val = Chain.getValue(0);
Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
- Val, DAG.getConstant(0, MVT::i64));
+ Val, DAG.getConstant(0, MVT::i64));
} else {
Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
MVT::i64, InFlag).getValue(1);
DAG.getIntPtrConstant(1));
}
- ResultVals.push_back(Val);
+ InVals.push_back(Val);
}
- // Merge everything together with a MERGE_VALUES node.
- ResultVals.push_back(Chain);
- return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
- &ResultVals[0], ResultVals.size()).getNode();
+ return Chain;
}
// For info on fast calling convention see Fast Calling Convention (tail call)
// implementation LowerX86_32FastCCCallTo.
-/// CallIsStructReturn - Determines whether a CALL node uses struct return
+/// CallIsStructReturn - Determines whether a call uses struct return
/// semantics.
-static bool CallIsStructReturn(CallSDNode *TheCall) {
- unsigned NumOps = TheCall->getNumArgs();
- if (!NumOps)
+static bool CallIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
+ if (Outs.empty())
return false;
- return TheCall->getArgFlags(0).isSRet();
+ return Outs[0].Flags.isSRet();
}
-/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses struct
+/// ArgsAreStructReturn - Determines whether a function uses struct
/// return semantics.
-static bool ArgsAreStructReturn(SDValue Op) {
- unsigned NumArgs = Op.getNode()->getNumValues() - 1;
- if (!NumArgs)
+static bool
+ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
+ if (Ins.empty())
return false;
- return cast<ARG_FLAGSSDNode>(Op.getOperand(3))->getArgFlags().isSRet();
+ return Ins[0].Flags.isSRet();
}
-/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
-/// the callee to pop its own arguments. Callee pop is necessary to support tail
-/// calls.
+/// IsCalleePop - Determines whether the callee is required to pop its
+/// own arguments. Callee pop is necessary to support tail calls.
bool X86TargetLowering::IsCalleePop(bool IsVarArg, unsigned CallingConv) {
if (IsVarArg)
return false;
return CC_X86_32_C;
}
-/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to
-/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node.
+/// NameDecorationForCallConv - Selects the appropriate decoration to
+/// apply to a MachineFunction using the given calling convention.
NameDecorationStyle
-X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDValue Op) {
- unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- if (CC == CallingConv::X86_FastCall)
+X86TargetLowering::NameDecorationForCallConv(unsigned CallConv) {
+ if (CallConv == CallingConv::X86_FastCall)
return FastCall;
- else if (CC == CallingConv::X86_StdCall)
+ else if (CallConv == CallingConv::X86_StdCall)
return StdCall;
return None;
}
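+// For reference: on 32-bit Windows, StdCall decorates a symbol as "_foo@N"
+// and FastCall as "@foo@N", where N is the number of bytes of arguments.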
-/// CallRequiresGOTInRegister - Check whether the call requires the GOT pointer
-/// in a register before calling.
-bool X86TargetLowering::CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall) {
- return !IsTailCall && !Is64Bit &&
- getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT();
-}
-
-/// CallRequiresFnAddressInReg - Check whether the call requires the function
-/// address to be loaded in a register.
-bool
-X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
- return !Is64Bit && IsTailCall &&
- getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT();
-}
-
/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" with size and alignment information specified by
/// the specific parameter attribute. The copy will be passed as a byval
/*AlwaysInline=*/true, NULL, 0, NULL, 0);
}
-SDValue X86TargetLowering::LowerMemArgument(SDValue Op, SelectionDAG &DAG,
- const CCValAssign &VA,
- MachineFrameInfo *MFI,
- unsigned CC,
- SDValue Root, unsigned i) {
+SDValue
+X86TargetLowering::LowerMemArgument(SDValue Chain,
+ unsigned CallConv,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ const CCValAssign &VA,
+ MachineFrameInfo *MFI,
+ unsigned i) {
+
// Create the nodes corresponding to a load from this parameter slot.
- ISD::ArgFlagsTy Flags =
- cast<ARG_FLAGSSDNode>(Op.getOperand(3 + i))->getArgFlags();
- bool AlwaysUseMutable = (CC==CallingConv::Fast) && PerformTailCallOpt;
+ ISD::ArgFlagsTy Flags = Ins[i].Flags;
+ bool AlwaysUseMutable = (CallConv==CallingConv::Fast) && PerformTailCallOpt;
bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
// FIXME: For now, all byval parameter objects are marked mutable. This can be
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
if (Flags.isByVal())
return FIN;
- return DAG.getLoad(VA.getValVT(), Op.getDebugLoc(), Root, FIN,
+ return DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
PseudoSourceValue::getFixedStack(FI), 0);
}
SDValue
-X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerFormalArguments(SDValue Chain,
+ unsigned CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl,
+ SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) {
+
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
- DebugLoc dl = Op.getDebugLoc();
const Function* Fn = MF.getFunction();
if (Fn->hasExternalLinkage() &&
FuncInfo->setForceFramePointer(true);
// Decorate the function name.
- FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));
+ FuncInfo->setDecorationStyle(NameDecorationForCallConv(CallConv));
MachineFrameInfo *MFI = MF.getFrameInfo();
- SDValue Root = Op.getOperand(0);
- bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
- unsigned CC = MF.getFunction()->getCallingConv();
bool Is64Bit = Subtarget->is64Bit();
bool IsWin64 = Subtarget->isTargetWin64();
- assert(!(isVarArg && CC == CallingConv::Fast) &&
+ assert(!(isVarArg && CallConv == CallingConv::Fast) &&
"Var args not supported with calling convention fastcc");
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
- CCInfo.AnalyzeFormalArguments(Op.getNode(), CCAssignFnForNode(CC));
+ CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
+ ArgLocs, *DAG.getContext());
+ CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));
- SmallVector<SDValue, 8> ArgValues;
unsigned LastVal = ~0U;
+ SDValue ArgValue;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
// TODO: If an arg is passed in two places (e.g. reg and stack), skip later
LastVal = VA.getValNo();
if (VA.isRegLoc()) {
- MVT RegVT = VA.getLocVT();
+ EVT RegVT = VA.getLocVT();
TargetRegisterClass *RC = NULL;
if (RegVT == MVT::i32)
RC = X86::GR32RegisterClass;
RC = X86::FR64RegisterClass;
else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
RC = X86::VR128RegisterClass;
- else if (RegVT.isVector()) {
- assert(RegVT.getSizeInBits() == 64);
- if (!Is64Bit)
- RC = X86::VR64RegisterClass; // MMX values are passed in MMXs.
- else {
- // Darwin calling convention passes MMX values in either GPRs or
- // XMMs in x86-64. Other targets pass them in memory.
- if (RegVT != MVT::v1i64 && Subtarget->hasSSE2()) {
- RC = X86::VR128RegisterClass; // MMX values are passed in XMMs.
- RegVT = MVT::v2i64;
- } else {
- RC = X86::GR64RegisterClass; // v1i64 values are passed in GPRs.
- RegVT = MVT::i64;
- }
- }
- } else {
- assert(0 && "Unknown argument type!");
- }
+ else if (RegVT.isVector() && RegVT.getSizeInBits() == 64)
+ RC = X86::VR64RegisterClass;
+ else
+ llvm_unreachable("Unknown argument type!");
- unsigned Reg = DAG.getMachineFunction().addLiveIn(VA.getLocReg(), RC);
- SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT);
+ unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// If this is an 8 or 16-bit value, it is really passed promoted to 32
// bits. Insert an assert[sz]ext to capture this, then truncate to the
else if (VA.getLocInfo() == CCValAssign::ZExt)
ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
+ else if (VA.getLocInfo() == CCValAssign::BCvt)
+ ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
- if (VA.getLocInfo() != CCValAssign::Full)
- ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
-
- // Handle MMX values passed in GPRs.
- if (Is64Bit && RegVT != VA.getLocVT()) {
- if (RegVT.getSizeInBits() == 64 && RC == X86::GR64RegisterClass)
- ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), ArgValue);
- else if (RC == X86::VR128RegisterClass) {
+ if (VA.isExtInLoc()) {
+ // Handle MMX values passed in XMM regs.
+ if (RegVT.isVector()) {
ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
ArgValue, DAG.getConstant(0, MVT::i64));
- ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), ArgValue);
- }
+ ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
+ } else
+ ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
}
-
- ArgValues.push_back(ArgValue);
} else {
assert(VA.isMemLoc());
- ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i));
+ ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
}
+
+ // If value is passed via pointer - do a load.
+ if (VA.getLocInfo() == CCValAssign::Indirect)
+ ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, NULL, 0);
+
+ InVals.push_back(ArgValue);
}
// The x86-64 ABI for returning structs by value requires that we copy
// the sret argument into %rax for the return. Save the argument into
// a virtual register so that we can access it from the return points.
- if (Is64Bit && DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
- MachineFunction &MF = DAG.getMachineFunction();
+ if (Is64Bit && MF.getFunction()->hasStructRetAttr()) {
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
unsigned Reg = FuncInfo->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
FuncInfo->setSRetReturnReg(Reg);
}
- SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, ArgValues[0]);
- Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Root);
+ SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
}
unsigned StackSize = CCInfo.getNextStackOffset();
// align stack specially for tail calls
- if (PerformTailCallOpt && CC == CallingConv::Fast)
+ if (PerformTailCallOpt && CallConv == CallingConv::Fast)
StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) {
- if (Is64Bit || CC != CallingConv::X86_FastCall) {
+ if (Is64Bit || CallConv != CallingConv::X86_FastCall) {
VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
}
if (Is64Bit) {
for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
X86::GR64RegisterClass);
- SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::i64);
+ SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0);
for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
unsigned VReg = MF.addLiveIn(XMMArgRegs[NumXMMRegs],
X86::VR128RegisterClass);
- SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::v4f32);
+ SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0);
DAG.getIntPtrConstant(16));
}
if (!MemOps.empty())
- Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOps[0], MemOps.size());
}
}
- ArgValues.push_back(Root);
-
// Some CCs need callee pop.
- if (IsCalleePop(isVarArg, CC)) {
+ if (IsCalleePop(isVarArg, CallConv)) {
BytesToPopOnReturn = StackSize; // Callee pops everything.
BytesCallerReserves = 0;
} else {
BytesToPopOnReturn = 0; // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
- if (!Is64Bit && CC != CallingConv::Fast && ArgsAreStructReturn(Op))
+ if (!Is64Bit && CallConv != CallingConv::Fast && ArgsAreStructReturn(Ins))
BytesToPopOnReturn = 4;
BytesCallerReserves = StackSize;
}
if (!Is64Bit) {
RegSaveFrameIndex = 0xAAAAAAA; // RegSaveFrameIndex is X86-64 only.
- if (CC == CallingConv::X86_FastCall)
+ if (CallConv == CallingConv::X86_FastCall)
VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
}
FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
- // Return the new list of results.
- return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
- &ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
+ return Chain;
}
SDValue
-X86TargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
- const SDValue &StackPtr,
+X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
+ SDValue StackPtr, SDValue Arg,
+ DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
- SDValue Chain,
- SDValue Arg, ISD::ArgFlagsTy Flags) {
- DebugLoc dl = TheCall->getDebugLoc();
- unsigned LocMemOffset = VA.getLocMemOffset();
+ ISD::ArgFlagsTy Flags) {
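+  // On Win64 the first 32 bytes of the outgoing argument area form the
+  // "shadow space" in which the callee may spill its four register
+  // parameters (RCX, RDX, R8, R9), so stack arguments start beyond it.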
+ const unsigned FirstStackArgOffset = (Subtarget->isTargetWin64() ? 32 : 0);
+ unsigned LocMemOffset = FirstStackArgOffset + VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
if (Flags.isByVal()) {
if (!IsTailCall || FPDiff==0) return Chain;
// Adjust the Return address stack slot.
- MVT VT = getPointerTy();
+ EVT VT = getPointerTy();
OutRetAddr = getReturnAddressFrameIndex(DAG);
// Load the "old" Return address.
int SlotSize = Is64Bit ? 8 : 4;
int NewReturnAddrFI =
MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
- MVT VT = Is64Bit ? MVT::i64 : MVT::i32;
+ EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
PseudoSourceValue::getFixedStack(NewReturnAddrFI), 0);
return Chain;
}
-SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
+SDValue
+X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
+ unsigned CallConv, bool isVarArg, bool isTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) {
+
MachineFunction &MF = DAG.getMachineFunction();
- CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
- SDValue Chain = TheCall->getChain();
- unsigned CC = TheCall->getCallingConv();
- bool isVarArg = TheCall->isVarArg();
- bool IsTailCall = TheCall->isTailCall() &&
- CC == CallingConv::Fast && PerformTailCallOpt;
- SDValue Callee = TheCall->getCallee();
bool Is64Bit = Subtarget->is64Bit();
- bool IsStructRet = CallIsStructReturn(TheCall);
- DebugLoc dl = TheCall->getDebugLoc();
+ bool IsStructRet = CallIsStructReturn(Outs);
- assert(!(isVarArg && CC == CallingConv::Fast) &&
+ assert((!isTailCall ||
+ (CallConv == CallingConv::Fast && PerformTailCallOpt)) &&
+ "IsEligibleForTailCallOptimization missed a case!");
+ assert(!(isVarArg && CallConv == CallingConv::Fast) &&
"Var args not supported with calling convention fastcc");
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
- CCInfo.AnalyzeCallOperands(TheCall, CCAssignFnForNode(CC));
+ CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
+ ArgLocs, *DAG.getContext());
+ CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
- if (PerformTailCallOpt && CC == CallingConv::Fast)
+ if (PerformTailCallOpt && CallConv == CallingConv::Fast)
NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
int FPDiff = 0;
- if (IsTailCall) {
+ if (isTailCall) {
// Lower arguments at fp - stackoffset + fpdiff.
unsigned NumBytesCallerPushed =
MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
SDValue RetAddrFrIdx;
// Load return address for tail calls.
- Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, IsTailCall, Is64Bit,
+ Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, Is64Bit,
FPDiff, dl);
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
// of tail call optimization, arguments are handled later.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = TheCall->getArg(i);
- ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
+ EVT RegVT = VA.getLocVT();
+ SDValue Arg = Outs[i].Val;
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
bool isByVal = Flags.isByVal();
// Promote the value if needed.
switch (VA.getLocInfo()) {
- default: assert(0 && "Unknown loc info!");
+ default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::SExt:
- Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::ZExt:
- Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::AExt:
- Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
+ if (RegVT.isVector() && RegVT.getSizeInBits() == 128) {
+ // Special case: passing MMX values in XMM registers.
+ Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
+ Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
+ Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
+ } else
+ Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
+ break;
+ case CCValAssign::BCvt:
+ Arg = DAG.getNode(ISD::BIT_CONVERT, dl, RegVT, Arg);
break;
+ case CCValAssign::Indirect: {
+ // Store the argument.
+ SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
+ int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
+ Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
+ PseudoSourceValue::getFixedStack(FI), 0);
+ Arg = SpillSlot;
+ break;
+ }
}
if (VA.isRegLoc()) {
- if (Is64Bit) {
- MVT RegVT = VA.getLocVT();
- if (RegVT.isVector() && RegVT.getSizeInBits() == 64)
- switch (VA.getLocReg()) {
- default:
- break;
- case X86::RDI: case X86::RSI: case X86::RDX: case X86::RCX:
- case X86::R8: {
- // Special case: passing MMX values in GPR registers.
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
- break;
- }
- case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3:
- case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7: {
- // Special case: passing MMX values in XMM registers.
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
- Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
- Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
- break;
- }
- }
- }
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else {
- if (!IsTailCall || (IsTailCall && isByVal)) {
+ if (!isTailCall || (isTailCall && isByVal)) {
assert(VA.isMemLoc());
if (StackPtr.getNode() == 0)
StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy());
- MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
- Chain, Arg, Flags));
+ MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
+ dl, DAG, VA, Flags));
}
}
}
SDValue InFlag;
// Tail call byval lowering might overwrite argument registers so in case of
// tail call optimization the copies to registers are lowered later.
- if (!IsTailCall)
+ if (!isTailCall)
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
- // ELF / PIC requires GOT in the EBX register before function calls via PLT
- // GOT pointer.
- if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) {
- Chain = DAG.getCopyToReg(Chain, dl, X86::EBX,
- DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc::getUnknownLoc(),
- getPointerTy()),
- InFlag);
- InFlag = Chain.getValue(1);
- }
- // If we are tail calling and generating PIC/GOT style code load the address
- // of the callee into ecx. The value in ecx is used as target of the tail
- // jump. This is done to circumvent the ebx/callee-saved problem for tail
- // calls on PIC/GOT architectures. Normally we would just put the address of
- // GOT into ebx and then call target@PLT. But for tail callss ebx would be
- // restored (since ebx is callee saved) before jumping to the target@PLT.
- if (CallRequiresFnAddressInReg(Is64Bit, IsTailCall)) {
- // Note: The actual moving to ecx is done further down.
- GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
- if (G && !G->getGlobal()->hasHiddenVisibility() &&
- !G->getGlobal()->hasProtectedVisibility())
- Callee = LowerGlobalAddress(Callee, DAG);
- else if (isa<ExternalSymbolSDNode>(Callee))
- Callee = LowerExternalSymbol(Callee,DAG);
+
+ if (Subtarget->isPICStyleGOT()) {
+ // ELF / PIC requires GOT in the EBX register before function calls via PLT
+ // GOT pointer.
+ if (!isTailCall) {
+ Chain = DAG.getCopyToReg(Chain, dl, X86::EBX,
+ DAG.getNode(X86ISD::GlobalBaseReg,
+ DebugLoc::getUnknownLoc(),
+ getPointerTy()),
+ InFlag);
+ InFlag = Chain.getValue(1);
+ } else {
+      // If we are tail calling and generating PIC/GOT style code, load the
+ // address of the callee into ECX. The value in ecx is used as target of
+ // the tail jump. This is done to circumvent the ebx/callee-saved problem
+ // for tail calls on PIC/GOT architectures. Normally we would just put the
+ // address of GOT into ebx and then call target@PLT. But for tail calls
+ // ebx would be restored (since ebx is callee saved) before jumping to the
+ // target@PLT.
+
+ // Note: The actual moving to ECX is done further down.
+ GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
+ if (G && !G->getGlobal()->hasHiddenVisibility() &&
+ !G->getGlobal()->hasProtectedVisibility())
+ Callee = LowerGlobalAddress(Callee, DAG);
+ else if (isa<ExternalSymbolSDNode>(Callee))
+ Callee = LowerExternalSymbol(Callee, DAG);
+ }
}
if (Is64Bit && isVarArg) {
// For tail calls lower the arguments to the 'real' stack slot.
- if (IsTailCall) {
+ if (isTailCall) {
+ // Force all the incoming stack arguments to be loaded from the stack
+ // before any new outgoing arguments are stored to the stack, because the
+ // outgoing stack slots may alias the incoming argument stack slots, and
+ // the alias isn't otherwise explicit. This is slightly more conservative
+ // than necessary, because it means that each store effectively depends
+ // on every argument instead of just those arguments it would clobber.
+ SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
+
SmallVector<SDValue, 8> MemOpChains2;
SDValue FIN;
int FI = 0;
CCValAssign &VA = ArgLocs[i];
if (!VA.isRegLoc()) {
assert(VA.isMemLoc());
- SDValue Arg = TheCall->getArg(i);
- ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
+ SDValue Arg = Outs[i].Val;
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
// Create frame index.
int32_t Offset = VA.getLocMemOffset()+FPDiff;
uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
getPointerTy());
Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
- MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain,
+ MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
+ ArgChain,
Flags, DAG, dl));
} else {
// Store relative to framepointer.
MemOpChains2.push_back(
- DAG.getStore(Chain, dl, Arg, FIN,
+ DAG.getStore(ArgChain, dl, Arg, FIN,
PseudoSourceValue::getFixedStack(FI), 0));
}
}
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
// We should use extra load for direct calls to dllimported functions in
// non-JIT mode.
- if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
- getTargetMachine(), true))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy(),
- G->getOffset());
+ GlobalValue *GV = G->getGlobal();
+ if (!GV->hasDLLImportLinkage()) {
+ unsigned char OpFlags = 0;
+
+ // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
+      // external symbols must go through the PLT in PIC mode. If the symbol
+ // has hidden or protected visibility, or if it is static or local, then
+ // we don't need to use the PLT - we can directly call it.
+ if (Subtarget->isTargetELF() &&
+ getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
+ GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
+ OpFlags = X86II::MO_PLT;
+ } else if (Subtarget->isPICStyleStubAny() &&
+ (GV->isDeclaration() || GV->isWeakForLinker()) &&
+ Subtarget->getDarwinVers() < 9) {
+ // PC-relative references to external symbols should go through $stub,
+ // unless we're building with the leopard linker or later, which
+ // automatically synthesizes these stubs.
+ OpFlags = X86II::MO_DARWIN_STUB;
+ }
+
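+      // For example, a call to an external default-visibility function in a
+      // PIC ELF image is tagged MO_PLT here and is ultimately emitted as a
+      // PLT-indirected call such as "call foo@PLT".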
+ Callee = DAG.getTargetGlobalAddress(GV, getPointerTy(),
+ G->getOffset(), OpFlags);
+ }
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
- } else if (IsTailCall) {
+ unsigned char OpFlags = 0;
+
+ // On ELF targets, in either X86-64 or X86-32 mode, direct calls to external
+ // symbols should go through the PLT.
+ if (Subtarget->isTargetELF() &&
+ getTargetMachine().getRelocationModel() == Reloc::PIC_) {
+ OpFlags = X86II::MO_PLT;
+ } else if (Subtarget->isPICStyleStubAny() &&
+ Subtarget->getDarwinVers() < 9) {
+ // PC-relative references to external symbols should go through $stub,
+ // unless we're building with the leopard linker or later, which
+ // automatically synthesizes these stubs.
+ OpFlags = X86II::MO_DARWIN_STUB;
+ }
+
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
+ OpFlags);
+ } else if (isTailCall) {
unsigned Opc = Is64Bit ? X86::R11 : X86::EAX;
Chain = DAG.getCopyToReg(Chain, dl,
Callee,InFlag);
Callee = DAG.getRegister(Opc, getPointerTy());
// Add register as live out.
- DAG.getMachineFunction().getRegInfo().addLiveOut(Opc);
+ MF.getRegInfo().addLiveOut(Opc);
}
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDValue, 8> Ops;
- if (IsTailCall) {
+ if (isTailCall) {
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(0, true), InFlag);
InFlag = Chain.getValue(1);
-
- // Returns a chain & a flag for retval copy to use.
- NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
- Ops.clear();
}
Ops.push_back(Chain);
Ops.push_back(Callee);
- if (IsTailCall)
+ if (isTailCall)
Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
// Add argument registers to the end of the list so that they are known live
RegsToPass[i].second.getValueType()));
// Add an implicit use of the GOT pointer in EBX.
- if (!IsTailCall && !Is64Bit &&
- getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT())
+ if (!isTailCall && Subtarget->isPICStyleGOT())
Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
// Add an implicit use of AL for x86 vararg functions.
if (InFlag.getNode())
Ops.push_back(InFlag);
- if (IsTailCall) {
- assert(InFlag.getNode() &&
- "Flag must be set. Depend on flag being set in LowerRET");
- Chain = DAG.getNode(X86ISD::TAILCALL, dl,
- TheCall->getVTList(), &Ops[0], Ops.size());
+ if (isTailCall) {
+ // If this is the first return lowered for this function, add the regs
+ // to the liveout set for the function.
+ if (MF.getRegInfo().liveout_empty()) {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
+ *DAG.getContext());
+ CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
+ for (unsigned i = 0; i != RVLocs.size(); ++i)
+ if (RVLocs[i].isRegLoc())
+ MF.getRegInfo().addLiveOut(RVLocs[i].getLocReg());
+ }
+
+ assert(((Callee.getOpcode() == ISD::Register &&
+ (cast<RegisterSDNode>(Callee)->getReg() == X86::EAX ||
+            cast<RegisterSDNode>(Callee)->getReg() == X86::R11)) ||
+ Callee.getOpcode() == ISD::TargetExternalSymbol ||
+ Callee.getOpcode() == ISD::TargetGlobalAddress) &&
+         "Expecting a global address, external symbol, or register");
- return SDValue(Chain.getNode(), Op.getResNo());
+ return DAG.getNode(X86ISD::TC_RETURN, dl,
+ NodeTys, &Ops[0], Ops.size());
}
Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
// Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPush;
- if (IsCalleePop(isVarArg, CC))
+ if (IsCalleePop(isVarArg, CallConv))
NumBytesForCalleeToPush = NumBytes; // Callee pops everything
- else if (!Is64Bit && CC != CallingConv::Fast && IsStructRet)
+ else if (!Is64Bit && CallConv != CallingConv::Fast && IsStructRet)
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
// This is common for Darwin/X86, Linux & Mingw32 targets.
// Handle result values, copying them out of physregs into vregs that we
// return.
- return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG),
- Op.getResNo());
+ return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
+ Ins, dl, DAG, InVals);
}
return Offset;
}
-/// IsEligibleForTailCallElimination - Check to see whether the next instruction
-/// following the call is a return. A function is eligible if caller/callee
-/// calling conventions match, currently only fastcc supports tail calls, and
-/// the function CALL is immediatly followed by a RET.
-bool X86TargetLowering::IsEligibleForTailCallOptimization(CallSDNode *TheCall,
- SDValue Ret,
- SelectionDAG& DAG) const {
- if (!PerformTailCallOpt)
- return false;
-
- if (CheckTailCallReturnConstraints(TheCall, Ret)) {
- MachineFunction &MF = DAG.getMachineFunction();
- unsigned CallerCC = MF.getFunction()->getCallingConv();
- unsigned CalleeCC= TheCall->getCallingConv();
- if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
- SDValue Callee = TheCall->getCallee();
- // On x86/32Bit PIC/GOT tail calls are supported.
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
- !Subtarget->isPICStyleGOT()|| !Subtarget->is64Bit())
- return true;
-
- // Can only do local tail calls (in same module, hidden or protected) on
- // x86_64 PIC/GOT at the moment.
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- return G->getGlobal()->hasHiddenVisibility()
- || G->getGlobal()->hasProtectedVisibility();
- }
- }
-
- return false;
+/// IsEligibleForTailCallOptimization - Check whether the call is eligible
+/// for tail call optimization. Targets which want to do tail call
+/// optimization should implement this function.
+bool
+X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
+ unsigned CalleeCC,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SelectionDAG& DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ unsigned CallerCC = MF.getFunction()->getCallingConv();
+ return CalleeCC == CallingConv::Fast && CallerCC == CalleeCC;
}
FastISel *
}
+bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
+ bool hasSymbolicDisplacement) {
+ // Offset should fit into 32 bit immediate field.
+ if (!isInt32(Offset))
+ return false;
+
+ // If we don't have a symbolic displacement - we don't have any extra
+ // restrictions.
+ if (!hasSymbolicDisplacement)
+ return true;
+
+ // FIXME: Some tweaks might be needed for medium code model.
+ if (M != CodeModel::Small && M != CodeModel::Kernel)
+ return false;
+
+  // For the small code model, we assume that the last object is 16MB below
+  // the end of the 31-bit address boundary. We may also accept fairly large
+  // negative constants, knowing that all objects lie in the positive half of
+  // the address space.
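+  // Concretely: a symbol linked below 2^31 - 16MB plus any offset smaller
+  // than 16*1024*1024 still fits in the signed 32-bit displacement field.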
+ if (M == CodeModel::Small && Offset < 16*1024*1024)
+ return true;
+
+  // For the kernel code model we know that all objects reside in the negative
+  // half of the 32-bit address space, so we reject negative offsets (they may
+  // point just below an object) but accept fairly large positive ones.
+ if (M == CodeModel::Kernel && Offset > 0)
+ return true;
+
+ return false;
+}
+
/// TranslateX86CC - do a one-to-one translation of an ISD::CondCode to the X86
/// specific condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
}
switch (SetCCOpcode) {
- default: assert(0 && "Invalid integer condition!");
+ default: llvm_unreachable("Invalid integer condition!");
case ISD::SETEQ: return X86::COND_E;
case ISD::SETGT: return X86::COND_G;
case ISD::SETGE: return X86::COND_GE;
// 1 | 0 | 0 | X == Y
// 1 | 1 | 1 | unordered
switch (SetCCOpcode) {
- default: assert(0 && "Condcode should be pre-legalized away");
+ default: llvm_unreachable("Condcode should be pre-legalized away");
case ISD::SETUEQ:
case ISD::SETEQ: return X86::COND_E;
case ISD::SETOLT: // flipped
/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
/// the second operand.
-static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) {
if (VT == MVT::v4f32 || VT == MVT::v4i32 || VT == MVT::v4i16)
return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
if (VT == MVT::v2f64 || VT == MVT::v2i64)
/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFHW.
-static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
if (VT != MVT::v8i16)
return false;
/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFLW.
-static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
if (VT != MVT::v8i16)
return false;
/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
-static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) {
int NumElems = VT.getVectorNumElements();
if (NumElems != 2 && NumElems != 4)
return false;
/// the reverse of what x86 shuffles want. x86 shuffles require the lower
/// half elements to come from vector 1 (which would equal the dest.) and
/// the upper half to come from vector 2.
-static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) {
int NumElems = VT.getVectorNumElements();
if (NumElems != 2 && NumElems != 4)
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
-static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, MVT VT,
+static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT,
bool V2IsSplat = false) {
int NumElts = VT.getVectorNumElements();
if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
-static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, MVT VT,
+static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT,
bool V2IsSplat = false) {
int NumElts = VT.getVectorNumElements();
if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
-static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
int NumElems = VT.getVectorNumElements();
if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
return false;
/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>
-static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
int NumElems = VT.getVectorNumElements();
if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
return false;
/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
-static bool isMOVLMask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT) {
if (VT.getVectorElementType().getSizeInBits() < 32)
return false;
/// isCommutedMOVL - Returns true if the shuffle mask is the reverse of what
/// x86 movss wants: the lowest element must be the lowest element of vector 2,
/// with the other elements coming from vector 1 in order.
-static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, MVT VT,
+static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT,
bool V2IsSplat = false, bool V2IsUndef = false) {
int NumOps = VT.getVectorNumElements();
if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
return Mask;
}
+/// isZeroNode - Returns true if Elt is a constant zero or a floating point
+/// constant +0.0.
+bool X86::isZeroNode(SDValue Elt) {
+ return ((isa<ConstantSDNode>(Elt) &&
+ cast<ConstantSDNode>(Elt)->getZExtValue() == 0) ||
+ (isa<ConstantFPSDNode>(Elt) &&
+ cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
+}
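+// Note that only +0.0 qualifies: isPosZero() deliberately excludes -0.0,
+// whose set sign bit makes its bit pattern nonzero.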
+
/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in
/// their permute mask.
static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
SelectionDAG &DAG) {
- MVT VT = SVOp->getValueType(0);
+ EVT VT = SVOp->getValueType(0);
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> MaskVec;
/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
/// the two vector operands have swapped position.
-static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, MVT VT) {
+static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, EVT VT) {
unsigned NumElems = VT.getVectorNumElements();
for (unsigned i = 0; i != NumElems; ++i) {
int idx = Mask[i];
return true;
}
-/// isZeroNode - Returns true if Elt is a constant zero or a floating point
-/// constant +0.0.
-static inline bool isZeroNode(SDValue Elt) {
- return ((isa<ConstantSDNode>(Elt) &&
- cast<ConstantSDNode>(Elt)->getZExtValue() == 0) ||
- (isa<ConstantFPSDNode>(Elt) &&
- cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
-}
-
/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
/// FIXME: move to dag combiner / method on ShuffleVectorSDNode
unsigned Opc = V2.getOpcode();
if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
continue;
- if (Opc != ISD::BUILD_VECTOR || !isZeroNode(V2.getOperand(Idx-NumElems)))
+ if (Opc != ISD::BUILD_VECTOR ||
+ !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
return false;
} else if (Idx >= 0) {
unsigned Opc = V1.getOpcode();
if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
continue;
- if (Opc != ISD::BUILD_VECTOR || !isZeroNode(V1.getOperand(Idx)))
+ if (Opc != ISD::BUILD_VECTOR ||
+ !X86::isZeroNode(V1.getOperand(Idx)))
return false;
}
}
/// getZeroVector - Returns a vector of specified type with all zero elements.
///
-static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG,
+static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
DebugLoc dl) {
assert(VT.isVector() && "Expected a vector type");
/// getOnesVector - Returns a vector of specified type with all bits set.
///
-static SDValue getOnesVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) {
+static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
assert(VT.isVector() && "Expected a vector type");
// Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
- MVT VT = SVOp->getValueType(0);
+ EVT VT = SVOp->getValueType(0);
unsigned NumElems = VT.getVectorNumElements();
bool Changed = false;
/// getMOVL - Returns a vector_shuffle node for a movs{s|d}, movd
/// operation of specified width.
-static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
+static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> Mask;
}
/// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
-static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
+static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> Mask;
}
/// getUnpackhMask - Returns a vector_shuffle node for an unpackh operation.
-static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
+static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
unsigned Half = NumElems/2;
if (SV->getValueType(0).getVectorNumElements() <= 4)
return SDValue(SV, 0);
- MVT PVT = MVT::v4f32;
- MVT VT = SV->getValueType(0);
+ EVT PVT = MVT::v4f32;
+ EVT VT = SV->getValueType(0);
DebugLoc dl = SV->getDebugLoc();
SDValue V1 = SV->getOperand(0);
int NumElems = VT.getVectorNumElements();
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
bool isZero, bool HasSSE2,
SelectionDAG &DAG) {
- MVT VT = V2.getValueType();
+ EVT VT = V2.getValueType();
SDValue V1 = isZero
? getZeroVector(VT, HasSSE2, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
unsigned NumElems = VT.getVectorNumElements();
continue;
}
SDValue Elt = DAG.getShuffleScalarElt(SVOp, Index);
- if (Elt.getNode() && isZeroNode(Elt))
+ if (Elt.getNode() && X86::isZeroNode(Elt))
++NumZeros;
else
break;
/// getVShift - Return a vector logical shift node.
///
-static SDValue getVShift(bool isLeft, MVT VT, SDValue SrcOp,
+static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
unsigned NumBits, SelectionDAG &DAG,
const TargetLowering &TLI, DebugLoc dl) {
bool isMMX = VT.getSizeInBits() == 64;
- MVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64;
+ EVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64;
unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl);
}
- MVT VT = Op.getValueType();
- MVT EVT = VT.getVectorElementType();
- unsigned EVTBits = EVT.getSizeInBits();
+ EVT VT = Op.getValueType();
+ EVT ExtVT = VT.getVectorElementType();
+ unsigned EVTBits = ExtVT.getSizeInBits();
unsigned NumElems = Op.getNumOperands();
unsigned NumZero = 0;
if (Elt.getOpcode() != ISD::Constant &&
Elt.getOpcode() != ISD::ConstantFP)
IsAllConstants = false;
- if (isZeroNode(Elt))
+ if (X86::isZeroNode(Elt))
NumZero++;
else {
NonZeros |= (1 << i);
// insertion that way. Only do this if the value is non-constant or if the
// value is a constant being inserted into element 0. It is cheaper to do
// a constant pool load than it is to do a movd + shuffle.
- if (EVT == MVT::i64 && !Subtarget->is64Bit() &&
+ if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
(!IsAllConstants || Idx == 0)) {
if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
// Handle MMX and SSE both.
- MVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
+ EVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
unsigned VecElts = VT == MVT::v2i64 ? 4 : 2;
// Truncate the value (which may itself be a constant) to i32, and
if (Idx == 0) {
if (NumZero == 0) {
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
- } else if (EVT == MVT::i32 || EVT == MVT::f32 || EVT == MVT::f64 ||
- (EVT == MVT::i64 && Subtarget->is64Bit())) {
+ } else if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
+ (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
// Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget->hasSSE2(),
DAG);
- } else if (EVT == MVT::i16 || EVT == MVT::i8) {
+ } else if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
- MVT MiddleVT = VT.getSizeInBits() == 64 ? MVT::v2i32 : MVT::v4i32;
+ EVT MiddleVT = VT.getSizeInBits() == 64 ? MVT::v2i32 : MVT::v4i32;
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true,
Subtarget->hasSSE2(), DAG);
// Is it a vector logical left shift?
if (NumElems == 2 && Idx == 1 &&
- isZeroNode(Op.getOperand(0)) && !isZeroNode(Op.getOperand(1))) {
+ X86::isZeroNode(Op.getOperand(0)) &&
+ !X86::isZeroNode(Op.getOperand(1))) {
unsigned NumBits = VT.getSizeInBits();
return getVShift(true, VT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
SelectionDAG &DAG,
TargetLowering &TLI, DebugLoc dl) {
- MVT VT = SVOp->getValueType(0);
+ EVT VT = SVOp->getValueType(0);
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
unsigned NumElems = VT.getVectorNumElements();
unsigned NewWidth = (NumElems == 4) ? 2 : 4;
- MVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
- MVT MaskEltVT = MaskVT.getVectorElementType();
- MVT NewVT = MaskVT;
- switch (VT.getSimpleVT()) {
+ EVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
+ EVT MaskEltVT = MaskVT.getVectorElementType();
+ EVT NewVT = MaskVT;
+ switch (VT.getSimpleVT().SimpleTy) {
default: assert(false && "Unexpected!");
case MVT::v4f32: NewVT = MVT::v2f64; break;
case MVT::v4i32: NewVT = MVT::v2i64; break;
/// getVZextMovL - Return a zero-extending vector move low node.
///
-static SDValue getVZextMovL(MVT VT, MVT OpVT,
+static SDValue getVZextMovL(EVT VT, EVT OpVT,
SDValue SrcOp, SelectionDAG &DAG,
const X86Subtarget *Subtarget, DebugLoc dl) {
if (VT == MVT::v2f64 || VT == MVT::v4f32) {
if (!LD) {
// movssrr and movsdrr do not clear top bits. Try to use movd, movq
// instead.
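// (Editor's note: MOVSS/MOVSD between registers merge into the upper bits
// of the destination, whereas MOVD/MOVQ from a GPR zero the rest of the
// XMM register, which is what a zero-extending move-low needs.)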
- MVT EVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
- if ((EVT != MVT::i64 || Subtarget->is64Bit()) &&
+ MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
+ if ((ExtVT.SimpleTy != MVT::i64 || Subtarget->is64Bit()) &&
SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
- SrcOp.getOperand(0).getOperand(0).getValueType() == EVT) {
+ SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
// PR2108
OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
DebugLoc dl = SVOp->getDebugLoc();
- MVT VT = SVOp->getValueType(0);
+ EVT VT = SVOp->getValueType(0);
SmallVector<std::pair<int, int>, 8> Locs;
Locs.resize(4);
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
unsigned NumElems = VT.getVectorNumElements();
bool isMMX = VT.getSizeInBits() == 64;
if (isShift && ShVal.hasOneUse()) {
// If the shifted value has multiple uses, it may be cheaper to use
// v_set0 + movlhps or movhlps, etc.
- MVT EVT = VT.getVectorElementType();
- ShAmt *= EVT.getSizeInBits();
+ EVT EltVT = VT.getVectorElementType();
+ ShAmt *= EltVT.getSizeInBits();
return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
}
if (isShift) {
// No better options. Use a vshl / vsrl.
- MVT EVT = VT.getVectorElementType();
- ShAmt *= EVT.getSizeInBits();
+ EVT EltVT = VT.getVectorElementType();
+ ShAmt *= EltVT.getSizeInBits();
return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
}
SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
SelectionDAG &DAG) {
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
if (VT.getSizeInBits() == 8) {
SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
return Res;
}
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
// TODO: handle v16i8.
if (VT.getSizeInBits() == 16) {
MVT::v4i32, Vec),
Op.getOperand(1)));
// Transform it so it matches pextrw, which produces a 32-bit result.
- MVT EVT = (MVT::SimpleValueType)(VT.getSimpleVT()+1);
- SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EVT,
- Op.getOperand(0), Op.getOperand(1));
- SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EVT, Extract,
+ EVT EltVT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy+1);
+ SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
+ Op.getOperand(0), Op.getOperand(1));
+ SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
// SHUFPS the element to the lowest double word, then movss.
int Mask[4] = { Idx, -1, -1, -1 };
- MVT VVT = Op.getOperand(0).getValueType();
+ EVT VVT = Op.getOperand(0).getValueType();
SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
DAG.getUNDEF(VVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
// Note if the lower 64 bits of the result of the UNPCKHPD is then stored
// to a f64mem, the whole operation is folded into a single MOVHPDmr.
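// (Editor's note: UNPCKHPD with an undef second operand brings the high
// f64 element down to the low half; storing that low half then folds the
// shuffle + store into one MOVHPD-to-memory instruction.)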
int Mask[2] = { 1, -1 };
- MVT VVT = Op.getOperand(0).getValueType();
+ EVT VVT = Op.getOperand(0).getValueType();
SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
DAG.getUNDEF(VVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG){
- MVT VT = Op.getValueType();
- MVT EVT = VT.getVectorElementType();
+ EVT VT = Op.getValueType();
+ EVT EltVT = VT.getVectorElementType();
DebugLoc dl = Op.getDebugLoc();
SDValue N0 = Op.getOperand(0);
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits.
N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
+ // Create this as a scalar-to-vector node.
+ N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
- } else if (EVT == MVT::i32) {
- // InsertPS works with constant index.
- if (isa<ConstantSDNode>(N2))
- return Op;
+ } else if (EltVT == MVT::i32 && isa<ConstantSDNode>(N2)) {
+ // PINSR* works with constant index.
+ return Op;
}
return SDValue();
}
SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
- MVT VT = Op.getValueType();
- MVT EVT = VT.getVectorElementType();
+ EVT VT = Op.getValueType();
+ EVT EltVT = VT.getVectorElementType();
if (Subtarget->hasSSE41())
return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,
Op.getOperand(0))));
+ if (Op.getValueType() == MVT::v1i64 &&
+ Op.getOperand(0).getValueType() == MVT::i64)
+ return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
+
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
- MVT VT = MVT::v2i32;
- switch (Op.getValueType().getSimpleVT()) {
+ EVT VT = MVT::v2i32;
+ switch (Op.getValueType().getSimpleVT().SimpleTy) {
default: break;
case MVT::v16i8:
case MVT::v8i16:
// global base reg.
unsigned char OpFlag = 0;
unsigned WrapperKind = X86ISD::Wrapper;
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
- if (Subtarget->isPICStyleStub())
- OpFlag = X86II::MO_PIC_BASE_OFFSET;
- else if (Subtarget->isPICStyleGOT())
- OpFlag = X86II::MO_GOTOFF;
- else if (Subtarget->isPICStyleRIPRel() &&
- getTargetMachine().getCodeModel() == CodeModel::Small)
- WrapperKind = X86ISD::WrapperRIP;
- }
+ CodeModel::Model M = getTargetMachine().getCodeModel();
+
+ if (Subtarget->isPICStyleRIPRel() &&
+ (M == CodeModel::Small || M == CodeModel::Kernel))
+ WrapperKind = X86ISD::WrapperRIP;
+ else if (Subtarget->isPICStyleGOT())
+ OpFlag = X86II::MO_GOTOFF;
+ else if (Subtarget->isPICStyleStubPIC())
+ OpFlag = X86II::MO_PIC_BASE_OFFSET;
SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
CP->getAlignment(),
// global base reg.
unsigned char OpFlag = 0;
unsigned WrapperKind = X86ISD::Wrapper;
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
- if (Subtarget->isPICStyleStub())
- OpFlag = X86II::MO_PIC_BASE_OFFSET;
- else if (Subtarget->isPICStyleGOT())
- OpFlag = X86II::MO_GOTOFF;
- else if (Subtarget->isPICStyleRIPRel())
- WrapperKind = X86ISD::WrapperRIP;
- }
+ CodeModel::Model M = getTargetMachine().getCodeModel();
+
+ if (Subtarget->isPICStyleRIPRel() &&
+ (M == CodeModel::Small || M == CodeModel::Kernel))
+ WrapperKind = X86ISD::WrapperRIP;
+ else if (Subtarget->isPICStyleGOT())
+ OpFlag = X86II::MO_GOTOFF;
+ else if (Subtarget->isPICStyleStubPIC())
+ OpFlag = X86II::MO_PIC_BASE_OFFSET;
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
OpFlag);
// global base reg.
unsigned char OpFlag = 0;
unsigned WrapperKind = X86ISD::Wrapper;
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
- if (Subtarget->isPICStyleStub())
- OpFlag = X86II::MO_PIC_BASE_OFFSET;
- else if (Subtarget->isPICStyleGOT())
- OpFlag = X86II::MO_GOTOFF;
- else if (Subtarget->isPICStyleRIPRel())
- WrapperKind = X86ISD::WrapperRIP;
- }
+ CodeModel::Model M = getTargetMachine().getCodeModel();
+
+ if (Subtarget->isPICStyleRIPRel() &&
+ (M == CodeModel::Small || M == CodeModel::Kernel))
+ WrapperKind = X86ISD::WrapperRIP;
+ else if (Subtarget->isPICStyleGOT())
+ OpFlag = X86II::MO_GOTOFF;
+ else if (Subtarget->isPICStyleStubPIC())
+ OpFlag = X86II::MO_PIC_BASE_OFFSET;
SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
// With PIC, the address is actually $g + Offset.
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
- !Subtarget->isPICStyleRIPRel()) {
+ !Subtarget->is64Bit()) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
DebugLoc::getUnknownLoc(),
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
int64_t Offset,
SelectionDAG &DAG) const {
- bool IsPic = getTargetMachine().getRelocationModel() == Reloc::PIC_;
- bool ExtraLoadRequired =
- Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false);
-
// Create the TargetGlobalAddress node, folding in the constant
// offset if it is legal.
+ unsigned char OpFlags =
+ Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
+ CodeModel::Model M = getTargetMachine().getCodeModel();
SDValue Result;
- if (!IsPic && !ExtraLoadRequired && isInt32(Offset)) {
+ if (OpFlags == X86II::MO_NO_FLAG &&
+ X86::isOffsetSuitableForCodeModel(Offset, M)) {
+ // A direct static reference to a global.
Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
Offset = 0;
} else {
- unsigned char OpFlags = 0;
-
- if (Subtarget->isPICStyleRIPRel() &&
- getTargetMachine().getRelocationModel() != Reloc::Static) {
- if (ExtraLoadRequired)
- OpFlags = X86II::MO_GOTPCREL;
- } else if (Subtarget->isPICStyleGOT() &&
- getTargetMachine().getRelocationModel() == Reloc::PIC_) {
- if (ExtraLoadRequired)
- OpFlags = X86II::MO_GOT;
- else
- OpFlags = X86II::MO_GOTOFF;
- }
-
Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), 0, OpFlags);
}
if (Subtarget->isPICStyleRIPRel() &&
- getTargetMachine().getCodeModel() == CodeModel::Small)
+ (M == CodeModel::Small || M == CodeModel::Kernel))
Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
else
Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
- if (IsPic && !Subtarget->isPICStyleRIPRel()) {
+ if (isGlobalRelativeToPICBase(OpFlags)) {
Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
Result);
}
- // For Darwin & Mingw32, external and weak symbols are indirect, so we want to
- // load the value at address GV, not the value of GV itself. This means that
- // the GlobalAddress must be in the base or index register of the address, not
- // the GV offset field. Platform check is inside GVRequiresExtraLoad() call
- // The same applies for external symbols during PIC codegen
- if (ExtraLoadRequired)
+ // For globals that require a load from a stub to get the address, emit the
+ // load.
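+ // (Editor's note: e.g. on Darwin with PIC, an external global is reached
+ // through a non-lazy pointer; the address computed above is the stub slot,
+ // and this load fetches the global's real address out of it.)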
+ if (isGlobalStubReference(OpFlags))
Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
PseudoSourceValue::getGOT(), 0);
static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
- SDValue *InFlag, const MVT PtrVT, unsigned ReturnReg,
+ SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
unsigned char OperandFlags) {
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
DebugLoc dl = GA->getDebugLoc();
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
- const MVT PtrVT) {
+ const EVT PtrVT) {
SDValue InFlag;
DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better
SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
- const MVT PtrVT) {
+ const EVT PtrVT) {
return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT,
X86::RAX, X86II::MO_TLSGD);
}
// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
// "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
- const MVT PtrVT, TLSModel::Model model,
+ const EVT PtrVT, TLSModel::Model model,
bool is64Bit) {
DebugLoc dl = GA->getDebugLoc();
// Get the Thread Pointer
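// (Editor's note: on Linux the thread pointer lives in a segment register,
// %gs in 32-bit mode and %fs in 64-bit mode; exec-model TLS accesses are a
// segment-relative load plus an offset known at link time.)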
Subtarget->is64Bit());
}
- assert(0 && "Unreachable");
+ llvm_unreachable("Unreachable");
return SDValue();
}
/// take a 2 x i32 value to shift plus a shift amount.
SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) {
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
unsigned VTBits = VT.getSizeInBits();
DebugLoc dl = Op.getDebugLoc();
bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
SDValue ShOpLo = Op.getOperand(0);
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
- SDValue Tmp1 = isSRA ?
- DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
- DAG.getConstant(VTBits - 1, MVT::i8)) :
- DAG.getConstant(0, VT);
+ SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
+ DAG.getConstant(VTBits - 1, MVT::i8))
+ : DAG.getConstant(0, VT);
SDValue Tmp2, Tmp3;
if (Op.getOpcode() == ISD::SHL_PARTS) {
}
SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
- DAG.getConstant(VTBits, MVT::i8));
+ DAG.getConstant(VTBits, MVT::i8));
SDValue Cond = DAG.getNode(X86ISD::CMP, dl, VT,
- AndNode, DAG.getConstant(0, MVT::i8));
+ AndNode, DAG.getConstant(0, MVT::i8));
SDValue Hi, Lo;
SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
}
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
- MVT SrcVT = Op.getOperand(0).getValueType();
+ EVT SrcVT = Op.getOperand(0).getValueType();
if (SrcVT.isVector()) {
if (SrcVT == MVT::v2i32 && Op.getValueType() == MVT::v2f64) {
return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}
-SDValue X86TargetLowering::BuildFILD(SDValue Op, MVT SrcVT, SDValue Chain,
+SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
SDValue StackSlot,
SelectionDAG &DAG) {
// Build the FILD
*/
DebugLoc dl = Op.getDebugLoc();
+ LLVMContext *Context = DAG.getContext();
// Build some magic constants.
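// (Editor's sketch of the idea: this is the classic double-double u64->f64
// trick. The low 32 bits are pasted into the mantissa of 2^52 (high word
// 0x43300000) and the high 32 bits into 2^84 (high word 0x45300000);
// subtracting those same powers of two recovers the integer value as f64.)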
std::vector<Constant*> CV0;
- CV0.push_back(ConstantInt::get(APInt(32, 0x45300000)));
- CV0.push_back(ConstantInt::get(APInt(32, 0x43300000)));
- CV0.push_back(ConstantInt::get(APInt(32, 0)));
- CV0.push_back(ConstantInt::get(APInt(32, 0)));
+ CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x45300000)));
+ CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x43300000)));
+ CV0.push_back(ConstantInt::get(*Context, APInt(32, 0)));
+ CV0.push_back(ConstantInt::get(*Context, APInt(32, 0)));
Constant *C0 = ConstantVector::get(CV0);
SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
std::vector<Constant*> CV1;
- CV1.push_back(ConstantFP::get(APFloat(APInt(64, 0x4530000000000000ULL))));
- CV1.push_back(ConstantFP::get(APFloat(APInt(64, 0x4330000000000000ULL))));
+ CV1.push_back(
+ ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL))));
+ CV1.push_back(
+ ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL))));
Constant *C1 = ConstantVector::get(CV1);
SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
// Handle final rounding.
- MVT DestVT = Op.getValueType();
+ EVT DestVT = Op.getValueType();
if (DestVT.bitsLT(MVT::f64)) {
return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
- MVT SrcVT = N0.getValueType();
+ EVT SrcVT = N0.getValueType();
if (SrcVT == MVT::i64) {
// We only handle SSE2 f64 target here; caller can expand the rest.
if (Op.getValueType() != MVT::f64 || !X86ScalarSSEf64)
FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) {
DebugLoc dl = Op.getDebugLoc();
- MVT DstTy = Op.getValueType();
+ EVT DstTy = Op.getValueType();
if (!IsSigned) {
assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
unsigned Opc;
- switch (DstTy.getSimpleVT()) {
- default: assert(0 && "Invalid FP_TO_SINT to lower!");
+ switch (DstTy.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
}
SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) {
+ LLVMContext *Context = DAG.getContext();
DebugLoc dl = Op.getDebugLoc();
- MVT VT = Op.getValueType();
- MVT EltVT = VT;
+ EVT VT = Op.getValueType();
+ EVT EltVT = VT;
if (VT.isVector())
EltVT = VT.getVectorElementType();
std::vector<Constant*> CV;
if (EltVT == MVT::f64) {
- Constant *C = ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63))));
+ Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))));
CV.push_back(C);
CV.push_back(C);
} else {
- Constant *C = ConstantFP::get(APFloat(APInt(32, ~(1U << 31))));
+ Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))));
CV.push_back(C);
CV.push_back(C);
CV.push_back(C);
}
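// (Editor's note: fabs lowers to a bitwise AND with a constant-pool mask
// that clears only the sign bit: ~(1ULL << 63) per f64 lane and
// ~(1U << 31) per f32 lane.)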
SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) {
+ LLVMContext *Context = DAG.getContext();
DebugLoc dl = Op.getDebugLoc();
- MVT VT = Op.getValueType();
- MVT EltVT = VT;
+ EVT VT = Op.getValueType();
+ EVT EltVT = VT;
unsigned EltNum = 1;
if (VT.isVector()) {
EltVT = VT.getVectorElementType();
}
std::vector<Constant*> CV;
if (EltVT == MVT::f64) {
- Constant *C = ConstantFP::get(APFloat(APInt(64, 1ULL << 63)));
+ Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)));
CV.push_back(C);
CV.push_back(C);
} else {
- Constant *C = ConstantFP::get(APFloat(APInt(32, 1U << 31)));
+ Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)));
CV.push_back(C);
CV.push_back(C);
CV.push_back(C);
}
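// (Editor's note: fneg lowers to a bitwise XOR with the sign-bit mask,
// 1ULL << 63 per f64 lane and 1U << 31 per f32 lane, flipping only the
// sign.)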
SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
+ LLVMContext *Context = DAG.getContext();
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
DebugLoc dl = Op.getDebugLoc();
- MVT VT = Op.getValueType();
- MVT SrcVT = Op1.getValueType();
+ EVT VT = Op.getValueType();
+ EVT SrcVT = Op1.getValueType();
// If second operand is smaller, extend it first.
if (SrcVT.bitsLT(VT)) {
// First get the sign bit of second operand.
std::vector<Constant*> CV;
if (SrcVT == MVT::f64) {
- CV.push_back(ConstantFP::get(APFloat(APInt(64, 1ULL << 63))));
- CV.push_back(ConstantFP::get(APFloat(APInt(64, 0))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
} else {
- CV.push_back(ConstantFP::get(APFloat(APInt(32, 1U << 31))));
- CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
- CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
- CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
}
Constant *C = ConstantVector::get(CV);
SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
// Clear first operand sign bit.
CV.clear();
if (VT == MVT::f64) {
- CV.push_back(ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63)))));
- CV.push_back(ConstantFP::get(APFloat(APInt(64, 0))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
} else {
- CV.push_back(ConstantFP::get(APFloat(APInt(32, ~(1U << 31)))));
- CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
- CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
- CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
}
C = ConstantVector::get(CV);
CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
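// (Editor's note: together these implement
// copysign(x, y) = (x & ~SignMask) | (y & SignMask);
// the two constant-pool vectors built above are exactly those masks.)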
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
DebugLoc dl = Op.getDebugLoc();
if (isFP) {
unsigned SSECC = 8;
- MVT VT0 = Op0.getValueType();
+ EVT VT0 = Op0.getValueType();
assert(VT0 == MVT::v4f32 || VT0 == MVT::v2f64);
unsigned Opc = VT0 == MVT::v4f32 ? X86ISD::CMPPS : X86ISD::CMPPD;
bool Swap = false;
NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ);
}
- assert(0 && "Illegal FP comparison");
+ llvm_unreachable("Illegal FP comparison");
}
// Handle all other FP comparisons here.
return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8));
unsigned Opc = 0, EQOpc = 0, GTOpc = 0;
bool Swap = false, Invert = false, FlipSigns = false;
- switch (VT.getSimpleVT()) {
+ switch (VT.getSimpleVT().SimpleTy) {
default: break;
+ case MVT::v8i8:
case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
+ case MVT::v4i16:
case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
+ case MVT::v2i32:
case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
}
// Since SSE has no unsigned integer comparisons, we need to flip the sign
// bits of the inputs before performing those operations.
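// (Editor's note, worked example: x <u y <=> (x ^ SignBit) <s (y ^ SignBit).
// For i32 lanes, 0xFFFFFFFF >u 0x00000001 becomes
// 0x7FFFFFFF >s 0x80000001 after XOR with 0x80000000, which PCMPGTD
// evaluates correctly.)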
if (FlipSigns) {
- MVT EltVT = VT.getVectorElementType();
+ EVT EltVT = VT.getVectorElementType();
SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()),
EltVT);
std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit);
SDValue Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
bool IllegalFPCMov = false;
if (VT.isFloatingPoint() && !VT.isVector() &&
SDValue Flag;
- MVT IntPtr = getPointerTy();
- MVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
+ EVT IntPtr = getPointerTy();
+ EVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));
if (const char *bzeroEntry = V &&
V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
- MVT IntPtr = getPointerTy();
- const Type *IntPtrTy = TD->getIntPtrType();
+ EVT IntPtr = getPointerTy();
+ const Type *IntPtrTy = TD->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst;
Entry.Node = Size;
Args.push_back(Entry);
std::pair<SDValue,SDValue> CallResult =
- LowerCallTo(Chain, Type::VoidTy, false, false, false, false,
- 0, CallingConv::C, false,
+ LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()),
+ false, false, false, false,
+ 0, CallingConv::C, false, /*isReturnValueUsed=*/false,
DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl);
return CallResult.second;
}
uint64_t SizeVal = ConstantSize->getZExtValue();
SDValue InFlag(0, 0);
- MVT AVT;
+ EVT AVT;
SDValue Count;
ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src);
unsigned BytesLeft = 0;
if (TwoRepStos) {
InFlag = Chain.getValue(1);
Count = Size;
- MVT CVT = Count.getValueType();
+ EVT CVT = Count.getValueType();
SDValue Left = DAG.getNode(ISD::AND, dl, CVT, Count,
DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
Chain = DAG.getCopyToReg(Chain, dl, (CVT == MVT::i64) ? X86::RCX :
} else if (BytesLeft) {
// Handle the last 1 - 7 bytes.
unsigned Offset = SizeVal - BytesLeft;
- MVT AddrVT = Dst.getValueType();
- MVT SizeVT = Size.getValueType();
+ EVT AddrVT = Dst.getValueType();
+ EVT SizeVT = Size.getValueType();
Chain = DAG.getMemset(Chain, dl,
DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
return SDValue();
// DWORD aligned
- MVT AVT = MVT::i32;
+ EVT AVT = MVT::i32;
if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) // QWORD aligned
AVT = MVT::i64;
if (BytesLeft) {
// Handle the last 1 - 7 bytes.
unsigned Offset = SizeVal - BytesLeft;
- MVT DstVT = Dst.getValueType();
- MVT SrcVT = Src.getValueType();
- MVT SizeVT = Size.getValueType();
+ EVT DstVT = Dst.getValueType();
+ EVT SrcVT = Src.getValueType();
+ EVT SizeVT = Size.getValueType();
Results.push_back(DAG.getMemcpy(Chain, dl,
DAG.getNode(ISD::ADD, dl, DstVT, Dst,
DAG.getConstant(Offset, DstVT)),
SDValue SrcPtr = Op.getOperand(1);
SDValue SrcSV = Op.getOperand(2);
- assert(0 && "VAArgInst is not yet implemented for x86-64!");
- abort();
+ llvm_report_error("VAArgInst is not yet implemented for x86-64!");
return SDValue();
}
DAG.getConstant(X86CC, MVT::i8), Cond);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
+ // ptest intrinsics. The intrinsics these come from are designed to return
+ // an integer value, not just an instruction, so lower them to the ptest
+ // pattern and a setcc for the result.
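+ // (Editor's sketch: e.g. a call to @llvm.x86.sse41.ptestz(%a, %b) lowers
+ // to PTEST %a, %b followed by SETE, zero-extended to the i32 result,
+ // i.e. the return value is ZF.)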
+ case Intrinsic::x86_sse41_ptestz:
+ case Intrinsic::x86_sse41_ptestc:
+ case Intrinsic::x86_sse41_ptestnzc: {
+ unsigned X86CC = 0;
+ switch (IntNo) {
+ default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
+ case Intrinsic::x86_sse41_ptestz:
+ // ZF = 1
+ X86CC = X86::COND_E;
+ break;
+ case Intrinsic::x86_sse41_ptestc:
+ // CF = 1
+ X86CC = X86::COND_B;
+ break;
+ case Intrinsic::x86_sse41_ptestnzc:
+ // ZF and CF = 0
+ X86CC = X86::COND_A;
+ break;
+ }
+
+ SDValue LHS = Op.getOperand(1);
+ SDValue RHS = Op.getOperand(2);
+ SDValue Test = DAG.getNode(X86ISD::PTEST, dl, MVT::i32, LHS, RHS);
+ SDValue CC = DAG.getConstant(X86CC, MVT::i8);
+ SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
+ return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
+ }
// Fix vector shift instructions where the last operand is a non-immediate
// i32 value.
return SDValue();
unsigned NewIntNo = 0;
- MVT ShAmtVT = MVT::v4i32;
+ EVT ShAmtVT = MVT::v4i32;
switch (IntNo) {
case Intrinsic::x86_sse2_pslli_w:
NewIntNo = Intrinsic::x86_sse2_psll_w;
case Intrinsic::x86_mmx_psrai_d:
NewIntNo = Intrinsic::x86_mmx_psra_d;
break;
- default: abort(); // Can't reach here.
+ default: llvm_unreachable("Impossible intrinsic");
}
break;
}
}
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, VT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShAmtVT, ShAmt));
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setFrameAddressIsTaken(true);
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP;
switch (CC) {
default:
- assert(0 && "Unsupported calling convention");
+ llvm_unreachable("Unsupported calling convention");
case CallingConv::C:
case CallingConv::X86_StdCall: {
// Pass 'nest' parameter in ECX.
InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
if (InRegCount > 2) {
- cerr << "Nest register in use - reduce number of inreg parameters!\n";
- abort();
+ llvm_report_error("Nest register in use - reduce number of inreg parameters!");
}
}
break;
const TargetMachine &TM = MF.getTarget();
const TargetFrameInfo &TFI = *TM.getFrameInfo();
unsigned StackAlignment = TFI.getStackAlignment();
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
// Save FP Control Word to stack slot
}
SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
- MVT VT = Op.getValueType();
- MVT OpVT = VT;
+ EVT VT = Op.getValueType();
+ EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
DebugLoc dl = Op.getDebugLoc();
}
SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
- MVT VT = Op.getValueType();
- MVT OpVT = VT;
+ EVT VT = Op.getValueType();
+ EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
DebugLoc dl = Op.getDebugLoc();
}
SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) {
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply");
DebugLoc dl = Op.getDebugLoc();
DebugLoc dl = Op.getDebugLoc();
switch (Op.getOpcode()) {
- default: assert(0 && "Unknown ovf instruction!");
+ default: llvm_unreachable("Unknown ovf instruction!");
case ISD::SADDO:
// A subtract of one will be selected as an INC. Note that INC doesn't
// set CF, so we can't do this for UADDO.
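// (Editor's note: on x86, INC/DEC update OF/SF/ZF/AF/PF but leave CF
// unchanged, and UADDO's overflow bit is precisely the carry flag, so
// UADDO must keep a full ADD.)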
}
SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) {
- MVT T = Op.getValueType();
+ EVT T = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
unsigned Reg = 0;
unsigned size = 0;
- switch(T.getSimpleVT()) {
+ switch(T.getSimpleVT().SimpleTy) {
default:
assert(false && "Invalid value type!");
case MVT::i8: Reg = X86::AL; size = 1; break;
SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
SDNode *Node = Op.getNode();
DebugLoc dl = Node->getDebugLoc();
- MVT T = Node->getValueType(0);
+ EVT T = Node->getValueType(0);
SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
DAG.getConstant(0, T), Node->getOperand(2));
return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
///
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
- default: assert(0 && "Should not custom lower this!");
+ default: llvm_unreachable("Should not custom lower this!");
case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
- case ISD::CALL: return LowerCALL(Op, DAG);
- case ISD::RET: return LowerRET(Op, DAG);
- case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::VACOPY: return LowerVACOPY(Op, DAG);
void X86TargetLowering::
ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG, unsigned NewOp) {
- MVT T = Node->getValueType(0);
+ EVT T = Node->getValueType(0);
DebugLoc dl = Node->getDebugLoc();
assert (T == MVT::i64 && "Only know how to expand i64 atomics");
FP_TO_INTHelper(SDValue(N, 0), DAG, true);
SDValue FIST = Vals.first, StackSlot = Vals.second;
if (FIST.getNode() != 0) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// Return a load from the stack slot.
Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, NULL, 0));
}
return;
}
case ISD::ATOMIC_CMP_SWAP: {
- MVT T = N->getValueType(0);
+ EVT T = N->getValueType(0);
assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
SDValue cpInL, cpInH;
cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2),
case X86ISD::FLD: return "X86ISD::FLD";
case X86ISD::FST: return "X86ISD::FST";
case X86ISD::CALL: return "X86ISD::CALL";
- case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
case X86ISD::BT: return "X86ISD::BT";
case X86ISD::CMP: return "X86ISD::CMP";
case X86ISD::INC: return "X86ISD::INC";
case X86ISD::DEC: return "X86ISD::DEC";
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
+ case X86ISD::PTEST: return "X86ISD::PTEST";
}
}
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
const Type *Ty) const {
// X86 supports extremely general addressing modes.
+ CodeModel::Model M = getTargetMachine().getCodeModel();
// X86 allows a sign-extended 32-bit immediate field as a displacement.
- if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
+ if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL))
return false;
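// (Editor's note: the displacement field is an imm32 sign-extended to 64
// bits, so e.g. leaq 0x7fffffff(%rax), %rcx encodes, while an offset of
// 1LL << 32 cannot.)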
if (AM.BaseGV) {
- // We can only fold this if we don't need an extra load.
- if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
+ unsigned GVFlags =
+ Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
+
+ // If a reference to this global requires an extra load, we can't fold it.
+ if (isGlobalStubReference(GVFlags))
return false;
- // If BaseGV requires a register, we cannot also have a BaseReg.
- if (Subtarget->GVRequiresRegister(AM.BaseGV, getTargetMachine(), false) &&
- AM.HasBaseReg)
+
+ // If BaseGV requires a register for the PIC base, we cannot also have a
+ // BaseReg specified.
+ if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
return false;
- // X86-64 only supports addr of globals in small code model.
- if (Subtarget->is64Bit()) {
- if (getTargetMachine().getCodeModel() != CodeModel::Small)
- return false;
- // If lower 4G is not available, then we must use rip-relative addressing.
- if (AM.BaseOffs || AM.Scale > 1)
- return false;
- }
+ // If lower 4G is not available, then we must use rip-relative addressing.
+ if (Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
+ return false;
}
switch (AM.Scale) {
return Subtarget->is64Bit() || NumBits1 < 64;
}
-bool X86TargetLowering::isTruncateFree(MVT VT1, MVT VT2) const {
+bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
if (!VT1.isInteger() || !VT2.isInteger())
return false;
unsigned NumBits1 = VT1.getSizeInBits();
bool X86TargetLowering::isZExtFree(const Type *Ty1, const Type *Ty2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
- return Ty1 == Type::Int32Ty && Ty2 == Type::Int64Ty && Subtarget->is64Bit();
+ return Ty1 == Type::getInt32Ty(Ty1->getContext()) &&
+ Ty2 == Type::getInt64Ty(Ty1->getContext()) && Subtarget->is64Bit();
}
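// (Editor's note: e.g. "movl %edi, %eax" already clears bits 63:32 of
// %rax, so the i32 -> i64 zext costs no extra instruction.)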
-bool X86TargetLowering::isZExtFree(MVT VT1, MVT VT2) const {
+bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
}
-bool X86TargetLowering::isNarrowingProfitable(MVT VT1, MVT VT2) const {
+bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
// i16 instructions are longer (0x66 prefix) and potentially slower.
return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}
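// (Editor's note: 16-bit ops in 32/64-bit mode need the 0x66 operand-size
// prefix, e.g. "addw %ax, %bx" encodes one byte longer than
// "addl %eax, %ebx", and partial-register updates can stall.)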
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
- MVT VT) const {
+ EVT VT) const {
// Only do shuffles on 128-bit vector types for now.
if (VT.getSizeInBits() == 64)
return false;
bool
X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
- MVT VT) const {
+ EVT VT) const {
unsigned NumElts = VT.getVectorNumElements();
// FIXME: This collection of masks seems suspect.
if (NumElts == 2)
// Get the X86 opcode to use.
unsigned Opc;
switch (MI->getOpcode()) {
- default: assert(0 && "illegal opcode!");
+ default: llvm_unreachable("illegal opcode!");
case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
}
static bool EltsFromConsecutiveLoads(ShuffleVectorSDNode *N, unsigned NumElems,
- MVT EVT, LoadSDNode *&LDBase,
+ EVT EltVT, LoadSDNode *&LDBase,
unsigned &LastLoadedElt,
SelectionDAG &DAG, MachineFrameInfo *MFI,
const TargetLowering &TLI) {
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
const TargetLowering &TLI) {
DebugLoc dl = N->getDebugLoc();
- MVT VT = N->getValueType(0);
- MVT EVT = VT.getVectorElementType();
+ EVT VT = N->getValueType(0);
+ EVT EltVT = VT.getVectorElementType();
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
unsigned NumElems = VT.getVectorNumElements();
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
return SDValue();
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
if (VT != MVT::i64)
return SDValue();
if (!Subtarget->hasSSE2())
return SDValue();
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
return SDValue();
SDValue ShAmtOp = N->getOperand(1);
- MVT EltVT = VT.getVectorElementType();
+ EVT EltVT = VT.getVectorElementType();
DebugLoc DL = N->getDebugLoc();
SDValue BaseShAmt;
if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) {
SDValue ValOp = N->getOperand(0);
switch (N->getOpcode()) {
default:
- assert(0 && "Unknown shift opcode!");
+ llvm_unreachable("Unknown shift opcode!");
break;
case ISD::SHL:
if (VT == MVT::v2i64)
// Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
StoreSDNode *St = cast<StoreSDNode>(N);
- MVT VT = St->getValue().getValueType();
+ EVT VT = St->getValue().getValueType();
if (VT.getSizeInBits() != 64)
return SDValue();
// Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
// pair instead.
if (Subtarget->is64Bit() || F64IsLegal) {
- MVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
+ EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(),
Ld->getBasePtr(), Ld->getSrcValue(),
Ld->getSrcValueOffset(), Ld->isVolatile(),
SDValue Op = N->getOperand(0);
if (Op.getOpcode() == ISD::BIT_CONVERT)
Op = Op.getOperand(0);
- MVT VT = N->getValueType(0), OpVT = Op.getValueType();
+ EVT VT = N->getValueType(0), OpVT = Op.getValueType();
if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
VT.getVectorElementType().getSizeInBits() ==
OpVT.getVectorElementType().getSizeInBits()) {
// X86 Inline Assembly Support
//===----------------------------------------------------------------------===//
+static bool LowerToBSwap(CallInst *CI) {
+ // FIXME: this should verify that we are targeting a 486 or better. If not,
+ // we will turn this bswap into something that will be lowered to logical ops
+ // instead of emitting the bswap asm. For now, we don't support 486 or lower
+ // so don't worry about this.
+
+ // Verify this is a simple bswap.
+ if (CI->getNumOperands() != 2 ||
+ CI->getType() != CI->getOperand(1)->getType() ||
+ !CI->getType()->isInteger())
+ return false;
+
+ const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ if (!Ty || Ty->getBitWidth() % 16 != 0)
+ return false;
+
+ // Okay, we can do this xform, do so now.
+ const Type *Tys[] = { Ty };
+ Module *M = CI->getParent()->getParent()->getParent();
+ Constant *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
+
+ Value *Op = CI->getOperand(1);
+ Op = CallInst::Create(Int, Op, CI->getName(), CI);
+
+ CI->replaceAllUsesWith(Op);
+ CI->eraseFromParent();
+ return true;
+}
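+ // (Editor's sketch of the transform, hypothetical IR:
+ // %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
+ // becomes
+ // %r = call i32 @llvm.bswap.i32(i32 %x)
+ // which the backend then selects back to a BSWAP instruction.)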
+
+bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
+ InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
+ std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
+
+ std::string AsmStr = IA->getAsmString();
+
+ // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
+ std::vector<std::string> AsmPieces;
+ SplitString(AsmStr, AsmPieces, "\n"); // TODO: should ';' also be a separator?
+
+ switch (AsmPieces.size()) {
+ default: return false;
+ case 1:
+ AsmStr = AsmPieces[0];
+ AsmPieces.clear();
+ SplitString(AsmStr, AsmPieces, " \t"); // Split with whitespace.
+
+ // bswap $0
+ if (AsmPieces.size() == 2 &&
+ (AsmPieces[0] == "bswap" ||
+ AsmPieces[0] == "bswapq" ||
+ AsmPieces[0] == "bswapl") &&
+ (AsmPieces[1] == "$0" ||
+ AsmPieces[1] == "${0:q}")) {
+ // No need to check constraints, nothing other than the equivalent of
+ // "=r,0" would be valid here.
+ return LowerToBSwap(CI);
+ }
+ // rorw $$8, ${0:w} --> llvm.bswap.i16
+ if (CI->getType() == Type::getInt16Ty(CI->getContext()) &&
+ AsmPieces.size() == 3 &&
+ AsmPieces[0] == "rorw" &&
+ AsmPieces[1] == "$$8," &&
+ AsmPieces[2] == "${0:w}" &&
+ IA->getConstraintString() == "=r,0,~{dirflag},~{fpsr},~{flags},~{cc}") {
+ return LowerToBSwap(CI);
+ }
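+ // (Editor's note: rotating a 16-bit register by 8 swaps its two bytes, so
+ // "rorw $8, %ax" computes exactly llvm.bswap.i16.)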
+ break;
+ case 3:
+ if (CI->getType() == Type::getInt64Ty(CI->getContext()) &&
+ Constraints.size() >= 2 &&
+ Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
+ Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
+ // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
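+ // (Editor's note: an i64 bound to constraint "A" lives in EDX:EAX on
+ // x86-32; byte-swapping each half and then exchanging the halves swaps
+ // all eight bytes, i.e. llvm.bswap.i64.)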
+ std::vector<std::string> Words;
+ SplitString(AsmPieces[0], Words, " \t");
+ if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%eax") {
+ Words.clear();
+ SplitString(AsmPieces[1], Words, " \t");
+ if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%edx") {
+ Words.clear();
+ SplitString(AsmPieces[2], Words, " \t,");
+ if (Words.size() == 3 && Words[0] == "xchgl" && Words[1] == "%eax" &&
+ Words[2] == "%edx") {
+ return LowerToBSwap(CI);
+ }
+ }
+ }
+ }
+ break;
+ }
+ return false;
+}
+
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
-LowerXConstraint(MVT ConstraintVT) const {
+LowerXConstraint(EVT ConstraintVT) const {
// FP X constraints get lowered to SSE1/2 registers if available, otherwise
// 'f' like normal targets.
if (ConstraintVT.isFloatingPoint()) {
// 32-bit signed value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
const ConstantInt *CI = C->getConstantIntValue();
- if (CI->isValueValidForType(Type::Int32Ty, C->getSExtValue())) {
+ if (CI->isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
+ C->getSExtValue())) {
// Widen to 64 bits here to get it sign extended.
Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
break;
// 32-bit unsigned value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
const ConstantInt *CI = C->getConstantIntValue();
- if (CI->isValueValidForType(Type::Int32Ty, C->getZExtValue())) {
+ if (CI->isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
+ C->getZExtValue())) {
Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
break;
}
// Otherwise, this isn't something we can handle, reject it.
return;
}
+
+ GlobalValue *GV = GA->getGlobal();
// If we require an extra load to get this address, as in PIC mode, we
// can't accept it.
- if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(),
- getTargetMachine(), false))
+ if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
+ getTargetMachine())))
return;
if (hasMemory)
- Op = LowerGlobalAddress(GA->getGlobal(), Op.getDebugLoc(), Offset, DAG);
+ Op = LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
else
- Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
- Offset);
+ Op = DAG.getTargetGlobalAddress(GV, GA->getValueType(0), Offset);
Result = Op;
break;
}
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const {
+ EVT VT) const {
if (Constraint.size() == 1) {
// FIXME: not handling fp-stack yet!
switch (Constraint[0]) { // GCC X86 Constraint Letters
default: break; // Unknown constraint letter
- case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode)
+ case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
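+ // (Editor's note: in 32-bit mode 'q' is limited to registers with an
+ // addressable low byte, EAX/EBX/ECX/EDX; in 64-bit mode every GPR has an
+ // 8-bit subregister, so 'q' widens to the full register set below.)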
+ if (Subtarget->is64Bit()) {
+ if (VT == MVT::i32)
+ return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
+ X86::ESI, X86::EDI, X86::R8D, X86::R9D,
+ X86::R10D,X86::R11D,X86::R12D,
+ X86::R13D,X86::R14D,X86::R15D,
+ X86::EBP, X86::ESP, 0);
+ else if (VT == MVT::i16)
+ return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
+ X86::SI, X86::DI, X86::R8W,X86::R9W,
+ X86::R10W,X86::R11W,X86::R12W,
+ X86::R13W,X86::R14W,X86::R15W,
+ X86::BP, X86::SP, 0);
+ else if (VT == MVT::i8)
+ return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL,
+ X86::SIL, X86::DIL, X86::R8B,X86::R9B,
+ X86::R10B,X86::R11B,X86::R12B,
+ X86::R13B,X86::R14B,X86::R15B,
+ X86::BPL, X86::SPL, 0);
+
+ else if (VT == MVT::i64)
+ return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX,
+ X86::RSI, X86::RDI, X86::R8, X86::R9,
+ X86::R10, X86::R11, X86::R12,
+ X86::R13, X86::R14, X86::R15,
+ X86::RBP, X86::RSP, 0);
+
+ break;
+ }
+ // 32-bit fallthrough
case 'Q': // Q_REGS
if (VT == MVT::i32)
return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const {
+ EVT VT) const {
// First, see if this is a constraint that directly corresponds to an LLVM
// register class.
if (Constraint.size() == 1) {
case 'x': // SSE_REGS if SSE1 allowed
if (!Subtarget->hasSSE1()) break;
- switch (VT.getSimpleVT()) {
+ switch (VT.getSimpleVT().SimpleTy) {
default: break;
// Scalar SSE types.
case MVT::f32:
// 'A' means EAX + EDX.
if (Constraint == "A") {
Res.first = X86::EAX;
- Res.second = X86::GRADRegisterClass;
+ Res.second = X86::GR32_ADRegisterClass;
}
return Res;
}
/// When and where to widen is target dependent based on the cost of
/// scalarizing vs using the wider vector type.
-MVT X86TargetLowering::getWidenVectorType(MVT VT) const {
+EVT X86TargetLowering::getWidenVectorType(EVT VT) const {
assert(VT.isVector());
if (isTypeLegal(VT))
return VT;
// type based on element type. This would speed up our search (though
// it may not be worth it since the size of the list is relatively
// small).
- MVT EltVT = VT.getVectorElementType();
+ EVT EltVT = VT.getVectorElementType();
unsigned NElts = VT.getVectorNumElements();
// On X86, it makes sense to widen any vector wider than 1
for (unsigned nVT = MVT::FIRST_VECTOR_VALUETYPE;
nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
- MVT SVT = (MVT::SimpleValueType)nVT;
+ EVT SVT = (MVT::SimpleValueType)nVT;
if (isTypeLegal(SVT) &&
SVT.getVectorElementType() == EltVT &&