if (Subtarget->is64Bit())
addRegisterClass(MVT::i64, X86::GR64RegisterClass);
- setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
// We don't accept any truncstore of integer registers.
setTruncStoreAction(MVT::i64, MVT::i32, Expand);
setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
setTruncStoreAction(MVT::i32, MVT::i16, Expand);
setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
- setTruncStoreAction(MVT::i16, MVT::i8, Expand);
+ setTruncStoreAction(MVT::i16, MVT::i8, Expand);
+
+ // SETOEQ and SETUNE require checking two conditions.
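+ // Expanding them lets the legalizer synthesize the two-flag sequence
+ // (COND_NP && COND_E for SETOEQ, COND_P || COND_NE for SETUNE) that
+ // LowerSETCC previously emitted by hand; for an SSE2 double this is roughly
+ //   ucomisd ; setnp %al ; sete %cl ; andb %cl, %al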
+ setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
+ setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
// Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation.
setOperationAction(ISD::ATOMIC_CMP_SWAP_32, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP_64, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB_8, MVT::i8, Expand);
- setOperationAction(ISD::ATOMIC_LOAD_SUB_16, MVT::i16, Expand);
- setOperationAction(ISD::ATOMIC_LOAD_SUB_32, MVT::i32, Expand);
- setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Expand);
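+ // These are now custom-lowered: LowerLOAD_SUB rewrites them as atomic adds
+ // of the negated value (the i64 case on a 32-bit host instead goes through
+ // the cmpxchg8b path below).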
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_8 , MVT::i8, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_16, MVT::i16, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_32, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Custom);
+
+ if (!Subtarget->is64Bit()) {
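+ // On 32-bit targets these i64 atomics are custom-lowered and ultimately
+ // expanded into CMPXCHG8B loops (see EmitAtomicBit6432WithCustomInserter).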
+ setOperationAction(ISD::ATOMIC_LOAD_ADD_64, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_AND_64, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_OR_64, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_XOR_64, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_NAND_64, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_SWAP_64, MVT::i64, Custom);
+ }
// Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::UNDEF, MVT::f80, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
{
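+ // APFloat::convert now reports via an out-parameter whether the conversion
+ // lost information; that result is not needed for these exact constants.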
+ bool ignored;
APFloat TmpFlt(+0.0);
- TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
+ TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
+ &ignored);
addLegalFPImmediate(TmpFlt); // FLD0
TmpFlt.changeSign();
addLegalFPImmediate(TmpFlt); // FLD0/FCHS
APFloat TmpFlt2(+1.0);
- TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
+ TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
+ &ignored);
addLegalFPImmediate(TmpFlt2); // FLD1
TmpFlt2.changeSign();
addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
}
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
SDValue RetAddrFrIdx;
// Load return address for tail calls.
// non-JIT mode.
if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
getTargetMachine(), true))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy(),
+ G->getOffset());
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
} else if (IsTailCall) {
if (IsTailCall) {
Ops.push_back(Chain);
- Ops.push_back(DAG.getIntPtrConstant(NumBytes));
- Ops.push_back(DAG.getIntPtrConstant(0));
+ Ops.push_back(DAG.getIntPtrConstant(NumBytes, true));
+ Ops.push_back(DAG.getIntPtrConstant(0, true));
if (InFlag.getNode())
Ops.push_back(InFlag);
Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
// Returns a flag for retval copy to use.
Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(NumBytes),
- DAG.getIntPtrConstant(NumBytesForCalleeToPush),
+ DAG.getIntPtrConstant(NumBytes, true),
+ DAG.getIntPtrConstant(NumBytesForCalleeToPush,
+ true),
InFlag);
InFlag = Chain.getValue(1);
DenseMap<const Value *, unsigned> &vm,
DenseMap<const BasicBlock *,
MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am) {
-
- return X86::createFastISel(mf, mmo, vm, bm, am);
+ DenseMap<const AllocaInst *, int> &am
+#ifndef NDEBUG
+ , SmallSet<Instruction*, 8> &cil
+#endif
+ ) {
+ return X86::createFastISel(mf, mmo, vm, bm, am
+#ifndef NDEBUG
+ , cil
+#endif
+ );
}
else if (isIdentityMask(PermMask.getNode(), true))
return V2;
+ // Canonicalize movddup shuffles.
+ if (V2IsUndef && Subtarget->hasSSE2() &&
+ VT.getSizeInBits() == 128 &&
+ X86::isMOVDDUPMask(PermMask.getNode()))
+ return CanonicalizeMovddup(Op, V1, PermMask, DAG, Subtarget->hasSSE3());
+
if (isSplatMask(PermMask.getNode())) {
if (isMMX || NumElems < 4) return Op;
// Promote it to a v4{if}32 splat.
return PromoteSplat(Op, DAG, Subtarget->hasSSE2());
}
- // Canonicalize movddup shuffles.
- if (V2IsUndef && Subtarget->hasSSE2() &&
- X86::isMOVDDUPMask(PermMask.getNode()))
- return CanonicalizeMovddup(Op, V1, PermMask, DAG, Subtarget->hasSSE3());
-
// If the shuffle can be profitably rewritten as a narrower shuffle, then
// do it!
if (VT == MVT::v8i16 || VT == MVT::v16i8) {
SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV,
+ int64_t Offset,
SelectionDAG &DAG) const {
- SDValue Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
+ bool IsPic = getTargetMachine().getRelocationModel() == Reloc::PIC_;
+ bool ExtraLoadRequired =
+ Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false);
+
+ // Create the TargetGlobalAddress node, folding in the constant
+ // offset if it is legal.
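+ // (e.g. a reference to GV at byte offset 16 becomes a single
+ // TargetGlobalAddress with Offset==16 rather than an address plus an ADD).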
+ SDValue Result;
+ if (!IsPic && !ExtraLoadRequired) {
+ Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
+ Offset = 0;
+ } else
+ Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), 0);
Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
+
// With PIC, the address is actually $g + Offset.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
- !Subtarget->isPICStyleRIPRel()) {
+ if (IsPic && !Subtarget->isPICStyleRIPRel()) {
Result = DAG.getNode(ISD::ADD, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
Result);
// the GlobalAddress must be in the base or index register of the address, not
// the GV offset field. The platform check is inside the GVRequiresExtraLoad() call.
// The same applies for external symbols during PIC codegen.
- if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false))
+ if (ExtraLoadRequired)
Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result,
PseudoSourceValue::getGOT(), 0);
+ // If there was a non-zero offset that we didn't fold, create an explicit
+ // addition for it.
+ if (Offset != 0)
+ Result = DAG.getNode(ISD::ADD, getPointerTy(), Result,
+ DAG.getConstant(Offset, getPointerTy()));
+
return Result;
}
SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) {
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- return LowerGlobalAddress(GV, DAG);
+ int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
+ return LowerGlobalAddress(GV, Offset, DAG);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
- ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
unsigned X86CC;
DAG.getConstant(X86CC, MVT::i8), Cond);
}
- assert(isFP && "Illegal integer SetCC!");
-
- Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
- switch (SetCCOpcode) {
- default: assert(false && "Illegal floating point SetCC!");
- case ISD::SETOEQ: { // !PF & ZF
- SDValue Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8,
- DAG.getConstant(X86::COND_NP, MVT::i8), Cond);
- SDValue Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
- DAG.getConstant(X86::COND_E, MVT::i8), Cond);
- return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
- }
- case ISD::SETUNE: { // PF | !ZF
- SDValue Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8,
- DAG.getConstant(X86::COND_P, MVT::i8), Cond);
- SDValue Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
- DAG.getConstant(X86::COND_NE, MVT::i8), Cond);
- return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
- }
- }
+ assert(0 && "Illegal SetCC!");
+ return SDValue();
}
SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
bool IllegalFPCMov = false;
if (VT.isFloatingPoint() && !VT.isVector() &&
!isScalarFPTypeInSSEReg(VT)) // FPStack?
- IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
+ IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
if ((Opc == X86ISD::CMP ||
Opc == X86ISD::COMI ||
MVT IntPtr = getPointerTy();
MVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));
Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
Flag = Chain.getValue(1);
Flag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(0),
- DAG.getIntPtrConstant(0),
+ DAG.getIntPtrConstant(0, true),
+ DAG.getIntPtrConstant(0, true),
Flag);
Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);
SDValue
X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- const Value *DstSV, uint64_t DstSVOff) {
+ SDValue Chain,
+ SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align,
+ const Value *DstSV,
+ uint64_t DstSVOff) {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
- /// If not DWORD aligned or size is more than the threshold, call the library.
- /// The libc version is likely to be faster for these cases. It can use the
- /// address value and run time information about the CPU.
+ // If not DWORD aligned or size is more than the threshold, call the library.
+ // The libc version is likely to be faster for these cases. It can use the
+ // address value and run time information about the CPU.
if ((Align & 3) != 0 ||
!ConstantSize ||
ConstantSize->getZExtValue() >
// Check to see if there is a specialized entry-point for memory zeroing.
ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
- if (const char *bzeroEntry =
- V && V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
+
+ if (const char *bzeroEntry = V &&
+ V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
MVT IntPtr = getPointerTy();
const Type *IntPtrTy = TD->getIntPtrType();
TargetLowering::ArgListTy Args;
Entry.Node = Size;
Args.push_back(Entry);
std::pair<SDValue,SDValue> CallResult =
- LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C,
- false, DAG.getExternalSymbol(bzeroEntry, IntPtr),
- Args, DAG);
+ LowerCallTo(Chain, Type::VoidTy, false, false, false, false,
+ CallingConv::C, false,
+ DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG);
return CallResult.second;
}
}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
- // Depths > 0 not supported yet!
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
- return SDValue();
-
- SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
- return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
- DAG.getIntPtrConstant(TD->getPointerSize()));
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+ MVT VT = Op.getValueType();
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP;
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), FrameReg, VT);
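+ // Since frame pointers are in use here, each frame stores the caller's
+ // frame pointer at offset 0, so non-zero depths are handled by chasing that
+ // chain of saved EBP/RBP values.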
+ while (Depth--)
+ FrameAddr = DAG.getLoad(VT, DAG.getEntryNode(), FrameAddr, NULL, 0);
+ return FrameAddr;
}
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
// Check that ECX wasn't needed by an 'inreg' parameter.
const FunctionType *FTy = Func->getFunctionType();
- const PAListPtr &Attrs = Func->getParamAttrs();
+ const AttrListPtr &Attrs = Func->getAttributes();
if (!Attrs.isEmpty() && !Func->isVarArg()) {
unsigned InRegCount = 0;
for (FunctionType::param_iterator I = FTy->param_begin(),
E = FTy->param_end(); I != E; ++I, ++Idx)
- if (Attrs.paramHasAttr(Idx, ParamAttr::InReg))
+ if (Attrs.paramHasAttr(Idx, Attribute::InReg))
// FIXME: should only count parameters that are lowered to integers.
InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
case MVT::i64:
if (Subtarget->is64Bit()) {
Reg = X86::RAX; size = 8;
- } else //Should go away when LowerType stuff lands
+ } else //Should go away when LegalizeType stuff lands
return SDValue(ExpandATOMIC_CMP_SWAP(Op.getNode(), DAG), 0);
break;
};
return DAG.getMergeValues(Vals, 2).getNode();
}
-SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op,
- SelectionDAG &DAG) {
- MVT T = Op->getValueType(0);
+SDValue X86TargetLowering::LowerATOMIC_BINARY_64(SDValue Op,
+ SelectionDAG &DAG,
+ unsigned NewOp) {
+ SDNode *Node = Op.getNode();
+ MVT T = Node->getValueType(0);
+ assert (T == MVT::i64 && "Only know how to expand i64 atomics");
+
+ SDValue Chain = Node->getOperand(0);
+ SDValue In1 = Node->getOperand(1);
+ SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
+ Node->getOperand(2), DAG.getIntPtrConstant(0));
+ SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
+ Node->getOperand(2), DAG.getIntPtrConstant(1));
+ // This is a generalized SDNode, not an AtomicSDNode, so it doesn't
+ // have a MemOperand. Pass the info through as a normal operand.
+ SDValue LSI = DAG.getMemOperand(cast<MemSDNode>(Node)->getMemOperand());
+ SDValue Ops[] = { Chain, In1, In2L, In2H, LSI };
+ SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
+ SDValue Result = DAG.getNode(NewOp, Tys, Ops, 5);
+ SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)};
+ SDValue ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2);
+ SDValue Vals[2] = { ResultVal, Result.getValue(2) };
+ return SDValue(DAG.getMergeValues(Vals, 2).getNode(), 0);
+}
+
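+// Atomic subtract has no dedicated lowering; it is rewritten here as an
+// atomic load-add of the negated operand, so the existing atomic add
+// lowering handles it.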
+SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
+ SDNode *Node = Op.getNode();
+ MVT T = Node->getValueType(0);
SDValue negOp = DAG.getNode(ISD::SUB, T,
- DAG.getConstant(0, T), Op->getOperand(2));
- return DAG.getAtomic((T==MVT::i8 ? ISD::ATOMIC_LOAD_ADD_8:
- T==MVT::i16 ? ISD::ATOMIC_LOAD_ADD_16:
- T==MVT::i32 ? ISD::ATOMIC_LOAD_ADD_32:
- T==MVT::i64 ? ISD::ATOMIC_LOAD_ADD_64: 0),
- Op->getOperand(0), Op->getOperand(1), negOp,
- cast<AtomicSDNode>(Op)->getSrcValue(),
- cast<AtomicSDNode>(Op)->getAlignment()).getNode();
+ DAG.getConstant(0, T), Node->getOperand(2));
+ return DAG.getAtomic((Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_8 ?
+ ISD::ATOMIC_LOAD_ADD_8 :
+ Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_16 ?
+ ISD::ATOMIC_LOAD_ADD_16 :
+ Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_32 ?
+ ISD::ATOMIC_LOAD_ADD_32 :
+ ISD::ATOMIC_LOAD_ADD_64),
+ Node->getOperand(0),
+ Node->getOperand(1), negOp,
+ cast<AtomicSDNode>(Node)->getSrcValue(),
+ cast<AtomicSDNode>(Node)->getAlignment());
}
/// LowerOperation - Provide custom lowering hooks for some operations.
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
default: assert(0 && "Should not custom lower this!");
- case ISD::ATOMIC_CMP_SWAP_8: return LowerCMP_SWAP(Op,DAG);
- case ISD::ATOMIC_CMP_SWAP_16: return LowerCMP_SWAP(Op,DAG);
- case ISD::ATOMIC_CMP_SWAP_32: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP_8:
+ case ISD::ATOMIC_CMP_SWAP_16:
+ case ISD::ATOMIC_CMP_SWAP_32:
case ISD::ATOMIC_CMP_SWAP_64: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_LOAD_SUB_8:
+ case ISD::ATOMIC_LOAD_SUB_16:
+ case ISD::ATOMIC_LOAD_SUB_32: return LowerLOAD_SUB(Op,DAG);
+ case ISD::ATOMIC_LOAD_SUB_64: return (Subtarget->is64Bit()) ?
+ LowerLOAD_SUB(Op,DAG) :
+ LowerATOMIC_BINARY_64(Op,DAG,
+ X86ISD::ATOMSUB64_DAG);
+ case ISD::ATOMIC_LOAD_AND_64: return LowerATOMIC_BINARY_64(Op,DAG,
+ X86ISD::ATOMAND64_DAG);
+ case ISD::ATOMIC_LOAD_OR_64: return LowerATOMIC_BINARY_64(Op, DAG,
+ X86ISD::ATOMOR64_DAG);
+ case ISD::ATOMIC_LOAD_XOR_64: return LowerATOMIC_BINARY_64(Op,DAG,
+ X86ISD::ATOMXOR64_DAG);
+ case ISD::ATOMIC_LOAD_NAND_64:return LowerATOMIC_BINARY_64(Op,DAG,
+ X86ISD::ATOMNAND64_DAG);
+ case ISD::ATOMIC_LOAD_ADD_64: return LowerATOMIC_BINARY_64(Op,DAG,
+ X86ISD::ATOMADD64_DAG);
+ case ISD::ATOMIC_SWAP_64: return LowerATOMIC_BINARY_64(Op,DAG,
+ X86ISD::ATOMSWAP64_DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
/// with a new node built out of custom code.
SDNode *X86TargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
switch (N->getOpcode()) {
- default: assert(0 && "Should not custom lower this!");
+ default:
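+ // Anything not listed below is forwarded to the ordinary custom-lowering
+ // hook, so node results that need replacing (e.g. the 64-bit atomics on a
+ // 32-bit host) reuse the same lowering code.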
+ return X86TargetLowering::LowerOperation(SDValue (N, 0), DAG).getNode();
case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG);
case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
case ISD::ATOMIC_CMP_SWAP_64: return ExpandATOMIC_CMP_SWAP(N, DAG);
- case ISD::ATOMIC_LOAD_SUB_8: return ExpandATOMIC_LOAD_SUB(N,DAG);
- case ISD::ATOMIC_LOAD_SUB_16: return ExpandATOMIC_LOAD_SUB(N,DAG);
- case ISD::ATOMIC_LOAD_SUB_32: return ExpandATOMIC_LOAD_SUB(N,DAG);
- case ISD::ATOMIC_LOAD_SUB_64: return ExpandATOMIC_LOAD_SUB(N,DAG);
}
}
case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
+ case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG";
+ case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG";
+ case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG";
+ case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG";
+ case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG";
+ case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG";
case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
case X86ISD::VSHL: return "X86ISD::VSHL";
tt = t1;
unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
- assert((argOpers[valArgIndx]->isRegister() ||
- argOpers[valArgIndx]->isImmediate()) &&
+ assert((argOpers[valArgIndx]->isReg() ||
+ argOpers[valArgIndx]->isImm()) &&
"invalid operand");
- if (argOpers[valArgIndx]->isRegister())
+ if (argOpers[valArgIndx]->isReg())
MIB = BuildMI(newMBB, TII->get(regOpc), t2);
else
MIB = BuildMI(newMBB, TII->get(immOpc), t2);
return nextMBB;
}
+// private utility function: 64-bit atomics on a 32-bit host.
+MachineBasicBlock *
+X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
+ MachineBasicBlock *MBB,
+ unsigned regOpcL,
+ unsigned regOpcH,
+ unsigned immOpcL,
+ unsigned immOpcH,
+ bool invSrc) {
+ // For the atomic bitwise operator, we generate
+ // thisMBB (instructions are in pairs, except cmpxchg8b)
+ // ld t1,t2 = [bitinstr.addr]
+ // newMBB:
+ // out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4)
+ // op t5, t6 <- out1, out2, [bitinstr.val]
+ // (for SWAP, substitute: mov t5, t6 <- [bitinstr.val])
+ // mov ECX, EBX <- t5, t6
+ // mov EAX, EDX <- t1, t2
+ // cmpxchg8b [bitinstr.addr] [EAX, EDX, EBX, ECX implicit]
+ // mov t3, t4 <- EAX, EDX
+ // bz newMBB
+ // result in out1, out2
+ // fallthrough -->nextMBB
+
+ const TargetRegisterClass *RC = X86::GR32RegisterClass;
+ const unsigned LoadOpc = X86::MOV32rm;
+ const unsigned copyOpc = X86::MOV32rr;
+ const unsigned NotOpc = X86::NOT32r;
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const BasicBlock *LLVM_BB = MBB->getBasicBlock();
+ MachineFunction::iterator MBBIter = MBB;
+ ++MBBIter;
+
+ // First build the CFG
+ MachineFunction *F = MBB->getParent();
+ MachineBasicBlock *thisMBB = MBB;
+ MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(MBBIter, newMBB);
+ F->insert(MBBIter, nextMBB);
+
+ // Move all successors of thisMBB to nextMBB
+ nextMBB->transferSuccessors(thisMBB);
+
+ // Update thisMBB to fall through to newMBB
+ thisMBB->addSuccessor(newMBB);
+
+ // newMBB jumps to itself and falls through to nextMBB
+ newMBB->addSuccessor(nextMBB);
+ newMBB->addSuccessor(newMBB);
+
+ // Insert instructions into newMBB based on incoming instruction
+ // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
+ assert(bInstr->getNumOperands() < 18 && "unexpected number of operands");
+ MachineOperand& dest1Oper = bInstr->getOperand(0);
+ MachineOperand& dest2Oper = bInstr->getOperand(1);
+ MachineOperand* argOpers[6];
+ for (int i=0; i < 6; ++i)
+ argOpers[i] = &bInstr->getOperand(i+2);
+
+ // x86 address has 4 operands: base, index, scale, and displacement
+ int lastAddrIndx = 3; // [0,3]
+
+ unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
+ MachineInstrBuilder MIB = BuildMI(thisMBB, TII->get(LoadOpc), t1);
+ for (int i=0; i <= lastAddrIndx; ++i)
+ (*MIB).addOperand(*argOpers[i]);
+ unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
+ MIB = BuildMI(thisMBB, TII->get(LoadOpc), t2);
+ // Load the high 32 bits: reuse the base, index, and scale operands and
+ // add 4 to the displacement.
+ for (int i=0; i <= lastAddrIndx-1; ++i)
+ (*MIB).addOperand(*argOpers[i]);
+ MachineOperand newOp3 = *(argOpers[3]);
+ if (newOp3.isImm())
+ newOp3.setImm(newOp3.getImm()+4);
+ else
+ newOp3.setOffset(newOp3.getOffset()+4);
+ (*MIB).addOperand(newOp3);
+
+ // t3/4 are defined later, at the bottom of the loop
+ unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
+ unsigned t4 = F->getRegInfo().createVirtualRegister(RC);
+ BuildMI(newMBB, TII->get(X86::PHI), dest1Oper.getReg())
+ .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB);
+ BuildMI(newMBB, TII->get(X86::PHI), dest2Oper.getReg())
+ .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB);
+
+ unsigned tt1 = F->getRegInfo().createVirtualRegister(RC);
+ unsigned tt2 = F->getRegInfo().createVirtualRegister(RC);
+ if (invSrc) {
+ MIB = BuildMI(newMBB, TII->get(NotOpc), tt1).addReg(t1);
+ MIB = BuildMI(newMBB, TII->get(NotOpc), tt2).addReg(t2);
+ } else {
+ tt1 = t1;
+ tt2 = t2;
+ }
+
+ assert((argOpers[4]->isReg() || argOpers[4]->isImm()) &&
+ "invalid operand");
+ unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
+ unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
+ if (argOpers[4]->isReg())
+ MIB = BuildMI(newMBB, TII->get(regOpcL), t5);
+ else
+ MIB = BuildMI(newMBB, TII->get(immOpcL), t5);
+ if (regOpcL != X86::MOV32rr)
+ MIB.addReg(tt1);
+ (*MIB).addOperand(*argOpers[4]);
+ assert(argOpers[5]->isReg() == argOpers[4]->isReg());
+ assert(argOpers[5]->isImm() == argOpers[4]->isImm());
+ if (argOpers[5]->isReg())
+ MIB = BuildMI(newMBB, TII->get(regOpcH), t6);
+ else
+ MIB = BuildMI(newMBB, TII->get(immOpcH), t6);
+ if (regOpcH != X86::MOV32rr)
+ MIB.addReg(tt2);
+ (*MIB).addOperand(*argOpers[5]);
+
+ MIB = BuildMI(newMBB, TII->get(copyOpc), X86::EAX);
+ MIB.addReg(t1);
+ MIB = BuildMI(newMBB, TII->get(copyOpc), X86::EDX);
+ MIB.addReg(t2);
+
+ MIB = BuildMI(newMBB, TII->get(copyOpc), X86::EBX);
+ MIB.addReg(t5);
+ MIB = BuildMI(newMBB, TII->get(copyOpc), X86::ECX);
+ MIB.addReg(t6);
+
+ MIB = BuildMI(newMBB, TII->get(X86::LCMPXCHG8B));
+ for (int i=0; i <= lastAddrIndx; ++i)
+ (*MIB).addOperand(*argOpers[i]);
+
+ assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
+ (*MIB).addMemOperand(*F, *bInstr->memoperands_begin());
+
+ MIB = BuildMI(newMBB, TII->get(copyOpc), t3);
+ MIB.addReg(X86::EAX);
+ MIB = BuildMI(newMBB, TII->get(copyOpc), t4);
+ MIB.addReg(X86::EDX);
+
+ // insert branch
+ BuildMI(newMBB, TII->get(X86::JNE)).addMBB(newMBB);
+
+ F->DeleteMachineInstr(bInstr); // The pseudo instruction is gone now.
+ return nextMBB;
+}
+
// private utility function
MachineBasicBlock *
X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
(*MIB).addOperand(*argOpers[i]);
// We only support register and immediate values
- assert((argOpers[valArgIndx]->isRegister() ||
- argOpers[valArgIndx]->isImmediate()) &&
+ assert((argOpers[valArgIndx]->isReg() ||
+ argOpers[valArgIndx]->isImm()) &&
"invalid operand");
unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
- if (argOpers[valArgIndx]->isRegister())
+ if (argOpers[valArgIndx]->isReg())
MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), t2);
else
MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), t2);
X86AddressMode AM;
MachineOperand &Op = MI->getOperand(0);
- if (Op.isRegister()) {
+ if (Op.isReg()) {
AM.BaseType = X86AddressMode::RegBase;
AM.Base.Reg = Op.getReg();
} else {
AM.Base.FrameIndex = Op.getIndex();
}
Op = MI->getOperand(1);
- if (Op.isImmediate())
+ if (Op.isImm())
AM.Scale = Op.getImm();
Op = MI->getOperand(2);
- if (Op.isImmediate())
+ if (Op.isImm())
AM.IndexReg = Op.getImm();
Op = MI->getOperand(3);
- if (Op.isGlobalAddress()) {
+ if (Op.isGlobal()) {
AM.GV = Op.getGlobal();
} else {
AM.Disp = Op.getImm();
X86::NOT8r, X86::AL,
X86::GR8RegisterClass, true);
// FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
+ // This group is for 64-bit atomics on a 64-bit host.
case X86::ATOMAND64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
X86::AND64ri32, X86::MOV64rm,
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
case X86::ATOMUMAX64:
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);
+
+ // This group does 64-bit operations on a 32-bit host.
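+ // Each case supplies a low-half and a high-half 32-bit opcode for the
+ // cmpxchg8b loop; add/sub use ADC/SBB for the high half so the carry or
+ // borrow propagates, and NAND reuses AND with the inverted-source flag.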
+ case X86::ATOMAND6432:
+ return EmitAtomicBit6432WithCustomInserter(MI, BB,
+ X86::AND32rr, X86::AND32rr,
+ X86::AND32ri, X86::AND32ri,
+ false);
+ case X86::ATOMOR6432:
+ return EmitAtomicBit6432WithCustomInserter(MI, BB,
+ X86::OR32rr, X86::OR32rr,
+ X86::OR32ri, X86::OR32ri,
+ false);
+ case X86::ATOMXOR6432:
+ return EmitAtomicBit6432WithCustomInserter(MI, BB,
+ X86::XOR32rr, X86::XOR32rr,
+ X86::XOR32ri, X86::XOR32ri,
+ false);
+ case X86::ATOMNAND6432:
+ return EmitAtomicBit6432WithCustomInserter(MI, BB,
+ X86::AND32rr, X86::AND32rr,
+ X86::AND32ri, X86::AND32ri,
+ true);
+ case X86::ATOMADD6432:
+ return EmitAtomicBit6432WithCustomInserter(MI, BB,
+ X86::ADD32rr, X86::ADC32rr,
+ X86::ADD32ri, X86::ADC32ri,
+ false);
+ case X86::ATOMSUB6432:
+ return EmitAtomicBit6432WithCustomInserter(MI, BB,
+ X86::SUB32rr, X86::SBB32rr,
+ X86::SUB32ri, X86::SBB32ri,
+ false);
+ case X86::ATOMSWAP6432:
+ return EmitAtomicBit6432WithCustomInserter(MI, BB,
+ X86::MOV32rr, X86::MOV32rr,
+ X86::MOV32ri, X86::MOV32ri,
+ false);
}
}
if (N->getOpcode() == X86ISD::Wrapper) {
if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
+ Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
return true;
}
}
if (GA) {
if (hasMemory)
- Op = LowerGlobalAddress(GA->getGlobal(), DAG);
+ Op = LowerGlobalAddress(GA->getGlobal(), Offset, DAG);
else
Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
Offset);
case 'r': // GENERAL_REGS
case 'R': // LEGACY_REGS
case 'l': // INDEX_REGS
- if (VT == MVT::i64 && Subtarget->is64Bit())
- return std::make_pair(0U, X86::GR64RegisterClass);
- if (VT == MVT::i32)
- return std::make_pair(0U, X86::GR32RegisterClass);
- else if (VT == MVT::i16)
- return std::make_pair(0U, X86::GR16RegisterClass);
- else if (VT == MVT::i8)
+ if (VT == MVT::i8)
return std::make_pair(0U, X86::GR8RegisterClass);
- break;
+ if (VT == MVT::i16)
+ return std::make_pair(0U, X86::GR16RegisterClass);
+ if (VT == MVT::i32 || !Subtarget->is64Bit())
+ return std::make_pair(0U, X86::GR32RegisterClass);
+ return std::make_pair(0U, X86::GR64RegisterClass);
case 'f': // FP Stack registers.
// If SSE is enabled for this VT, use f80 to ensure the isel moves the
// value to the correct fpstack register class.
case 'y': // MMX_REGS if MMX allowed.
if (!Subtarget->hasMMX()) break;
return std::make_pair(0U, X86::VR64RegisterClass);
- break;
case 'Y': // SSE_REGS if SSE2 allowed
if (!Subtarget->hasSSE2()) break;
// FALL THROUGH.