X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FXCore%2FXCoreISelLowering.cpp;h=079e886457bcf0059abf12fa24df97b110e5ee19;hb=f4ec8bfaecef4e38f713b9e05d89869b023e1ce8;hp=6e894acedb5e161cb3ae851b9c2441516f87d84b;hpb=0b8c9a80f20772c3793201ab5b251d3520b9cea3;p=oota-llvm.git

diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 6e894acedb5..079e886457b 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -28,6 +28,7 @@
 #include "llvm/CodeGen/SelectionDAGISel.h"
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Constants.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/GlobalAlias.h"
@@ -36,6 +37,8 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
 using namespace llvm;
 
 const char *XCoreTargetLowering::
@@ -47,6 +50,7 @@ getTargetNodeName(unsigned Opcode) const
   case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
   case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
   case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
+  case XCoreISD::LDWSP             : return "XCoreISD::LDWSP";
   case XCoreISD::STWSP             : return "XCoreISD::STWSP";
   case XCoreISD::RETSP             : return "XCoreISD::RETSP";
   case XCoreISD::LADD              : return "XCoreISD::LADD";
@@ -54,8 +58,12 @@ getTargetNodeName(unsigned Opcode) const
   case XCoreISD::LMUL              : return "XCoreISD::LMUL";
   case XCoreISD::MACCU             : return "XCoreISD::MACCU";
   case XCoreISD::MACCS             : return "XCoreISD::MACCS";
+  case XCoreISD::CRC8              : return "XCoreISD::CRC8";
   case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
   case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
+  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
+  case XCoreISD::EH_RETURN         : return "XCoreISD::EH_RETURN";
+  case XCoreISD::MEMBARRIER        : return "XCoreISD::MEMBARRIER";
   default                          : return NULL;
   }
 }
@@ -76,14 +84,14 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
 
   setStackPointerRegisterToSaveRestore(XCore::SP);
 
-  setSchedulingPreference(Sched::RegPressure);
+  setSchedulingPreference(Sched::Source);
 
   // Use i32 for setcc operations results (slt, sgt, ...).
   setBooleanContents(ZeroOrOneBooleanContent);
   setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
 
   // XCore does not have the NodeTypes below.
-  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);
+  setOperationAction(ISD::BR_CC,     MVT::i32,   Expand);
   setOperationAction(ISD::SELECT_CC, MVT::i32,   Custom);
   setOperationAction(ISD::ADDC, MVT::i32, Expand);
   setOperationAction(ISD::ADDE, MVT::i32, Expand);
@@ -119,9 +127,6 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
   setOperationAction(ISD::BlockAddress, MVT::i32 , Custom);
 
-  // Thread Local Storage
-  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
-
   // Conversion of i64 -> double produces constantpool nodes
   setOperationAction(ISD::ConstantPool, MVT::i32,   Custom);
 
@@ -148,43 +153,87 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
 
+  // Exception handling
+  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
+  setExceptionPointerRegister(XCore::R0);
+  setExceptionSelectorRegister(XCore::R1);
+  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
+
+  // Atomic operations
+  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
+  // As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no OP.
+  setInsertFencesForAtomic(true);
+  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
+
   // TRAMPOLINE is custom lowered.
   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
 
-  maxStoresPerMemset = maxStoresPerMemsetOptSize = 4;
-  maxStoresPerMemmove = maxStoresPerMemmoveOptSize
-    = maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 2;
+  // We want to custom lower some of our intrinsics.
+  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+
+  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
+  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
+    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;
 
   // We have target-specific dag combine patterns for the following nodes:
   setTargetDAGCombine(ISD::STORE);
   setTargetDAGCombine(ISD::ADD);
+  setTargetDAGCombine(ISD::INTRINSIC_VOID);
+  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
 
   setMinFunctionAlignment(1);
+  setPrefFunctionAlignment(2);
+}
+
+bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+  if (Val.getOpcode() != ISD::LOAD)
+    return false;
+
+  EVT VT1 = Val.getValueType();
+  if (!VT1.isSimple() || !VT1.isInteger() ||
+      !VT2.isSimple() || !VT2.isInteger())
+    return false;
+
+  switch (VT1.getSimpleVT().SimpleTy) {
+  default: break;
+  case MVT::i8:
+    return true;
+  }
+
+  return false;
 }
 
 SDValue XCoreTargetLowering::
 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   switch (Op.getOpcode())
   {
-  case ISD::GlobalAddress:    return LowerGlobalAddress(Op, DAG);
-  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
-  case ISD::BlockAddress:     return LowerBlockAddress(Op, DAG);
-  case ISD::ConstantPool:     return LowerConstantPool(Op, DAG);
-  case ISD::BR_JT:            return LowerBR_JT(Op, DAG);
-  case ISD::LOAD:             return LowerLOAD(Op, DAG);
-  case ISD::STORE:            return LowerSTORE(Op, DAG);
-  case ISD::SELECT_CC:        return LowerSELECT_CC(Op, DAG);
-  case ISD::VAARG:            return LowerVAARG(Op, DAG);
-  case ISD::VASTART:          return LowerVASTART(Op, DAG);
-  case ISD::SMUL_LOHI:        return LowerSMUL_LOHI(Op, DAG);
-  case ISD::UMUL_LOHI:        return LowerUMUL_LOHI(Op, DAG);
+  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
+  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
+  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
+  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
+  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
+  case ISD::LOAD:               return LowerLOAD(Op, DAG);
+  case ISD::STORE:              return LowerSTORE(Op, DAG);
+  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
+  case ISD::VAARG:              return LowerVAARG(Op, DAG);
+  case ISD::VASTART:            return LowerVASTART(Op, DAG);
+  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
+  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
   // FIXME: Remove these when LegalizeDAGTypes lands.
   case ISD::ADD:
-  case ISD::SUB:              return ExpandADDSUB(Op.getNode(), DAG);
-  case ISD::FRAMEADDR:        return LowerFRAMEADDR(Op, DAG);
-  case ISD::INIT_TRAMPOLINE:  return LowerINIT_TRAMPOLINE(Op, DAG);
-  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
+  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
+  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
+  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
+  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
+  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
+  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
+  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
+  case ISD::ATOMIC_LOAD:        return LowerATOMIC_LOAD(Op, DAG);
+  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG);
   default:
     llvm_unreachable("unimplemented operand");
   }
@@ -212,7 +261,7 @@ void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
 SDValue XCoreTargetLowering::
 LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
 {
-  DebugLoc dl = Op.getDebugLoc();
+  SDLoc dl(Op);
   SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                              Op.getOperand(3), Op.getOperand(4));
   return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
@@ -224,74 +273,69 @@ getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
                         SelectionDAG &DAG) const
 {
   // FIXME there is no actual debug info here
-  DebugLoc dl = GA.getDebugLoc();
+  SDLoc dl(GA);
   const GlobalValue *UnderlyingGV = GV;
   // If GV is an alias then use the aliasee to determine the wrapper type
   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
     UnderlyingGV = GA->resolveAliasedGlobal();
   if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(UnderlyingGV)) {
-    if (GVar->isConstant())
+    if (  ( GVar->isConstant() &&
+            UnderlyingGV->isLocalLinkage(GV->getLinkage()) )
+       || ( GVar->hasSection() &&
+            StringRef(GVar->getSection()).startswith(".cp.") ) )
       return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
     return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
   }
   return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
 }
 
-SDValue XCoreTargetLowering::
-LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
-{
-  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
-  SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), MVT::i32);
-  return getGlobalAddressWrapper(GA, GV, DAG);
-}
+static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
+  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
+    return true;
 
-static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
-  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
-                     DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
-}
+  Type *ObjType = GV->getType()->getPointerElementType();
+  if (!ObjType->isSized())
+    return false;
 
-static inline bool isZeroLengthArray(Type *Ty) {
-  ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty);
-  return AT && (AT->getNumElements() == 0);
+  unsigned ObjSize = XTL.getDataLayout()->getTypeAllocSize(ObjType);
+  return ObjSize < CodeModelLargeSize && ObjSize != 0;
 }
 
 SDValue XCoreTargetLowering::
-LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
+LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
 {
-  // FIXME there isn't really debug info here
-  DebugLoc dl = Op.getDebugLoc();
-  // transform to label + getid() * size
-  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
-  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
-  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
-  if (!GVar) {
-    // If GV is an alias then use the aliasee to determine size
-    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
-      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
-  }
-  if (!GVar) {
-    llvm_unreachable("Thread local object not a GlobalVariable?");
-  }
-  Type *Ty = cast<PointerType>(GV->getType())->getElementType();
-  if (!Ty->isSized() || isZeroLengthArray(Ty)) {
-#ifndef NDEBUG
-    errs() << "Size of thread local object " << GVar->getName()
-           << " is unknown\n";
-#endif
-    llvm_unreachable(0);
+  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
+  const GlobalValue *GV = GN->getGlobal();
+  SDLoc DL(GN);
+  int64_t Offset = GN->getOffset();
+  if (IsSmallObject(GV, *this)) {
+    // We can only fold positive offsets that are a multiple of the word size.
+    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
+    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
+    GA = getGlobalAddressWrapper(GA, GV, DAG);
+    // Handle the rest of the offset.
+    if (Offset != FoldedOffset) {
+      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, MVT::i32);
+      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
+    }
+    return GA;
+  } else {
+    // Ideally we would not fold in offset with an index <= 11.
+    Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
+    Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
+    Ty = Type::getInt32Ty(*DAG.getContext());
+    Constant *Idx = ConstantInt::get(Ty, Offset);
+    Constant *GAI = ConstantExpr::getGetElementPtr(GA, Idx);
+    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
+    return DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), CP,
+                       MachinePointerInfo(), false, false, false, 0);
   }
-  SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
-  const DataLayout *TD = TM.getDataLayout();
-  unsigned Size = TD->getTypeAllocSize(Ty);
-  SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
-                               DAG.getConstant(Size, MVT::i32));
-  return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset);
 }
 
 SDValue XCoreTargetLowering::
 LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
 {
-  DebugLoc DL = Op.getDebugLoc();
+  SDLoc DL(Op);
   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
   SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());
 
@@ -304,15 +348,15 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
 {
   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
   // FIXME there isn't really debug info here
-  DebugLoc dl = CP->getDebugLoc();
+  SDLoc dl(CP);
   EVT PtrVT = Op.getValueType();
   SDValue Res;
   if (CP->isMachineConstantPoolEntry()) {
     Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
-                                    CP->getAlignment());
+                                    CP->getAlignment(), CP->getOffset());
   } else {
     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
-                                    CP->getAlignment());
+                                    CP->getAlignment(), CP->getOffset());
   }
   return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
 }
@@ -327,7 +371,7 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
   SDValue Chain = Op.getOperand(0);
   SDValue Table = Op.getOperand(1);
   SDValue Index = Op.getOperand(2);
-  DebugLoc dl = Op.getDebugLoc();
+  SDLoc dl(Op);
   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
   unsigned JTI = JT->getIndex();
   MachineFunction &MF = DAG.getMachineFunction();
@@ -345,55 +389,58 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
                      ScaledIndex);
 }
 
-static bool
-IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase,
-                                    int64_t &Offset)
+SDValue XCoreTargetLowering::
+lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
+                                       int64_t Offset, SelectionDAG &DAG) const
 {
-  if (Addr.getOpcode() != ISD::ADD) {
-    return false;
-  }
-  ConstantSDNode *CN = 0;
-  if (!(CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
-    return false;
-  }
-  int64_t off = CN->getSExtValue();
-  const SDValue &Base = Addr.getOperand(0);
-  const SDValue *Root = &Base;
-  if (Base.getOpcode() == ISD::ADD &&
-      Base.getOperand(1).getOpcode() == ISD::SHL) {
-    ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Base.getOperand(1)
-                                                      .getOperand(1));
-    if (CN && (CN->getSExtValue() >= 2)) {
-      Root = &Base.getOperand(0);
-    }
+  if ((Offset & 0x3) == 0) {
+    return DAG.getLoad(getPointerTy(), DL, Chain, Base, MachinePointerInfo(),
+                       false, false, false, 0);
   }
-  if (isa<FrameIndexSDNode>(*Root)) {
-    // All frame indicies are word aligned
-    AlignedBase = Base;
-    Offset = off;
-    return true;
-  }
-  if (Root->getOpcode() == XCoreISD::DPRelativeWrapper ||
-      Root->getOpcode() == XCoreISD::CPRelativeWrapper) {
-    // All dp / cp relative addresses are word aligned
-    AlignedBase = Base;
-    Offset = off;
-    return true;
-  }
-  // Check for an aligned global variable.
-  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(*Root)) {
-    const GlobalValue *GV = GA->getGlobal();
-    if (GA->getOffset() == 0 && GV->getAlignment() >= 4) {
-      AlignedBase = Base;
-      Offset = off;
-      return true;
-    }
+  // Lower to pair of consecutive word aligned loads plus some bit shifting.
+  int32_t HighOffset = RoundUpToAlignment(Offset, 4);
+  int32_t LowOffset = HighOffset - 4;
+  SDValue LowAddr, HighAddr;
+  if (GlobalAddressSDNode *GASD =
+        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
+    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
+                                   LowOffset);
+    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
+                                    HighOffset);
+  } else {
+    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
+                          DAG.getConstant(LowOffset, MVT::i32));
+    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
+                           DAG.getConstant(HighOffset, MVT::i32));
   }
-  return false;
+  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, MVT::i32);
+  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, MVT::i32);
+
+  SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
+                            LowAddr, MachinePointerInfo(),
+                            false, false, false, 0);
+  SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
+                             HighAddr, MachinePointerInfo(),
+                             false, false, false, 0);
+  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
+  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
+  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
+  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
+                      High.getValue(1));
+  SDValue Ops[] = { Result, Chain };
+  return DAG.getMergeValues(Ops, 2, DL);
+}
+
+static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
+{
+  APInt KnownZero, KnownOne;
+  DAG.ComputeMaskedBits(Value, KnownZero, KnownOne);
+  return KnownZero.countTrailingOnes() >= 2;
 }
 
 SDValue XCoreTargetLowering::
 LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   LoadSDNode *LD = cast<LoadSDNode>(Op);
   assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
          "Unexpected extension type");
@@ -409,47 +456,25 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   SDValue Chain = LD->getChain();
   SDValue BasePtr = LD->getBasePtr();
-  DebugLoc DL = Op.getDebugLoc();
-
-  SDValue Base;
-  int64_t Offset;
-  if (!LD->isVolatile() &&
-      IsWordAlignedBasePlusConstantOffset(BasePtr, Base, Offset)) {
-    if (Offset % 4 == 0) {
-      // We've managed to infer better alignment information than the load
-      // already has. Use an aligned load.
-      //
-      return DAG.getLoad(getPointerTy(), DL, Chain, BasePtr,
-                         MachinePointerInfo(),
-                         false, false, false, 0);
+  SDLoc DL(Op);
+
+  if (!LD->isVolatile()) {
+    const GlobalValue *GV;
+    int64_t Offset = 0;
+    if (DAG.isBaseWithConstantOffset(BasePtr) &&
+        isWordAligned(BasePtr->getOperand(0), DAG)) {
+      SDValue NewBasePtr = BasePtr->getOperand(0);
+      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
+      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
+                                                    Offset, DAG);
+    }
+    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
+        MinAlign(GV->getAlignment(), 4) == 4) {
+      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
+                                                BasePtr->getValueType(0));
+      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
+                                                    Offset, DAG);
     }
-    // Lower to
-    // ldw low, base[offset >> 2]
-    // ldw high, base[(offset >> 2) + 1]
-    // shr low_shifted, low, (offset & 0x3) * 8
-    // shl high_shifted, high, 32 - (offset & 0x3) * 8
-    // or result, low_shifted, high_shifted
-    SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32);
-    SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
-    SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
-    SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);
-
-    SDValue LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, LowOffset);
-    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, HighOffset);
-
-    SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
-                              LowAddr, MachinePointerInfo(),
-                              false, false, false, 0);
-    SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
-                               HighAddr, MachinePointerInfo(),
-                               false, false, false, 0);
-    SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
-    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
-    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
-    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
-                        High.getValue(1));
-    SDValue Ops[] = { Result, Chain };
-    return DAG.getMergeValues(Ops, 2, DL);
   }
 
   if (LD->getAlignment() == 2) {
@@ -512,7 +537,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
   SDValue Chain = ST->getChain();
   SDValue BasePtr = ST->getBasePtr();
   SDValue Value = ST->getValue();
-  DebugLoc dl = Op.getDebugLoc();
+  SDLoc dl(Op);
 
   if (ST->getAlignment() == 2) {
     SDValue Low = Value;
@@ -559,7 +584,7 @@ LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
   assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
          "Unexpected operand to lower!");
-  DebugLoc dl = Op.getDebugLoc();
+  SDLoc dl(Op);
   SDValue LHS = Op.getOperand(0);
   SDValue RHS = Op.getOperand(1);
   SDValue Zero = DAG.getConstant(0, MVT::i32);
@@ -576,7 +601,7 @@ LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
   assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
          "Unexpected operand to lower!");
-  DebugLoc dl = Op.getDebugLoc();
+  SDLoc dl(Op);
   SDValue LHS = Op.getOperand(0);
   SDValue RHS = Op.getOperand(1);
   SDValue Zero = DAG.getConstant(0, MVT::i32);
@@ -661,7 +686,7 @@ TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
   } else {
     return SDValue();
   }
-  DebugLoc dl = N->getDebugLoc();
+  SDLoc dl(N);
   SDValue LL, RL, AddendL, AddendH;
   LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                    Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
@@ -720,7 +745,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
     return Result;
   }
 
-  DebugLoc dl = N->getDebugLoc();
+  SDLoc dl(N);
 
   // Extract components
   SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
@@ -736,13 +761,13 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
   unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                    XCoreISD::LSUB;
   SDValue Zero = DAG.getConstant(0, MVT::i32);
-  SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
-                              LHSL, RHSL, Zero);
-  SDValue Lo(Carry.getNode(), 1);
+  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
+                           LHSL, RHSL, Zero);
+  SDValue Carry(Lo.getNode(), 1);
 
-  SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
-                                LHSH, RHSH, Carry);
-  SDValue Hi(Ignored.getNode(), 1);
+  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
+                           LHSH, RHSH, Carry);
+  SDValue Ignored(Hi.getNode(), 1);
   // Merge the pieces
   return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
 }
@@ -750,31 +775,33 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
 SDValue XCoreTargetLowering::
 LowerVAARG(SDValue Op, SelectionDAG &DAG) const
 {
-  llvm_unreachable("unimplemented");
-  // FIXME Arguments passed by reference need a extra dereference.
+  // Whist llvm does not support aggregate varargs we can ignore
+  // the possibility of the ValueType being an implicit byVal vararg.
   SDNode *Node = Op.getNode();
-  DebugLoc dl = Node->getDebugLoc();
-  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
-  EVT VT = Node->getValueType(0);
-  SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
-                               Node->getOperand(1), MachinePointerInfo(V),
+  EVT VT = Node->getValueType(0); // not an aggregate
+  SDValue InChain = Node->getOperand(0);
+  SDValue VAListPtr = Node->getOperand(1);
+  EVT PtrVT = VAListPtr.getValueType();
+  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
+  SDLoc dl(Node);
+  SDValue VAList = DAG.getLoad(PtrVT, dl, InChain,
+                               VAListPtr, MachinePointerInfo(SV),
                                false, false, false, 0);
   // Increment the pointer, VAList, to the next vararg
-  SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
-                             DAG.getConstant(VT.getSizeInBits(),
-                                             getPointerTy()));
+  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
+                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8));
   // Store the incremented VAList to the legalized pointer
-  Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
-                      MachinePointerInfo(V), false, false, 0);
+  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
+                         MachinePointerInfo(SV), false, false, 0);
   // Load the actual argument out of the pointer VAList
-  return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
+  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
                      false, false, false, 0);
 }
 
 SDValue XCoreTargetLowering::
 LowerVASTART(SDValue Op, SelectionDAG &DAG) const
 {
-  DebugLoc dl = Op.getDebugLoc();
+  SDLoc dl(Op);
   // vastart stores the address of the VarArgsFrameIndex slot into the
   // memory location argument
   MachineFunction &MF = DAG.getMachineFunction();
@@ -786,15 +813,85 @@ LowerVASTART(SDValue Op, SelectionDAG &DAG) const
 
 SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
-  DebugLoc dl = Op.getDebugLoc();
+  // This nodes represent llvm.frameaddress on the DAG.
+  // It takes one operand, the index of the frame address to return.
+  // An index of zero corresponds to the current function's frame address.
+  // An index of one to the parent's frame address, and so on.
+  // Depths > 0 not supported yet!
+  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
+    return SDValue();
+
+  MachineFunction &MF = DAG.getMachineFunction();
+  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
+  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
+                            RegInfo->getFrameRegister(MF), MVT::i32);
+}
+
+SDValue XCoreTargetLowering::
+LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
+  // This nodes represent llvm.returnaddress on the DAG.
+  // It takes one operand, the index of the return address to return.
+  // An index of zero corresponds to the current function's return address.
+  // An index of one to the parent's return address, and so on.
   // Depths > 0 not supported yet!
   if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
     return SDValue();
 
   MachineFunction &MF = DAG.getMachineFunction();
+  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
+  int FI = XFI->createLRSpillSlot(MF);
+  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+  return DAG.getLoad(getPointerTy(), SDLoc(Op), DAG.getEntryNode(), FIN,
+                     MachinePointerInfo::getFixedStack(FI), false, false,
+                     false, 0);
+}
+
+SDValue XCoreTargetLowering::
+LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
+  // This node represents offset from frame pointer to first on-stack argument.
+  // This is needed for correct stack adjustment during unwind.
+  // However, we don't know the offset until after the frame has be finalised.
+  // This is done during the XCoreFTAOElim pass.
+  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
+}
+
+SDValue XCoreTargetLowering::
+LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
+  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
+  // This node represents 'eh_return' gcc dwarf builtin, which is used to
+  // return from exception. The general meaning is: adjust stack by OFFSET and
+  // pass execution to HANDLER.
+  MachineFunction &MF = DAG.getMachineFunction();
+  SDValue Chain     = Op.getOperand(0);
+  SDValue Offset    = Op.getOperand(1);
+  SDValue Handler   = Op.getOperand(2);
+  SDLoc dl(Op);
+
+  // Absolute SP = (FP + FrameToArgs) + Offset
   const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
-  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                             RegInfo->getFrameRegister(MF), MVT::i32);
+  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
+                                    MVT::i32);
+  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
+  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
+
+  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
+  // which leaves 2 caller saved registers, R2 & R3 for us to use.
+  unsigned StackReg = XCore::R2;
+  unsigned HandlerReg = XCore::R3;
+
+  SDValue OutChains[] = {
+    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
+    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
+  };
+
+  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 2);
+
+  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
+                     DAG.getRegister(StackReg, MVT::i32),
+                     DAG.getRegister(HandlerReg, MVT::i32));
+
 }
 
 SDValue XCoreTargetLowering::
@@ -826,7 +923,7 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
 
   SDValue Addr = Trmp;
 
-  DebugLoc dl = Op.getDebugLoc();
+  SDLoc dl(Op);
   OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
                               Addr, MachinePointerInfo(TrmpAddr), false, false,
                               0);
@@ -858,6 +955,90 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5);
 }
 
+SDValue XCoreTargetLowering::
+LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+  switch (IntNo) {
+  case Intrinsic::xcore_crc8:
+    EVT VT = Op.getValueType();
+    SDValue Data =
+      DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
+                  Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
+    SDValue Crc(Data.getNode(), 1);
+    SDValue Results[] = { Crc, Data };
+    return DAG.getMergeValues(Results, 2, DL);
+  }
+  return SDValue();
+}
+
+SDValue XCoreTargetLowering::
+LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
+}
+
+SDValue XCoreTargetLowering::
+LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
+  AtomicSDNode *N = cast<AtomicSDNode>(Op);
+  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
+  assert(N->getOrdering() <= Monotonic &&
+         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  if (N->getMemoryVT() == MVT::i32) {
+    if (N->getAlignment() < 4)
+      report_fatal_error("atomic load must be aligned");
+    return DAG.getLoad(getPointerTy(), SDLoc(Op), N->getChain(),
+                       N->getBasePtr(), N->getPointerInfo(),
+                       N->isVolatile(), N->isNonTemporal(),
+                       N->isInvariant(), N->getAlignment(),
+                       N->getTBAAInfo(), N->getRanges());
+  }
+  if (N->getMemoryVT() == MVT::i16) {
+    if (N->getAlignment() < 2)
+      report_fatal_error("atomic load must be aligned");
+    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
+                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
+                          N->isVolatile(), N->isNonTemporal(),
+                          N->getAlignment(), N->getTBAAInfo());
+  }
+  if (N->getMemoryVT() == MVT::i8)
+    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
+                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
+                          N->isVolatile(), N->isNonTemporal(),
+                          N->getAlignment(), N->getTBAAInfo());
+  return SDValue();
+}
+
+SDValue XCoreTargetLowering::
+LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
+  AtomicSDNode *N = cast<AtomicSDNode>(Op);
+  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
+  assert(N->getOrdering() <= Monotonic &&
+         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  if (N->getMemoryVT() == MVT::i32) {
+    if (N->getAlignment() < 4)
+      report_fatal_error("atomic store must be aligned");
+    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
+                        N->getBasePtr(), N->getPointerInfo(),
+                        N->isVolatile(), N->isNonTemporal(),
+                        N->getAlignment(), N->getTBAAInfo());
+  }
+  if (N->getMemoryVT() == MVT::i16) {
+    if (N->getAlignment() < 2)
+      report_fatal_error("atomic store must be aligned");
+    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
+                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
+                             N->isVolatile(), N->isNonTemporal(),
+                             N->getAlignment(), N->getTBAAInfo());
+  }
+  if (N->getMemoryVT() == MVT::i8)
+    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
+                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
+                             N->isVolatile(), N->isNonTemporal(),
+                             N->getAlignment(), N->getTBAAInfo());
+  return SDValue();
+}
+
 //===----------------------------------------------------------------------===//
 //                      Calling Convention Implementation
 //===----------------------------------------------------------------------===//
@@ -873,10 +1054,10 @@ SDValue
 XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG                     = CLI.DAG;
-  DebugLoc &dl                          = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals     = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins   = CLI.Ins;
+  SDLoc &dl                             = CLI.DL;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
   SDValue Chain                         = CLI.Chain;
   SDValue Callee                        = CLI.Callee;
   bool &isTailCall                      = CLI.IsTailCall;
@@ -898,6 +1079,52 @@ XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   }
 }
 
+/// LowerCallResult - Lower the result values of a call into the
+/// appropriate copies out of appropriate physical registers / memory locations.
+static SDValue
+LowerCallResult(SDValue Chain, SDValue InFlag,
+                const SmallVectorImpl<CCValAssign> &RVLocs,
+                SDLoc dl, SelectionDAG &DAG,
+                SmallVectorImpl<SDValue> &InVals) {
+  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
+  // Copy results out of physical registers.
+  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
+    const CCValAssign &VA = RVLocs[i];
+    if (VA.isRegLoc()) {
+      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
+                                 InFlag).getValue(1);
+      InFlag = Chain.getValue(2);
+      InVals.push_back(Chain.getValue(0));
+    } else {
+      assert(VA.isMemLoc());
+      ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
+                                             InVals.size()));
+      // Reserve space for this result.
+      InVals.push_back(SDValue());
+    }
+  }
+
+  // Copy results out of memory.
+  SmallVector<SDValue, 4> MemOpChains;
+  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
+    int offset = ResultMemLocs[i].first;
+    unsigned index = ResultMemLocs[i].second;
+    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
+    SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, MVT::i32) };
+    SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops, 2);
+    InVals[index] = load;
+    MemOpChains.push_back(load.getValue(1));
+  }
+
+  // Transform all loads nodes into one single node because
+  // all load nodes are independent of each other.
+  if (!MemOpChains.empty())
+    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                        &MemOpChains[0], MemOpChains.size());
+
+  return Chain;
+}
+
 /// LowerCCCCallTo - functions arguments are copied from virtual
 /// regs to (physical regs)/(stack frame), CALLSEQ_START and
 /// CALLSEQ_END are emitted.
@@ -909,7 +1136,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
-                                    DebugLoc dl, SelectionDAG &DAG,
+                                    SDLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) const {
 
   // Analyze operands of the call, assigning locations to each operand.
@@ -923,11 +1150,18 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
 
   CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
 
+  SmallVector<CCValAssign, 16> RVLocs;
+  // Analyze return values to determine the number of bytes of stack required.
+  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+                    getTargetMachine(), RVLocs, *DAG.getContext());
+  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
+  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
+
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = CCInfo.getNextStackOffset();
+  unsigned NumBytes = RetCCInfo.getNextStackOffset();
 
   Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes,
-                                 getPointerTy(), true));
+                                 getPointerTy(), true), dl);
 
   SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
   SmallVector<SDValue, 12> MemOpChains;
@@ -1017,53 +1251,29 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
   Chain = DAG.getCALLSEQ_END(Chain,
                              DAG.getConstant(NumBytes, getPointerTy(), true),
                              DAG.getConstant(0, getPointerTy(), true),
-                             InFlag);
+                             InFlag, dl);
   InFlag = Chain.getValue(1);
 
   // Handle result values, copying them out of physregs into vregs that we
   // return.
-  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
-                         Ins, dl, DAG, InVals);
-}
-
-/// LowerCallResult - Lower the result values of a call into the
-/// appropriate copies out of appropriate physical registers.
-SDValue
-XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
-                                     CallingConv::ID CallConv, bool isVarArg,
-                                     const SmallVectorImpl<ISD::InputArg> &Ins,
-                                     DebugLoc dl, SelectionDAG &DAG,
-                                     SmallVectorImpl<SDValue> &InVals) const {
-
-  // Assign locations to each value returned by this call.
-  SmallVector<CCValAssign, 16> RVLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
-
-  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
-
-  // Copy all of the result registers out of their specified physreg.
-  for (unsigned i = 0; i != RVLocs.size(); ++i) {
-    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
-                               RVLocs[i].getValVT(), InFlag).getValue(1);
-    InFlag = Chain.getValue(2);
-    InVals.push_back(Chain.getValue(0));
-  }
-
-  return Chain;
+  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
 }
 
 //===----------------------------------------------------------------------===//
 //             Formal Arguments Calling Convention Implementation
 //===----------------------------------------------------------------------===//
 
+namespace {
+  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
+}
+
 /// XCore formal arguments implementation
 SDValue
 XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                           CallingConv::ID CallConv,
                                           bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg> &Ins,
-                                          DebugLoc dl,
+                                          SDLoc dl,
                                           SelectionDAG &DAG,
                                           SmallVectorImpl<SDValue> &InVals)
                                             const {
@@ -1088,12 +1298,13 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                        bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg> &Ins,
-                                       DebugLoc dl,
+                                       SDLoc dl,
                                        SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals) const {
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo *MFI = MF.getFrameInfo();
   MachineRegisterInfo &RegInfo = MF.getRegInfo();
+  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
 
   // Assign locations to all of the incoming arguments.
   SmallVector<CCValAssign, 16> ArgLocs;
@@ -1106,9 +1317,25 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
 
   unsigned LRSaveSize = StackSlotSize;
 
+  if (!isVarArg)
+    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);
+
+  // All getCopyFromReg ops must precede any getMemcpys to prevent the
+  // scheduler clobbering a register before it has been copied.
+  // The stages are:
+  // 1. CopyFromReg (and load) arg & vararg registers.
+  // 2. Chain CopyFromReg nodes into a TokenFactor.
+  // 3. Memcpy 'byVal' args & push final InVals.
+  // 4. Chain mem ops nodes into a TokenFactor.
+  SmallVector<SDValue, 4> CFRegNode;
+  SmallVector<ArgDataPair, 4> ArgData;
+  SmallVector<SDValue, 4> MemOps;
+
+  // 1a. CopyFromReg (and load) arg registers.
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
 
     CCValAssign &VA = ArgLocs[i];
+    SDValue ArgIn;
 
     if (VA.isRegLoc()) {
       // Arguments passed in registers
@@ -1125,7 +1352,8 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
       case MVT::i32:
         unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
         RegInfo.addLiveIn(VA.getLocReg(), VReg);
-        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
+        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
+        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
       }
     } else {
       // sanity check
@@ -1145,14 +1373,17 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
       // Create the SelectionDAG nodes corresponding to a load
       //from this parameter
       SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
-      InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
-                                   MachinePointerInfo::getFixedStack(FI),
-                                   false, false, false, 0));
+      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
+                          MachinePointerInfo::getFixedStack(FI),
+                          false, false, false, 0);
     }
+    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
+    ArgData.push_back(ADP);
   }
 
+  // 1b. CopyFromReg vararg registers.
   if (isVarArg) {
-    /* Argument registers */
+    // Argument registers
     static const uint16_t ArgRegs[] = {
       XCore::R0, XCore::R1, XCore::R2, XCore::R3
     };
@@ -1160,7 +1391,6 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
     unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                      array_lengthof(ArgRegs));
     if (FirstVAReg < array_lengthof(ArgRegs)) {
-      SmallVector<SDValue, 4> MemOps;
      int offset = 0;
       // Save remaining registers, storing higher register numbers at a higher
       // address
@@ -1176,14 +1406,12 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
         unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
         RegInfo.addLiveIn(ArgRegs[i], VReg);
         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
+        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
         // Move argument from virt reg -> stack
         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                      MachinePointerInfo(), false, false, 0);
         MemOps.push_back(Store);
       }
-      if (!MemOps.empty())
-        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
-                            &MemOps[0], MemOps.size());
     } else {
       // This will point to the next argument passed via stack.
       XFI->setVarArgsFrameIndex(
@@ -1192,6 +1420,42 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
     }
   }
 
+  // 2. chain CopyFromReg nodes into a TokenFactor.
+  if (!CFRegNode.empty())
+    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &CFRegNode[0],
+                        CFRegNode.size());
+
+  // 3. Memcpy 'byVal' args & push final InVals.
+  // Aggregates passed "byVal" need to be copied by the callee.
+  // The callee will use a pointer to this copy, rather than the original
+  // pointer.
+  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
+                                                    ArgDE = ArgData.end();
+       ArgDI != ArgDE; ++ArgDI) {
+    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
+      unsigned Size = ArgDI->Flags.getByValSize();
+      unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
+      // Create a new object on the stack and copy the pointee into it.
+      int FI = MFI->CreateStackObject(Size, Align, false);
+      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+      InVals.push_back(FIN);
+      MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
+                                     DAG.getConstant(Size, MVT::i32),
+                                     Align, false, false,
+                                     MachinePointerInfo(),
+                                     MachinePointerInfo()));
+    } else {
+      InVals.push_back(ArgDI->SDV);
+    }
+  }
+
+  // 4, chain mem ops nodes into a TokenFactor.
+  if (!MemOps.empty()) {
+    MemOps.push_back(Chain);
+    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0],
+                        MemOps.size());
+  }
+
   return Chain;
 }
 
@@ -1206,7 +1470,11 @@ CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                LLVMContext &Context) const {
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
-  return CCInfo.CheckReturn(Outs, RetCC_XCore);
+  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
+    return false;
+  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
+    return false;
+  return true;
 }
 
 SDValue
@@ -1214,7 +1482,11 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
                                  CallingConv::ID CallConv, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  const SmallVectorImpl<SDValue> &OutVals,
-                                 DebugLoc dl, SelectionDAG &DAG) const {
+                                 SDLoc dl, SelectionDAG &DAG) const {
+
+  XCoreFunctionInfo *XFI =
+    DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
+  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
 
   // CCValAssign - represent the assignment of
   // the return value to a location
@@ -1225,38 +1497,69 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
                  getTargetMachine(), RVLocs, *DAG.getContext());
 
   // Analyze return values.
-  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
+  if (!isVarArg)
+    CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
 
-  // If this is the first return lowered for this function, add
-  // the regs to the liveout set for the function.
-  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
-    for (unsigned i = 0; i != RVLocs.size(); ++i)
-      if (RVLocs[i].isRegLoc())
-        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
-  }
+  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
 
   SDValue Flag;
+  SmallVector<SDValue, 4> RetOps(1, Chain);
 
-  // Copy the result values into the output registers.
-  for (unsigned i = 0; i != RVLocs.size(); ++i) {
+  // Return on XCore is always a "retsp 0"
+  RetOps.push_back(DAG.getConstant(0, MVT::i32));
+
+  SmallVector<SDValue, 4> MemOpChains;
+  // Handle return values that must be copied to memory.
+  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
     CCValAssign &VA = RVLocs[i];
-    assert(VA.isRegLoc() && "Can only return in registers!");
+    if (VA.isRegLoc())
+      continue;
+    assert(VA.isMemLoc());
+    if (isVarArg) {
+      report_fatal_error("Can't return value from vararg function in memory");
+    }
+
+    int Offset = VA.getLocMemOffset();
+    unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
+    // Create the frame index object for the memory location.
+    int FI = MFI->CreateFixedObject(ObjSize, Offset, false);
+
+    // Create a SelectionDAG node corresponding to a store
+    // to this memory location.
+    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+    MemOpChains.push_back(DAG.getStore(Chain, dl, OutVals[i], FIN,
+                          MachinePointerInfo::getFixedStack(FI), false, false,
+                          0));
+  }
+
+  // Transform all store nodes into one single node because
+  // all stores are independent of each other.
+  if (!MemOpChains.empty())
+    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                        &MemOpChains[0], MemOpChains.size());
 
-    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
-                             OutVals[i], Flag);
+  // Now handle return values copied to registers.
+  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
+    CCValAssign &VA = RVLocs[i];
+    if (!VA.isRegLoc())
+      continue;
+    // Copy the result values into the output registers.
+    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
 
     // guarantee that all emitted copies are
     // stuck together, avoiding something bad
     Flag = Chain.getValue(1);
+    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
   }
 
-  // Return on XCore is always a "retsp 0"
+  RetOps[0] = Chain;  // Update chain.
+
+  // Add the flag if we have it.
   if (Flag.getNode())
-    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
-                       Chain, DAG.getConstant(0, MVT::i32), Flag);
-  else // Return Void
-    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
-                       Chain, DAG.getConstant(0, MVT::i32));
+    RetOps.push_back(Flag);
+
+  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
+                     &RetOps[0], RetOps.size());
 }
 
 //===----------------------------------------------------------------------===//
@@ -1294,8 +1597,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
 
   // Transfer the remainder of BB and its successor edges to sinkMBB.
   sinkMBB->splice(sinkMBB->begin(), BB,
-                  llvm::next(MachineBasicBlock::iterator(MI)),
-                  BB->end());
+                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
   sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
 
   // Next, add the true and fallthrough blocks as its successors.
@@ -1333,9 +1635,49 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
 SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
-  DebugLoc dl = N->getDebugLoc();
+  SDLoc dl(N);
   switch (N->getOpcode()) {
   default: break;
+  case ISD::INTRINSIC_VOID:
+    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+    case Intrinsic::xcore_outt:
+    case Intrinsic::xcore_outct:
+    case Intrinsic::xcore_chkct: {
+      SDValue OutVal = N->getOperand(3);
+      // These instructions ignore the high bits.
+      if (OutVal.hasOneUse()) {
+        unsigned BitWidth = OutVal.getValueSizeInBits();
+        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
+        APInt KnownZero, KnownOne;
+        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+                                              !DCI.isBeforeLegalizeOps());
+        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+        if (TLO.ShrinkDemandedConstant(OutVal, DemandedMask) ||
+            TLI.SimplifyDemandedBits(OutVal, DemandedMask, KnownZero, KnownOne,
+                                     TLO))
+          DCI.CommitTargetLoweringOpt(TLO);
+      }
+      break;
+    }
+    case Intrinsic::xcore_setpt: {
+      SDValue Time = N->getOperand(3);
+      // This instruction ignores the high bits.
+      if (Time.hasOneUse()) {
+        unsigned BitWidth = Time.getValueSizeInBits();
+        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
+        APInt KnownZero, KnownOne;
+        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+                                              !DCI.isBeforeLegalizeOps());
+        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+        if (TLO.ShrinkDemandedConstant(Time, DemandedMask) ||
+            TLI.SimplifyDemandedBits(Time, DemandedMask, KnownZero, KnownOne,
+                                     TLO))
+          DCI.CommitTargetLoweringOpt(TLO);
+      }
+      break;
+    }
+    }
+    break;
   case XCoreISD::LADD: {
     SDValue N0 = N->getOperand(0);
     SDValue N1 = N->getOperand(1);
@@ -1353,13 +1695,13 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
       SDValue Carry = DAG.getConstant(0, VT);
       SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                    DAG.getConstant(1, VT));
-      SDValue Ops [] = { Carry, Result };
+      SDValue Ops[] = { Result, Carry };
       return DAG.getMergeValues(Ops, 2, dl);
     }
 
     // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
     // low bit set
-    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
+    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
       APInt KnownZero, KnownOne;
       APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                          VT.getSizeInBits() - 1);
@@ -1367,7 +1709,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
       if ((KnownZero & Mask) == Mask) {
         SDValue Carry = DAG.getConstant(0, VT);
         SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
-        SDValue Ops [] = { Carry, Result };
+        SDValue Ops[] = { Result, Carry };
         return DAG.getMergeValues(Ops, 2, dl);
       }
     }
@@ -1391,14 +1733,14 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
         SDValue Borrow = N2;
         SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                      DAG.getConstant(0, VT), N2);
-        SDValue Ops [] = { Borrow, Result };
+        SDValue Ops[] = { Result, Borrow };
         return DAG.getMergeValues(Ops, 2, dl);
       }
     }
 
     // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
     // low bit set
-    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
+    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
       APInt KnownZero, KnownOne;
       APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                          VT.getSizeInBits() - 1);
@@ -1406,7 +1748,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
       if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
         SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
-        SDValue Ops [] = { Borrow, Result };
+        SDValue Ops[] = { Result, Borrow };
         return DAG.getMergeValues(Ops, 2, dl);
       }
     }
@@ -1432,11 +1774,15 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
       // If the high result is unused fold to add(a, b)
       if (N->hasNUsesOfValue(0, 0)) {
         SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
-        SDValue Ops [] = { Lo, Lo };
+        SDValue Ops[] = { Lo, Lo };
         return DAG.getMergeValues(Ops, 2, dl);
       }
       // Otherwise fold to ladd(a, b, 0)
-      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
+      SDValue Result =
+        DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
+      SDValue Carry(Result.getNode(), 1);
+      SDValue Ops[] = { Carry, Result };
+      return DAG.getMergeValues(Ops, 2, dl);
     }
   }
   break;
@@ -1530,12 +1876,40 @@ void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
     default: break;
     case XCoreISD::LADD:
    case XCoreISD::LSUB:
-      if (Op.getResNo() == 0) {
+      if (Op.getResNo() == 1) {
         // Top bits of carry / borrow are clear.
         KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                           KnownZero.getBitWidth() - 1);
       }
       break;
+    case ISD::INTRINSIC_W_CHAIN:
+      {
+        unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+        switch (IntNo) {
+        case Intrinsic::xcore_getts:
+          // High bits are known to be zero.
+          KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
+                                            KnownZero.getBitWidth() - 16);
+          break;
+        case Intrinsic::xcore_int:
+        case Intrinsic::xcore_inct:
+          // High bits are known to be zero.
+          KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
+                                            KnownZero.getBitWidth() - 8);
+          break;
+        case Intrinsic::xcore_testct:
+          // Result is either 0 or 1.
+          KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
+                                            KnownZero.getBitWidth() - 1);
+          break;
+        case Intrinsic::xcore_testwct:
+          // Result is in the range 0 - 4.
+          KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
+                                            KnownZero.getBitWidth() - 3);
+          break;
+        }
+      }
+      break;
     }
 }
 
@@ -1606,7 +1980,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
 std::pair<unsigned, const TargetRegisterClass*>
 XCoreTargetLowering::
 getRegForInlineAsmConstraint(const std::string &Constraint,
-                             EVT VT) const {
+                             MVT VT) const {
   if (Constraint.size() == 1) {
     switch (Constraint[0]) {
     default : break;