diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 64625f70185..c8b0570d632 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -13,10 +13,11 @@
 //===----------------------------------------------------------------------===//
 
 #include "SparcISelLowering.h"
+#include "MCTargetDesc/SparcMCExpr.h"
 #include "SparcMachineFunctionInfo.h"
 #include "SparcRegisterInfo.h"
 #include "SparcTargetMachine.h"
-#include "MCTargetDesc/SparcBaseInfo.h"
+#include "SparcTargetObjectFile.h"
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
@@ -52,11 +53,11 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
-  static const uint16_t RegList[] = {
+  static const MCPhysReg RegList[] = {
     SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
   };
   // Try to get first reg.
-  if (unsigned Reg = State.AllocateReg(RegList, 6)) {
+  if (unsigned Reg = State.AllocateReg(RegList)) {
     State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
   } else {
     // Assign whole thing in stack.
@@ -67,7 +68,7 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
   }
 
   // Try to get second reg.
-  if (unsigned Reg = State.AllocateReg(RegList, 6))
+  if (unsigned Reg = State.AllocateReg(RegList))
     State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
   else
     State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
@@ -80,11 +81,14 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
                             MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                             ISD::ArgFlagsTy &ArgFlags, CCState &State) {
-  assert((LocVT == MVT::f32 || LocVT.getSizeInBits() == 64) &&
+  assert((LocVT == MVT::f32 || LocVT == MVT::f128
+          || LocVT.getSizeInBits() == 64) &&
          "Can't handle non-64 bits locations");
 
   // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
-  unsigned Offset = State.AllocateStack(8, 8);
+  unsigned size      = (LocVT == MVT::f128) ? 16 : 8;
+  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
+  unsigned Offset = State.AllocateStack(size, alignment);
   unsigned Reg = 0;
 
   if (LocVT == MVT::i64 && Offset < 6*8)
@@ -96,6 +100,9 @@ static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
   else if (LocVT == MVT::f32 && Offset < 16*8)
     // Promote floats to %f1, %f3, ...
     Reg = SP::F1 + Offset/4;
+  else if (LocVT == MVT::f128 && Offset < 16*8)
+    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
+    Reg = SP::Q0 + Offset/16;
 
   // Promote to register when possible, otherwise use the stack slot.
   if (Reg) {
@@ -183,8 +190,8 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain,
   SmallVector<CCValAssign, 16> RVLocs;
 
   // CCState - Info about the registers and stack slot.
-  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
-                 DAG.getTarget(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
 
   // Analyze return values.
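  // For illustration: AnalyzeReturn feeds each value in Outs through the
  // TableGen'd RetCC_Sparc32 function, and custom routines such as
  // CC_Sparc_Assign_f64 above plug into those convention tables via the
  // CCCustom<> hook. Conceptually the call below reduces to:
  //
  //   for (unsigned i = 0, e = Outs.size(); i != e; ++i)
  //     if (RetCC_Sparc32(i, Outs[i].VT, Outs[i].VT, CCValAssign::Full,
  //                       Outs[i].Flags, CCInfo))
  //       llvm_unreachable("Return value has an unhandled type");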
CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32); @@ -228,8 +235,7 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain, if (Flag.getNode()) RetOps.push_back(Flag); - return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, - &RetOps[0], RetOps.size()); + return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps); } // Lower return values for the 64-bit ABI. @@ -244,11 +250,11 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain, SmallVector RVLocs; // CCState - Info about the registers and stack slot. - CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), - DAG.getTarget(), RVLocs, *DAG.getContext()); + CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, + *DAG.getContext()); // Analyze return values. - CCInfo.AnalyzeReturn(Outs, CC_Sparc64); + CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64); SDValue Flag; SmallVector RetOps(1, Chain); @@ -265,6 +271,7 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain, // Integer return values must be sign or zero extended by the callee. switch (VA.getLocInfo()) { + case CCValAssign::Full: break; case CCValAssign::SExt: OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal); break; @@ -273,8 +280,9 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain, break; case CCValAssign::AExt: OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal); - default: break; + default: + llvm_unreachable("Unknown loc info!"); } // The custom bit on an i32 return value indicates that it should be passed @@ -306,8 +314,7 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain, if (Flag.getNode()) RetOps.push_back(Flag); - return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, - &RetOps[0], RetOps.size()); + return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps); } SDValue SparcTargetLowering:: @@ -342,16 +349,19 @@ LowerFormalArguments_32(SDValue Chain, // Assign locations to all of the incoming arguments. SmallVector ArgLocs; - CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, + *DAG.getContext()); CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32); const unsigned StackOffset = 92; - for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + unsigned InIdx = 0; + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) { CCValAssign &VA = ArgLocs[i]; - if (i == 0 && Ins[i].Flags.isSRet()) { + if (Ins[InIdx].Flags.isSRet()) { + if (InIdx != 0) + report_fatal_error("sparc only supports sret on the first parameter"); // Get SRet from [%fp+64]. int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true); SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); @@ -464,7 +474,7 @@ LowerFormalArguments_32(SDValue Chain, DAG.getConstant(Offset, MVT::i32)); Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr, MachinePointerInfo(), - VA.getValVT(), false, false,0); + VA.getValVT(), false, false, false,0); Load = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Load); } InVals.push_back(Load); @@ -484,11 +494,11 @@ LowerFormalArguments_32(SDValue Chain, // Store remaining ArgRegs to the stack if this is a varargs function. 
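  // For illustration: in the 32-bit SPARC ABI the first six argument words
  // travel in %i0-%i5 but also own fixed slots in the caller's frame at
  // [%fp+68] through [%fp+88]. Spilling the still-unallocated registers
  // there lets va_arg walk every argument with plain pointer arithmetic;
  // e.g. for
  //
  //   int sum(int n, ...)        // n arrives in %i0
  //
  // the block below stores %i1-%i5 so the va_list can begin at [%fp+72].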
if (isVarArg) { - static const uint16_t ArgRegs[] = { + static const MCPhysReg ArgRegs[] = { SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5 }; - unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6); - const uint16_t *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6; + unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs); + const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6; unsigned ArgOffset = CCInfo.getNextStackOffset(); if (NumAllocated == 6) ArgOffset += StackOffset; @@ -519,8 +529,7 @@ LowerFormalArguments_32(SDValue Chain, if (!OutChains.empty()) { OutChains.push_back(Chain); - Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, - &OutChains[0], OutChains.size()); + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); } } @@ -540,8 +549,8 @@ LowerFormalArguments_64(SDValue Chain, // Analyze arguments according to CC_Sparc64. SmallVector ArgLocs; - CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); + CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, + *DAG.getContext()); CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64); // The argument array begins at %fp+BIAS+128, after the register save area. @@ -635,8 +644,7 @@ LowerFormalArguments_64(SDValue Chain, } if (!OutChains.empty()) - Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, - &OutChains[0], OutChains.size()); + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains); return Chain; } @@ -654,7 +662,7 @@ static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee, if (CS) return CS->hasFnAttr(Attribute::ReturnsTwice); - const Function *CalleeFn = 0; + const Function *CalleeFn = nullptr; if (GlobalAddressSDNode *G = dyn_cast(Callee)) { CalleeFn = dyn_cast(G->getGlobal()); } else if (ExternalSymbolSDNode *E = @@ -690,8 +698,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, // Analyze operands of the call, assigning locations to each operand. SmallVector ArgLocs; - CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - DAG.getTarget(), ArgLocs, *DAG.getContext()); + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, + *DAG.getContext()); CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32); // Get the size of the outgoing arguments stack space requirement. @@ -868,8 +876,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, // Emit all stores, make sure the occur before any copies into physregs. if (!MemOpChains.empty()) - Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, - &MemOpChains[0], MemOpChains.size()); + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); // Build a sequence of copy-to-reg nodes chained together with token // chain and flag operands which copy the outgoing args into registers. @@ -888,10 +895,12 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, // If the callee is a GlobalAddress node (quite common, every direct call is) // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. // Likewise ExternalSymbol -> TargetExternalSymbol. + unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_) + ? 
SparcMCExpr::VK_Sparc_WPLT30 : 0); if (GlobalAddressSDNode *G = dyn_cast(Callee)) - Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32); + Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF); else if (ExternalSymbolSDNode *E = dyn_cast(Callee)) - Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32); + Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF); // Returns a chain & a flag for retval copy to use SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); @@ -905,18 +914,18 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, RegsToPass[i].second.getValueType())); // Add a register mask operand representing the call-preserved registers. - const SparcRegisterInfo *TRI = - ((const SparcTargetMachine&)getTargetMachine()).getRegisterInfo(); - const uint32_t *Mask = ((hasReturnsTwice) - ? TRI->getRTCallPreservedMask(CallConv) - : TRI->getCallPreservedMask(CallConv)); + const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo(); + const uint32_t *Mask = + ((hasReturnsTwice) + ? TRI->getRTCallPreservedMask(CallConv) + : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv)); assert(Mask && "Missing call preserved mask for calling convention"); Ops.push_back(DAG.getRegisterMask(Mask)); if (InFlag.getNode()) Ops.push_back(InFlag); - Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); + Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops); InFlag = Chain.getValue(1); Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true), @@ -925,8 +934,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, // Assign locations to each value returned by this call. SmallVector RVLocs; - CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), - DAG.getTarget(), RVLocs, *DAG.getContext()); + CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, + *DAG.getContext()); RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32); @@ -950,9 +959,9 @@ static bool isFP128ABICall(const char *CalleeName) "_Q_sqrt", "_Q_neg", "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq", "_Q_lltoq", "_Q_ulltoq", - 0 + nullptr }; - for (const char * const *I = ABICalls; *I != 0; ++I) + for (const char * const *I = ABICalls; *I != nullptr; ++I) if (strcmp(CalleeName, *I) == 0) return true; return false; @@ -961,7 +970,7 @@ static bool isFP128ABICall(const char *CalleeName) unsigned SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const { - const Function *CalleeFn = 0; + const Function *CalleeFn = nullptr; if (GlobalAddressSDNode *G = dyn_cast(Callee)) { CalleeFn = dyn_cast(G->getGlobal()); } else if (ExternalSymbolSDNode *E = @@ -998,9 +1007,10 @@ static void fixupVariableFloatArgs(SmallVectorImpl &ArgLocs, ArrayRef Outs) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { const CCValAssign &VA = ArgLocs[i]; + MVT ValTy = VA.getLocVT(); // FIXME: What about f32 arguments? C promotes them to f64 when calling // varargs functions. - if (!VA.isRegLoc() || VA.getLocVT() != MVT::f64) + if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128)) continue; // The fixed arguments to a varargs function still go in FP registers. if (Outs[VA.getValNo()].IsFixed) @@ -1010,15 +1020,25 @@ static void fixupVariableFloatArgs(SmallVectorImpl &ArgLocs, CCValAssign NewVA; // Determine the offset into the argument array. - unsigned Offset = 8 * (VA.getLocReg() - SP::D0); + unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0; + unsigned argSize = (ValTy == MVT::f64) ? 
8 : 16;
+    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
     assert(Offset < 16*8 && "Offset out of range, bad register enum?");
     if (Offset < 6*8) {
       // This argument should go in %i0-%i5.
       unsigned IReg = SP::I0 + Offset/8;
-      // Full register, just bitconvert into i64.
-      NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
-                                  IReg, MVT::i64, CCValAssign::BCvt);
+      if (ValTy == MVT::f64)
+        // Full register, just bitconvert into i64.
+        NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
+                                    IReg, MVT::i64, CCValAssign::BCvt);
+      else {
+        assert(ValTy == MVT::f128 && "Unexpected type!");
+        // Full register, just bitconvert into i128 -- We will lower this into
+        // two i64s in LowerCall_64.
+        NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
+                                          IReg, MVT::i128, CCValAssign::BCvt);
+      }
     } else {
       // This needs to go to memory, we're out of integer registers.
       NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
@@ -1041,8 +1061,8 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
 
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
-                 DAG.getTarget(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
 
   // Get the size of the outgoing arguments stack space requirement.
@@ -1094,11 +1114,46 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
       break;
     case CCValAssign::BCvt:
-      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
+      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
+      // SPARC does not support i128 natively. Lower it into two i64, see below.
+      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
+          || VA.getLocVT() != MVT::i128)
+        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
       break;
     }
 
     if (VA.isRegLoc()) {
+      if (VA.needsCustom() && VA.getValVT() == MVT::f128
+          && VA.getLocVT() == MVT::i128) {
+        // Store and reload into the integer register reg and reg+1.
+        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
+        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
+        SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy());
+        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset);
+        HiPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
+                               HiPtrOff);
+        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8);
+        LoPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
+                               LoPtrOff);
+
+        // Store to %sp+BIAS+128+Offset
+        SDValue Store = DAG.getStore(Chain, DL, Arg, HiPtrOff,
+                                     MachinePointerInfo(),
+                                     false, false, 0);
+        // Load into Reg and Reg+1
+        SDValue Hi64 = DAG.getLoad(MVT::i64, DL, Store, HiPtrOff,
+                                   MachinePointerInfo(),
+                                   false, false, false, 0);
+        SDValue Lo64 = DAG.getLoad(MVT::i64, DL, Store, LoPtrOff,
+                                   MachinePointerInfo(),
+                                   false, false, false, 0);
+        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
+                                            Hi64));
+        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
+                                            Lo64));
+        continue;
+      }
+
       // The custom bit on an i32 return value indicates that it should be
       // passed in the high bits of the register.
       if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
@@ -1137,8 +1192,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
 
   // Emit all stores, make sure they occur before the call.
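  // For illustration, assuming the first integer argument register (so
  // Offset == 0) and the standard V9 stack bias of 2047, the custom f128
  // case above round-trips through the outgoing argument area roughly as:
  //
  //   stq %f0, [%sp+2175]     ! spill the fp128 at %sp+BIAS+128+Offset
  //   ldx [%sp+2175], %o0     ! high 64 bits into the first arg register
  //   ldx [%sp+2183], %o1     ! low 64 bits into the second arg register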
if (!MemOpChains.empty()) - Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, - &MemOpChains[0], MemOpChains.size()); + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); // Build a sequence of CopyToReg nodes glued together with token chain and // glue operands which copy the outgoing args into registers. The InGlue is @@ -1156,10 +1210,13 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI, // Likewise ExternalSymbol -> TargetExternalSymbol. SDValue Callee = CLI.Callee; bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS); + unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_) + ? SparcMCExpr::VK_Sparc_WPLT30 : 0); if (GlobalAddressSDNode *G = dyn_cast(Callee)) - Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy()); + Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy(), 0, + TF); else if (ExternalSymbolSDNode *E = dyn_cast(Callee)) - Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy()); + Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy(), TF); // Build the operands for the call instruction itself. SmallVector Ops; @@ -1170,11 +1227,11 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI, RegsToPass[i].second.getValueType())); // Add a register mask operand representing the call-preserved registers. - const SparcRegisterInfo *TRI = - ((const SparcTargetMachine&)getTargetMachine()).getRegisterInfo(); - const uint32_t *Mask = ((hasReturnsTwice) - ? TRI->getRTCallPreservedMask(CLI.CallConv) - : TRI->getCallPreservedMask(CLI.CallConv)); + const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo(); + const uint32_t *Mask = + ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv) + : TRI->getCallPreservedMask(DAG.getMachineFunction(), + CLI.CallConv)); assert(Mask && "Missing call preserved mask for calling convention"); Ops.push_back(DAG.getRegisterMask(Mask)); @@ -1185,7 +1242,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI, // Now the call itself. SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); - Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, &Ops[0], Ops.size()); + Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops); InGlue = Chain.getValue(1); // Revert the stack pointer immediately after the call. @@ -1198,9 +1255,15 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI, // Assign locations to each value returned by this call. SmallVector RVLocs; - CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), - DAG.getTarget(), RVLocs, *DAG.getContext()); - RVInfo.AnalyzeCallResult(CLI.Ins, CC_Sparc64); + CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs, + *DAG.getContext()); + + // Set inreg flag manually for codegen generated library calls that + // return float. + if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == nullptr) + CLI.Ins[0].Flags.setInReg(); + + RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64); // Copy all of the result registers out of their specified physreg. 
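  // For illustration: the InReg tweak above only fires for
  // compiler-synthesized libcalls (CLI.CS == nullptr), which carry no
  // frontend attributes. An fp128-to-float truncation, for instance, becomes
  // a call to _Qp_qtos whose float result comes back in %f0, while the plain
  // f32 rule in CC_Sparc64_Full would otherwise look in the odd half of the
  // register pair.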
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
@@ -1302,10 +1365,9 @@ static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
   }
 }
 
-SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
-  : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
-  Subtarget = &TM.getSubtarget<SparcSubtarget>();
-
+SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
+                                         const SparcSubtarget &STI)
+    : TargetLowering(TM), Subtarget(&STI) {
   // Set up the register classes.
   addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
   addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
@@ -1315,11 +1377,14 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
     addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
 
   // Turn FP extload into load/fextend
-  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
-  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
+  for (MVT VT : MVT::fp_valuetypes()) {
+    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
+    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
+  }
 
   // Sparc doesn't have i1 sign extending load
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  for (MVT VT : MVT::integer_valuetypes())
+    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
 
   // Turn FP truncstore into trunc + store.
   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
@@ -1403,7 +1468,8 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
     setOperationAction(ISD::BR_CC, MVT::i64, Custom);
     setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
 
-    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
+    setOperationAction(ISD::CTPOP, MVT::i64,
+                       Subtarget->usePopc() ? Legal : Expand);
     setOperationAction(ISD::CTTZ , MVT::i64, Expand);
     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
     setOperationAction(ISD::CTLZ , MVT::i64, Expand);
@@ -1414,9 +1480,29 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
   }
 
-  // FIXME: There are instructions available for ATOMIC_FENCE
-  // on SparcV8 and later.
-  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
+  // ATOMICs.
+  // FIXME: We insert fences for each atomic operation and generate
+  // sub-optimal code for PSO/TSO. Also, implement other atomicrmw operations.
+
+  setInsertFencesForAtomic(true);
+
+  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32,
+                     (Subtarget->isV9() ? Legal: Expand));
+
+
+  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
+
+  // Custom Lower Atomic LOAD/STORE
+  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
+
+  if (Subtarget->is64Bit()) {
+    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
+    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
+    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
+    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
+  }
 
   if (!Subtarget->isV9()) {
     // SparcV8 does not have FNEGD and FABSD.
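    // For illustration: pre-V9 parts only have the single-precision
    // FNEGS/FABSS/FMOVS, so the Custom lowering (see LowerFNEGorFABS further
    // down in this patch) works on the even/odd f32 halves of the register
    // pair. For a double held in %f2:%f3, fneg comes out roughly as:
    //
    //   fnegs %f2, %f2    ! flip the sign bit in the high word
    //   fmovs %f3, %f3    ! the low word passes through unchanged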
@@ -1439,7 +1525,6 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) setOperationAction(ISD::FSINCOS, MVT::f32, Expand); setOperationAction(ISD::FREM , MVT::f32, Expand); setOperationAction(ISD::FMA , MVT::f32, Expand); - setOperationAction(ISD::CTPOP, MVT::i32, Expand); setOperationAction(ISD::CTTZ , MVT::i32, Expand); setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); setOperationAction(ISD::CTLZ , MVT::i32, Expand); @@ -1467,6 +1552,13 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); setOperationAction(ISD::MULHU, MVT::i64, Expand); setOperationAction(ISD::MULHS, MVT::i64, Expand); + + setOperationAction(ISD::UMULO, MVT::i64, Custom); + setOperationAction(ISD::SMULO, MVT::i64, Custom); + + setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); + setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); + setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); } // VASTART needs to be custom lowered to use the VarArgsFrameIndex. @@ -1474,6 +1566,8 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) // VAARG needs to be lowered to not do unaligned accesses for doubles. setOperationAction(ISD::VAARG , MVT::Other, Custom); + setOperationAction(ISD::TRAP , MVT::Other, Legal); + // Use the default implementation. setOperationAction(ISD::VACOPY , MVT::Other, Expand); setOperationAction(ISD::VAEND , MVT::Other, Expand); @@ -1486,8 +1580,8 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) setStackPointerRegisterToSaveRestore(SP::O6); - if (Subtarget->isV9()) - setOperationAction(ISD::CTPOP, MVT::i32, Legal); + setOperationAction(ISD::CTPOP, MVT::i32, + Subtarget->usePopc() ? Legal : Expand); if (Subtarget->isV9() && Subtarget->hasHardQuad()) { setOperationAction(ISD::LOAD, MVT::f128, Legal); @@ -1577,12 +1671,12 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) setMinFunctionAlignment(2); - computeRegisterProperties(); + computeRegisterProperties(Subtarget->getRegisterInfo()); } const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { - default: return 0; + default: return nullptr; case SPISD::CMPICC: return "SPISD::CMPICC"; case SPISD::CMPFCC: return "SPISD::CMPFCC"; case SPISD::BRICC: return "SPISD::BRICC"; @@ -1616,7 +1710,7 @@ EVT SparcTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to /// be zero. Op is expected to be a target specific node. Used by DAG /// combiner. -void SparcTargetLowering::computeMaskedBitsForTargetNode +void SparcTargetLowering::computeKnownBitsForTargetNode (const SDValue Op, APInt &KnownZero, APInt &KnownOne, @@ -1630,10 +1724,8 @@ void SparcTargetLowering::computeMaskedBitsForTargetNode case SPISD::SELECT_ICC: case SPISD::SELECT_XCC: case SPISD::SELECT_FCC: - DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); - DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); + DAG.computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); + DAG.computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); // Only known if known in both the LHS and RHS. KnownOne &= KnownOne2; @@ -1714,7 +1806,8 @@ SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const { // Handle PIC mode first. 
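  // For illustration: with the SparcMCExpr kinds used below, a pic32 global
  // address materializes through the GOT roughly as:
  //
  //   sethi %hi(sym), %o1        ! R_SPARC_GOT22
  //   or    %o1, %lo(sym), %o1   ! R_SPARC_GOT10
  //   ld    [%l7+%o1], %o0       ! load the address out of the GOT
  //
  // with %l7 holding GLOBAL_BASE_REG, which is set up by a call-based
  // sequence as noted below.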
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { // This is the pic32 code model, the GOT is known to be smaller than 4GB. - SDValue HiLo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG); + SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22, + SparcMCExpr::VK_Sparc_GOT10, DAG); SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT); SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo); // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this @@ -1729,23 +1822,26 @@ SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const { switch(getTargetMachine().getCodeModel()) { default: llvm_unreachable("Unsupported absolute code model"); - case CodeModel::JITDefault: case CodeModel::Small: // abs32. - return makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG); + return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, + SparcMCExpr::VK_Sparc_LO, DAG); case CodeModel::Medium: { // abs44. - SDValue H44 = makeHiLoPair(Op, SPII::MO_H44, SPII::MO_M44, DAG); + SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44, + SparcMCExpr::VK_Sparc_M44, DAG); H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32)); - SDValue L44 = withTargetFlags(Op, SPII::MO_L44, DAG); + SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG); L44 = DAG.getNode(SPISD::Lo, DL, VT, L44); return DAG.getNode(ISD::ADD, DL, VT, H44, L44); } case CodeModel::Large: { // abs64. - SDValue Hi = makeHiLoPair(Op, SPII::MO_HH, SPII::MO_HM, DAG); + SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH, + SparcMCExpr::VK_Sparc_HM, DAG); Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32)); - SDValue Lo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG); + SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, + SparcMCExpr::VK_Sparc_LO, DAG); return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); } } @@ -1777,14 +1873,18 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op, TLSModel::Model model = getTargetMachine().getTLSModel(GV); if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) { - unsigned HiTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_HI22 - : SPII::MO_TLS_LDM_HI22); - unsigned LoTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_LO10 - : SPII::MO_TLS_LDM_LO10); - unsigned addTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_ADD - : SPII::MO_TLS_LDM_ADD); - unsigned callTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_CALL - : SPII::MO_TLS_LDM_CALL); + unsigned HiTF = ((model == TLSModel::GeneralDynamic) + ? SparcMCExpr::VK_Sparc_TLS_GD_HI22 + : SparcMCExpr::VK_Sparc_TLS_LDM_HI22); + unsigned LoTF = ((model == TLSModel::GeneralDynamic) + ? SparcMCExpr::VK_Sparc_TLS_GD_LO10 + : SparcMCExpr::VK_Sparc_TLS_LDM_LO10); + unsigned addTF = ((model == TLSModel::GeneralDynamic) + ? SparcMCExpr::VK_Sparc_TLS_GD_ADD + : SparcMCExpr::VK_Sparc_TLS_LDM_ADD); + unsigned callTF = ((model == TLSModel::GeneralDynamic) + ? 
SparcMCExpr::VK_Sparc_TLS_GD_CALL + : SparcMCExpr::VK_Sparc_TLS_LDM_CALL); SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG); SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); @@ -1806,12 +1906,12 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op, Ops.push_back(Callee); Ops.push_back(Symbol); Ops.push_back(DAG.getRegister(SP::O0, PtrVT)); - const uint32_t *Mask = getTargetMachine() - .getRegisterInfo()->getCallPreservedMask(CallingConv::C); + const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask( + DAG.getMachineFunction(), CallingConv::C); assert(Mask && "Missing call preserved mask for calling convention"); Ops.push_back(DAG.getRegisterMask(Mask)); Ops.push_back(InFlag); - Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, &Ops[0], Ops.size()); + Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops); InFlag = Chain.getValue(1); Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, true), DAG.getIntPtrConstant(0, true), InFlag, DL); @@ -1822,17 +1922,17 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op, return Ret; SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, - withTargetFlags(Op, SPII::MO_TLS_LDO_HIX22, DAG)); + withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG)); SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, - withTargetFlags(Op, SPII::MO_TLS_LDO_LOX10, DAG)); + withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG)); HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo, - withTargetFlags(Op, SPII::MO_TLS_LDO_ADD, DAG)); + withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG)); } if (model == TLSModel::InitialExec) { - unsigned ldTF = ((PtrVT == MVT::i64)? SPII::MO_TLS_IE_LDX - : SPII::MO_TLS_IE_LD); + unsigned ldTF = ((PtrVT == MVT::i64)? 
SparcMCExpr::VK_Sparc_TLS_IE_LDX + : SparcMCExpr::VK_Sparc_TLS_IE_LD); SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); @@ -1842,21 +1942,23 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op, MFI->setHasCalls(true); SDValue TGA = makeHiLoPair(Op, - SPII::MO_TLS_IE_HI22, SPII::MO_TLS_IE_LO10, DAG); + SparcMCExpr::VK_Sparc_TLS_IE_HI22, + SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG); SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA); SDValue Offset = DAG.getNode(SPISD::TLS_LD, DL, PtrVT, Ptr, withTargetFlags(Op, ldTF, DAG)); return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, DAG.getRegister(SP::G7, PtrVT), Offset, - withTargetFlags(Op, SPII::MO_TLS_IE_ADD, DAG)); + withTargetFlags(Op, + SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG)); } assert(model == TLSModel::LocalExec); SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, - withTargetFlags(Op, SPII::MO_TLS_LE_HIX22, DAG)); + withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG)); SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, - withTargetFlags(Op, SPII::MO_TLS_LE_LOX10, DAG)); + withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG)); SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); return DAG.getNode(ISD::ADD, DL, PtrVT, @@ -1928,13 +2030,10 @@ SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG, for (unsigned i = 0, e = numArgs; i != e; ++i) { Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG); } - TargetLowering:: - CallLoweringInfo CLI(Chain, - RetTyABI, - false, false, false, false, - 0, CallingConv::C, - false, false, true, - Callee, Args, DAG, SDLoc(Op)); + TargetLowering::CallLoweringInfo CLI(DAG); + CLI.setDebugLoc(SDLoc(Op)).setChain(Chain) + .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args), 0); + std::pair CallInfo = LowerCallTo(CLI); // chain is in second result. @@ -1960,7 +2059,7 @@ SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS, SDLoc DL, SelectionDAG &DAG) const { - const char *LibCall = 0; + const char *LibCall = nullptr; bool is64Bit = Subtarget->is64Bit(); switch(SPCC) { default: llvm_unreachable("Unhandled conditional code!"); @@ -1987,13 +2086,9 @@ SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS, Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG); Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG); - TargetLowering:: - CallLoweringInfo CLI(Chain, - RetTy, - false, false, false, false, - 0, CallingConv::C, - false, false, true, - Callee, Args, DAG, DL); + TargetLowering::CallLoweringInfo CLI(DAG); + CLI.setDebugLoc(DL).setChain(Chain) + .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0); std::pair CallInfo = LowerCallTo(CLI); @@ -2069,7 +2164,7 @@ LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1); llvm_unreachable("fpextend with non-float operand!"); - return SDValue(0, 0); + return SDValue(); } static SDValue @@ -2087,7 +2182,7 @@ LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1); llvm_unreachable("fpround to non-float!"); - return SDValue(0, 0); + return SDValue(); } static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, @@ -2108,7 +2203,7 @@ static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, // Expand if the resulting type is illegal. if (!TLI.isTypeLegal(VT)) - return SDValue(0, 0); + return SDValue(); // Otherwise, Convert the fp value to integer in an FP register. 
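  // For illustration: the SPARC convert instructions read and write FP
  // registers, so the FTOI node plus the following bitcast model a sequence
  // roughly like:
  //
  //   fdtoi %f0, %f1      ! f64 -> i32, result still in an FP register
  //   st    %f1, [%fp-8]  ! no direct FP-to-integer move, go through memory
  //   ld    [%fp-8], %o0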
if (VT == MVT::i32) @@ -2139,7 +2234,7 @@ static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, // Expand if the operand type is illegal. if (!TLI.isTypeLegal(OpVT)) - return SDValue(0, 0); + return SDValue(); // Otherwise, Convert the int value to FP in an FP register. SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0)); @@ -2157,7 +2252,7 @@ static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG, // quad floating point instructions and the resulting type is legal. if (Op.getOperand(0).getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(VT))) - return SDValue(0, 0); + return SDValue(); assert(VT == MVT::i32 || VT == MVT::i64); @@ -2178,7 +2273,7 @@ static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, // Expand if it does not involve f128 or the target has support for // quad floating point instructions and the operand type is legal. if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT))) - return SDValue(0, 0); + return SDValue(); return TLI.LowerF128Op(Op, DAG, TLI.getLibcallName(OpVT == MVT::i32 @@ -2323,7 +2418,7 @@ static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP, DAG.getConstant(regSpillArea, VT)); SDValue Ops[2] = { NewVal, Chain }; - return DAG.getMergeValues(Ops, 2, dl); + return DAG.getMergeValues(Ops, dl); } @@ -2334,43 +2429,64 @@ static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) { return Chain; } -static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) { +static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, + const SparcSubtarget *Subtarget) { MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); MFI->setFrameAddressIsTaken(true); EVT VT = Op.getValueType(); SDLoc dl(Op); unsigned FrameReg = SP::I6; - - uint64_t depth = Op.getConstantOperandVal(0); + unsigned stackBias = Subtarget->getStackPointerBias(); SDValue FrameAddr; - if (depth == 0) + + if (depth == 0) { FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); - else { - // flush first to make sure the windowed registers' values are in stack - SDValue Chain = getFLUSHW(Op, DAG); - FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT); - - for (uint64_t i = 0; i != depth; ++i) { - SDValue Ptr = DAG.getNode(ISD::ADD, - dl, MVT::i32, - FrameAddr, DAG.getIntPtrConstant(56)); - FrameAddr = DAG.getLoad(MVT::i32, dl, - Chain, - Ptr, - MachinePointerInfo(), false, false, false, 0); - } + if (Subtarget->is64Bit()) + FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, + DAG.getIntPtrConstant(stackBias)); + return FrameAddr; } + + // flush first to make sure the windowed registers' values are in stack + SDValue Chain = getFLUSHW(Op, DAG); + FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT); + + unsigned Offset = (Subtarget->is64Bit()) ? 
(stackBias + 112) : 56; + + while (depth--) { + SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, + DAG.getIntPtrConstant(Offset)); + FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo(), + false, false, false, 0); + } + if (Subtarget->is64Bit()) + FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, + DAG.getIntPtrConstant(stackBias)); return FrameAddr; } + +static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, + const SparcSubtarget *Subtarget) { + + uint64_t depth = Op.getConstantOperandVal(0); + + return getFRAMEADDR(depth, Op, DAG, Subtarget); + +} + static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, - const SparcTargetLowering &TLI) { + const SparcTargetLowering &TLI, + const SparcSubtarget *Subtarget) { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); MFI->setReturnAddressIsTaken(true); + if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG)) + return SDValue(); + EVT VT = Op.getValueType(); SDLoc dl(Op); uint64_t depth = Op.getConstantOperandVal(0); @@ -2380,25 +2496,20 @@ static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(TLI.getPointerTy())); RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT); - } else { - // Need frame address to find return address of the caller. - MFI->setFrameAddressIsTaken(true); - - // flush first to make sure the windowed registers' values are in stack - SDValue Chain = getFLUSHW(Op, DAG); - RetAddr = DAG.getCopyFromReg(Chain, dl, SP::I6, VT); - - for (uint64_t i = 0; i != depth; ++i) { - SDValue Ptr = DAG.getNode(ISD::ADD, - dl, MVT::i32, - RetAddr, - DAG.getIntPtrConstant((i == depth-1)?60:56)); - RetAddr = DAG.getLoad(MVT::i32, dl, - Chain, - Ptr, - MachinePointerInfo(), false, false, false, 0); - } + return RetAddr; } + + // Need frame address to find return address of the caller. + SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget); + + unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60; + SDValue Ptr = DAG.getNode(ISD::ADD, + dl, VT, + FrameAddr, + DAG.getIntPtrConstant(Offset)); + RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, + MachinePointerInfo(), false, false, false, 0); + return RetAddr; } @@ -2476,10 +2587,9 @@ static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG) SubRegOdd); SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1), SDValue(Lo64.getNode(), 1) }; - SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, - &OutChains[0], 2); + SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); SDValue Ops[2] = {SDValue(InFP128,0), OutChain}; - return DAG.getMergeValues(Ops, 2, dl); + return DAG.getMergeValues(Ops, dl); } // Lower a f128 store into two f64 stores. @@ -2523,28 +2633,20 @@ static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) { LoPtr, MachinePointerInfo(), false, false, alignment); - return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, - &OutChains[0], 2); + return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); } -static SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG, - const SparcTargetLowering &TLI, - bool is64Bit) { - if (Op.getValueType() == MVT::f64) - return LowerF64Op(Op, DAG, ISD::FNEG); - if (Op.getValueType() == MVT::f128) - return TLI.LowerF128Op(Op, DAG, ((is64Bit) ? 
"_Qp_neg" : "_Q_neg"), 1); - return Op; -} +static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) { + assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS) + && "invalid opcode"); -static SDValue LowerFABS(SDValue Op, SelectionDAG &DAG, bool isV9) { if (Op.getValueType() == MVT::f64) - return LowerF64Op(Op, DAG, ISD::FABS); + return LowerF64Op(Op, DAG, Op.getOpcode()); if (Op.getValueType() != MVT::f128) return Op; - // Lower fabs on f128 to fabs on f64 - // fabs f128 => fabs f64:sub_even64, fmov f64:sub_odd64 + // Lower fabs/fneg on f128 to fabs/fneg on f64 + // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64 SDLoc dl(Op); SDValue SrcReg128 = Op.getOperand(0); @@ -2555,7 +2657,7 @@ static SDValue LowerFABS(SDValue Op, SelectionDAG &DAG, bool isV9) { if (isV9) Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64); else - Hi64 = LowerF64Op(Hi64, DAG, ISD::FABS); + Hi64 = LowerF64Op(Hi64, DAG, Op.getOpcode()); SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::f128), 0); @@ -2612,21 +2714,80 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo); SDValue Ops[2] = { Dst, Carry }; - return DAG.getMergeValues(Ops, 2, dl); + return DAG.getMergeValues(Ops, dl); } +// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode() +// in LegalizeDAG.cpp except the order of arguments to the library function. +static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, + const SparcTargetLowering &TLI) +{ + unsigned opcode = Op.getOpcode(); + assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode."); + + bool isSigned = (opcode == ISD::SMULO); + EVT VT = MVT::i64; + EVT WideVT = MVT::i128; + SDLoc dl(Op); + SDValue LHS = Op.getOperand(0); + + if (LHS.getValueType() != VT) + return Op; + + SDValue ShiftAmt = DAG.getConstant(63, VT); + + SDValue RHS = Op.getOperand(1); + SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt); + SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt); + SDValue Args[] = { HiLHS, LHS, HiRHS, RHS }; + + SDValue MulResult = TLI.makeLibCall(DAG, + RTLIB::MUL_I128, WideVT, + Args, 4, isSigned, dl).first; + SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, + MulResult, DAG.getIntPtrConstant(0)); + SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, + MulResult, DAG.getIntPtrConstant(1)); + if (isSigned) { + SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt); + TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE); + } else { + TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, VT), + ISD::SETNE); + } + // MulResult is a node with an illegal type. Because such things are not + // generally permitted during this phase of legalization, ensure that + // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have + // been folded. + assert(MulResult->use_empty() && "Illegally typed node still in use!"); + + SDValue Ops[2] = { BottomHalf, TopHalf } ; + return DAG.getMergeValues(Ops, dl); +} + +static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) { + // Monotonic load/stores are legal. + if (cast(Op)->getOrdering() <= Monotonic) + return Op; + + // Otherwise, expand with a fence. 
+ return SDValue(); +} + + SDValue SparcTargetLowering:: LowerOperation(SDValue Op, SelectionDAG &DAG) const { bool hasHardQuad = Subtarget->hasHardQuad(); - bool is64Bit = Subtarget->is64Bit(); bool isV9 = Subtarget->isV9(); switch (Op.getOpcode()) { default: llvm_unreachable("Should not custom lower this!"); - case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this); - case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); + case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this, + Subtarget); + case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG, + Subtarget); case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); @@ -2660,42 +2821,91 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const { getLibcallName(RTLIB::DIV_F128), 2); case ISD::FSQRT: return LowerF128Op(Op, DAG, getLibcallName(RTLIB::SQRT_F128),1); - case ISD::FNEG: return LowerFNEG(Op, DAG, *this, is64Bit); - case ISD::FABS: return LowerFABS(Op, DAG, isV9); + case ISD::FABS: + case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9); case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this); case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this); case ISD::ADDC: case ISD::ADDE: case ISD::SUBC: case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); + case ISD::UMULO: + case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this); + case ISD::ATOMIC_LOAD: + case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG); } } MachineBasicBlock * SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *BB) const { - const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo(); - unsigned BROpcode; - unsigned CC; - DebugLoc dl = MI->getDebugLoc(); - // Figure out the conditional branch opcode to use for this select_cc. 
switch (MI->getOpcode()) { default: llvm_unreachable("Unknown SELECT_CC!"); case SP::SELECT_CC_Int_ICC: case SP::SELECT_CC_FP_ICC: case SP::SELECT_CC_DFP_ICC: case SP::SELECT_CC_QFP_ICC: - BROpcode = SP::BCOND; - break; + return expandSelectCC(MI, BB, SP::BCOND); case SP::SELECT_CC_Int_FCC: case SP::SELECT_CC_FP_FCC: case SP::SELECT_CC_DFP_FCC: case SP::SELECT_CC_QFP_FCC: - BROpcode = SP::FBCOND; - break; + return expandSelectCC(MI, BB, SP::FBCOND); + + case SP::ATOMIC_LOAD_ADD_32: + return expandAtomicRMW(MI, BB, SP::ADDrr); + case SP::ATOMIC_LOAD_ADD_64: + return expandAtomicRMW(MI, BB, SP::ADDXrr); + case SP::ATOMIC_LOAD_SUB_32: + return expandAtomicRMW(MI, BB, SP::SUBrr); + case SP::ATOMIC_LOAD_SUB_64: + return expandAtomicRMW(MI, BB, SP::SUBXrr); + case SP::ATOMIC_LOAD_AND_32: + return expandAtomicRMW(MI, BB, SP::ANDrr); + case SP::ATOMIC_LOAD_AND_64: + return expandAtomicRMW(MI, BB, SP::ANDXrr); + case SP::ATOMIC_LOAD_OR_32: + return expandAtomicRMW(MI, BB, SP::ORrr); + case SP::ATOMIC_LOAD_OR_64: + return expandAtomicRMW(MI, BB, SP::ORXrr); + case SP::ATOMIC_LOAD_XOR_32: + return expandAtomicRMW(MI, BB, SP::XORrr); + case SP::ATOMIC_LOAD_XOR_64: + return expandAtomicRMW(MI, BB, SP::XORXrr); + case SP::ATOMIC_LOAD_NAND_32: + return expandAtomicRMW(MI, BB, SP::ANDrr); + case SP::ATOMIC_LOAD_NAND_64: + return expandAtomicRMW(MI, BB, SP::ANDXrr); + + case SP::ATOMIC_SWAP_64: + return expandAtomicRMW(MI, BB, 0); + + case SP::ATOMIC_LOAD_MAX_32: + return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G); + case SP::ATOMIC_LOAD_MAX_64: + return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_G); + case SP::ATOMIC_LOAD_MIN_32: + return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LE); + case SP::ATOMIC_LOAD_MIN_64: + return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LE); + case SP::ATOMIC_LOAD_UMAX_32: + return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_GU); + case SP::ATOMIC_LOAD_UMAX_64: + return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_GU); + case SP::ATOMIC_LOAD_UMIN_32: + return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LEU); + case SP::ATOMIC_LOAD_UMIN_64: + return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LEU); } +} - CC = (SPCC::CondCodes)MI->getOperand(3).getImm(); +MachineBasicBlock* +SparcTargetLowering::expandSelectCC(MachineInstr *MI, + MachineBasicBlock *BB, + unsigned BROpcode) const { + const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); + DebugLoc dl = MI->getDebugLoc(); + unsigned CC = (SPCC::CondCodes)MI->getOperand(3).getImm(); // To "insert" a SELECT_CC instruction, we actually have to insert the diamond // control-flow pattern. The incoming instruction knows the destination vreg @@ -2719,7 +2929,7 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. 
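  // For illustration, the rewritten CFG is the usual select diamond:
  //
  //   thisMBB:  ...cc already set; conditional branch to sinkMBB
  //   copy0MBB: fallthrough block, taken when the condition is false
  //   sinkMBB:  %dst = phi [TrueVal, thisMBB], [FalseVal, copy0MBB]
  //
  // so the PHI built at the bottom of this function picks the operand
  // matching whichever edge was taken.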
  sinkMBB->splice(sinkMBB->begin(), BB,
-                 llvm::next(MachineBasicBlock::iterator(MI)),
+                 std::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
   sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
@@ -2749,6 +2959,101 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   return BB;
 }
 
+MachineBasicBlock*
+SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
+                                     MachineBasicBlock *MBB,
+                                     unsigned Opcode,
+                                     unsigned CondCode) const {
+  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+  DebugLoc DL = MI->getDebugLoc();
+
+  // MI is an atomic read-modify-write instruction of the form:
+  //
+  //   rd = atomicrmw<op> addr, rs2
+  //
+  // All three operands are registers.
+  unsigned DestReg = MI->getOperand(0).getReg();
+  unsigned AddrReg = MI->getOperand(1).getReg();
+  unsigned Rs2Reg = MI->getOperand(2).getReg();
+
+  // SelectionDAG has already inserted memory barriers before and after MI, so
+  // we simply have to implement the operation in terms of compare-and-swap.
+  //
+  //   %val0 = load %addr
+  //   loop:
+  //     %val = phi %val0, %dest
+  //     %upd = op %val, %rs2
+  //     %dest = cas %addr, %val, %upd
+  //     cmp %val, %dest
+  //     bne loop
+  //   done:
+  //
+  bool is64Bit = SP::I64RegsRegClass.hasSubClassEq(MRI.getRegClass(DestReg));
+  const TargetRegisterClass *ValueRC =
+    is64Bit ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
+  unsigned Val0Reg = MRI.createVirtualRegister(ValueRC);
+
+  BuildMI(*MBB, MI, DL, TII.get(is64Bit ? SP::LDXri : SP::LDri), Val0Reg)
+    .addReg(AddrReg).addImm(0);
+
+  // Split the basic block MBB before MI and insert the loop block in the hole.
+  MachineFunction::iterator MFI = MBB;
+  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
+  MachineFunction *MF = MBB->getParent();
+  MachineBasicBlock *LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
+  MachineBasicBlock *DoneMBB = MF->CreateMachineBasicBlock(LLVM_BB);
+  ++MFI;
+  MF->insert(MFI, LoopMBB);
+  MF->insert(MFI, DoneMBB);
+
+  // Move MI and following instructions to DoneMBB.
+  DoneMBB->splice(DoneMBB->begin(), MBB, MI, MBB->end());
+  DoneMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+  // Connect the CFG again.
+  MBB->addSuccessor(LoopMBB);
+  LoopMBB->addSuccessor(LoopMBB);
+  LoopMBB->addSuccessor(DoneMBB);
+
+  // Build the loop block.
+  unsigned ValReg = MRI.createVirtualRegister(ValueRC);
+  // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
+  unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);
+
+  BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
+    .addReg(Val0Reg).addMBB(MBB)
+    .addReg(DestReg).addMBB(LoopMBB);
+
+  if (CondCode) {
+    // This is one of the min/max operations. We need a CMPrr followed by a
+    // MOVXCC/MOVICC.
+    BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
+    BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
+      .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
+  } else if (Opcode) {
+    BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
+      .addReg(ValReg).addReg(Rs2Reg);
+  }
+
+  if (MI->getOpcode() == SP::ATOMIC_LOAD_NAND_32 ||
+      MI->getOpcode() == SP::ATOMIC_LOAD_NAND_64) {
+    unsigned TmpReg = UpdReg;
+    UpdReg = MRI.createVirtualRegister(ValueRC);
+    BuildMI(LoopMBB, DL, TII.get(SP::XORri), UpdReg).addReg(TmpReg).addImm(-1);
+  }
+
+  BuildMI(LoopMBB, DL, TII.get(is64Bit ? 
SP::CASXrr : SP::CASrr), DestReg) + .addReg(AddrReg).addReg(ValReg).addReg(UpdReg) + .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(DestReg); + BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::BPXCC : SP::BCOND)) + .addMBB(LoopMBB).addImm(SPCC::ICC_NE); + + MI->eraseFromParent(); + return DoneMBB; +} + //===----------------------------------------------------------------------===// // Sparc Inline Assembly Support //===----------------------------------------------------------------------===// @@ -2761,23 +3066,105 @@ SparcTargetLowering::getConstraintType(const std::string &Constraint) const { switch (Constraint[0]) { default: break; case 'r': return C_RegisterClass; + case 'I': // SIMM13 + return C_Other; } } return TargetLowering::getConstraintType(Constraint); } -std::pair -SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, +TargetLowering::ConstraintWeight SparcTargetLowering:: +getSingleConstraintMatchWeight(AsmOperandInfo &info, + const char *constraint) const { + ConstraintWeight weight = CW_Invalid; + Value *CallOperandVal = info.CallOperandVal; + // If we don't have a value, we can't do a match, + // but allow it at the lowest weight. + if (!CallOperandVal) + return CW_Default; + + // Look at the constraint type. + switch (*constraint) { + default: + weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); + break; + case 'I': // SIMM13 + if (ConstantInt *C = dyn_cast(info.CallOperandVal)) { + if (isInt<13>(C->getSExtValue())) + weight = CW_Constant; + } + break; + } + return weight; +} + +/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops +/// vector. If it is invalid, don't add anything to Ops. +void SparcTargetLowering:: +LowerAsmOperandForConstraint(SDValue Op, + std::string &Constraint, + std::vector &Ops, + SelectionDAG &DAG) const { + SDValue Result(nullptr, 0); + + // Only support length 1 constraints for now. + if (Constraint.length() > 1) + return; + + char ConstraintLetter = Constraint[0]; + switch (ConstraintLetter) { + default: break; + case 'I': + if (ConstantSDNode *C = dyn_cast(Op)) { + if (isInt<13>(C->getSExtValue())) { + Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType()); + break; + } + return; + } + } + + if (Result.getNode()) { + Ops.push_back(Result); + return; + } + TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); +} + +std::pair +SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, + const std::string &Constraint, MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { case 'r': return std::make_pair(0U, &SP::IntRegsRegClass); } + } else if (!Constraint.empty() && Constraint.size() <= 5 + && Constraint[0] == '{' && *(Constraint.end()-1) == '}') { + // constraint = '{r}' + // Remove the braces from around the name. 
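    // For illustration, the rewrite below maps the numeric aliases onto the
    // windowed register names, eight per group:
    //
    //   "{r9}"  -> intVal = 9,  regTypes[9/8]  = 'o', '0' + 9%8  = '1' -> "{o1}"
    //   "{r26}" -> intVal = 26, regTypes[26/8] = 'i', '0' + 26%8 = '2' -> "{i2}"
    //
    // before recursing into the default handler with the rewritten constraint.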
+ StringRef name(Constraint.data()+1, Constraint.size()-2); + // Handle register aliases: + // r0-r7 -> g0-g7 + // r8-r15 -> o0-o7 + // r16-r23 -> l0-l7 + // r24-r31 -> i0-i7 + uint64_t intVal = 0; + if (name.substr(0, 1).equals("r") + && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) { + const char regTypes[] = { 'g', 'o', 'l', 'i' }; + char regType = regTypes[intVal/8]; + char regIdx = '0' + (intVal % 8); + char tmp[] = { '{', regType, regIdx, '}', 0 }; + std::string newConstraint = std::string(tmp); + return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint, + VT); + } } - return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); + return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); } bool