//===----------------------------------------------------------------------===//
#include "SparcISelLowering.h"
+#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
-#include "MCTargetDesc/SparcBaseInfo.h"
+#include "SparcTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
MVT &LocVT, CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
- static const uint16_t RegList[] = {
+ static const MCPhysReg RegList[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
};
// Try to get first reg.
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
MVT &LocVT, CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags, CCState &State) {
- assert((LocVT == MVT::f32 || LocVT.getSizeInBits() == 64) &&
+ assert((LocVT == MVT::f32 || LocVT == MVT::f128
+ || LocVT.getSizeInBits() == 64) &&
"Can't handle non-64 bits locations");
// Stack space is allocated for all arguments starting from [%fp+BIAS+128].
- unsigned Offset = State.AllocateStack(8, 8);
+ unsigned size = (LocVT == MVT::f128) ? 16 : 8;
+ unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
+ unsigned Offset = State.AllocateStack(size, alignment);
unsigned Reg = 0;
if (LocVT == MVT::i64 && Offset < 6*8)
  // Promote integers to %i0-%i5.
  Reg = SP::I0 + Offset/8;
else if (LocVT == MVT::f32 && Offset < 16*8)
// Promote floats to %f1, %f3, ...
Reg = SP::F1 + Offset/4;
+ else if (LocVT == MVT::f128 && Offset < 16*8)
+    // Promote long doubles to %q0-%q28 (which LLVM calls Q0-Q7).
+ Reg = SP::Q0 + Offset/16;
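+    // Offset/16 indexes the quads directly: e.g. an f128 whose slot starts
+    // at Offset 16 is assigned Q1 (%q4 in the %q numbering above).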
// Promote to register when possible, otherwise use the stack slot.
if (Reg) {
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
- &RetOps[0], RetOps.size());
+ return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
// Lower return values for the 64-bit ABI.
DAG.getTarget(), RVLocs, *DAG.getContext());
// Analyze return values.
- CCInfo.AnalyzeReturn(Outs, CC_Sparc64);
+ CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
SDValue Flag;
SmallVector<SDValue, 4> RetOps(1, Chain);
// Integer return values must be sign or zero extended by the callee.
switch (VA.getLocInfo()) {
+ case CCValAssign::Full: break;
case CCValAssign::SExt:
OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
break;
  case CCValAssign::ZExt:
    OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
    break;
case CCValAssign::AExt:
OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
- default:
break;
+ default:
+ llvm_unreachable("Unknown loc info!");
}
// The custom bit on an i32 return value indicates that it should be passed
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
- &RetOps[0], RetOps.size());
+ return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
SDValue SparcTargetLowering::
const unsigned StackOffset = 92;
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ unsigned InIdx = 0;
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
CCValAssign &VA = ArgLocs[i];
- if (i == 0 && Ins[i].Flags.isSRet()) {
+ if (Ins[InIdx].Flags.isSRet()) {
+ if (InIdx != 0)
+ report_fatal_error("sparc only supports sret on the first parameter");
// Get SRet from [%fp+64].
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
// Store remaining ArgRegs to the stack if this is a varargs function.
if (isVarArg) {
- static const uint16_t ArgRegs[] = {
+ static const MCPhysReg ArgRegs[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
};
unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6);
- const uint16_t *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
+ const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
unsigned ArgOffset = CCInfo.getNextStackOffset();
if (NumAllocated == 6)
ArgOffset += StackOffset;
if (!OutChains.empty()) {
OutChains.push_back(Chain);
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], OutChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
}
}
if (!OutChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &OutChains[0], OutChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
return Chain;
}
if (CS)
return CS->hasFnAttr(Attribute::ReturnsTwice);
- const Function *CalleeFn = 0;
+ const Function *CalleeFn = nullptr;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
CalleeFn = dyn_cast<Function>(G->getGlobal());
} else if (ExternalSymbolSDNode *E =
// Emit all stores, make sure they occur before any copies into physregs.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
// Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers.
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
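+  // In PIC mode, tag the callee so it is emitted with the 30-bit PLT
+  // relocation (R_SPARC_WPLT30).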
+ unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
+ ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
- Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
+ Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
// Returns a chain & a flag for retval copy to use
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
if (InFlag.getNode())
Ops.push_back(InFlag);
- Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
{ "_Q_add", "_Q_sub", "_Q_mul", "_Q_div",
"_Q_sqrt", "_Q_neg",
"_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
- 0
+ "_Q_lltoq", "_Q_ulltoq",
+ nullptr
};
- for (const char * const *I = ABICalls; I != 0; ++I)
+ for (const char * const *I = ABICalls; *I != nullptr; ++I)
if (strcmp(CalleeName, *I) == 0)
return true;
return false;
unsigned
SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
{
- const Function *CalleeFn = 0;
+ const Function *CalleeFn = nullptr;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
CalleeFn = dyn_cast<Function>(G->getGlobal());
} else if (ExternalSymbolSDNode *E =
ArrayRef<ISD::OutputArg> Outs) {
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
const CCValAssign &VA = ArgLocs[i];
+ MVT ValTy = VA.getLocVT();
// FIXME: What about f32 arguments? C promotes them to f64 when calling
// varargs functions.
- if (!VA.isRegLoc() || VA.getLocVT() != MVT::f64)
+ if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
continue;
// The fixed arguments to a varargs function still go in FP registers.
if (Outs[VA.getValNo()].IsFixed)
CCValAssign NewVA;
// Determine the offset into the argument array.
- unsigned Offset = 8 * (VA.getLocReg() - SP::D0);
+ unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
+ unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
+ unsigned Offset = argSize * (VA.getLocReg() - firstReg);
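+      // For example, a variadic f128 that CC_Sparc64 assigned to Q2 yields
+      // Offset 32; since 32 < 48, it is remapped to the %i4/%i5 pair below.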
assert(Offset < 16*8 && "Offset out of range, bad register enum?");
if (Offset < 6*8) {
// This argument should go in %i0-%i5.
unsigned IReg = SP::I0 + Offset/8;
- // Full register, just bitconvert into i64.
- NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
- IReg, MVT::i64, CCValAssign::BCvt);
+ if (ValTy == MVT::f64)
+ // Full register, just bitconvert into i64.
+ NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
+ IReg, MVT::i64, CCValAssign::BCvt);
+ else {
+ assert(ValTy == MVT::f128 && "Unexpected type!");
+ // Full register, just bitconvert into i128 -- We will lower this into
+ // two i64s in LowerCall_64.
+ NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
+ IReg, MVT::i128, CCValAssign::BCvt);
+ }
} else {
// This needs to go to memory, we're out of integer registers.
NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
break;
case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
+      // fixupVariableFloatArgs() may create bitcasts from f128 to i128, but
+      // SPARC does not support i128 natively; lower it into two i64s below.
+ if (!VA.needsCustom() || VA.getValVT() != MVT::f128
+ || VA.getLocVT() != MVT::i128)
+ Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
break;
}
if (VA.isRegLoc()) {
+ if (VA.needsCustom() && VA.getValVT() == MVT::f128
+ && VA.getLocVT() == MVT::i128) {
+      // Store and reload into the integer registers Reg and Reg+1.
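+      // SPARC has no direct move between FP and integer registers, so the
+      // value takes a round trip through the outgoing-argument area.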
+ unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
+ unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
+ SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy());
+ SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset);
+ HiPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
+ HiPtrOff);
+ SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8);
+ LoPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
+ LoPtrOff);
+
+ // Store to %sp+BIAS+128+Offset
+ SDValue Store = DAG.getStore(Chain, DL, Arg, HiPtrOff,
+ MachinePointerInfo(),
+ false, false, 0);
+ // Load into Reg and Reg+1
+ SDValue Hi64 = DAG.getLoad(MVT::i64, DL, Store, HiPtrOff,
+ MachinePointerInfo(),
+ false, false, false, 0);
+ SDValue Lo64 = DAG.getLoad(MVT::i64, DL, Store, LoPtrOff,
+ MachinePointerInfo(),
+ false, false, false, 0);
+ RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
+ Hi64));
+ RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
+ Lo64));
+ continue;
+ }
+
// The custom bit on an i32 return value indicates that it should be
// passed in the high bits of the register.
if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
// Emit all stores, make sure they occur before the call.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
// Build a sequence of CopyToReg nodes glued together with token chain and
// glue operands which copy the outgoing args into registers. The InGlue is
// Likewise ExternalSymbol -> TargetExternalSymbol.
SDValue Callee = CLI.Callee;
bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
+ unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
+ ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy());
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy(), 0,
+ TF);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
- Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy());
+ Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy(), TF);
// Build the operands for the call instruction itself.
SmallVector<SDValue, 8> Ops;
// Now the call itself.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
InGlue = Chain.getValue(1);
// Revert the stack pointer immediately after the call.
SmallVector<CCValAssign, 16> RVLocs;
CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
DAG.getTarget(), RVLocs, *DAG.getContext());
- RVInfo.AnalyzeCallResult(CLI.Ins, CC_Sparc64);
+
+  // Set the inreg flag manually for codegen-generated library calls that
+  // return float.
+ if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == nullptr)
+ CLI.Ins[0].Flags.setInReg();
+
+ RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
}
SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
- : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
+ : TargetLowering(TM, new SparcELFTargetObjectFile()) {
Subtarget = &TM.getSubtarget<SparcSubtarget>();
// Set up the register classes.
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
// ... nor does SparcV9.
if (Subtarget->is64Bit()) {
setOperationAction(ISD::UREM, MVT::i64, Expand);
// Custom expand fp<->sint
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
// Custom Expand fp<->uint
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::BITCAST, MVT::f32, Expand);
setOperationAction(ISD::BITCAST, MVT::i32, Expand);
setOperationAction(ISD::BR_CC, MVT::i64, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
- setOperationAction(ISD::CTPOP, MVT::i64, Legal);
+ setOperationAction(ISD::CTPOP, MVT::i64,
+ Subtarget->usePopc() ? Legal : Expand);
setOperationAction(ISD::CTTZ , MVT::i64, Expand);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
setOperationAction(ISD::CTLZ , MVT::i64, Expand);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+ setOperationAction(ISD::ROTL , MVT::i64, Expand);
+ setOperationAction(ISD::ROTR , MVT::i64, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
}
- // FIXME: There are instructions available for ATOMIC_FENCE
- // on SparcV8 and later.
- setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
+ // ATOMICs.
+  // FIXME: We insert fences for each atomic operation and generate
+  // sub-optimal code for PSO/TSO. Also, implement other atomicrmw operations.
+
+ setInsertFencesForAtomic(true);
+
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32,
+                     (Subtarget->isV9() ? Legal : Expand));
+
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
+
+ // Custom Lower Atomic LOAD/STORE
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
+
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
+ }
if (!Subtarget->isV9()) {
// SparcV8 does not have FNEGD and FABSD.
setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
setOperationAction(ISD::FREM , MVT::f32, Expand);
setOperationAction(ISD::FMA , MVT::f32, Expand);
- setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::CTTZ , MVT::i32, Expand);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::CTLZ , MVT::i32, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::MULHU, MVT::i64, Expand);
+ setOperationAction(ISD::MULHS, MVT::i64, Expand);
+
+ setOperationAction(ISD::UMULO, MVT::i64, Custom);
+ setOperationAction(ISD::SMULO, MVT::i64, Custom);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
+ }
+
// VASTART needs to be custom lowered to use the VarArgsFrameIndex.
setOperationAction(ISD::VASTART , MVT::Other, Custom);
// VAARG needs to be lowered to not do unaligned accesses for doubles.
setOperationAction(ISD::VAARG , MVT::Other, Custom);
+ setOperationAction(ISD::TRAP , MVT::Other, Legal);
+
// Use the default implementation.
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setStackPointerRegisterToSaveRestore(SP::O6);
- if (Subtarget->isV9())
- setOperationAction(ISD::CTPOP, MVT::i32, Legal);
+ setOperationAction(ISD::CTPOP, MVT::i32,
+ Subtarget->usePopc() ? Legal : Expand);
if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
setOperationAction(ISD::LOAD, MVT::f128, Legal);
setOperationAction(ISD::FNEG, MVT::f128, Custom);
setOperationAction(ISD::FABS, MVT::f128, Custom);
}
+
+ if (!Subtarget->is64Bit()) {
+ setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
+ setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
+ setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
+ setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
+ }
+
} else {
// Custom legalize f128 operations.
setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
+ setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
+ setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
+ setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
+ setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
+ setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
+ setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
+ setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
+ setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
- default: return 0;
+ default: return nullptr;
case SPISD::CMPICC: return "SPISD::CMPICC";
case SPISD::CMPFCC: return "SPISD::CMPFCC";
case SPISD::BRICC: return "SPISD::BRICC";
case SPISD::Lo: return "SPISD::Lo";
case SPISD::FTOI: return "SPISD::FTOI";
case SPISD::ITOF: return "SPISD::ITOF";
+ case SPISD::FTOX: return "SPISD::FTOX";
+ case SPISD::XTOF: return "SPISD::XTOF";
case SPISD::CALL: return "SPISD::CALL";
case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
}
}
+EVT SparcTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
+ if (!VT.isVector())
+ return MVT::i32;
+ return VT.changeVectorElementTypeToInteger();
+}
+
/// computeKnownBitsForTargetNode - Determine which bits of Op are known to
/// be zero or one. Op is expected to be a target specific node. Used by DAG
/// combiner.
-void SparcTargetLowering::computeMaskedBitsForTargetNode
+void SparcTargetLowering::computeKnownBitsForTargetNode
(const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
case SPISD::SELECT_ICC:
case SPISD::SELECT_XCC:
case SPISD::SELECT_FCC:
- DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
- DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ DAG.computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
// Handle PIC mode first.
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
// This is the pic32 code model, the GOT is known to be smaller than 4GB.
- SDValue HiLo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
+ SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
+ SparcMCExpr::VK_Sparc_GOT10, DAG);
SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo);
// GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
switch(getTargetMachine().getCodeModel()) {
default:
llvm_unreachable("Unsupported absolute code model");
- case CodeModel::JITDefault:
case CodeModel::Small:
// abs32.
- return makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
+ return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
+ SparcMCExpr::VK_Sparc_LO, DAG);
case CodeModel::Medium: {
// abs44.
- SDValue H44 = makeHiLoPair(Op, SPII::MO_H44, SPII::MO_M44, DAG);
+ SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
+ SparcMCExpr::VK_Sparc_M44, DAG);
H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32));
- SDValue L44 = withTargetFlags(Op, SPII::MO_L44, DAG);
+ SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
}
case CodeModel::Large: {
// abs64.
- SDValue Hi = makeHiLoPair(Op, SPII::MO_HH, SPII::MO_HM, DAG);
+ SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
+ SparcMCExpr::VK_Sparc_HM, DAG);
Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32));
- SDValue Lo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
+ SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
+ SparcMCExpr::VK_Sparc_LO, DAG);
return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
}
TLSModel::Model model = getTargetMachine().getTLSModel(GV);
if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
- unsigned HiTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_HI22
- : SPII::MO_TLS_LDM_HI22);
- unsigned LoTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_LO10
- : SPII::MO_TLS_LDM_LO10);
- unsigned addTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_ADD
- : SPII::MO_TLS_LDM_ADD);
- unsigned callTF = ((model == TLSModel::GeneralDynamic)? SPII::MO_TLS_GD_CALL
- : SPII::MO_TLS_LDM_CALL);
+ unsigned HiTF = ((model == TLSModel::GeneralDynamic)
+ ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
+ : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
+ unsigned LoTF = ((model == TLSModel::GeneralDynamic)
+ ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
+ : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
+ unsigned addTF = ((model == TLSModel::GeneralDynamic)
+ ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
+ : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
+ unsigned callTF = ((model == TLSModel::GeneralDynamic)
+ ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
+ : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);
SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
Ops.push_back(InFlag);
- Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, true),
DAG.getIntPtrConstant(0, true), InFlag, DL);
return Ret;
SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
- withTargetFlags(Op, SPII::MO_TLS_LDO_HIX22, DAG));
+ withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
- withTargetFlags(Op, SPII::MO_TLS_LDO_LOX10, DAG));
+ withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
- withTargetFlags(Op, SPII::MO_TLS_LDO_ADD, DAG));
+ withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
}
if (model == TLSModel::InitialExec) {
- unsigned ldTF = ((PtrVT == MVT::i64)? SPII::MO_TLS_IE_LDX
- : SPII::MO_TLS_IE_LD);
+ unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
+ : SparcMCExpr::VK_Sparc_TLS_IE_LD);
SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
MFI->setHasCalls(true);
SDValue TGA = makeHiLoPair(Op,
- SPII::MO_TLS_IE_HI22, SPII::MO_TLS_IE_LO10, DAG);
+ SparcMCExpr::VK_Sparc_TLS_IE_HI22,
+ SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
SDValue Offset = DAG.getNode(SPISD::TLS_LD,
DL, PtrVT, Ptr,
withTargetFlags(Op, ldTF, DAG));
return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
DAG.getRegister(SP::G7, PtrVT), Offset,
- withTargetFlags(Op, SPII::MO_TLS_IE_ADD, DAG));
+ withTargetFlags(Op,
+ SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
}
assert(model == TLSModel::LocalExec);
SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
- withTargetFlags(Op, SPII::MO_TLS_LE_HIX22, DAG));
+ withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
- withTargetFlags(Op, SPII::MO_TLS_LE_LOX10, DAG));
+ withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
return DAG.getNode(ISD::ADD, DL, PtrVT,
for (unsigned i = 0, e = numArgs; i != e; ++i) {
Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
}
- TargetLowering::
- CallLoweringInfo CLI(Chain,
- RetTyABI,
- false, false, false, false,
- 0, CallingConv::C,
- false, false, true,
- Callee, Args, DAG, SDLoc(Op));
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
+ .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args), 0);
+
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
// chain is in second result.
SDLoc DL,
SelectionDAG &DAG) const {
- const char *LibCall = 0;
+ const char *LibCall = nullptr;
bool is64Bit = Subtarget->is64Bit();
switch(SPCC) {
default: llvm_unreachable("Unhandled conditional code!");
Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
- TargetLowering::
- CallLoweringInfo CLI(Chain,
- RetTy,
- false, false, false, false,
- 0, CallingConv::C,
- false, false, true,
- Callee, Args, DAG, DL);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(DL).setChain(Chain)
+ .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
llvm_unreachable("fpextend with non-float operand!");
- return SDValue(0, 0);
+ return SDValue();
}
static SDValue
TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
llvm_unreachable("fpround to non-float!");
- return SDValue(0, 0);
+ return SDValue();
}
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
const SparcTargetLowering &TLI,
bool hasHardQuad) {
SDLoc dl(Op);
- // Convert the fp value to integer in an FP register.
- assert(Op.getValueType() == MVT::i32);
+ EVT VT = Op.getValueType();
+ assert(VT == MVT::i32 || VT == MVT::i64);
+
+  // Expand f128 operations to fp128 ABI calls.
+ if (Op.getOperand(0).getValueType() == MVT::f128
+ && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
+ const char *libName = TLI.getLibcallName(VT == MVT::i32
+ ? RTLIB::FPTOSINT_F128_I32
+ : RTLIB::FPTOSINT_F128_I64);
+ return TLI.LowerF128Op(Op, DAG, libName, 1);
+ }
- if (Op.getOperand(0).getValueType() == MVT::f128 && !hasHardQuad)
- return TLI.LowerF128Op(Op, DAG,
- TLI.getLibcallName(RTLIB::FPTOSINT_F128_I32), 1);
+ // Expand if the resulting type is illegal.
+ if (!TLI.isTypeLegal(VT))
+ return SDValue();
- Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
- return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
+  // Otherwise, convert the fp value to integer in an FP register.
+ if (VT == MVT::i32)
+ Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
+ else
+ Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
+
+ return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
const SparcTargetLowering &TLI,
bool hasHardQuad) {
SDLoc dl(Op);
- assert(Op.getOperand(0).getValueType() == MVT::i32);
- SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
- // Convert the int value to FP in an FP register.
- if (Op.getValueType() == MVT::f128 && !hasHardQuad)
- return TLI.LowerF128Op(Op, DAG,
- TLI.getLibcallName(RTLIB::SINTTOFP_I32_F128), 1);
- return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp);
+ EVT OpVT = Op.getOperand(0).getValueType();
+  assert(OpVT == MVT::i32 || OpVT == MVT::i64);
+
+ EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
+
+ // Expand f128 operations to fp128 ABI calls.
+ if (Op.getValueType() == MVT::f128
+ && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
+ const char *libName = TLI.getLibcallName(OpVT == MVT::i32
+ ? RTLIB::SINTTOFP_I32_F128
+ : RTLIB::SINTTOFP_I64_F128);
+ return TLI.LowerF128Op(Op, DAG, libName, 1);
+ }
+
+ // Expand if the operand type is illegal.
+ if (!TLI.isTypeLegal(OpVT))
+ return SDValue();
+
+  // Otherwise, convert the int value to FP in an FP register.
+ SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
+ unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
+ return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}
static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
const SparcTargetLowering &TLI,
bool hasHardQuad) {
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+
// Expand if it does not involve f128 or the target has support for
- // quad floating point instructions.
- if (Op.getOperand(0).getValueType() != MVT::f128 || hasHardQuad)
- return SDValue(0, 0);
+ // quad floating point instructions and the resulting type is legal.
+ if (Op.getOperand(0).getValueType() != MVT::f128 ||
+ (hasHardQuad && TLI.isTypeLegal(VT)))
+ return SDValue();
- SDLoc dl(Op);
- assert(Op.getValueType() == MVT::i32);
+ assert(VT == MVT::i32 || VT == MVT::i64);
return TLI.LowerF128Op(Op, DAG,
- TLI.getLibcallName(RTLIB::FPTOUINT_F128_I32), 1);
+ TLI.getLibcallName(VT == MVT::i32
+ ? RTLIB::FPTOUINT_F128_I32
+ : RTLIB::FPTOUINT_F128_I64),
+ 1);
}
-
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
const SparcTargetLowering &TLI,
bool hasHardQuad) {
- // Expand if it does not involve f128 or the target has support for
- // quad floating point instructions.
- if (Op.getValueType() != MVT::f128 || hasHardQuad)
- return SDValue(0, 0);
-
SDLoc dl(Op);
- assert(Op.getOperand(0).getValueType() == MVT::i32);
+ EVT OpVT = Op.getOperand(0).getValueType();
+ assert(OpVT == MVT::i32 || OpVT == MVT::i64);
+
+ // Expand if it does not involve f128 or the target has support for
+ // quad floating point instructions and the operand type is legal.
+ if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
+ return SDValue();
return TLI.LowerF128Op(Op, DAG,
- TLI.getLibcallName(RTLIB::UINTTOFP_I32_F128), 1);
+ TLI.getLibcallName(OpVT == MVT::i32
+ ? RTLIB::UINTTOFP_I32_F128
+ : RTLIB::UINTTOFP_I64_F128),
+ 1);
}
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8);
}
-static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
+ const SparcSubtarget *Subtarget) {
SDValue Chain = Op.getOperand(0); // Legalize the chain.
SDValue Size = Op.getOperand(1); // Legalize the size.
+ EVT VT = Size->getValueType(0);
SDLoc dl(Op);
unsigned SPReg = SP::O6;
- SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
- SDValue NewSP = DAG.getNode(ISD::SUB, dl, MVT::i32, SP, Size); // Value
+ SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
+ SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
// The resultant pointer is actually 16 words from the bottom of the stack,
// to provide a register spill area.
- SDValue NewVal = DAG.getNode(ISD::ADD, dl, MVT::i32, NewSP,
- DAG.getConstant(96, MVT::i32));
+ unsigned regSpillArea = Subtarget->is64Bit() ? 128 : 96;
+ regSpillArea += Subtarget->getStackPointerBias();
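+  // (Presumably 96 covers the minimal 32-bit frame: the 64-byte window save
+  // area, the 4-byte struct-return slot, and six 4-byte argument slots,
+  // rounded up to 8-byte alignment. 128 is the full 64-bit save area.)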
+
+ SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
+ DAG.getConstant(regSpillArea, VT));
SDValue Ops[2] = { NewVal, Chain };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
return Chain;
}
-static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
+static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
+ const SparcSubtarget *Subtarget) {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
SDLoc dl(Op);
unsigned FrameReg = SP::I6;
-
- uint64_t depth = Op.getConstantOperandVal(0);
+ unsigned stackBias = Subtarget->getStackPointerBias();
SDValue FrameAddr;
- if (depth == 0)
+
+ if (depth == 0) {
FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
- else {
- // flush first to make sure the windowed registers' values are in stack
- SDValue Chain = getFLUSHW(Op, DAG);
- FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
-
- for (uint64_t i = 0; i != depth; ++i) {
- SDValue Ptr = DAG.getNode(ISD::ADD,
- dl, MVT::i32,
- FrameAddr, DAG.getIntPtrConstant(56));
- FrameAddr = DAG.getLoad(MVT::i32, dl,
- Chain,
- Ptr,
- MachinePointerInfo(), false, false, false, 0);
- }
+ if (Subtarget->is64Bit())
+ FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
+ DAG.getIntPtrConstant(stackBias));
+ return FrameAddr;
}
+
+ // flush first to make sure the windowed registers' values are in stack
+  // Flush first to make sure the windowed registers' values are on the stack.
+ FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
+
+ unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
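+  // The saved %fp (%i6) occupies slot 14 of the 16-slot window save area,
+  // hence 14 * wordsize: 56 bytes in 32-bit mode, BIAS+112 in 64-bit mode.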
+
+ while (depth--) {
+ SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
+ DAG.getIntPtrConstant(Offset));
+ FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo(),
+ false, false, false, 0);
+ }
+ if (Subtarget->is64Bit())
+ FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
+ DAG.getIntPtrConstant(stackBias));
return FrameAddr;
}
+
+static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
+ const SparcSubtarget *Subtarget) {
+  uint64_t depth = Op.getConstantOperandVal(0);
+  return getFRAMEADDR(depth, Op, DAG, Subtarget);
+}
+
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
- const SparcTargetLowering &TLI) {
+ const SparcTargetLowering &TLI,
+ const SparcSubtarget *Subtarget) {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setReturnAddressIsTaken(true);
+ if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
EVT VT = Op.getValueType();
SDLoc dl(Op);
uint64_t depth = Op.getConstantOperandVal(0);
unsigned RetReg = MF.addLiveIn(SP::I7,
TLI.getRegClassFor(TLI.getPointerTy()));
RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
- } else {
- // Need frame address to find return address of the caller.
- MFI->setFrameAddressIsTaken(true);
-
- // flush first to make sure the windowed registers' values are in stack
- SDValue Chain = getFLUSHW(Op, DAG);
- RetAddr = DAG.getCopyFromReg(Chain, dl, SP::I6, VT);
-
- for (uint64_t i = 0; i != depth; ++i) {
- SDValue Ptr = DAG.getNode(ISD::ADD,
- dl, MVT::i32,
- RetAddr,
- DAG.getIntPtrConstant((i == depth-1)?60:56));
- RetAddr = DAG.getLoad(MVT::i32, dl,
- Chain,
- Ptr,
- MachinePointerInfo(), false, false, false, 0);
- }
+ return RetAddr;
}
+
+ // Need frame address to find return address of the caller.
+ SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget);
+
+ unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
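+  // The saved return address (%i7) is slot 15 of the window save area:
+  // 15 * 4 = 60 bytes in 32-bit mode, 15 * 8 = 120 in 64-bit mode.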
+ SDValue Ptr = DAG.getNode(ISD::ADD,
+ dl, VT,
+ FrameAddr,
+ DAG.getIntPtrConstant(Offset));
+ RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr,
+ MachinePointerInfo(), false, false, false, 0);
+
return RetAddr;
}
SubRegOdd);
SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
SDValue(Lo64.getNode(), 1) };
- SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], 2);
+ SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
// Lower a f128 store into two f64 stores.
LoPtr,
MachinePointerInfo(),
false, false, alignment);
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], 2);
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
-static SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG,
- const SparcTargetLowering &TLI,
- bool is64Bit) {
- if (Op.getValueType() == MVT::f64)
- return LowerF64Op(Op, DAG, ISD::FNEG);
- if (Op.getValueType() == MVT::f128)
- return TLI.LowerF128Op(Op, DAG, ((is64Bit) ? "_Qp_neg" : "_Q_neg"), 1);
- return Op;
-}
+static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
+ assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
+ && "invalid opcode");
-static SDValue LowerFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
if (Op.getValueType() == MVT::f64)
- return LowerF64Op(Op, DAG, ISD::FABS);
+ return LowerF64Op(Op, DAG, Op.getOpcode());
if (Op.getValueType() != MVT::f128)
return Op;
- // Lower fabs on f128 to fabs on f64
- // fabs f128 => fabs f64:sub_even64, fmov f64:sub_odd64
+ // Lower fabs/fneg on f128 to fabs/fneg on f64
+ // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
SDLoc dl(Op);
SDValue SrcReg128 = Op.getOperand(0);
if (isV9)
Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
else
- Hi64 = LowerF64Op(Hi64, DAG, ISD::FABS);
+ Hi64 = LowerF64Op(Hi64, DAG, Op.getOpcode());
SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
dl, MVT::f128), 0);
SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
SDValue Ops[2] = { Dst, Carry };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
+}
+
+// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
+// in LegalizeDAG.cpp except the order of arguments to the library function.
+static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI)
+{
+ unsigned opcode = Op.getOpcode();
+ assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
+
+ bool isSigned = (opcode == ISD::SMULO);
+ EVT VT = MVT::i64;
+ EVT WideVT = MVT::i128;
+ SDLoc dl(Op);
+ SDValue LHS = Op.getOperand(0);
+
+ if (LHS.getValueType() != VT)
+ return Op;
+
+ SDValue ShiftAmt = DAG.getConstant(63, VT);
+
+ SDValue RHS = Op.getOperand(1);
+ SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
+ SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
+ SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
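+  // Each i64 operand is passed to the i128 libcall as a (hi, lo) pair of
+  // i64 halves; SPARC is big-endian, so the high word goes first.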
+
+ SDValue MulResult = TLI.makeLibCall(DAG,
+ RTLIB::MUL_I128, WideVT,
+ Args, 4, isSigned, dl).first;
+ SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
+ MulResult, DAG.getIntPtrConstant(0));
+ SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
+ MulResult, DAG.getIntPtrConstant(1));
+ if (isSigned) {
+ SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
+ TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
+ } else {
+ TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, VT),
+ ISD::SETNE);
+ }
+ // MulResult is a node with an illegal type. Because such things are not
+ // generally permitted during this phase of legalization, delete the
+ // node. The above EXTRACT_ELEMENT nodes should have been folded.
+ DAG.DeleteNode(MulResult.getNode());
+
+  SDValue Ops[2] = { BottomHalf, TopHalf };
+ return DAG.getMergeValues(Ops, dl);
}
+static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
+ // Monotonic load/stores are legal.
+ if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
+ return Op;
+
+ // Otherwise, expand with a fence.
+ return SDValue();
+}
+
SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
bool hasHardQuad = Subtarget->hasHardQuad();
- bool is64Bit = Subtarget->is64Bit();
bool isV9 = Subtarget->isV9();
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
- case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this);
- case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
+ Subtarget);
+ case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
+ Subtarget);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
hasHardQuad);
case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
case ISD::VAARG: return LowerVAARG(Op, DAG);
- case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
+ case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
+ Subtarget);
case ISD::LOAD: return LowerF128Load(Op, DAG);
case ISD::STORE: return LowerF128Store(Op, DAG);
getLibcallName(RTLIB::DIV_F128), 2);
case ISD::FSQRT: return LowerF128Op(Op, DAG,
getLibcallName(RTLIB::SQRT_F128),1);
- case ISD::FNEG: return LowerFNEG(Op, DAG, *this, is64Bit);
- case ISD::FABS: return LowerFABS(Op, DAG, isV9);
+ case ISD::FABS:
+ case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
case ISD::ADDC:
case ISD::ADDE:
case ISD::SUBC:
case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
+ case ISD::UMULO:
+ case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
+ case ISD::ATOMIC_LOAD:
+ case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
}
}
MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
- unsigned BROpcode;
- unsigned CC;
- DebugLoc dl = MI->getDebugLoc();
- // Figure out the conditional branch opcode to use for this select_cc.
switch (MI->getOpcode()) {
default: llvm_unreachable("Unknown SELECT_CC!");
case SP::SELECT_CC_Int_ICC:
case SP::SELECT_CC_FP_ICC:
case SP::SELECT_CC_DFP_ICC:
case SP::SELECT_CC_QFP_ICC:
- BROpcode = SP::BCOND;
- break;
+ return expandSelectCC(MI, BB, SP::BCOND);
case SP::SELECT_CC_Int_FCC:
case SP::SELECT_CC_FP_FCC:
case SP::SELECT_CC_DFP_FCC:
case SP::SELECT_CC_QFP_FCC:
- BROpcode = SP::FBCOND;
- break;
+ return expandSelectCC(MI, BB, SP::FBCOND);
+
+ case SP::ATOMIC_LOAD_ADD_32:
+ return expandAtomicRMW(MI, BB, SP::ADDrr);
+ case SP::ATOMIC_LOAD_ADD_64:
+ return expandAtomicRMW(MI, BB, SP::ADDXrr);
+ case SP::ATOMIC_LOAD_SUB_32:
+ return expandAtomicRMW(MI, BB, SP::SUBrr);
+ case SP::ATOMIC_LOAD_SUB_64:
+ return expandAtomicRMW(MI, BB, SP::SUBXrr);
+ case SP::ATOMIC_LOAD_AND_32:
+ return expandAtomicRMW(MI, BB, SP::ANDrr);
+ case SP::ATOMIC_LOAD_AND_64:
+ return expandAtomicRMW(MI, BB, SP::ANDXrr);
+ case SP::ATOMIC_LOAD_OR_32:
+ return expandAtomicRMW(MI, BB, SP::ORrr);
+ case SP::ATOMIC_LOAD_OR_64:
+ return expandAtomicRMW(MI, BB, SP::ORXrr);
+ case SP::ATOMIC_LOAD_XOR_32:
+ return expandAtomicRMW(MI, BB, SP::XORrr);
+ case SP::ATOMIC_LOAD_XOR_64:
+ return expandAtomicRMW(MI, BB, SP::XORXrr);
+ case SP::ATOMIC_LOAD_NAND_32:
+ return expandAtomicRMW(MI, BB, SP::ANDrr);
+ case SP::ATOMIC_LOAD_NAND_64:
+ return expandAtomicRMW(MI, BB, SP::ANDXrr);
+
+ case SP::ATOMIC_SWAP_64:
+ return expandAtomicRMW(MI, BB, 0);
+
+ case SP::ATOMIC_LOAD_MAX_32:
+ return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
+ case SP::ATOMIC_LOAD_MAX_64:
+ return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_G);
+ case SP::ATOMIC_LOAD_MIN_32:
+ return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LE);
+ case SP::ATOMIC_LOAD_MIN_64:
+ return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LE);
+ case SP::ATOMIC_LOAD_UMAX_32:
+ return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_GU);
+ case SP::ATOMIC_LOAD_UMAX_64:
+ return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_GU);
+ case SP::ATOMIC_LOAD_UMIN_32:
+ return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LEU);
+ case SP::ATOMIC_LOAD_UMIN_64:
+ return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LEU);
}
+}
- CC = (SPCC::CondCodes)MI->getOperand(3).getImm();
+MachineBasicBlock*
+SparcTargetLowering::expandSelectCC(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned BROpcode) const {
+ const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
+ DebugLoc dl = MI->getDebugLoc();
+ unsigned CC = (SPCC::CondCodes)MI->getOperand(3).getImm();
// To "insert" a SELECT_CC instruction, we actually have to insert the diamond
// control-flow pattern. The incoming instruction knows the destination vreg
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
+ std::next(MachineBasicBlock::iterator(MI)),
BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
return BB;
}
+MachineBasicBlock*
+SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned Opcode,
+ unsigned CondCode) const {
+ const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ // MI is an atomic read-modify-write instruction of the form:
+ //
+ // rd = atomicrmw<op> addr, rs2
+ //
+ // All three operands are registers.
+ unsigned DestReg = MI->getOperand(0).getReg();
+ unsigned AddrReg = MI->getOperand(1).getReg();
+ unsigned Rs2Reg = MI->getOperand(2).getReg();
+
+  // SelectionDAG has already inserted memory barriers before and after MI, so
+  // we simply have to implement the operation in terms of compare-and-swap.
+ //
+ // %val0 = load %addr
+ // loop:
+ // %val = phi %val0, %dest
+ // %upd = op %val, %rs2
+ // %dest = cas %addr, %val, %upd
+ // cmp %val, %dest
+ // bne loop
+ // done:
+ //
+ bool is64Bit = SP::I64RegsRegClass.hasSubClassEq(MRI.getRegClass(DestReg));
+ const TargetRegisterClass *ValueRC =
+ is64Bit ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
+ unsigned Val0Reg = MRI.createVirtualRegister(ValueRC);
+
+ BuildMI(*MBB, MI, DL, TII.get(is64Bit ? SP::LDXri : SP::LDri), Val0Reg)
+ .addReg(AddrReg).addImm(0);
+
+ // Split the basic block MBB before MI and insert the loop block in the hole.
+ MachineFunction::iterator MFI = MBB;
+ const BasicBlock *LLVM_BB = MBB->getBasicBlock();
+ MachineFunction *MF = MBB->getParent();
+ MachineBasicBlock *LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *DoneMBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ ++MFI;
+ MF->insert(MFI, LoopMBB);
+ MF->insert(MFI, DoneMBB);
+
+ // Move MI and following instructions to DoneMBB.
+ DoneMBB->splice(DoneMBB->begin(), MBB, MI, MBB->end());
+ DoneMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+ // Connect the CFG again.
+ MBB->addSuccessor(LoopMBB);
+ LoopMBB->addSuccessor(LoopMBB);
+ LoopMBB->addSuccessor(DoneMBB);
+
+ // Build the loop block.
+ unsigned ValReg = MRI.createVirtualRegister(ValueRC);
+ // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
+ unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);
+
+ BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
+ .addReg(Val0Reg).addMBB(MBB)
+ .addReg(DestReg).addMBB(LoopMBB);
+
+ if (CondCode) {
+ // This is one of the min/max operations. We need a CMPrr followed by a
+ // MOVXCC/MOVICC.
+ BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
+ BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
+ .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
+ } else if (Opcode) {
+ BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
+ .addReg(ValReg).addReg(Rs2Reg);
+ }
+
+ if (MI->getOpcode() == SP::ATOMIC_LOAD_NAND_32 ||
+ MI->getOpcode() == SP::ATOMIC_LOAD_NAND_64) {
+ unsigned TmpReg = UpdReg;
+ UpdReg = MRI.createVirtualRegister(ValueRC);
+ BuildMI(LoopMBB, DL, TII.get(SP::XORri), UpdReg).addReg(TmpReg).addImm(-1);
+ }
+
+ BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::CASXrr : SP::CASrr), DestReg)
+ .addReg(AddrReg).addReg(ValReg).addReg(UpdReg)
+ .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
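+  // CAS leaves the memory's old value in DestReg whether or not the swap
+  // succeeded; comparing it with ValReg below detects interference.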
+ BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(DestReg);
+ BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::BPXCC : SP::BCOND))
+ .addMBB(LoopMBB).addImm(SPCC::ICC_NE);
+
+ MI->eraseFromParent();
+ return DoneMBB;
+}
+
//===----------------------------------------------------------------------===//
// Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//
switch (Constraint[0]) {
default: break;
case 'r': return C_RegisterClass;
+ case 'I': // SIMM13
+ return C_Other;
}
}
return TargetLowering::getConstraintType(Constraint);
}
+TargetLowering::ConstraintWeight SparcTargetLowering::
+getSingleConstraintMatchWeight(AsmOperandInfo &info,
+ const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (!CallOperandVal)
+ return CW_Default;
+
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+ case 'I': // SIMM13
+ if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
+ if (isInt<13>(C->getSExtValue()))
+ weight = CW_Constant;
+ }
+ break;
+ }
+ return weight;
+}
+
+/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+/// vector. If it is invalid, don't add anything to Ops.
+void SparcTargetLowering::
+LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
+ SDValue Result(nullptr, 0);
+
+ // Only support length 1 constraints for now.
+ if (Constraint.length() > 1)
+ return;
+
+ char ConstraintLetter = Constraint[0];
+ switch (ConstraintLetter) {
+ default: break;
+ case 'I':
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (isInt<13>(C->getSExtValue())) {
+ Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
+ break;
+ }
+ return;
+ }
+ }
+
+ if (Result.getNode()) {
+ Ops.push_back(Result);
+ return;
+ }
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
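+// A hypothetical use of the 'I' constraint (not part of this patch), where
+// the immediate must fit in a signed 13-bit field:
+//   asm("add %1, %2, %0" : "=r"(res) : "r"(a), "I"(42));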
std::pair<unsigned, const TargetRegisterClass*>
SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
MVT VT) const {
case 'r':
return std::make_pair(0U, &SP::IntRegsRegClass);
}
+ } else if (!Constraint.empty() && Constraint.size() <= 5
+ && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
+ // constraint = '{r<d>}'
+ // Remove the braces from around the name.
+ StringRef name(Constraint.data()+1, Constraint.size()-2);
+ // Handle register aliases:
+ // r0-r7 -> g0-g7
+ // r8-r15 -> o0-o7
+ // r16-r23 -> l0-l7
+ // r24-r31 -> i0-i7
+ uint64_t intVal = 0;
+ if (name.substr(0, 1).equals("r")
+ && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
+ const char regTypes[] = { 'g', 'o', 'l', 'i' };
+ char regType = regTypes[intVal/8];
+ char regIdx = '0' + (intVal % 8);
+ char tmp[] = { '{', regType, regIdx, '}', 0 };
+ std::string newConstraint = std::string(tmp);
+ return TargetLowering::getRegForInlineAsmConstraint(newConstraint, VT);
+ }
}
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
// The Sparc target isn't yet aware of offsets.
return false;
}
+
+void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue>& Results,
+ SelectionDAG &DAG) const {
+
+ SDLoc dl(N);
+
+ RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
+
+ switch (N->getOpcode()) {
+ default:
+ llvm_unreachable("Do not know how to custom type legalize this operation!");
+
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ // Custom lower only if it involves f128 or i64.
+ if (N->getOperand(0).getValueType() != MVT::f128
+ || N->getValueType(0) != MVT::i64)
+ return;
+ libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
+ ? RTLIB::FPTOSINT_F128_I64
+ : RTLIB::FPTOUINT_F128_I64);
+
+ Results.push_back(LowerF128Op(SDValue(N, 0),
+ DAG,
+ getLibcallName(libCall),
+ 1));
+ return;
+
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ // Custom lower only if it involves f128 or i64.
+ if (N->getValueType(0) != MVT::f128
+ || N->getOperand(0).getValueType() != MVT::i64)
+ return;
+
+ libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
+ ? RTLIB::SINTTOFP_I64_F128
+ : RTLIB::UINTTOFP_I64_F128);
+
+ Results.push_back(LowerF128Op(SDValue(N, 0),
+ DAG,
+ getLibcallName(libCall),
+ 1));
+ return;
+ }
+}