#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
-#include "llvm/ParameterAttributes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
// PowerPC has an i16 but no i8 (or i1) SEXTLOAD
- setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// TRAP is legal.
setOperationAction(ISD::TRAP, MVT::Other, Legal);
-
+
+ // TRAMPOLINE is custom lowered.
+ setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
+
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ // Comparisons that require checking two conditions.
+ setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
+
if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
// They also have instructions for converting between i64 and fp.
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
}
setShiftAmountType(MVT::i32);
- setSetCCResultContents(ZeroOrOneSetCCResult);
+ setBooleanContents(ZeroOrOneBooleanContent);
if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
setStackPointerRegisterToSaveRestore(PPC::X1);
setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
+ setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
+ setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
+ setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
+ setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
+ setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
}
computeRegisterProperties();
}
-MVT PPCTargetLowering::getSetCCResultType(const SDValue &) const {
+MVT PPCTargetLowering::getSetCCResultType(MVT VT) const {
return MVT::i32;
}
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDValue Op, unsigned Val) {
return Op.getOpcode() == ISD::UNDEF ||
- cast<ConstantSDNode>(Op)->getValue() == Val;
+ cast<ConstantSDNode>(Op)->getZExtValue() == Val;
}
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
// Otherwise, check to see if the rest of the elements are consecutively
// numbered from this value.
- unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
+ unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getZExtValue();
if (ShiftAmt < i) return -1;
ShiftAmt -= i;
unsigned ElementBase = 0;
SDValue Elt = N->getOperand(0);
if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
- ElementBase = EltV->getValue();
+ ElementBase = EltV->getZExtValue();
else
return false; // FIXME: Handle UNDEF elements too!
- if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
+ if (cast<ConstantSDNode>(Elt)->getZExtValue() >= 16)
return false;
// Check that they are consecutive.
for (unsigned i = 1; i != EltSize; ++i) {
if (!isa<ConstantSDNode>(N->getOperand(i)) ||
- cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
+ cast<ConstantSDNode>(N->getOperand(i))->getZExtValue() != i+ElementBase)
return false;
}
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
assert(isSplatShuffleMask(N, EltSize));
- return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
+ return cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() / EltSize;
}
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
if (LeadingZero) {
if (UniquedVals[Multiple-1].getNode() == 0)
return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef
- int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
+ int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
if (Val < 16)
return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
}
if (LeadingOnes) {
if (UniquedVals[Multiple-1].getNode() == 0)
return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef
- int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
+ int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
return DAG.getTargetConstant(Val, MVT::i32);
}
unsigned ValSizeInBytes = 0;
uint64_t Value = 0;
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
- Value = CN->getValue();
+ Value = CN->getZExtValue();
ValSizeInBytes = CN->getValueType(0).getSizeInBits()/8;
} else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
if (N->getOpcode() != ISD::Constant)
return false;
- Imm = (short)cast<ConstantSDNode>(N)->getValue();
+ Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
if (N->getValueType(0) == MVT::i32)
- return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue();
+ return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
else
- return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue();
+ return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
return isIntS16Immediate(Op.getNode(), Imm);
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
SDValue &Index,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
short imm = 0;
if (N.getOpcode() == ISD::ADD) {
if (isIntS16Immediate(N.getOperand(1), imm))
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
- SDValue &Base, SelectionDAG &DAG){
+ SDValue &Base,
+ SelectionDAG &DAG) const {
+ // FIXME dl should come from parent load or store, not from address
+ DebugLoc dl = N.getDebugLoc();
// If this can be more profitably realized as r+r, fail.
if (SelectAddressRegReg(N, Disp, Base, DAG))
return false;
return true; // [r+i]
} else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
// Match LOAD (ADD (X, Lo(G))).
- assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
+ assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
&& "Cannot handle constant offsets yet!");
Disp = N.getOperand(1).getOperand(0); // The global address.
assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
// Handle 32-bit sext immediates with LIS + addr mode.
if (CN->getValueType(0) == MVT::i32 ||
- (int64_t)CN->getValue() == (int)CN->getValue()) {
- int Addr = (int)CN->getValue();
+ (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
+ int Addr = (int)CN->getZExtValue();
// Otherwise, break this down into an LIS + disp.
Disp = DAG.getTargetConstant((short)Addr, MVT::i32);
Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
- Base = SDValue(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
+ Base = SDValue(DAG.getTargetNode(Opc, dl, CN->getValueType(0), Base), 0);
return true;
}
}
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
SDValue &Index,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
// Check to see if we can easily represent this as an [r+r] address. This
// will fail if it thinks that the address is more profitably represented as
// reg+imm, e.g. where imm = 0.
/// [r+imm*4]. Suitable for use by STD and friends.
bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
SDValue &Base,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
+ // FIXME dl should come from the parent load or store, not the address
+ DebugLoc dl = N.getDebugLoc();
// If this can be more profitably realized as r+r, fail.
if (SelectAddressRegReg(N, Disp, Base, DAG))
return false;
return true; // [r+i]
} else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
// Match LOAD (ADD (X, Lo(G))).
- assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
+ assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
&& "Cannot handle constant offsets yet!");
Disp = N.getOperand(1).getOperand(0); // The global address.
assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
}
} else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
// Loading from a constant address. Verify low two bits are clear.
- if ((CN->getValue() & 3) == 0) {
+ if ((CN->getZExtValue() & 3) == 0) {
// If this address fits entirely in a 14-bit sext immediate field, codegen
// this as "d, 0"
short Imm;
// Fold the low-part of 32-bit absolute addresses into addr mode.
if (CN->getValueType(0) == MVT::i32 ||
- (int64_t)CN->getValue() == (int)CN->getValue()) {
- int Addr = (int)CN->getValue();
+ (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
+ int Addr = (int)CN->getZExtValue();
// Otherwise, break this down into an LIS + disp.
Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
-
Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
- Base = SDValue(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
+ Base = SDValue(DAG.getTargetNode(Opc, dl, CN->getValueType(0), Base),0);
return true;
}
}
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
SDValue &Offset,
ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
// Disabled by default for now.
if (!EnablePPCPreinc) return false;
Constant *C = CP->getConstVal();
SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
SDValue Zero = DAG.getConstant(0, PtrVT);
+ // FIXME there isn't really any debug info here
+ DebugLoc dl = Op.getDebugLoc();
const TargetMachine &TM = DAG.getTarget();
- SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero);
- SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero);
+ SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, CPI, Zero);
+ SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, CPI, Zero);
// If this is a non-darwin platform, we don't support non-static relo models
// yet.
!TM.getSubtarget<PPCSubtarget>().isDarwin()) {
// Generate non-pic code that has direct accesses to the constant pool.
// The address of the global is just (hi(&g)+lo(&g)).
- return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
+ return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
}
if (TM.getRelocationModel() == Reloc::PIC_) {
// With PIC, the first instruction is actually "GR+hi(&G)".
- Hi = DAG.getNode(ISD::ADD, PtrVT,
- DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
+ Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
+ DAG.getNode(PPCISD::GlobalBaseReg,
+ DebugLoc::getUnknownLoc(), PtrVT), Hi);
}
- Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
+ Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
return Lo;
}
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
SDValue Zero = DAG.getConstant(0, PtrVT);
+ // FIXME there isn't really any debug loc here
+ DebugLoc dl = Op.getDebugLoc();
const TargetMachine &TM = DAG.getTarget();
- SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero);
- SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero);
+ SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, JTI, Zero);
+ SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, JTI, Zero);
// If this is a non-darwin platform, we don't support non-static relo models
// yet.
!TM.getSubtarget<PPCSubtarget>().isDarwin()) {
// Generate non-pic code that has direct accesses to the constant pool.
// The address of the global is just (hi(&g)+lo(&g)).
- return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
+ return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
}
if (TM.getRelocationModel() == Reloc::PIC_) {
// With PIC, the first instruction is actually "GR+hi(&G)".
- Hi = DAG.getNode(ISD::ADD, PtrVT,
- DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
+ Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
+ DAG.getNode(PPCISD::GlobalBaseReg,
+ DebugLoc::getUnknownLoc(), PtrVT), Hi);
}
- Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
+ Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
return Lo;
}
}
SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) {
MVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
GlobalValue *GV = GSDN->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
- // If it's a debug information descriptor, don't mess with it.
- if (DAG.isVerifiedDebugInfoDesc(Op))
- return GA;
SDValue Zero = DAG.getConstant(0, PtrVT);
+ // FIXME there isn't really any debug info here
+ DebugLoc dl = GSDN->getDebugLoc();
const TargetMachine &TM = DAG.getTarget();
- SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero);
- SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero);
+ SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, GA, Zero);
+ SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, GA, Zero);
// If this is a non-darwin platform, we don't support non-static relo models
// yet.
!TM.getSubtarget<PPCSubtarget>().isDarwin()) {
// Generate non-pic code that has direct accesses to globals.
// The address of the global is just (hi(&g)+lo(&g)).
- return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
+ return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
}
if (TM.getRelocationModel() == Reloc::PIC_) {
// With PIC, the first instruction is actually "GR+hi(&G)".
- Hi = DAG.getNode(ISD::ADD, PtrVT,
- DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
+ Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
+ DAG.getNode(PPCISD::GlobalBaseReg,
+ DebugLoc::getUnknownLoc(), PtrVT), Hi);
}
- Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
+ Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
return Lo;
// If the global is weak or external, we have to go through the lazy
// resolution stub.
- return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0);
+ return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Lo, NULL, 0);
}
SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ DebugLoc dl = Op.getDebugLoc();
// If we're comparing for equality to zero, expose the fact that this is
// implemented as a ctlz/srl pair on ppc, so that the dag combiner can
SDValue Zext = Op.getOperand(0);
if (VT.bitsLT(MVT::i32)) {
VT = MVT::i32;
- Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
+ Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
}
unsigned Log2b = Log2_32(VT.getSizeInBits());
- SDValue Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
- SDValue Scc = DAG.getNode(ISD::SRL, VT, Clz,
- DAG.getConstant(Log2b, MVT::i32));
- return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
+ SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
+ SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
+ DAG.getConstant(Log2b, MVT::i32));
+ return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
}
// Leave comparisons against 0 and -1 alone for now, since they're usually
// optimized. FIXME: revisit this when we can custom lower all setcc
MVT LHSVT = Op.getOperand(0).getValueType();
if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
MVT VT = Op.getValueType();
- SDValue Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
+ SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
Op.getOperand(1));
- return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
+ return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
}
return SDValue();
}
return SDValue(); // Not reached
}
+SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
+ SDValue Chain = Op.getOperand(0);
+ SDValue Trmp = Op.getOperand(1); // trampoline
+ SDValue FPtr = Op.getOperand(2); // nested function
+ SDValue Nest = Op.getOperand(3); // 'nest' parameter value
+ DebugLoc dl = Op.getDebugLoc();
+
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ bool isPPC64 = (PtrVT == MVT::i64);
+ const Type *IntPtrTy =
+ DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType();
+
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+
+ Entry.Ty = IntPtrTy;
+ Entry.Node = Trmp; Args.push_back(Entry);
+
+ // TrampSize == (isPPC64 ? 48 : 40);
+ Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
+ isPPC64 ? MVT::i64 : MVT::i32);
+ Args.push_back(Entry);
+
+ Entry.Node = FPtr; Args.push_back(Entry);
+ Entry.Node = Nest; Args.push_back(Entry);
+
+ // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
+ std::pair<SDValue, SDValue> CallResult =
+ LowerCallTo(Chain, Op.getValueType().getTypeForMVT(), false, false,
+ false, false, CallingConv::C, false,
+ DAG.getExternalSymbol("__trampoline_setup", PtrVT),
+ Args, DAG, dl);
+
+ SDValue Ops[] =
+ { CallResult.first, CallResult.second };
+
+ return DAG.getMergeValues(Ops, 2, dl);
+}
+
SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
- int VarArgsFrameIndex,
- int VarArgsStackOffset,
- unsigned VarArgsNumGPR,
- unsigned VarArgsNumFPR,
- const PPCSubtarget &Subtarget) {
+ int VarArgsFrameIndex,
+ int VarArgsStackOffset,
+ unsigned VarArgsNumGPR,
+ unsigned VarArgsNumFPR,
+ const PPCSubtarget &Subtarget) {
+ DebugLoc dl = Op.getDebugLoc();
if (Subtarget.isMachoABI()) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0);
+ return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
}
// For ELF 32 ABI we follow the layout of the va_list struct.
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
// Store first byte : number of int regs
- SDValue firstStore = DAG.getStore(Op.getOperand(0), ArgGPR,
+ SDValue firstStore = DAG.getStore(Op.getOperand(0), dl, ArgGPR,
Op.getOperand(1), SV, 0);
uint64_t nextOffset = FPROffset;
- SDValue nextPtr = DAG.getNode(ISD::ADD, PtrVT, Op.getOperand(1),
+ SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
ConstFPROffset);
// Store second byte : number of float regs
SDValue secondStore =
- DAG.getStore(firstStore, ArgFPR, nextPtr, SV, nextOffset);
+ DAG.getStore(firstStore, dl, ArgFPR, nextPtr, SV, nextOffset);
nextOffset += StackOffset;
- nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstStackOffset);
+ nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
// Store second word : arguments given on stack
SDValue thirdStore =
- DAG.getStore(secondStore, StackOffsetFI, nextPtr, SV, nextOffset);
+ DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, SV, nextOffset);
nextOffset += FrameOffset;
- nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstFrameOffset);
+ nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
// Store third word : arguments given in registers
- return DAG.getStore(thirdStore, FR, nextPtr, SV, nextOffset);
+ return DAG.getStore(thirdStore, dl, FR, nextPtr, SV, nextOffset);
}
/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
-static unsigned CalculateStackSlotSize(SDValue Arg, SDValue Flag,
+static unsigned CalculateStackSlotSize(SDValue Arg, ISD::ArgFlagsTy Flags,
bool isVarArg, unsigned PtrByteSize) {
MVT ArgVT = Arg.getValueType();
- ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Flag)->getArgFlags();
unsigned ArgSize = ArgVT.getSizeInBits()/8;
if (Flags.isByVal())
ArgSize = Flags.getByValSize();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
SmallVector<SDValue, 8> ArgValues;
SDValue Root = Op.getOperand(0);
- bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
+ DebugLoc dl = Op.getDebugLoc();
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
if (isVarArg || isPPC64) {
MinReservedArea = ((MinReservedArea+15)/16)*16;
MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
- Op.getOperand(ArgNo+3),
+ Flags,
isVarArg,
PtrByteSize);
} else nAltivecParamsAtEnd++;
} else
// Calculate min reserved area.
MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
- Op.getOperand(ArgNo+3),
+ Flags,
isVarArg,
PtrByteSize);
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
RegInfo.addLiveIn(GPR[GPR_idx], VReg);
- SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
- SDValue Store = DAG.getTruncStore(Val.getValue(1), Val, FIN,
+ SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT);
+ SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16 );
MemOps.push_back(Store);
++GPR_idx;
RegInfo.addLiveIn(GPR[GPR_idx], VReg);
int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
- SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
+ SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store);
++GPR_idx;
if (isMachoABI) ArgOffset += PtrByteSize;
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
RegInfo.addLiveIn(GPR[GPR_idx], VReg);
- ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32);
+ ArgVal = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
++GPR_idx;
} else {
needsLoad = true;
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
RegInfo.addLiveIn(GPR[GPR_idx], VReg);
- ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64);
+ ArgVal = DAG.getCopyFromReg(Root, dl, VReg, MVT::i64);
if (ObjectVT == MVT::i32) {
// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
if (Flags.isSExt())
- ArgVal = DAG.getNode(ISD::AssertSext, MVT::i64, ArgVal,
+ ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
DAG.getValueType(ObjectVT));
else if (Flags.isZExt())
- ArgVal = DAG.getNode(ISD::AssertZext, MVT::i64, ArgVal,
+ ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
DAG.getValueType(ObjectVT));
- ArgVal = DAG.getNode(ISD::TRUNCATE, MVT::i32, ArgVal);
+ ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
}
++GPR_idx;
else
VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
RegInfo.addLiveIn(FPR[FPR_idx], VReg);
- ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
+ ArgVal = DAG.getCopyFromReg(Root, dl, VReg, ObjectVT);
++FPR_idx;
} else {
needsLoad = true;
if (VR_idx != Num_VR_Regs) {
unsigned VReg = RegInfo.createVirtualRegister(&PPC::VRRCRegClass);
RegInfo.addLiveIn(VR[VR_idx], VReg);
- ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
+ ArgVal = DAG.getCopyFromReg(Root, dl, VReg, ObjectVT);
if (isVarArg) {
while ((ArgOffset % 16) != 0) {
ArgOffset += PtrByteSize;
CurArgOffset + (ArgSize - ObjSize),
isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
+ ArgVal = DAG.getLoad(ObjectVT, dl, Root, FIN, NULL, 0);
}
ArgValues.push_back(ArgVal);
if (isELF32_ABI) {
for (GPR_idx = 0; GPR_idx != VarArgsNumGPR; ++GPR_idx) {
SDValue Val = DAG.getRegister(GPR[GPR_idx], PtrVT);
- SDValue Store = DAG.getStore(Root, Val, FIN, NULL, 0);
+ SDValue Store = DAG.getStore(Root, dl, Val, FIN, NULL, 0);
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
- FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
+ FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
}
VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
RegInfo.addLiveIn(GPR[GPR_idx], VReg);
- SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
- SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
+ SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
- FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
+ FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
// In ELF 32 ABI, the double arguments are stored to the VarArgsFrameIndex
if (isELF32_ABI) {
for (FPR_idx = 0; FPR_idx != VarArgsNumFPR; ++FPR_idx) {
SDValue Val = DAG.getRegister(FPR[FPR_idx], MVT::f64);
- SDValue Store = DAG.getStore(Root, Val, FIN, NULL, 0);
+ SDValue Store = DAG.getStore(Root, dl, Val, FIN, NULL, 0);
MemOps.push_back(Store);
// Increment the address by eight for the next argument to store
SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
PtrVT);
- FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
+ FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
for (; FPR_idx != Num_FPR_Regs; ++FPR_idx) {
VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
RegInfo.addLiveIn(FPR[FPR_idx], VReg);
- SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::f64);
- SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
+ SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::f64);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store);
// Increment the address by eight for the next argument to store
SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
PtrVT);
- FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
+ FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
}
}
if (!MemOps.empty())
- Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size());
+ Root = DAG.getNode(ISD::TokenFactor, dl,
+ MVT::Other, &MemOps[0], MemOps.size());
ArgValues.push_back(Root);
// Return the new list of results.
- return DAG.getMergeValues(Op.getNode()->getVTList(), &ArgValues[0],
- ArgValues.size());
+ return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
+ &ArgValues[0], ArgValues.size());
}
/// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus
bool isMachoABI,
bool isVarArg,
unsigned CC,
- SDValue Call,
+ CallSDNode *TheCall,
unsigned &nAltivecParamsAtEnd) {
// Count how many bytes are to be pushed on the stack, including the linkage
// area, and parameter passing area. We start with 24/48 bytes, which is
// prereserved space for [SP][CR][LR][3 x unused].
unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
- unsigned NumOps = (Call.getNumOperands() - 5) / 2;
+ unsigned NumOps = TheCall->getNumArgs();
unsigned PtrByteSize = isPPC64 ? 8 : 4;
// Add up all the space actually used.
// 16-byte aligned.
nAltivecParamsAtEnd = 0;
for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = Call.getOperand(5+2*i);
- SDValue Flag = Call.getOperand(5+2*i+1);
+ SDValue Arg = TheCall->getArg(i);
+ ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
MVT ArgVT = Arg.getValueType();
// Varargs Altivec parameters are padded to a 16 byte boundary.
if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
// Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
NumBytes = ((NumBytes+15)/16)*16;
}
- NumBytes += CalculateStackSlotSize(Arg, Flag, isVarArg, PtrByteSize);
+ NumBytes += CalculateStackSlotSize(Arg, Flags, isVarArg, PtrByteSize);
}
// Allow for Altivec parameters at the end, if needed.
/// calling conventions match, currently only fastcc supports tail calls, and
/// the function CALL is immediately followed by a RET.
bool
-PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Call,
+PPCTargetLowering::IsEligibleForTailCallOptimization(CallSDNode *TheCall,
SDValue Ret,
SelectionDAG& DAG) const {
// Variable argument functions are not supported.
- if (!PerformTailCallOpt ||
- cast<ConstantSDNode>(Call.getOperand(2))->getValue() != 0) return false;
+ if (!PerformTailCallOpt || TheCall->isVarArg())
+ return false;
- if (CheckTailCallReturnConstraints(Call, Ret)) {
+ if (CheckTailCallReturnConstraints(TheCall, Ret)) {
MachineFunction &MF = DAG.getMachineFunction();
unsigned CallerCC = MF.getFunction()->getCallingConv();
- unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
+ unsigned CalleeCC = TheCall->getCallingConv();
if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
// Functions containing by val parameters are not supported.
- for (unsigned i = 0; i != ((Call.getNumOperands()-5)/2); i++) {
- ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Call.getOperand(5+2*i+1))
- ->getArgFlags();
+ for (unsigned i = 0; i != TheCall->getNumArgs(); i++) {
+ ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
if (Flags.isByVal()) return false;
}
- SDValue Callee = Call.getOperand(4);
+ SDValue Callee = TheCall->getCallee();
// Non PIC/GOT tail calls are supported.
if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return true;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
if (!C) return 0;
- int Addr = C->getValue();
+ int Addr = C->getZExtValue();
if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
(Addr << 6 >> 6) != Addr)
return 0; // Top 6 bits have to be sext of immediate.
- return DAG.getConstant((int)C->getValue() >> 2,
+ return DAG.getConstant((int)C->getZExtValue() >> 2,
DAG.getTargetLoweringInfo().getPointerTy()).getNode();
}
StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
SDValue Chain,
const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
- SmallVector<SDValue, 8> &MemOpChains) {
+ SmallVector<SDValue, 8> &MemOpChains,
+ DebugLoc dl) {
for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
SDValue Arg = TailCallArgs[i].Arg;
SDValue FIN = TailCallArgs[i].FrameIdxOp;
int FI = TailCallArgs[i].FrameIdx;
// Store relative to frame pointer.
- MemOpChains.push_back(DAG.getStore(Chain, Arg, FIN,
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
PseudoSourceValue::getFixedStack(FI),
0));
}
SDValue OldFP,
int SPDiff,
bool isPPC64,
- bool isMachoABI) {
+ bool isMachoABI,
+ DebugLoc dl) {
if (SPDiff) {
// Calculate the new stack slot for the return address.
int SlotSize = isPPC64 ? 8 : 4;
MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
- Chain = DAG.getStore(Chain, OldRetAddr, NewRetAddrFrIdx,
+ Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
PseudoSourceValue::getFixedStack(NewRetAddr), 0);
SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
- Chain = DAG.getStore(Chain, OldFP, NewFramePtrIdx,
+ Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
PseudoSourceValue::getFixedStack(NewFPIdx), 0);
}
return Chain;
/// stack slot. Returns the chain as result and the loaded frame pointers in
/// LROpOut/FPOpout. Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
- int SPDiff,
- SDValue Chain,
- SDValue &LROpOut,
- SDValue &FPOpOut) {
+ int SPDiff,
+ SDValue Chain,
+ SDValue &LROpOut,
+ SDValue &FPOpOut,
+ DebugLoc dl) {
if (SPDiff) {
// Load the LR and FP stack slot for later adjusting.
MVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
LROpOut = getReturnAddrFrameIndex(DAG);
- LROpOut = DAG.getLoad(VT, Chain, LROpOut, NULL, 0);
+ LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, NULL, 0);
Chain = SDValue(LROpOut.getNode(), 1);
FPOpOut = getFramePointerFrameIndex(DAG);
- FPOpOut = DAG.getLoad(VT, Chain, FPOpOut, NULL, 0);
+ FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, NULL, 0);
Chain = SDValue(FPOpOut.getNode(), 1);
}
return Chain;
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
- unsigned Size) {
+ unsigned Size, DebugLoc dl) {
SDValue SizeNode = DAG.getConstant(Size, MVT::i32);
- return DAG.getMemcpy(Chain, Dst, Src, SizeNode, Flags.getByValAlign(), false,
- NULL, 0, NULL, 0);
+ return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
+ false, NULL, 0, NULL, 0);
}
/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
SDValue Arg, SDValue PtrOff, int SPDiff,
unsigned ArgOffset, bool isPPC64, bool isTailCall,
bool isVector, SmallVector<SDValue, 8> &MemOpChains,
- SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
+ SmallVector<TailCallArgumentInfo, 8>& TailCallArguments,
+ DebugLoc dl) {
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
if (!isTailCall) {
if (isVector) {
StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
else
StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
- PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr,
+ PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
DAG.getConstant(ArgOffset, PtrVT));
}
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0));
// Calculate and remember argument location.
} else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
TailCallArguments);
SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget,
TargetMachine &TM) {
- SDValue Chain = Op.getOperand(0);
- bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
- unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
- bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0 &&
- CC == CallingConv::Fast && PerformTailCallOpt;
- SDValue Callee = Op.getOperand(4);
- unsigned NumOps = (Op.getNumOperands() - 5) / 2;
+ CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
+ SDValue Chain = TheCall->getChain();
+ bool isVarArg = TheCall->isVarArg();
+ unsigned CC = TheCall->getCallingConv();
+ bool isTailCall = TheCall->isTailCall()
+ && CC == CallingConv::Fast && PerformTailCallOpt;
+ SDValue Callee = TheCall->getCallee();
+ unsigned NumOps = TheCall->getNumArgs();
+ DebugLoc dl = TheCall->getDebugLoc();
bool isMachoABI = Subtarget.isMachoABI();
bool isELF32_ABI = Subtarget.isELF32_ABI();
// prereserved space for [SP][CR][LR][3 x unused].
unsigned NumBytes =
CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isMachoABI, isVarArg, CC,
- Op, nAltivecParamsAtEnd);
+ TheCall, nAltivecParamsAtEnd);
// Calculate by how many bytes the stack has to be adjusted in case of tail
// call optimization.
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain,
- DAG.getConstant(NumBytes, PtrVT));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
SDValue CallSeqStart = Chain;
// Load the return address and frame pointer so they can be moved somewhere else
// later.
SDValue LROp, FPOp;
- Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp);
+ Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
// Set up a copy of the stack pointer for use loading and storing any
// arguments that may not fit in the registers available for argument
SmallVector<SDValue, 8> MemOpChains;
for (unsigned i = 0; i != NumOps; ++i) {
bool inMem = false;
- SDValue Arg = Op.getOperand(5+2*i);
- ISD::ArgFlagsTy Flags =
- cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags();
+ SDValue Arg = TheCall->getArg(i);
+ ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
// See if next argument requires stack alignment in ELF
bool Align = Flags.isSplit();
else
PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
- PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff);
+ PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
// On PPC64, promote integers to 64-bit values.
if (isPPC64 && Arg.getValueType() == MVT::i32) {
// FIXME: Should this use ANY_EXTEND if neither sext nor zext?
unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- Arg = DAG.getNode(ExtOp, MVT::i64, Arg);
+ Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
}
// FIXME Elf untested, what are alignment rules?
// Everything else is passed left-justified.
MVT VT = (Size==1) ? MVT::i8 : MVT::i16;
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg,
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
NULL, 0, VT);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
ArgOffset += PtrByteSize;
} else {
SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType());
- SDValue AddPtr = DAG.getNode(ISD::ADD, PtrVT, PtrOff, Const);
+ SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr,
CallSeqStart.getNode()->getOperand(0),
- Flags, DAG, Size);
+ Flags, DAG, Size, dl);
// This must go outside the CALLSEQ_START..END.
SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
CallSeqStart.getNode()->getOperand(1));
// registers. (This is not what the doc says.)
SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
CallSeqStart.getNode()->getOperand(0),
- Flags, DAG, Size);
+ Flags, DAG, Size, dl);
// This must go outside the CALLSEQ_START..END.
SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
CallSeqStart.getNode()->getOperand(1));
// And copy the pieces of it that fit into registers.
for (unsigned j=0; j<Size; j+=PtrByteSize) {
SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
- SDValue AddArg = DAG.getNode(ISD::ADD, PtrVT, Arg, Const);
+ SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getLoad(PtrVT, Chain, AddArg, NULL, 0);
+ SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, NULL, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
if (isMachoABI)
} else {
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
isPPC64, isTailCall, false, MemOpChains,
- TailCallArguments);
+ TailCallArguments, dl);
inMem = true;
}
if (inMem || isMachoABI) {
RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
if (isVarArg) {
- SDValue Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
+ SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0);
MemOpChains.push_back(Store);
// Float varargs are always shadowed in available integer registers
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
+ SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0);
MemOpChains.push_back(Load.getValue(1));
if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++],
Load));
}
if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
- PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour);
- SDValue Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
+ PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
+ SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0);
MemOpChains.push_back(Load.getValue(1));
if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++],
Load));
} else {
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
isPPC64, isTailCall, false, MemOpChains,
- TailCallArguments);
+ TailCallArguments, dl);
inMem = true;
}
if (inMem || isMachoABI) {
}
// We could elide this store in the case where the object fits
// entirely in R registers. Maybe later.
- PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr,
+ PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
DAG.getConstant(ArgOffset, PtrVT));
- SDValue Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
+ SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0);
MemOpChains.push_back(Store);
if (VR_idx != NumVRs) {
- SDValue Load = DAG.getLoad(MVT::v4f32, Store, PtrOff, NULL, 0);
+ SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, NULL, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
}
for (unsigned i=0; i<16; i+=PtrByteSize) {
if (GPR_idx == NumGPRs)
break;
- SDValue Ix = DAG.getNode(ISD::ADD, PtrVT, PtrOff,
+ SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
DAG.getConstant(i, PtrVT));
- SDValue Load = DAG.getLoad(PtrVT, Store, Ix, NULL, 0);
+ SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, NULL, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
// We are emitting Altivec params in order.
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
isPPC64, isTailCall, true, MemOpChains,
- TailCallArguments);
+ TailCallArguments, dl);
ArgOffset += 16;
}
break;
ArgOffset = ((ArgOffset+15)/16)*16;
ArgOffset += 12*16;
for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = Op.getOperand(5+2*i);
+ SDValue Arg = TheCall->getArg(i);
MVT ArgType = Arg.getValueType();
if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
// We are emitting Altivec params in order.
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
isPPC64, isTailCall, true, MemOpChains,
- TailCallArguments);
+ TailCallArguments, dl);
ArgOffset += 16;
}
}
}
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into the appropriate regs.
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
- InFlag);
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
// With the ELF 32 ABI, set CR6 to true if this is a vararg call.
if (isVarArg && isELF32_ABI) {
- SDValue SetCR(DAG.getTargetNode(PPC::CRSET, MVT::i32), 0);
- Chain = DAG.getCopyToReg(Chain, PPC::CR1EQ, SetCR, InFlag);
+ SDValue SetCR(DAG.getTargetNode(PPC::CRSET, dl, MVT::i32), 0);
+ Chain = DAG.getCopyToReg(Chain, dl, PPC::CR1EQ, SetCR, InFlag);
InFlag = Chain.getValue(1);
}
// Do not flag preceding copytoreg stuff together with the following stuff.
InFlag = SDValue();
StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
- MemOpChains2);
+ MemOpChains2, dl);
if (!MemOpChains2.empty())
- Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains2[0], MemOpChains2.size());
// Store the return address to the appropriate stack slot.
Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
- isPPC64, isMachoABI);
+ isPPC64, isMachoABI, dl);
}
// Emit callseq_end just before tailcall node.
if (isTailCall) {
- SmallVector<SDValue, 8> CallSeqOps;
- SDVTList CallSeqNodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
- CallSeqOps.push_back(Chain);
- CallSeqOps.push_back(DAG.getIntPtrConstant(NumBytes));
- CallSeqOps.push_back(DAG.getIntPtrConstant(0));
- if (InFlag.getNode())
- CallSeqOps.push_back(InFlag);
- Chain = DAG.getNode(ISD::CALLSEQ_END, CallSeqNodeTys, &CallSeqOps[0],
- CallSeqOps.size());
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ DAG.getIntPtrConstant(0, true), InFlag);
InFlag = Chain.getValue(1);
}
// Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
// to do the call; we can't use PPCISD::CALL.
SDValue MTCTROps[] = {Chain, Callee, InFlag};
- Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps,
+ Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps,
2 + (InFlag.getNode() != 0));
InFlag = Chain.getValue(1);
// Copy the callee address into R12/X12 on darwin.
if (isMachoABI) {
unsigned Reg = Callee.getValueType() == MVT::i32 ? PPC::R12 : PPC::X12;
- Chain = DAG.getCopyToReg(Chain, Reg, Callee, InFlag);
+ Chain = DAG.getCopyToReg(Chain, dl, Reg, Callee, InFlag);
InFlag = Chain.getValue(1);
}
if (isTailCall) {
assert(InFlag.getNode() &&
"Flag must be set. Depend on flag being set in LowerRET");
- Chain = DAG.getNode(PPCISD::TAILCALL,
- Op.getNode()->getVTList(), &Ops[0], Ops.size());
+ Chain = DAG.getNode(PPCISD::TAILCALL, dl,
+ TheCall->getVTList(), &Ops[0], Ops.size());
return SDValue(Chain.getNode(), Op.getResNo());
}
- Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
- Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getConstant(NumBytes, PtrVT),
- DAG.getConstant(BytesCalleePops, PtrVT),
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ DAG.getIntPtrConstant(BytesCalleePops, true),
InFlag);
- if (Op.getNode()->getValueType(0) != MVT::Other)
+ if (TheCall->getValueType(0) != MVT::Other)
InFlag = Chain.getValue(1);
SmallVector<SDValue, 16> ResultVals;
SmallVector<CCValAssign, 16> RVLocs;
unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv();
CCState CCInfo(CallerCC, isVarArg, TM, RVLocs);
- CCInfo.AnalyzeCallResult(Op.getNode(), RetCC_PPC);
+ CCInfo.AnalyzeCallResult(TheCall, RetCC_PPC);
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
MVT VT = VA.getValVT();
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyFromReg(Chain, VA.getLocReg(), VT, InFlag).getValue(1);
+ Chain = DAG.getCopyFromReg(Chain, dl,
+ VA.getLocReg(), VT, InFlag).getValue(1);
ResultVals.push_back(Chain.getValue(0));
InFlag = Chain.getValue(2);
}
// Otherwise, merge everything together with a MERGE_VALUES node.
ResultVals.push_back(Chain);
- SDValue Res = DAG.getMergeValues(Op.getNode()->getVTList(), &ResultVals[0],
- ResultVals.size());
+ SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
+ &ResultVals[0], ResultVals.size());
return Res.getValue(Op.getResNo());
}
SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
+ DebugLoc dl = Op.getDebugLoc();
CCState CCInfo(CC, isVarArg, TM, RVLocs);
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_PPC);
for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
Operands.push_back(Chain.getOperand(i));
}
- return DAG.getNode(PPCISD::TC_RETURN, MVT::Other, &Operands[0],
+ return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Operands[0],
Operands.size());
}
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1), Flag);
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
+ Op.getOperand(i*2+1), Flag);
Flag = Chain.getValue(1);
}
if (Flag.getNode())
- return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain, Flag);
+ return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
else
- return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain);
+ return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain);
}
SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) {
// When we pop the dynamic allocation we need to restore the SP link.
+ DebugLoc dl = Op.getDebugLoc();
// Get the correct type for pointers.
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDValue SaveSP = Op.getOperand(1);
// Load the old link SP.
- SDValue LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0);
+ SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr, NULL, 0);
// Restore the stack pointer.
- Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP);
+ Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
// Store the old link SP.
- return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0);
+ return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, NULL, 0);
}
// Get the inputs.
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
-
+ DebugLoc dl = Op.getDebugLoc();
+
// Get the correct type for pointers.
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Negate the size.
- SDValue NegSize = DAG.getNode(ISD::SUB, PtrVT,
+ SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
DAG.getConstant(0, PtrVT), Size);
// Construct a node for the frame pointer save index.
SDValue FPSIdx = getFramePointerFrameIndex(DAG);
// Build a DYNALLOC node.
SDValue Ops[3] = { Chain, NegSize, FPSIdx };
SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
- return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3);
+ return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
}
/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
MVT CmpVT = Op.getOperand(0).getValueType();
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
+ DebugLoc dl = Op.getDebugLoc();
// If the RHS of the comparison is a 0.0, we don't need to do the
// subtraction at all.
case ISD::SETOGE:
case ISD::SETGE:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
- LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
- return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
+ LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
+ return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
case ISD::SETUGT:
case ISD::SETGT:
std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
case ISD::SETOLE:
case ISD::SETLE:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
- LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
- return DAG.getNode(PPCISD::FSEL, ResVT,
- DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
+ LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
+ return DAG.getNode(PPCISD::FSEL, dl, ResVT,
+ DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
}
SDValue Cmp;
default: break; // SETUO etc aren't handled by fsel.
case ISD::SETULT:
case ISD::SETLT:
- Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
+ Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
- Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
- return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
+ Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
+ return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
case ISD::SETOGE:
case ISD::SETGE:
- Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
+ Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
- Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
- return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
+ Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
+ return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
case ISD::SETUGT:
case ISD::SETGT:
- Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
+ Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
- Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
- return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
+ Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
+ return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
case ISD::SETOLE:
case ISD::SETLE:
- Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
+ Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
- Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
- return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
+ Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
+ return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
}
return SDValue();
}
// FIXME: Split this code up when LegalizeDAGTypes lands.
-SDValue PPCTargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
+ DebugLoc dl) {
assert(Op.getOperand(0).getValueType().isFloatingPoint());
SDValue Src = Op.getOperand(0);
if (Src.getValueType() == MVT::f32)
- Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);
+ Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
SDValue Tmp;
switch (Op.getValueType().getSimpleVT()) {
default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
case MVT::i32:
- Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
+ Tmp = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Src);
break;
case MVT::i64:
- Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
+ Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Src);
break;
}
SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64);
// Emit a store to the stack slot.
- SDValue Chain = DAG.getStore(DAG.getEntryNode(), Tmp, FIPtr, NULL, 0);
+ SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, NULL, 0);
// Result is a load from the stack slot. If loading 4 bytes, make sure to
// add in a bias.
if (Op.getValueType() == MVT::i32)
- FIPtr = DAG.getNode(ISD::ADD, FIPtr.getValueType(), FIPtr,
+ FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
DAG.getConstant(4, FIPtr.getValueType()));
- return DAG.getLoad(Op.getValueType(), Chain, FIPtr, NULL, 0);
-}
-
-SDValue PPCTargetLowering::LowerFP_ROUND_INREG(SDValue Op,
- SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::ppcf128);
- SDNode *Node = Op.getNode();
- assert(Node->getOperand(0).getValueType() == MVT::ppcf128);
- assert(Node->getOperand(0).getNode()->getOpcode() == ISD::BUILD_PAIR);
- SDValue Lo = Node->getOperand(0).getNode()->getOperand(0);
- SDValue Hi = Node->getOperand(0).getNode()->getOperand(1);
-
- // This sequence changes FPSCR to do round-to-zero, adds the two halves
- // of the long double, and puts FPSCR back the way it was. We do not
- // actually model FPSCR.
- std::vector<MVT> NodeTys;
- SDValue Ops[4], Result, MFFSreg, InFlag, FPreg;
-
- NodeTys.push_back(MVT::f64); // Return register
- NodeTys.push_back(MVT::Flag); // Returns a flag for later insns
- Result = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0);
- MFFSreg = Result.getValue(0);
- InFlag = Result.getValue(1);
-
- NodeTys.clear();
- NodeTys.push_back(MVT::Flag); // Returns a flag
- Ops[0] = DAG.getConstant(31, MVT::i32);
- Ops[1] = InFlag;
- Result = DAG.getNode(PPCISD::MTFSB1, NodeTys, Ops, 2);
- InFlag = Result.getValue(0);
-
- NodeTys.clear();
- NodeTys.push_back(MVT::Flag); // Returns a flag
- Ops[0] = DAG.getConstant(30, MVT::i32);
- Ops[1] = InFlag;
- Result = DAG.getNode(PPCISD::MTFSB0, NodeTys, Ops, 2);
- InFlag = Result.getValue(0);
-
- NodeTys.clear();
- NodeTys.push_back(MVT::f64); // result of add
- NodeTys.push_back(MVT::Flag); // Returns a flag
- Ops[0] = Lo;
- Ops[1] = Hi;
- Ops[2] = InFlag;
- Result = DAG.getNode(PPCISD::FADDRTZ, NodeTys, Ops, 3);
- FPreg = Result.getValue(0);
- InFlag = Result.getValue(1);
-
- NodeTys.clear();
- NodeTys.push_back(MVT::f64);
- Ops[0] = DAG.getConstant(1, MVT::i32);
- Ops[1] = MFFSreg;
- Ops[2] = FPreg;
- Ops[3] = InFlag;
- Result = DAG.getNode(PPCISD::MTFSF, NodeTys, Ops, 4);
- FPreg = Result.getValue(0);
-
- // We know the low half is about to be thrown away, so just use something
- // convenient.
- return DAG.getNode(ISD::BUILD_PAIR, Lo.getValueType(), FPreg, FPreg);
+ return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, NULL, 0);
}
SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
+ DebugLoc dl = Op.getDebugLoc();
// Don't handle ppc_fp128 here; let it be lowered to a libcall.
if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
return SDValue();
if (Op.getOperand(0).getValueType() == MVT::i64) {
- SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
- SDValue FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
+ SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, dl,
+ MVT::f64, Op.getOperand(0));
+ SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
if (Op.getValueType() == MVT::f32)
- FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0));
+ FP = DAG.getNode(ISD::FP_ROUND, dl,
+ MVT::f32, FP, DAG.getIntPtrConstant(0));
return FP;
}
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
- SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
+ SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32,
Op.getOperand(0));
// STD the extended value into the stack slot.
MachineMemOperand MO(PseudoSourceValue::getFixedStack(FrameIdx),
MachineMemOperand::MOStore, 0, 8, 8);
- SDValue Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
+ SDValue Store = DAG.getNode(PPCISD::STD_32, dl, MVT::Other,
DAG.getEntryNode(), Ext64, FIdx,
DAG.getMemOperand(MO));
// Load the value as a double.
- SDValue Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0);
+ SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, NULL, 0);
// FCFID it and return it.
- SDValue FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
+ SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld);
if (Op.getValueType() == MVT::f32)
- FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0));
+ FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
return FP;
}
SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) {
+ DebugLoc dl = Op.getDebugLoc();
/*
The rounding mode is in bits 30:31 of FPSR, and has the following
settings:
// Save FP Control Word to register
NodeTys.push_back(MVT::f64); // return register
NodeTys.push_back(MVT::Flag); // unused in this context
- SDValue Chain = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0);
+ SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
// Save FP register to stack slot
int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
- SDValue Store = DAG.getStore(DAG.getEntryNode(), Chain,
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
StackSlot, NULL, 0);
// Load FP Control Word from low 32 bits of stack slot.
SDValue Four = DAG.getConstant(4, PtrVT);
- SDValue Addr = DAG.getNode(ISD::ADD, PtrVT, StackSlot, Four);
- SDValue CWD = DAG.getLoad(MVT::i32, Store, Addr, NULL, 0);
+ SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
+ SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, NULL, 0);
// Transform as necessary
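+ // Map the FPSCR RN field (0=nearest, 1=toward zero, 2=+inf, 3=-inf) onto the
+ // FLT_ROUNDS_ encoding (0=toward zero, 1=nearest, 2=+inf, 3=-inf) by computing
+ // (CWD & 3) ^ (((CWD ^ 3) & 3) >> 1).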
SDValue CWD1 =
- DAG.getNode(ISD::AND, MVT::i32,
+ DAG.getNode(ISD::AND, dl, MVT::i32,
CWD, DAG.getConstant(3, MVT::i32));
SDValue CWD2 =
- DAG.getNode(ISD::SRL, MVT::i32,
- DAG.getNode(ISD::AND, MVT::i32,
- DAG.getNode(ISD::XOR, MVT::i32,
+ DAG.getNode(ISD::SRL, dl, MVT::i32,
+ DAG.getNode(ISD::AND, dl, MVT::i32,
+ DAG.getNode(ISD::XOR, dl, MVT::i32,
CWD, DAG.getConstant(3, MVT::i32)),
DAG.getConstant(3, MVT::i32)),
- DAG.getConstant(1, MVT::i8));
+ DAG.getConstant(1, MVT::i32));
SDValue RetVal =
- DAG.getNode(ISD::XOR, MVT::i32, CWD1, CWD2);
+ DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
return DAG.getNode((VT.getSizeInBits() < 16 ?
- ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
+ ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}
SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
+ DebugLoc dl = Op.getDebugLoc();
assert(Op.getNumOperands() == 3 &&
VT == Op.getOperand(1).getValueType() &&
"Unexpected SHL!");
SDValue Amt = Op.getOperand(2);
MVT AmtVT = Amt.getValueType();
- SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
- DAG.getConstant(BitWidth, AmtVT), Amt);
- SDValue Tmp2 = DAG.getNode(PPCISD::SHL, VT, Hi, Amt);
- SDValue Tmp3 = DAG.getNode(PPCISD::SRL, VT, Lo, Tmp1);
- SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3);
- SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt,
- DAG.getConstant(-BitWidth, AmtVT));
- SDValue Tmp6 = DAG.getNode(PPCISD::SHL, VT, Lo, Tmp5);
- SDValue OutHi = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6);
- SDValue OutLo = DAG.getNode(PPCISD::SHL, VT, Lo, Amt);
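+ // PPCISD::SHL and PPCISD::SRL produce zero for shift amounts in the range
+ // [BitWidth, 2*BitWidth-1], so the Lo << (Amt - BitWidth) term vanishes when
+ // Amt < BitWidth and the three terms can simply be ORed without a select.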
+ SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
+ DAG.getConstant(BitWidth, AmtVT), Amt);
+ SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
+ SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
+ SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
+ SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
+ DAG.getConstant(-BitWidth, AmtVT));
+ SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
+ SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
+ SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
SDValue OutOps[] = { OutLo, OutHi };
- return DAG.getMergeValues(OutOps, 2);
+ return DAG.getMergeValues(OutOps, 2, dl);
}
SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
unsigned BitWidth = VT.getSizeInBits();
assert(Op.getNumOperands() == 3 &&
VT == Op.getOperand(1).getValueType() &&
SDValue Amt = Op.getOperand(2);
MVT AmtVT = Amt.getValueType();
- SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
- DAG.getConstant(BitWidth, AmtVT), Amt);
- SDValue Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt);
- SDValue Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1);
- SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3);
- SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt,
- DAG.getConstant(-BitWidth, AmtVT));
- SDValue Tmp6 = DAG.getNode(PPCISD::SRL, VT, Hi, Tmp5);
- SDValue OutLo = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6);
- SDValue OutHi = DAG.getNode(PPCISD::SRL, VT, Hi, Amt);
+ SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
+ DAG.getConstant(BitWidth, AmtVT), Amt);
+ SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
+ SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
+ SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
+ SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
+ DAG.getConstant(-BitWidth, AmtVT));
+ SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
+ SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
+ SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
SDValue OutOps[] = { OutLo, OutHi };
- return DAG.getMergeValues(OutOps, 2);
+ return DAG.getMergeValues(OutOps, 2, dl);
}
SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) {
+ DebugLoc dl = Op.getDebugLoc();
MVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
assert(Op.getNumOperands() == 3 &&
SDValue Amt = Op.getOperand(2);
MVT AmtVT = Amt.getValueType();
- SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
- DAG.getConstant(BitWidth, AmtVT), Amt);
- SDValue Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt);
- SDValue Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1);
- SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3);
- SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt,
- DAG.getConstant(-BitWidth, AmtVT));
- SDValue Tmp6 = DAG.getNode(PPCISD::SRA, VT, Hi, Tmp5);
- SDValue OutHi = DAG.getNode(PPCISD::SRA, VT, Hi, Amt);
- SDValue OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, AmtVT),
- Tmp4, Tmp6, ISD::SETLE);
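+ // An arithmetic shift by an out-of-range amount fills with sign bits rather
+ // than zero, so the low half is chosen with a select on (Amt - BitWidth)
+ // instead of the OR trick used for SHL/SRL above.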
+ SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
+ DAG.getConstant(BitWidth, AmtVT), Amt);
+ SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
+ SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
+ SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
+ SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
+ DAG.getConstant(-BitWidth, AmtVT));
+ SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
+ SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
+ SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT),
+ Tmp4, Tmp6, ISD::SETLE);
SDValue OutOps[] = { OutLo, OutHi };
- return DAG.getMergeValues(OutOps, 2);
+ return DAG.getMergeValues(OutOps, 2, dl);
}
//===----------------------------------------------------------------------===//
UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
continue;
} else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
- EltBits = CN->getValue() & (~0U >> (32-EltBitSize));
+ EltBits = CN->getZExtValue() & (~0U >> (32-EltBitSize));
} else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
assert(CN->getValueType(0) == MVT::f32 &&
"Only one legal FP vector type!");
/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize. Cast the result to VT.
static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG, DebugLoc dl) {
assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
static const MVT VTys[] = { // canonical VT to use for each size.
SDValue Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType());
SmallVector<SDValue, 8> Ops;
Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
- SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT,
+ SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
&Ops[0], Ops.size());
- return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res);
}
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
- SelectionDAG &DAG,
- MVT DestVT = MVT::Other) {
+ SelectionDAG &DAG, DebugLoc dl,
+ MVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = LHS.getValueType();
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
DAG.getConstant(IID, MVT::i32), LHS, RHS);
}
/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
- SDValue Op2, SelectionDAG &DAG,
- MVT DestVT = MVT::Other) {
+ SDValue Op2, SelectionDAG &DAG,
+ DebugLoc dl, MVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = Op0.getValueType();
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
}
/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount. The result has the specified value type.
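+/// (A vsldoi of LHS, RHS by Amt extracts 16 consecutive bytes starting at byte
+/// Amt of the LHS:RHS concatenation, hence the i+Amt shuffle mask below.)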
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
- MVT VT, SelectionDAG &DAG) {
+ MVT VT, SelectionDAG &DAG, DebugLoc dl) {
// Force LHS/RHS to be the right type.
- LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
- RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);
+ LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, LHS);
+ RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, RHS);
SDValue Ops[16];
for (unsigned i = 0; i != 16; ++i)
Ops[i] = DAG.getConstant(i+Amt, MVT::i8);
- SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16));
- return DAG.getNode(ISD::BIT_CONVERT, VT, T);
+ SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, LHS, RHS,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops,16));
+ return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
}
// If this is a case we can't handle, return null and let the default
// zero.
uint64_t VectorBits[2];
uint64_t UndefBits[2];
+ DebugLoc dl = Op.getDebugLoc();
if (GetConstantBuildVectorBits(Op.getNode(), VectorBits, UndefBits))
return SDValue(); // Not a constant vector.
// Canonicalize all zero vectors to be v4i32.
if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
SDValue Z = DAG.getConstant(0, MVT::i32);
- Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
- Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
+ Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
+ Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z);
}
return Op;
}
// If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
if (SextVal >= -16 && SextVal <= 15)
- return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);
+ return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
// Two instruction sequences.
// If this value is in the range [-32,30] and is even, use:
// tmp = VSPLTI[bhw], result = add tmp, tmp
if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
- SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG);
- Res = DAG.getNode(ISD::ADD, Res.getValueType(), Res, Res);
- return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+ SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl);
+ Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
}
// If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
// 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
// for fneg/fabs.
if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
// Make -1 and vspltisw -1:
- SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);
+ SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
// Make the VSLW intrinsic, computing 0x8000_0000.
SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
- OnesV, DAG);
+ OnesV, DAG, dl);
// xor by OnesV to invert it.
- Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
- return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+ Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
}
// Check to see if this is a wide variety of vsplti*, binop self cases.
// vsplti + shl self.
if (SextVal == (i << (int)TypeShiftAmt)) {
- SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
+ SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
Intrinsic::ppc_altivec_vslw
};
- Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
- return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
}
// vsplti + srl self.
if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
- SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
+ SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
Intrinsic::ppc_altivec_vsrw
};
- Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
- return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
}
// vsplti + sra self.
if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
- SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
+ SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
Intrinsic::ppc_altivec_vsraw
};
- Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
- return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
}
// vsplti + rol self.
if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
- SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
+ SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
Intrinsic::ppc_altivec_vrlw
};
- Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
- return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+ Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
}
// t = vsplti c, result = vsldoi t, t, 1
if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
- SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
- return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
+ SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
+ return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
}
// t = vsplti c, result = vsldoi t, t, 2
if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
- SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
- return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
+ SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
+ return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl);
}
// t = vsplti c, result = vsldoi t, t, 3
if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
- SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
- return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
+ SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
+ return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
}
}
// Odd, in range [17,31]: (vsplti C)-(vsplti -16).
if (SextVal >= 0 && SextVal <= 31) {
- SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG);
- SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
- LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS);
- return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
+ SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl);
+ SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
+ LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
}
// Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
if (SextVal >= -31 && SextVal <= 0) {
- SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG);
- SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
- LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS);
- return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
+ SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl);
+ SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
+ LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
}
}
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
- SDValue RHS, SelectionDAG &DAG) {
+ SDValue RHS, SelectionDAG &DAG,
+ DebugLoc dl) {
unsigned OpNum = (PFEntry >> 26) & 0x0F;
- unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
+ unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
enum {
}
SDValue OpLHS, OpRHS;
- OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
- OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);
+ OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
+ OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
unsigned ShufIdxs[16];
switch (OpNum) {
ShufIdxs[i] = (i&3)+12;
break;
case OP_VSLDOI4:
- return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
+ return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
case OP_VSLDOI8:
- return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
+ return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
case OP_VSLDOI12:
- return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
+ return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
}
SDValue Ops[16];
for (unsigned i = 0; i != 16; ++i)
Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i8);
- return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
+ return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, OpLHS.getValueType(),
+ OpLHS, OpRHS,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
}
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
/// is a shuffle that can be handled with dedicated AltiVec instructions, do so;
/// in the worst case it is lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
SelectionDAG &DAG) {
+ DebugLoc dl = Op.getDebugLoc();
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
SDValue PermMask = Op.getOperand(2);
continue; // Undef, ignore it.
unsigned ByteSource =
- cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue();
+ cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getZExtValue();
if ((ByteSource & 3) != j) {
isFourElementShuffle = false;
break;
// available, if this block is within a loop, we should avoid using vperm
// for 3-operation perms and use a constant pool load instead.
if (Cost < 3)
- return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
+ return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
}
// Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
SrcElt = 0;
else
- SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();
+ SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getZExtValue();
for (unsigned j = 0; j != BytesPerElement; ++j)
ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
MVT::i8));
}
- SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
+ SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
&ResultMask[0], ResultMask.size());
- return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
+ return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
}
/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
/// altivec comparison; if it is, return true and fill in CompareOpc and isDot
/// with information about the intrinsic.
static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc,
bool &isDot) {
- unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue();
+ unsigned IntrinsicID =
+ cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
CompareOpc = -1;
isDot = false;
switch (IntrinsicID) {
SelectionDAG &DAG) {
// If this is a lowered altivec predicate compare, CompareOpc is set to the
// opcode number of the comparison.
+ DebugLoc dl = Op.getDebugLoc();
int CompareOpc;
bool isDot;
if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
// If this is a non-dot comparison, make the VCMP node and we are done.
if (!isDot) {
- SDValue Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
+ SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
Op.getOperand(1), Op.getOperand(2),
DAG.getConstant(CompareOpc, MVT::i32));
- return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Tmp);
}
// Create the PPCISD altivec 'dot' comparison node.
std::vector<MVT> VTs;
VTs.push_back(Op.getOperand(2).getValueType());
VTs.push_back(MVT::Flag);
- SDValue CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);
+ SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
// Now that we have the comparison, emit a copy from the CR to a GPR.
// This is flagged to the above dot comparison.
- SDValue Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
+ SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32,
DAG.getRegister(PPC::CR6, MVT::i32),
CompNode.getValue(1));
// Unpack the result based on how the target uses it.
unsigned BitNo; // Bit # of CR6.
bool InvertBit; // Invert result?
- switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
+ switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
default: // Can't happen, don't crash on invalid number though.
case 0: // Return the value of the EQ bit of CR6.
BitNo = 0; InvertBit = false;
}
// Shift the bit into the low position.
- Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
+ Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
DAG.getConstant(8-(3-BitNo), MVT::i32));
// Isolate the bit.
- Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
+ Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
DAG.getConstant(1, MVT::i32));
// If we are supposed to, toggle the bit.
if (InvertBit)
- Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
+ Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
DAG.getConstant(1, MVT::i32));
return Flags;
}
SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) {
+ DebugLoc dl = Op.getDebugLoc();
// Create a stack slot that is 16-byte aligned.
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(16, 16);
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
// Store the input value into Value#0 of the stack slot.
- SDValue Store = DAG.getStore(DAG.getEntryNode(),
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
Op.getOperand(0), FIdx, NULL, 0);
// Load it out.
- return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0);
+ return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, NULL, 0);
}
SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
+ DebugLoc dl = Op.getDebugLoc();
if (Op.getValueType() == MVT::v4i32) {
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
- SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG);
- SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt.
+ SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl);
+ SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.
SDValue RHSSwap = // = vrlw RHS, 16
- BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);
+ BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
// Shrinkify inputs to v8i16.
- LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
- RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
- RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);
+ LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, LHS);
+ RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHS);
+ RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHSSwap);
// Low parts multiplied together, generating 32-bit results (we ignore the
// top parts).
SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
- LHS, RHS, DAG, MVT::v4i32);
+ LHS, RHS, DAG, dl, MVT::v4i32);
SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
- LHS, RHSSwap, Zero, DAG, MVT::v4i32);
+ LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
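+ // With the halfwords of RHS swapped, vmsumuhm computes, for each word,
+ // LHS_hi*RHS_lo + LHS_lo*RHS_hi; shifted left by 16 and added to LoProd this
+ // gives the low 32 bits of each 32x32-bit product.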
// Shift the high parts up 16 bits.
- HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
- return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
+ HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
+ Neg16, DAG, dl);
+ return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
} else if (Op.getValueType() == MVT::v8i16) {
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
- SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);
+ SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
- LHS, RHS, Zero, DAG);
+ LHS, RHS, Zero, DAG, dl);
} else if (Op.getValueType() == MVT::v16i8) {
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
// Multiply the even 8-bit parts, producing 16-bit sums.
SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
- LHS, RHS, DAG, MVT::v8i16);
- EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);
+ LHS, RHS, DAG, dl, MVT::v8i16);
+ EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, EvenParts);
// Multiply the odd 8-bit parts, producing 16-bit sums.
SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
- LHS, RHS, DAG, MVT::v8i16);
- OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);
+ LHS, RHS, DAG, dl, MVT::v8i16);
+ OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts);
// Merge the results together.
SDValue Ops[16];
Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8);
Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
}
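+ // The low byte of each 16-bit product is its odd (big-endian) byte, so the
+ // shuffle below takes bytes 2*i+1 of the even products and bytes 2*i+1+16 of
+ // the odd products to interleave the byte results back into element order.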
- return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
+ return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, EvenParts, OddParts,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
} else {
assert(0 && "Unknown mul to lower!");
abort();
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::SETCC: return LowerSETCC(Op, DAG);
+ case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG);
case ISD::VASTART:
return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
- case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
+ case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG,
+ Op.getDebugLoc());
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
- case ISD::FP_ROUND_INREG: return LowerFP_ROUND_INREG(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
// Lower 64-bit shifts.
return SDValue();
}
-SDNode *PPCTargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
+void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue>&Results,
+ SelectionDAG &DAG) {
+ DebugLoc dl = N->getDebugLoc();
switch (N->getOpcode()) {
- default: assert(0 && "Wasn't expecting to be able to lower this!");
- case ISD::FP_TO_SINT: {
- SDValue Res = LowerFP_TO_SINT(SDValue(N, 0), DAG);
- // Use MERGE_VALUES to drop the chain result value and get a node with one
- // result. This requires turning off getMergeValues simplification, since
- // otherwise it will give us Res back.
- return DAG.getMergeValues(&Res, 1, false).getNode();
+ default:
+ assert(false && "Do not know how to custom type legalize this operation!");
+ return;
+ case ISD::FP_ROUND_INREG: {
+ assert(N->getValueType(0) == MVT::ppcf128);
+ assert(N->getOperand(0).getValueType() == MVT::ppcf128);
+ SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
+ MVT::f64, N->getOperand(0),
+ DAG.getIntPtrConstant(0));
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
+ MVT::f64, N->getOperand(0),
+ DAG.getIntPtrConstant(1));
+
+ // This sequence changes FPSCR to do round-to-zero, adds the two halves
+ // of the long double, and puts FPSCR back the way it was. We do not
+ // actually model FPSCR.
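+ // FPSCR bits 30-31 form the rounding-mode (RN) field; mtfsb1 31 followed by
+ // mtfsb0 30 sets RN=01, i.e. round toward zero, for the FADDRTZ below.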
+ std::vector<MVT> NodeTys;
+ SDValue Ops[4], Result, MFFSreg, InFlag, FPreg;
+
+ NodeTys.push_back(MVT::f64); // Return register
+ NodeTys.push_back(MVT::Flag); // Returns a flag for later insns
+ Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
+ MFFSreg = Result.getValue(0);
+ InFlag = Result.getValue(1);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::Flag); // Returns a flag
+ Ops[0] = DAG.getConstant(31, MVT::i32);
+ Ops[1] = InFlag;
+ Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2);
+ InFlag = Result.getValue(0);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::Flag); // Returns a flag
+ Ops[0] = DAG.getConstant(30, MVT::i32);
+ Ops[1] = InFlag;
+ Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2);
+ InFlag = Result.getValue(0);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::f64); // result of add
+ NodeTys.push_back(MVT::Flag); // Returns a flag
+ Ops[0] = Lo;
+ Ops[1] = Hi;
+ Ops[2] = InFlag;
+ Result = DAG.getNode(PPCISD::FADDRTZ, dl, NodeTys, Ops, 3);
+ FPreg = Result.getValue(0);
+ InFlag = Result.getValue(1);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::f64);
+ Ops[0] = DAG.getConstant(1, MVT::i32);
+ Ops[1] = MFFSreg;
+ Ops[2] = FPreg;
+ Ops[3] = InFlag;
+ Result = DAG.getNode(PPCISD::MTFSF, dl, NodeTys, Ops, 4);
+ FPreg = Result.getValue(0);
+
+ // We know the low half is about to be thrown away, so just use something
+ // convenient.
+ Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
+ FPreg, FPreg));
+ return;
}
+ case ISD::FP_TO_SINT:
+ Results.push_back(LowerFP_TO_SINT(SDValue(N, 0), DAG, dl));
+ return;
}
}
MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
- bool is64bit, unsigned BinOpcode) {
+ bool is64bit, unsigned BinOpcode) const {
// This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
MachineBasicBlock *BB,
bool is8bit, // operation
- unsigned BinOpcode) {
+ unsigned BinOpcode) const {
// This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
// In 64 bit mode we have to use 64 bits for addresses, even though the
MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) {
+ MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
// To "insert" these instructions we actually have to insert their
BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
- BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
+ BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
- BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
+ BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
- BB = EmitAtomicBinary(MI, BB, false, PPC::NAND);
+ BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
- BB = EmitAtomicBinary(MI, BB, true, PPC::NAND8);
+ BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
// Target Optimization Hooks
//===----------------------------------------------------------------------===//
-SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
+SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
TargetMachine &TM = getTargetMachine();
SelectionDAG &DAG = DCI.DAG;
+ DebugLoc dl = N->getDebugLoc();
switch (N->getOpcode()) {
default: break;
case PPCISD::SHL:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getValue() == 0) // 0 << V -> 0.
+ if (C->getZExtValue() == 0) // 0 << V -> 0.
return N->getOperand(0);
}
break;
case PPCISD::SRL:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getValue() == 0) // 0 >>u V -> 0.
+ if (C->getZExtValue() == 0) // 0 >>u V -> 0.
return N->getOperand(0);
}
break;
case PPCISD::SRA:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getValue() == 0 || // 0 >>s V -> 0.
+ if (C->getZExtValue() == 0 || // 0 >>s V -> 0.
C->isAllOnesValue()) // -1 >>s V -> -1.
return N->getOperand(0);
}
N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
SDValue Val = N->getOperand(0).getOperand(0);
if (Val.getValueType() == MVT::f32) {
- Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
+ Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
DCI.AddToWorklist(Val.getNode());
}
- Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
+ Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val);
DCI.AddToWorklist(Val.getNode());
- Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
+ Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val);
DCI.AddToWorklist(Val.getNode());
if (N->getValueType(0) == MVT::f32) {
- Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val,
+ Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val,
DAG.getIntPtrConstant(0));
DCI.AddToWorklist(Val.getNode());
}
N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
SDValue Val = N->getOperand(1).getOperand(0);
if (Val.getValueType() == MVT::f32) {
- Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
+ Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
DCI.AddToWorklist(Val.getNode());
}
- Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
+ Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
DCI.AddToWorklist(Val.getNode());
- Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
+ Val = DAG.getNode(PPCISD::STFIWX, dl, MVT::Other, N->getOperand(0), Val,
N->getOperand(2), N->getOperand(3));
DCI.AddToWorklist(Val.getNode());
return Val;
SDValue BSwapOp = N->getOperand(1).getOperand(0);
// Do an any-extend to 32-bits if this is a half-word input.
if (BSwapOp.getValueType() == MVT::i16)
- BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp);
+ BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
- return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp,
- N->getOperand(2), N->getOperand(3),
+ return DAG.getNode(PPCISD::STBRX, dl, MVT::Other, N->getOperand(0),
+ BSwapOp, N->getOperand(2), N->getOperand(3),
DAG.getValueType(N->getOperand(1).getValueType()));
}
break;
MO, // MemOperand
DAG.getValueType(N->getValueType(0)) // VT
};
- SDValue BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4);
+ SDValue BSLoad = DAG.getNode(PPCISD::LBRX, dl, VTs, Ops, 4);
// If this is an i16 load, insert the truncate.
SDValue ResVal = BSLoad;
if (N->getValueType(0) == MVT::i16)
- ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad);
+ ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
// First, combine the bswap away. This makes the value produced by the
// load dead.
// If this is a comparison against something other than 0/1, then we know
// that the condition is never/always true.
- unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
+ unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
if (Val != 0 && Val != 1) {
if (CC == ISD::SETEQ) // Cond never true, remove branch.
return N->getOperand(0);
// Always !=, turn it into an unconditional branch.
- return DAG.getNode(ISD::BR, MVT::Other,
+ return DAG.getNode(ISD::BR, dl, MVT::Other,
N->getOperand(0), N->getOperand(4));
}
};
VTs.push_back(LHS.getOperand(2).getValueType());
VTs.push_back(MVT::Flag);
- SDValue CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);
+ SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
// Unpack the result based on how the target uses it.
PPC::Predicate CompOpc;
- switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
+ switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
default: // Can't happen, don't crash on invalid number though.
case 0: // Branch on the value of the EQ bit of CR6.
CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
break;
}
- return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
+ return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
DAG.getConstant(CompOpc, MVT::i32),
DAG.getRegister(PPC::CR6, MVT::i32),
N->getOperand(4), CompNode.getValue(1));
break;
}
case ISD::INTRINSIC_WO_CHAIN: {
- switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
+ switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
default: break;
case Intrinsic::ppc_altivec_vcmpbfp_p:
case Intrinsic::ppc_altivec_vcmpeqfp_p:
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
-/// vector. If it is invalid, don't add anything to Ops.
+/// vector. If it is invalid, don't add anything to Ops. If hasMemory is true
+/// it means one of the asm constraints of the inline asm instruction being
+/// processed is 'm'.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter,
+ bool hasMemory,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
SDValue Result(0,0);
case 'P': {
ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
if (!CST) return; // Must be an immediate to match.
- unsigned Value = CST->getValue();
+ unsigned Value = CST->getZExtValue();
switch (Letter) {
default: assert(0 && "Unknown constraint letter!");
case 'I': // "I" is a signed 16-bit constant.
}
// Handle standard constraint letters.
- TargetLowering::LowerAsmOperandForConstraint(Op, Letter, Ops, DAG);
+ TargetLowering::LowerAsmOperandForConstraint(Op, Letter, hasMemory, Ops, DAG);
}
// isLegalAddressingMode - Return true if the addressing mode represented
}
SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
+ DebugLoc dl = Op.getDebugLoc();
// Depths > 0 not supported yet!
- if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
+ if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
return SDValue();
MachineFunction &MF = DAG.getMachineFunction();
// Make sure the function really does not optimize away the store of the RA
// to the stack.
FuncInfo->setLRStoreRequired();
- return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
+ return DAG.getLoad(getPointerTy(), dl,
+ DAG.getEntryNode(), RetAddrFI, NULL, 0);
}
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
+ DebugLoc dl = Op.getDebugLoc();
// Depths > 0 not supported yet!
- if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
+ if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
return SDValue();
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
&& MFI->getStackSize();
if (isPPC64)
- return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::X31 : PPC::X1,
+ return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::X31 : PPC::X1,
MVT::i64);
else
- return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::R31 : PPC::R1,
+ return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::R31 : PPC::R1,
MVT::i32);
}
+
+bool
+PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
+ // The PowerPC target isn't yet aware of offsets.
+ return false;
+}