diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index a236a3e91bf..8357b6d9d0e 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -13,9 +13,13 @@
 //===----------------------------------------------------------------------===//
 
 #include "R600ISelLowering.h"
+#include "AMDGPUFrameLowering.h"
+#include "AMDGPUIntrinsicInfo.h"
+#include "AMDGPUSubtarget.h"
 #include "R600Defines.h"
 #include "R600InstrInfo.h"
 #include "R600MachineFunctionInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -26,9 +30,9 @@
 
 using namespace llvm;
 
-R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
-    AMDGPUTargetLowering(TM),
-    Gen(TM.getSubtarget<AMDGPUSubtarget>().getGeneration()) {
+R600TargetLowering::R600TargetLowering(TargetMachine &TM,
+                                       const AMDGPUSubtarget &STI)
+    : AMDGPUTargetLowering(TM, STI), Gen(STI.getGeneration()) {
   addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
   addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
   addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);
@@ -36,7 +40,7 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
   addRegisterClass(MVT::v2f32, &AMDGPU::R600_Reg64RegClass);
   addRegisterClass(MVT::v2i32, &AMDGPU::R600_Reg64RegClass);
 
-  computeRegisterProperties();
+  computeRegisterProperties(STI.getRegisterInfo());
 
   // Set condition code actions
   setCondCodeAction(ISD::SETO, MVT::f32, Expand);
@@ -65,6 +69,7 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
 
   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
 
   setOperationAction(ISD::FSUB, MVT::f32, Expand);
 
@@ -78,13 +83,45 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::SETCC, MVT::i32, Expand);
   setOperationAction(ISD::SETCC, MVT::f32, Expand);
   setOperationAction(ISD::FP_TO_UINT, MVT::i1, Custom);
+  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
 
   setOperationAction(ISD::SELECT, MVT::i32, Expand);
   setOperationAction(ISD::SELECT, MVT::f32, Expand);
   setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
-  setOperationAction(ISD::SELECT, MVT::v2f32, Expand);
   setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
-  setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
+
+  // ADD, SUB overflow.
+  // TODO: turn these into Legal?
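  // A sketch of what this custom lowering produces (see LowerUADDSUBO below);
  // illustrative only, roughly:
  //   (uaddo x, y) -> merge_values((add x, y), sext_inreg(CARRY(x, y), i1))
  //   (usubo x, y) -> merge_values((sub x, y), sext_inreg(BORROW(x, y), i1))
  // so no libcall or generic expansion is needed for i32 overflow ops.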
+  if (Subtarget->hasCARRY())
+    setOperationAction(ISD::UADDO, MVT::i32, Custom);
+
+  if (Subtarget->hasBORROW())
+    setOperationAction(ISD::USUBO, MVT::i32, Custom);
+
+  // Expand sign extension of vectors
+  if (!Subtarget->hasBFE())
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Expand);
+
+  if (!Subtarget->hasBFE())
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Expand);
+
+  if (!Subtarget->hasBFE())
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Expand);
+
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Expand);
+
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand);
+
   // Legalize loads and stores to the private address space.
   setOperationAction(ISD::LOAD, MVT::i32, Custom);
@@ -93,12 +130,19 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
 
   // EXTLOAD should be the same as ZEXTLOAD. It is legal for some address
   // spaces, so it is custom lowered to handle those where it isn't.
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Custom);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Custom);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Custom);
-  setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
-  setLoadExtAction(ISD::EXTLOAD, MVT::i16, Custom);
+  for (MVT VT : MVT::integer_valuetypes()) {
+    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Custom);
+    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Custom);
+
+    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Custom);
+    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Custom);
+
+    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Custom);
+    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Custom);
+  }
 
   setOperationAction(ISD::STORE, MVT::i8, Custom);
   setOperationAction(ISD::STORE, MVT::i32, Custom);
@@ -111,16 +155,38 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
   setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
 
+  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Custom);
+  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom);
+  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
+  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
+
+  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i32, Custom);
+  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Custom);
+  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
+  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
+
   setTargetDAGCombine(ISD::FP_ROUND);
   setTargetDAGCombine(ISD::FP_TO_SINT);
   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
   setTargetDAGCombine(ISD::SELECT_CC);
   setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
 
+  // We don't have 64-bit shifts. Thus we need either SHX i64 or SHX_PARTS i32
+  // to be Legal/Custom in order to avoid library calls.
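  // For reference (illustrative, not from the patch): legalization decomposes
  // an i64 shift into i32 "parts" operations, e.g.
  //   (i64 shl x, c) -> (shl_parts lo(x), hi(x), c)
  // and LowerSHLParts/LowerSRXParts below pick between the c < 32 and c >= 32
  // results with a SETULT select. Worked case: hi:lo = 0x0:0x80000001 shifted
  // left by 1 yields hi = 0x1, lo = 0x2.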
+  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
+  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
+  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
+
   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
 
-  setBooleanContents(ZeroOrNegativeOneBooleanContent);
-  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
+  for (MVT VT : ScalarIntVTs) {
+    setOperationAction(ISD::ADDC, VT, Expand);
+    setOperationAction(ISD::SUBC, VT, Expand);
+    setOperationAction(ISD::ADDE, VT, Expand);
+    setOperationAction(ISD::SUBE, VT, Expand);
+  }
+
   setSchedulingPreference(Sched::Source);
 }
 
@@ -130,25 +196,24 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
   MachineRegisterInfo &MRI = MF->getRegInfo();
   MachineBasicBlock::iterator I = *MI;
   const R600InstrInfo *TII =
-      static_cast<const R600InstrInfo *>(MF->getTarget().getInstrInfo());
+      static_cast<const R600InstrInfo *>(Subtarget->getInstrInfo());
 
   switch (MI->getOpcode()) {
   default:
-    if (TII->isLDSInstr(MI->getOpcode()) &&
-        TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst) != -1) {
+    // Replace LDS_*_RET instruction that don't have any uses with the
+    // equivalent LDS_*_NORET instruction.
+    if (TII->isLDSRetInstr(MI->getOpcode())) {
       int DstIdx = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
       assert(DstIdx != -1);
       MachineInstrBuilder NewMI;
-      if (!MRI.use_empty(MI->getOperand(DstIdx).getReg())) {
-        NewMI = BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()),
-                        AMDGPU::OQAP);
-        TII->buildDefaultInstruction(*BB, I, AMDGPU::MOV,
-                                     MI->getOperand(0).getReg(),
-                                     AMDGPU::OQAP);
-      } else {
-        NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
-                        TII->get(AMDGPU::getLDSNoRetOp(MI->getOpcode())));
-      }
+      // FIXME: getLDSNoRetOp method only handles LDS_1A1D LDS ops. Add
+      // LDS_1A2D support and remove this special case.
+      if (!MRI.use_empty(MI->getOperand(DstIdx).getReg()) ||
+          MI->getOpcode() == AMDGPU::LDS_CMPST_RET)
+        return BB;
+
+      NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
+                      TII->get(AMDGPU::getLDSNoRetOp(MI->getOpcode())));
       for (unsigned i = 1, e = MI->getNumOperands(); i < e; ++i) {
         NewMI.addOperand(MI->getOperand(i));
       }
@@ -211,7 +276,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
   case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
   case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
   case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
-    unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
+    unsigned EOP = (std::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
 
     BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
             .addOperand(MI->getOperand(0))
@@ -461,9 +526,9 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
     // Instruction is left unmodified if its not the last one of its type
     bool isLastInstructionOfItsType = true;
     unsigned InstExportType = MI->getOperand(1).getImm();
-    for (MachineBasicBlock::iterator NextExportInst = llvm::next(I),
+    for (MachineBasicBlock::iterator NextExportInst = std::next(I),
          EndBlock = BB->end(); NextExportInst != EndBlock;
-         NextExportInst = llvm::next(NextExportInst)) {
+         NextExportInst = std::next(NextExportInst)) {
       if (NextExportInst->getOpcode() == AMDGPU::EG_ExportSwz ||
           NextExportInst->getOpcode() == AMDGPU::R600_ExportSwz) {
         unsigned CurrentInstExportType = NextExportInst->getOperand(1)
@@ -474,7 +539,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
         }
       }
     }
-    bool EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN)? 1 : 0;
+    bool EOP = (std::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
     if (!EOP && !isLastInstructionOfItsType)
       return BB;
     unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_ExportSwz)? 84 : 40;
@@ -514,12 +579,26 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
   R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
   switch (Op.getOpcode()) {
   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
+  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
+  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
+  case ISD::SHL_PARTS: return LowerSHLParts(Op, DAG);
+  case ISD::SRA_PARTS:
+  case ISD::SRL_PARTS: return LowerSRXParts(Op, DAG);
+  case ISD::UADDO: return LowerUADDSUBO(Op, DAG, ISD::ADD, AMDGPUISD::CARRY);
+  case ISD::USUBO: return LowerUADDSUBO(Op, DAG, ISD::SUB, AMDGPUISD::BORROW);
   case ISD::FCOS:
   case ISD::FSIN: return LowerTrig(Op, DAG);
   case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
   case ISD::STORE: return LowerSTORE(Op, DAG);
-  case ISD::LOAD: return LowerLOAD(Op, DAG);
-  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
+  case ISD::LOAD: {
+    SDValue Result = LowerLOAD(Op, DAG);
+    assert((!Result.getNode() ||
+            Result.getNode()->getNumValues() == 2) &&
+           "Load should return a value and a chain");
+    return Result;
+  }
+
+  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
   case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
   case ISD::INTRINSIC_VOID: {
     SDValue Chain = Op.getOperand(0);
@@ -533,18 +612,18 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
       return DAG.getCopyToReg(Chain, SDLoc(Op), Reg, Op.getOperand(2));
     }
     case AMDGPUIntrinsic::R600_store_swizzle: {
+      SDLoc DL(Op);
       const SDValue Args[8] = {
         Chain,
         Op.getOperand(2), // Export Value
         Op.getOperand(3), // ArrayBase
         Op.getOperand(4), // Type
-        DAG.getConstant(0, MVT::i32), // SWZ_X
-        DAG.getConstant(1, MVT::i32), // SWZ_Y
-        DAG.getConstant(2, MVT::i32), // SWZ_Z
-        DAG.getConstant(3, MVT::i32) // SWZ_W
+        DAG.getConstant(0, DL, MVT::i32), // SWZ_X
+        DAG.getConstant(1, DL, MVT::i32), // SWZ_Y
+        DAG.getConstant(2, DL, MVT::i32), // SWZ_Z
+        DAG.getConstant(3, DL, MVT::i32) // SWZ_W
       };
-      return DAG.getNode(AMDGPUISD::EXPORT, SDLoc(Op), Op.getValueType(),
-                         Args, 8);
+      return DAG.getNode(AMDGPUISD::EXPORT, DL, Op.getValueType(), Args);
     }
 
     // default for switch(IntrinsicID)
@@ -575,11 +654,10 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
     int ijb = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
     MachineSDNode *interp;
     if (ijb < 0) {
-      const MachineFunction &MF = DAG.getMachineFunction();
       const R600InstrInfo *TII =
-          static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
+          static_cast<const R600InstrInfo *>(Subtarget->getInstrInfo());
       interp = DAG.getMachineNode(AMDGPU::INTERP_VEC_LOAD, DL,
-          MVT::v4f32, DAG.getTargetConstant(slot / 4 , MVT::i32));
+          MVT::v4f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32));
       return DAG.getTargetExtractSubreg(
           TII->getRegisterInfo().getSubRegFromChannel(slot % 4),
           DL, MVT::f32, SDValue(interp, 0));
@@ -597,11 +675,11 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
 
     if (slot % 4 < 2)
       interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
-          MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4 , MVT::i32),
+          MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32),
           RegisterJNode, RegisterINode);
     else
       interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
-          MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4 , MVT::i32),
+          MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32),
          RegisterJNode, RegisterINode);
     return SDValue(interp, slot % 2);
   }
@@ -614,11 +692,11 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
 
     if (IntrinsicID == AMDGPUIntrinsic::R600_interp_xy)
       interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
-          MVT::f32, MVT::f32, DAG.getTargetConstant(slot, MVT::i32),
+          MVT::f32, MVT::f32, DAG.getTargetConstant(slot, DL, MVT::i32),
          RegisterJNode, RegisterINode);
     else
       interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
-          MVT::f32, MVT::f32, DAG.getTargetConstant(slot, MVT::i32),
+          MVT::f32, MVT::f32, DAG.getTargetConstant(slot, DL, MVT::i32),
          RegisterJNode, RegisterINode);
     return DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f32,
         SDValue(interp, 0), SDValue(interp, 1));
@@ -674,19 +752,19 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
     }
 
     SDValue TexArgs[19] = {
-      DAG.getConstant(TextureOp, MVT::i32),
+      DAG.getConstant(TextureOp, DL, MVT::i32),
       Op.getOperand(1),
-      DAG.getConstant(0, MVT::i32),
-      DAG.getConstant(1, MVT::i32),
-      DAG.getConstant(2, MVT::i32),
-      DAG.getConstant(3, MVT::i32),
+      DAG.getConstant(0, DL, MVT::i32),
+      DAG.getConstant(1, DL, MVT::i32),
+      DAG.getConstant(2, DL, MVT::i32),
+      DAG.getConstant(3, DL, MVT::i32),
       Op.getOperand(2),
       Op.getOperand(3),
       Op.getOperand(4),
-      DAG.getConstant(0, MVT::i32),
-      DAG.getConstant(1, MVT::i32),
-      DAG.getConstant(2, MVT::i32),
-      DAG.getConstant(3, MVT::i32),
+      DAG.getConstant(0, DL, MVT::i32),
+      DAG.getConstant(1, DL, MVT::i32),
+      DAG.getConstant(2, DL, MVT::i32),
+      DAG.getConstant(3, DL, MVT::i32),
       Op.getOperand(5),
       Op.getOperand(6),
       Op.getOperand(7),
@@ -694,28 +772,28 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
       Op.getOperand(9),
       Op.getOperand(10)
     };
-    return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs, 19);
+    return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs);
   }
   case AMDGPUIntrinsic::AMDGPU_dp4: {
     SDValue Args[8] = {
       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
-          DAG.getConstant(0, MVT::i32)),
+          DAG.getConstant(0, DL, MVT::i32)),
       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
-          DAG.getConstant(0, MVT::i32)),
+          DAG.getConstant(0, DL, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
-          DAG.getConstant(1, MVT::i32)),
+          DAG.getConstant(1, DL, MVT::i32)),
       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
-          DAG.getConstant(1, MVT::i32)),
+          DAG.getConstant(1, DL, MVT::i32)),
       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
-          DAG.getConstant(2, MVT::i32)),
+          DAG.getConstant(2, DL, MVT::i32)),
       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
-          DAG.getConstant(2, MVT::i32)),
+          DAG.getConstant(2, DL, MVT::i32)),
       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
-          DAG.getConstant(3, MVT::i32)),
+          DAG.getConstant(3, DL, MVT::i32)),
       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
-          DAG.getConstant(3, MVT::i32))
+          DAG.getConstant(3, DL, MVT::i32))
     };
-    return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args, 8);
+    return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args);
  }
 
  case Intrinsic::r600_read_ngroups_x:
@@ -737,6 +815,9 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
   case Intrinsic::r600_read_local_size_z:
     return LowerImplicitParameter(DAG, VT, DL, 8);
 
+  case Intrinsic::AMDGPU_read_workdim:
+    return LowerImplicitParameter(DAG, VT, DL, MFI->ABIArgOffset / 4);
+
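  // For reference (illustrative): these intrinsics each map to a fixed dword
  // slot of the implicit parameter buffer loaded by LowerImplicitParameter,
  // e.g. r600_read_local_size_z above reads dword 8 (byte offset 32), while
  // AMDGPU_read_workdim reads the first dword past the kernel arguments
  // (MFI->ABIArgOffset / 4).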
   case Intrinsic::r600_read_tgid_x:
     return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                 AMDGPU::T1_X, VT);
@@ -755,6 +836,13 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
   case Intrinsic::r600_read_tidig_z:
     return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                 AMDGPU::T0_Z, VT);
+  case Intrinsic::AMDGPU_rsq:
+    // XXX - I'm assuming SI's RSQ_LEGACY matches R600's behavior.
+    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
+
+  case AMDGPUIntrinsic::AMDGPU_fract:
+  case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
+    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
   }
   // break out of case ISD::INTRINSIC_WO_CHAIN in switch(Op.getOpcode())
   break;
@@ -767,35 +855,99 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
   switch (N->getOpcode()) {
-  default: return;
-  case ISD::FP_TO_UINT: Results.push_back(LowerFPTOUINT(N->getOperand(0), DAG));
+  default:
+    AMDGPUTargetLowering::ReplaceNodeResults(N, Results, DAG);
     return;
-  case ISD::LOAD: {
-    SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
-    Results.push_back(SDValue(Node, 0));
-    Results.push_back(SDValue(Node, 1));
-    // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
-    // function
-    DAG.ReplaceAllUsesOfValueWith(SDValue(N,1), SDValue(Node, 1));
+  case ISD::FP_TO_UINT:
+    if (N->getValueType(0) == MVT::i1) {
+      Results.push_back(LowerFPTOUINT(N->getOperand(0), DAG));
+      return;
+    }
+    // Fall-through. Since we don't care about out of bounds values
+    // we can use FP_TO_SINT for uints too. The DAGLegalizer code for uint
+    // considers some extra cases which are not necessary here.
+  case ISD::FP_TO_SINT: {
+    SDValue Result;
+    if (expandFP_TO_SINT(N, Result, DAG))
+      Results.push_back(Result);
     return;
   }
-  case ISD::STORE:
-    SDNode *Node = LowerSTORE(SDValue(N, 0), DAG).getNode();
-    Results.push_back(SDValue(Node, 0));
-    return;
+  case ISD::SDIVREM: {
+    SDValue Op = SDValue(N, 1);
+    SDValue RES = LowerSDIVREM(Op, DAG);
+    Results.push_back(RES);
+    Results.push_back(RES.getValue(1));
+    break;
+  }
+  case ISD::UDIVREM: {
+    SDValue Op = SDValue(N, 0);
+    LowerUDIVREM64(Op, DAG, Results);
+    break;
+  }
   }
 }
 
+SDValue R600TargetLowering::vectorToVerticalVector(SelectionDAG &DAG,
+                                                   SDValue Vector) const {
+
+  SDLoc DL(Vector);
+  EVT VecVT = Vector.getValueType();
+  EVT EltVT = VecVT.getVectorElementType();
+  SmallVector<SDValue, 8> Args;
+
+  for (unsigned i = 0, e = VecVT.getVectorNumElements();
+                                                    i != e; ++i) {
+    Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vector,
+                               DAG.getConstant(i, DL, getVectorIdxTy())));
+  }
+
+  return DAG.getNode(AMDGPUISD::BUILD_VERTICAL_VECTOR, DL, VecVT, Args);
+}
+
+SDValue R600TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+
+  SDLoc DL(Op);
+  SDValue Vector = Op.getOperand(0);
+  SDValue Index = Op.getOperand(1);
+
+  if (isa<ConstantSDNode>(Index) ||
+      Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
+    return Op;
+
+  Vector = vectorToVerticalVector(DAG, Vector);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getValueType(),
+                     Vector, Index);
+}
+
+SDValue R600TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
+                                                   SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  SDValue Vector = Op.getOperand(0);
+  SDValue Value = Op.getOperand(1);
+  SDValue Index = Op.getOperand(2);
+
+  if (isa<ConstantSDNode>(Index) ||
+      Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
+    return Op;
+
+  Vector = vectorToVerticalVector(DAG, Vector);
+  SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, Op.getValueType(),
+                               Vector, Value, Index);
+  return vectorToVerticalVector(DAG, Insert);
+}
+
 SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
   // On hw >= R700, COS/SIN input must be between -1. and 1.
   // Thus we lower them to TRIG ( FRACT ( x / 2Pi + 0.5) - 0.5)
   EVT VT = Op.getValueType();
   SDValue Arg = Op.getOperand(0);
-  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, SDLoc(Op), VT,
-      DAG.getNode(ISD::FADD, SDLoc(Op), VT,
-        DAG.getNode(ISD::FMUL, SDLoc(Op), VT, Arg,
-          DAG.getConstantFP(0.15915494309, MVT::f32)),
-        DAG.getConstantFP(0.5, MVT::f32)));
+  SDLoc DL(Op);
+  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
+      DAG.getNode(ISD::FADD, DL, VT,
+        DAG.getNode(ISD::FMUL, DL, VT, Arg,
+          DAG.getConstantFP(0.15915494309, DL, MVT::f32)),
+        DAG.getConstantFP(0.5, DL, MVT::f32)));
   unsigned TrigNode;
   switch (Op.getOpcode()) {
   case ISD::FCOS:
@@ -807,22 +959,115 @@ SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
   default:
     llvm_unreachable("Wrong trig opcode");
   }
-  SDValue TrigVal = DAG.getNode(TrigNode, SDLoc(Op), VT,
-      DAG.getNode(ISD::FADD, SDLoc(Op), VT, FractPart,
-        DAG.getConstantFP(-0.5, MVT::f32)));
+  SDValue TrigVal = DAG.getNode(TrigNode, DL, VT,
+      DAG.getNode(ISD::FADD, DL, VT, FractPart,
+        DAG.getConstantFP(-0.5, DL, MVT::f32)));
   if (Gen >= AMDGPUSubtarget::R700)
     return TrigVal;
   // On R600 hw, COS/SIN input must be between -Pi and Pi.
-  return DAG.getNode(ISD::FMUL, SDLoc(Op), VT, TrigVal,
-      DAG.getConstantFP(3.14159265359, MVT::f32));
+  return DAG.getNode(ISD::FMUL, DL, VT, TrigVal,
+      DAG.getConstantFP(3.14159265359, DL, MVT::f32));
+}
+
+SDValue R600TargetLowering::LowerSHLParts(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT VT = Op.getValueType();
+
+  SDValue Lo = Op.getOperand(0);
+  SDValue Hi = Op.getOperand(1);
+  SDValue Shift = Op.getOperand(2);
+  SDValue Zero = DAG.getConstant(0, DL, VT);
+  SDValue One = DAG.getConstant(1, DL, VT);
+
+  SDValue Width = DAG.getConstant(VT.getSizeInBits(), DL, VT);
+  SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
+  SDValue BigShift = DAG.getNode(ISD::SUB, DL, VT, Shift, Width);
+  SDValue CompShift = DAG.getNode(ISD::SUB, DL, VT, Width1, Shift);
+
+  // The dance around Width1 is necessary for 0 special case.
+  // Without it the CompShift might be 32, producing incorrect results in
+  // Overflow. So we do the shift in two steps, the alternative is to
+  // add a conditional to filter the special case.
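  // Worked example (illustrative): for Shift == 0, CompShift == 31, so
  //   Overflow = (Lo >> 31) >> 1 == 0,
  // which is correct, whereas the one-step form Lo >> (32 - Shift) would be
  // Lo >> 32 -- an out-of-range amount for a 32-bit shifter.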
+
+  SDValue Overflow = DAG.getNode(ISD::SRL, DL, VT, Lo, CompShift);
+  Overflow = DAG.getNode(ISD::SRL, DL, VT, Overflow, One);
+
+  SDValue HiSmall = DAG.getNode(ISD::SHL, DL, VT, Hi, Shift);
+  HiSmall = DAG.getNode(ISD::OR, DL, VT, HiSmall, Overflow);
+  SDValue LoSmall = DAG.getNode(ISD::SHL, DL, VT, Lo, Shift);
+
+  SDValue HiBig = DAG.getNode(ISD::SHL, DL, VT, Lo, BigShift);
+  SDValue LoBig = Zero;
+
+  Hi = DAG.getSelectCC(DL, Shift, Width, HiSmall, HiBig, ISD::SETULT);
+  Lo = DAG.getSelectCC(DL, Shift, Width, LoSmall, LoBig, ISD::SETULT);
+
+  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT,VT), Lo, Hi);
+}
+
+SDValue R600TargetLowering::LowerSRXParts(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT VT = Op.getValueType();
+
+  SDValue Lo = Op.getOperand(0);
+  SDValue Hi = Op.getOperand(1);
+  SDValue Shift = Op.getOperand(2);
+  SDValue Zero = DAG.getConstant(0, DL, VT);
+  SDValue One = DAG.getConstant(1, DL, VT);
+
+  const bool SRA = Op.getOpcode() == ISD::SRA_PARTS;
+
+  SDValue Width = DAG.getConstant(VT.getSizeInBits(), DL, VT);
+  SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
+  SDValue BigShift = DAG.getNode(ISD::SUB, DL, VT, Shift, Width);
+  SDValue CompShift = DAG.getNode(ISD::SUB, DL, VT, Width1, Shift);
+
+  // The dance around Width1 is necessary for 0 special case.
+  // Without it the CompShift might be 32, producing incorrect results in
+  // Overflow. So we do the shift in two steps, the alternative is to
+  // add a conditional to filter the special case.
+
+  SDValue Overflow = DAG.getNode(ISD::SHL, DL, VT, Hi, CompShift);
+  Overflow = DAG.getNode(ISD::SHL, DL, VT, Overflow, One);
+
+  SDValue HiSmall = DAG.getNode(SRA ? ISD::SRA : ISD::SRL, DL, VT, Hi, Shift);
+  SDValue LoSmall = DAG.getNode(ISD::SRL, DL, VT, Lo, Shift);
+  LoSmall = DAG.getNode(ISD::OR, DL, VT, LoSmall, Overflow);
+
+  SDValue LoBig = DAG.getNode(SRA ? ISD::SRA : ISD::SRL, DL, VT, Hi, BigShift);
+  SDValue HiBig = SRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, Width1) : Zero;
+
+  Hi = DAG.getSelectCC(DL, Shift, Width, HiSmall, HiBig, ISD::SETULT);
+  Lo = DAG.getSelectCC(DL, Shift, Width, LoSmall, LoBig, ISD::SETULT);
+
+  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT,VT), Lo, Hi);
+}
+
+SDValue R600TargetLowering::LowerUADDSUBO(SDValue Op, SelectionDAG &DAG,
+                                          unsigned mainop, unsigned ovf) const {
+  SDLoc DL(Op);
+  EVT VT = Op.getValueType();
+
+  SDValue Lo = Op.getOperand(0);
+  SDValue Hi = Op.getOperand(1);
+
+  SDValue OVF = DAG.getNode(ovf, DL, VT, Lo, Hi);
+  // Extend sign.
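  // Editorial note, assuming CARRY/BORROW return the flag as 0 or 1 in bit 0:
  // the SIGN_EXTEND_INREG from i1 below widens that into the 0 / -1 mask form
  // (e.g. 1 -> 0xffffffff) used for booleans on this target.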
+  OVF = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, OVF,
+                    DAG.getValueType(MVT::i1));
+
+  SDValue Res = DAG.getNode(mainop, DL, VT, Lo, Hi);
+
+  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT, VT), Res, OVF);
 }
 
 SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc DL(Op);
   return DAG.getNode(
       ISD::SETCC,
-      SDLoc(Op),
+      DL,
       MVT::i1,
-      Op, DAG.getConstantFP(0.0f, MVT::f32),
+      Op, DAG.getConstantFP(0.0f, DL, MVT::f32),
       DAG.getCondCode(ISD::SETNE)
       );
 }
 
@@ -838,25 +1083,11 @@ SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
   assert(isInt<16>(ByteOffset));
 
   return DAG.getLoad(VT, DL, DAG.getEntryNode(),
-                     DAG.getConstant(ByteOffset, MVT::i32), // PTR
+                     DAG.getConstant(ByteOffset, DL, MVT::i32), // PTR
                      MachinePointerInfo(ConstantPointerNull::get(PtrType)),
                      false, false, false, 0);
 }
 
-SDValue R600TargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const {
-
-  MachineFunction &MF = DAG.getMachineFunction();
-  const AMDGPUFrameLowering *TFL =
-      static_cast<const AMDGPUFrameLowering *>(getTargetMachine().getFrameLowering());
-
-  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
-  assert(FIN);
-
-  unsigned FrameIndex = FIN->getIndex();
-  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
-  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), MVT::i32);
-}
-
 bool R600TargetLowering::isZero(SDValue Op) const {
   if(ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
     return Cst->isNullValue();
@@ -878,6 +1109,13 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
   SDValue CC = Op.getOperand(4);
   SDValue Temp;
 
+  if (VT == MVT::f32) {
+    DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
+    SDValue MinMax = CombineFMinMaxLegacy(DL, VT, LHS, RHS, True, False, CC, DCI);
+    if (MinMax)
+      return MinMax;
+  }
+
   // LHS and RHS are guaranteed to be the same value type
   EVT CompareVT = LHS.getValueType();
 
@@ -977,26 +1215,19 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
     return DAG.getNode(ISD::BITCAST, DL, VT, SelectNode);
   }
 
-
-  // Possible Min/Max pattern
-  SDValue MinMax = LowerMinMax(Op, DAG);
-  if (MinMax.getNode()) {
-    return MinMax;
-  }
-
   // If we make it this for it means we have no native instructions to handle
   // this SELECT_CC, so we must lower it.
   SDValue HWTrue, HWFalse;
 
   if (CompareVT == MVT::f32) {
-    HWTrue = DAG.getConstantFP(1.0f, CompareVT);
-    HWFalse = DAG.getConstantFP(0.0f, CompareVT);
+    HWTrue = DAG.getConstantFP(1.0f, DL, CompareVT);
+    HWFalse = DAG.getConstantFP(0.0f, DL, CompareVT);
   } else if (CompareVT == MVT::i32) {
-    HWTrue = DAG.getConstant(-1, CompareVT);
-    HWFalse = DAG.getConstant(0, CompareVT);
+    HWTrue = DAG.getConstant(-1, DL, CompareVT);
+    HWFalse = DAG.getConstant(0, DL, CompareVT);
   } else {
-    assert(!"Unhandled value type in LowerSELECT_CC");
+    llvm_unreachable("Unhandled value type in LowerSELECT_CC");
   }
 
   // Lower this unsupported SELECT_CC into a combination of two supported
@@ -1009,7 +1240,7 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
                      DAG.getCondCode(ISD::SETNE));
 }
 
-/// LLVM generates byte-addresed pointers.  For indirect addressing, we need to
+/// LLVM generates byte-addressed pointers.  For indirect addressing, we need to
 /// convert these pointers to a register index.  Each register holds
 /// 16 bytes, (4 x 32bit sub-register), but we need to take into account the
 /// \p StackWidth, which tells us how many of the 4 sub-registers will be used
 /// for indirect addressing.
 SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
                                                unsigned StackWidth,
                                                SelectionDAG &DAG) const {
@@ -1031,8 +1262,9 @@ SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
   default: llvm_unreachable("Invalid stack width");
   }
 
-  return DAG.getNode(ISD::SRL, SDLoc(Ptr), Ptr.getValueType(), Ptr,
-                     DAG.getConstant(SRLPad, MVT::i32));
+  SDLoc DL(Ptr);
+  return DAG.getNode(ISD::SRL, DL, Ptr.getValueType(), Ptr,
+                     DAG.getConstant(SRLPad, DL, MVT::i32));
 }
 
 void R600TargetLowering::getStackAddress(unsigned StackWidth,
@@ -1083,42 +1315,42 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
     EVT MemVT = StoreNode->getMemoryVT();
     SDValue MaskConstant;
     if (MemVT == MVT::i8) {
-      MaskConstant = DAG.getConstant(0xFF, MVT::i32);
+      MaskConstant = DAG.getConstant(0xFF, DL, MVT::i32);
     } else {
       assert(MemVT == MVT::i16);
-      MaskConstant = DAG.getConstant(0xFFFF, MVT::i32);
+      MaskConstant = DAG.getConstant(0xFFFF, DL, MVT::i32);
     }
     SDValue DWordAddr = DAG.getNode(ISD::SRL, DL, VT, Ptr,
-                                    DAG.getConstant(2, MVT::i32));
+                                    DAG.getConstant(2, DL, MVT::i32));
     SDValue ByteIndex = DAG.getNode(ISD::AND, DL, Ptr.getValueType(), Ptr,
-                                    DAG.getConstant(0x00000003, VT));
+                                    DAG.getConstant(0x00000003, DL, VT));
     SDValue TruncValue = DAG.getNode(ISD::AND, DL, VT, Value, MaskConstant);
     SDValue Shift = DAG.getNode(ISD::SHL, DL, VT, ByteIndex,
-                                DAG.getConstant(3, VT));
+                                DAG.getConstant(3, DL, VT));
     SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, VT, TruncValue, Shift);
     SDValue Mask = DAG.getNode(ISD::SHL, DL, VT, MaskConstant, Shift);
     // XXX: If we add a 64-bit ZW register class, then we could use a 2 x i32
     // vector instead.
     SDValue Src[4] = {
       ShiftedValue,
-      DAG.getConstant(0, MVT::i32),
-      DAG.getConstant(0, MVT::i32),
+      DAG.getConstant(0, DL, MVT::i32),
+      DAG.getConstant(0, DL, MVT::i32),
       Mask
     };
-    SDValue Input = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Src, 4);
+    SDValue Input = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Src);
     SDValue Args[3] = { Chain, Input, DWordAddr };
     return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
-                                   Op->getVTList(), Args, 3, MemVT,
+                                   Op->getVTList(), Args, MemVT,
                                    StoreNode->getMemOperand());
   } else if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR &&
             Value.getValueType().bitsGE(MVT::i32)) {
     // Convert pointer from byte address to dword address.
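    // For example (illustrative): DWORDADDR wraps the byte address divided by
    // four, so a store to byte offset 8 becomes a store to dword address 2.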
     Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, Ptr.getValueType(),
                       DAG.getNode(ISD::SRL, DL, Ptr.getValueType(),
-                                  Ptr, DAG.getConstant(2, MVT::i32)));
+                                  Ptr, DAG.getConstant(2, DL, MVT::i32)));
 
     if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
-      assert(!"Truncated and indexed stores not supported yet");
+      llvm_unreachable("Truncated and indexed stores not supported yet");
     } else {
       Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
     }
@@ -1132,11 +1364,15 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
     return SDValue();
   }
 
+  SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
+  if (Ret.getNode()) {
+    return Ret;
+  }
+
   // Lowering for indirect addressing
   const MachineFunction &MF = DAG.getMachineFunction();
-  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
-                                         getTargetMachine().getFrameLowering());
+  const AMDGPUFrameLowering *TFL =
+      static_cast<const AMDGPUFrameLowering *>(Subtarget->getFrameLowering());
   unsigned StackWidth = TFL->getStackWidth(MF);
 
   Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);
@@ -1144,7 +1380,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   if (ValueVT.isVector()) {
     unsigned NumElemVT = ValueVT.getVectorNumElements();
     EVT ElemVT = ValueVT.getVectorElementType();
-    SDValue Stores[4];
+    SmallVector<SDValue, 4> Stores(NumElemVT);
 
     assert(NumElemVT >= StackWidth && "Stack width cannot be greater than "
                                       "vector width in load");
@@ -1153,21 +1389,21 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
       unsigned Channel, PtrIncr;
       getStackAddress(StackWidth, i, Channel, PtrIncr);
       Ptr = DAG.getNode(ISD::ADD, DL, MVT::i32, Ptr,
-                        DAG.getConstant(PtrIncr, MVT::i32));
+                        DAG.getConstant(PtrIncr, DL, MVT::i32));
       SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT,
-                                 Value, DAG.getConstant(i, MVT::i32));
+                                 Value, DAG.getConstant(i, DL, MVT::i32));
 
       Stores[i] = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                               Chain, Elem, Ptr,
-                              DAG.getTargetConstant(Channel, MVT::i32));
+                              DAG.getTargetConstant(Channel, DL, MVT::i32));
     }
-    Chain =  DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores, NumElemVT);
+    Chain =  DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
   } else {
     if (ValueVT == MVT::i8) {
       Value = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Value);
     }
     Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other, Chain, Value, Ptr,
-                        DAG.getTargetConstant(0, MVT::i32)); // Channel
+                        DAG.getTargetConstant(0, DL, MVT::i32)); // Channel
   }
 
   return Chain;
@@ -1223,12 +1459,36 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
   SDValue Ptr = Op.getOperand(1);
   SDValue LoweredLoad;
 
+  SDValue Ret = AMDGPUTargetLowering::LowerLOAD(Op, DAG);
+  if (Ret.getNode()) {
+    SDValue Ops[2] = {
+      Ret,
+      Chain
+    };
+    return DAG.getMergeValues(Ops, DL);
+  }
+
+  // Lower loads constant address space global variable loads
+  if (LoadNode->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
+      isa<GlobalVariable>(GetUnderlyingObject(
+          LoadNode->getMemOperand()->getValue(), *getDataLayout()))) {
+
+    SDValue Ptr = DAG.getZExtOrTrunc(LoadNode->getBasePtr(), DL,
+        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
+    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
+        DAG.getConstant(2, DL, MVT::i32));
+    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op->getVTList(),
+                       LoadNode->getChain(), Ptr,
+                       DAG.getTargetConstant(0, DL, MVT::i32),
+                       Op.getOperand(2));
+  }
+
   if (LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && VT.isVector()) {
     SDValue MergedValues[2] = {
-      SplitVectorLoad(Op, DAG),
+      ScalarizeVectorLoad(Op, DAG),
       Chain
     };
-    return DAG.getMergeValues(MergedValues, 2, DL);
+    return DAG.getMergeValues(MergedValues, DL);
   }
 
   int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
@@ -1236,8 +1496,8 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
       ((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) ||
        (LoadNode->getExtensionType() == ISD::ZEXTLOAD))) {
     SDValue Result;
-    if (isa<ConstantExpr>(LoadNode->getSrcValue()) ||
-        isa<Constant>(LoadNode->getSrcValue()) ||
+    if (isa<ConstantExpr>(LoadNode->getMemOperand()->getValue()) ||
+        isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
        isa<ConstantSDNode>(Ptr)) {
      SDValue Slots[4];
      for (unsigned i = 0; i < 4; i++) {
@@ -1247,7 +1507,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
        // Thus we add (((512 + (kc_bank << 12)) + chan ) * 4 here and
        // then div by 4 at the ISel step
        SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
-            DAG.getConstant(4 * i + ConstantBlock * 16, MVT::i32));
+            DAG.getConstant(4 * i + ConstantBlock * 16, DL, MVT::i32));
        Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
      }
      EVT NewVT = MVT::v4i32;
@@ -1256,26 +1516,28 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
        NewVT = VT;
        NumElements = VT.getVectorNumElements();
      }
-     Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT, Slots, NumElements);
+     Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT,
+                          makeArrayRef(Slots, NumElements));
    } else {
-      // non constant ptr cant be folded, keeps it as a v4f32 load
+      // non-constant ptr can't be folded, keeps it as a v4f32 load
      Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
-          DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr, DAG.getConstant(4, MVT::i32)),
-          DAG.getConstant(LoadNode->getAddressSpace() -
-                          AMDGPUAS::CONSTANT_BUFFER_0, MVT::i32)
+          DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
+                      DAG.getConstant(4, DL, MVT::i32)),
+          DAG.getConstant(LoadNode->getAddressSpace() -
+                          AMDGPUAS::CONSTANT_BUFFER_0, DL, MVT::i32)
          );
    }
 
    if (!VT.isVector()) {
      Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
-          DAG.getConstant(0, MVT::i32));
+          DAG.getConstant(0, DL, MVT::i32));
    }
 
    SDValue MergedValues[2] = {
-      Result,
-      Chain
+      Result,
+      Chain
    };
-    return DAG.getMergeValues(MergedValues, 2, DL);
  }
 
  // For most operations returning SDValue() will result in the node being
@@ -1288,18 +1550,17 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
   if (LoadNode->getExtensionType() == ISD::SEXTLOAD) {
     EVT MemVT = LoadNode->getMemoryVT();
     assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
-    SDValue ShiftAmount =
-        DAG.getConstant(VT.getSizeInBits() - MemVT.getSizeInBits(), MVT::i32);
-
     SDValue NewLoad = DAG.getExtLoad(ISD::EXTLOAD, DL, VT, Chain, Ptr,
                                      LoadNode->getPointerInfo(), MemVT,
                                      LoadNode->isVolatile(),
                                      LoadNode->isNonTemporal(),
+                                     LoadNode->isInvariant(),
                                      LoadNode->getAlignment());
-    SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, NewLoad, ShiftAmount);
-    SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Shl, ShiftAmount);
+    SDValue Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, NewLoad,
+                              DAG.getValueType(MemVT));
 
-    SDValue MergedValues[2] = { Sra, Chain };
-    return DAG.getMergeValues(MergedValues, 2, DL);
+    SDValue MergedValues[2] = { Res, Chain };
+    return DAG.getMergeValues(MergedValues, DL);
   }
 
   if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
     return SDValue();
   }
 
   // Lowering for indirect addressing
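  // For example (illustrative): stackPtrToRegIndex below divides the byte
  // address by the bytes covered per register row (16 bytes at StackWidth 4),
  // so byte address 32 selects register row 2; getStackAddress then chooses
  // the channel within that row.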
   const MachineFunction &MF = DAG.getMachineFunction();
-  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
-                                         getTargetMachine().getFrameLowering());
+  const AMDGPUFrameLowering *TFL =
+      static_cast<const AMDGPUFrameLowering *>(Subtarget->getFrameLowering());
   unsigned StackWidth = TFL->getStackWidth(MF);
 
   Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);
@@ -1326,29 +1587,39 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
       unsigned Channel, PtrIncr;
       getStackAddress(StackWidth, i, Channel, PtrIncr);
       Ptr = DAG.getNode(ISD::ADD, DL, MVT::i32, Ptr,
-                        DAG.getConstant(PtrIncr, MVT::i32));
+                        DAG.getConstant(PtrIncr, DL, MVT::i32));
       Loads[i] = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, ElemVT,
                              Chain, Ptr,
-                             DAG.getTargetConstant(Channel, MVT::i32),
+                             DAG.getTargetConstant(Channel, DL, MVT::i32),
                              Op.getOperand(2));
     }
     for (unsigned i = NumElemVT; i < 4; ++i) {
       Loads[i] = DAG.getUNDEF(ElemVT);
     }
     EVT TargetVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, 4);
-    LoweredLoad = DAG.getNode(ISD::BUILD_VECTOR, DL, TargetVT, Loads, 4);
+    LoweredLoad = DAG.getNode(ISD::BUILD_VECTOR, DL, TargetVT, Loads);
   } else {
     LoweredLoad = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, VT,
                               Chain, Ptr,
-                              DAG.getTargetConstant(0, MVT::i32), // Channel
+                              DAG.getTargetConstant(0, DL, MVT::i32), // Channel
                               Op.getOperand(2));
   }
 
-  SDValue Ops[2];
-  Ops[0] = LoweredLoad;
-  Ops[1] = Chain;
+  SDValue Ops[2] = {
+    LoweredLoad,
+    Chain
+  };
 
-  return DAG.getMergeValues(Ops, 2, DL);
+  return DAG.getMergeValues(Ops, DL);
+}
+
+SDValue R600TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
+  SDValue Chain = Op.getOperand(0);
+  SDValue Cond = Op.getOperand(1);
+  SDValue Jump = Op.getOperand(2);
+
+  return DAG.getNode(AMDGPUISD::BRANCH_COND, SDLoc(Op), Op.getValueType(),
+                     Chain, Jump, Cond);
 }
 
 /// XXX Only kernel functions are supported, so we can assume for now that
@@ -1362,24 +1633,28 @@ SDValue R600TargetLowering::LowerFormalArguments(
     SDLoc DL, SelectionDAG &DAG,
     SmallVectorImpl<SDValue> &InVals) const {
   SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   MachineFunction &MF = DAG.getMachineFunction();
-  unsigned ShaderType = MF.getInfo<R600MachineFunctionInfo>()->ShaderType;
+  R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
 
   SmallVector<ISD::InputArg, 8> LocalIns;
 
-  getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
-                          LocalIns);
+  getOriginalFunctionArgs(DAG, MF.getFunction(), Ins, LocalIns);
 
   AnalyzeFormalArguments(CCInfo, LocalIns);
 
   for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
     CCValAssign &VA = ArgLocs[i];
-    EVT VT = Ins[i].VT;
-    EVT MemVT = LocalIns[i].VT;
+    const ISD::InputArg &In = Ins[i];
+    EVT VT = In.VT;
+    EVT MemVT = VA.getLocVT();
+    if (!VT.isVector() && MemVT.isVector()) {
+      // Get load source type if scalarized.
+      MemVT = MemVT.getVectorElementType();
+    }
 
-    if (ShaderType != ShaderType::COMPUTE) {
+    if (MFI->getShaderType() != ShaderType::COMPUTE) {
       unsigned Reg = MF.addLiveIn(VA.getLocReg(), &AMDGPU::R600_Reg128RegClass);
       SDValue Register = DAG.getCopyFromReg(Chain, DL, Reg, VT);
       InVals.push_back(Register);
@@ -1387,36 +1662,62 @@ SDValue R600TargetLowering::LowerFormalArguments(
     }
 
     PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
-                                                   AMDGPUAS::CONSTANT_BUFFER_0);
+                                          AMDGPUAS::CONSTANT_BUFFER_0);
 
+    // i64 isn't a legal type, so the register type used ends up as i32, which
+    // isn't expected here. It attempts to create this sextload, but it ends up
+    // being invalid. Somehow this seems to work with i64 arguments, but breaks
+    // for <1 x i64>.
+
     // The first 36 bytes of the input buffer contains information about
     // thread group and global sizes.
-    SDValue Arg = DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, Chain,
-                                 DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
-                                 MachinePointerInfo(UndefValue::get(PtrTy)),
-                                 MemVT, false, false, 4);
-                                 // 4 is the prefered alignment for
-                                 // the CONSTANT memory space.
+    ISD::LoadExtType Ext = ISD::NON_EXTLOAD;
+    if (MemVT.getScalarSizeInBits() != VT.getScalarSizeInBits()) {
+      // FIXME: This should really check the extload type, but the handling of
+      // extload vector parameters seems to be broken.
+
+      // Ext = In.Flags.isSExt() ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
+      Ext = ISD::SEXTLOAD;
+    }
+
+    // Compute the offset from the value.
+    // XXX - I think PartOffset should give you this, but it seems to give the
+    // size of the register which isn't useful.
+
+    unsigned ValBase = ArgLocs[In.getOrigArgIndex()].getLocMemOffset();
+    unsigned PartOffset = VA.getLocMemOffset();
+    unsigned Offset = 36 + VA.getLocMemOffset();
+
+    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy), PartOffset - ValBase);
+    SDValue Arg = DAG.getLoad(ISD::UNINDEXED, Ext, VT, DL, Chain,
+                              DAG.getConstant(Offset, DL, MVT::i32),
+                              DAG.getUNDEF(MVT::i32),
+                              PtrInfo,
+                              MemVT, false, true, true, 4);
+
+    // 4 is the preferred alignment for the CONSTANT memory space.
     InVals.push_back(Arg);
+    MFI->ABIArgOffset = Offset + MemVT.getStoreSize();
   }
   return Chain;
 }
 
 EVT R600TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
-   if (!VT.isVector()) return MVT::i32;
+   if (!VT.isVector())
+     return MVT::i32;
   return VT.changeVectorElementTypeToInteger();
 }
 
-static SDValue
-CompactSwizzlableVector(SelectionDAG &DAG, SDValue VectorEntry,
-                        DenseMap<unsigned, unsigned> &RemapSwizzle) {
+static SDValue CompactSwizzlableVector(
+    SelectionDAG &DAG, SDValue VectorEntry,
+    DenseMap<unsigned, unsigned> &RemapSwizzle) {
   assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
   assert(RemapSwizzle.empty());
   SDValue NewBldVec[4] = {
-      VectorEntry.getOperand(0),
-      VectorEntry.getOperand(1),
-      VectorEntry.getOperand(2),
-      VectorEntry.getOperand(3)
+    VectorEntry.getOperand(0),
+    VectorEntry.getOperand(1),
+    VectorEntry.getOperand(2),
+    VectorEntry.getOperand(3)
   };
 
   for (unsigned i = 0; i < 4; i++) {
@@ -1447,7 +1748,7 @@ CompactSwizzlableVector(SelectionDAG &DAG, SDValue VectorEntry,
   }
 
   return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
-                     VectorEntry.getValueType(), NewBldVec, 4);
+                     VectorEntry.getValueType(), NewBldVec);
 }
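// Illustrative example: if CompactSwizzlableVector maps channel 2 onto
// channel 0 (RemapSwizzle = {2 -> 0}), OptimizeSwizzle below rewrites a
// swizzle operand that selected channel 2 into one selecting channel 0 via
// DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32).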
 
 static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
                                 DenseMap<unsigned, unsigned> &RemapSwizzle) {
   assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
   assert(RemapSwizzle.empty());
   SDValue NewBldVec[4] = {
       VectorEntry.getOperand(0),
       VectorEntry.getOperand(1),
       VectorEntry.getOperand(2),
       VectorEntry.getOperand(3)
   };
   bool isUnmovable[4] = { false, false, false, false };
-  for (unsigned i = 0; i < 4; i++)
+  for (unsigned i = 0; i < 4; i++) {
     RemapSwizzle[i] = i;
+    if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+      unsigned Idx = dyn_cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
+          ->getZExtValue();
+      if (i == Idx)
+        isUnmovable[Idx] = true;
+    }
+  }
 
   for (unsigned i = 0; i < 4; i++) {
     if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
       unsigned Idx = dyn_cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
           ->getZExtValue();
-      if (i == Idx) {
-        isUnmovable[Idx] = true;
-        continue;
-      }
       if (isUnmovable[Idx])
         continue;
       // Swap i and Idx
@@ -1482,29 +1786,30 @@ static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
   }
 
   return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
-                     VectorEntry.getValueType(), NewBldVec, 4);
+                     VectorEntry.getValueType(), NewBldVec);
 }
 
 SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector,
-SDValue Swz[4], SelectionDAG &DAG) const {
+                                            SDValue Swz[4], SelectionDAG &DAG,
+                                            SDLoc DL) const {
   assert(BuildVector.getOpcode() == ISD::BUILD_VECTOR);
   // Old -> New swizzle values
   DenseMap<unsigned, unsigned> SwizzleRemap;
 
   BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
   for (unsigned i = 0; i < 4; i++) {
-    unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
     if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
-      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
+      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
   }
 
   SwizzleRemap.clear();
   BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
   for (unsigned i = 0; i < 4; i++) {
-    unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
    if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
-      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
+      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
   }
 
   return BuildVector;
@@ -1520,6 +1825,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
   SelectionDAG &DAG = DCI.DAG;
 
   switch (N->getOpcode()) {
+  default: return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
   // (f32 fp_round (f64 uint_to_fp a)) -> (f32 uint_to_fp a)
   case ISD::FP_ROUND: {
       SDValue Arg = N->getOperand(0);
@@ -1549,11 +1855,12 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
         return SDValue();
       }
 
-      return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0),
+      SDLoc dl(N);
+      return DAG.getNode(ISD::SELECT_CC, dl, N->getValueType(0),
                          SelectCC.getOperand(0), // LHS
                          SelectCC.getOperand(1), // RHS
-                         DAG.getConstant(-1, MVT::i32), // True
-                         DAG.getConstant(0, MVT::i32), // Flase
+                         DAG.getConstant(-1, dl, MVT::i32), // True
+                         DAG.getConstant(0, dl, MVT::i32), // False
                          SelectCC.getOperand(4)); // CC
 
       break;
@@ -1609,8 +1916,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
     }
 
     // Return the new vector
-    return DAG.getNode(ISD::BUILD_VECTOR, dl,
-                       VT, &Ops[0], Ops.size());
+    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
   }
 
   // Extract_vec (Build_vector) generated by custom lowering
@@ -1634,6 +1940,11 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
   }
 
   case ISD::SELECT_CC: {
+    // Try common optimizations
+    SDValue Ret = AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
+    if (Ret.getNode())
+      return Ret;
+
     // fold selectcc (selectcc x, y, a, b, cc), b, a, b, seteq ->
     // selectcc x, y, a, b, inv(cc)
     //
@@ -1692,8 +2003,8 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
       N->getOperand(7) // SWZ_W
     };
     SDLoc DL(N);
-    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG);
-    return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs, 8);
+    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG, DL);
+    return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs);
   }
   case AMDGPUISD::TEXTURE_FETCH: {
     SDValue Arg = N->getOperand(1);
@@ -1721,19 +2032,20 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
       N->getOperand(17),
       N->getOperand(18),
     };
-    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG);
-    return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, SDLoc(N), N->getVTList(),
-        NewArgs, 19);
+    SDLoc DL(N);
+    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG, DL);
+    return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, N->getVTList(), NewArgs);
   }
   }
-  return SDValue();
+
+  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
 }
 
 static bool
 FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
             SDValue &Abs, SDValue &Sel, SDValue &Imm,
             SelectionDAG &DAG) {
   const R600InstrInfo *TII =
-      static_cast<const R600InstrInfo *>(DAG.getTarget().getInstrInfo());
+      static_cast<const R600InstrInfo *>(DAG.getSubtarget().getInstrInfo());
   if (!Src.isMachineOpcode())
     return false;
   switch (Src.getMachineOpcode()) {
   case AMDGPU::FNEG_R600:
     if (!Neg.getNode())
       return false;
     Src = Src.getOperand(0);
-    Neg = DAG.getTargetConstant(1, MVT::i32);
+    Neg = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
     return true;
   case AMDGPU::FABS_R600:
     if (!Abs.getNode())
       return false;
     Src = Src.getOperand(0);
-    Abs = DAG.getTargetConstant(1, MVT::i32);
+    Abs = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
     return true;
   case AMDGPU::CONST_COPY: {
     unsigned Opcode = ParentNode->getMachineOpcode();
@@ -1775,8 +2087,7 @@ FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
       TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
     };
     std::vector<unsigned> Consts;
-    for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
-      int OtherSrcIdx = SrcIndices[i];
+    for (int OtherSrcIdx : SrcIndices) {
      int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
      if (OtherSrcIdx < 0 || OtherSelIdx < 0)
        continue;
@@ -1787,14 +2098,14 @@ FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
      if (RegisterSDNode *Reg =
          dyn_cast<RegisterSDNode>(ParentNode->getOperand(OtherSrcIdx))) {
        if (Reg->getReg() == AMDGPU::ALU_CONST) {
-          ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(
-              ParentNode->getOperand(OtherSelIdx));
+          ConstantSDNode *Cst
+            = cast<ConstantSDNode>(ParentNode->getOperand(OtherSelIdx));
          Consts.push_back(Cst->getZExtValue());
        }
      }
    }
 
-    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
+    ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
    Consts.push_back(Cst->getZExtValue());
    if (!TII->fitsConstReadLimitations(Consts)) {
      return false;
@@ -1844,7 +2155,7 @@ FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
      assert(C);
      if (C->getZExtValue())
        return false;
-      Imm = DAG.getTargetConstant(ImmValue, MVT::i32);
+      Imm = DAG.getTargetConstant(ImmValue, SDLoc(ParentNode), MVT::i32);
    }
    Src = DAG.getRegister(ImmReg, MVT::i32);
    return true;
@@ -1859,16 +2170,13 @@ FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
 SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
                                             SelectionDAG &DAG) const {
   const R600InstrInfo *TII =
-      static_cast<const R600InstrInfo *>(DAG.getTarget().getInstrInfo());
+      static_cast<const R600InstrInfo *>(DAG.getSubtarget().getInstrInfo());
   if (!Node->isMachineOpcode())
     return Node;
   unsigned Opcode = Node->getMachineOpcode();
   SDValue FakeOp;
 
-  std::vector<SDValue> Ops;
-  for(SDNode::op_iterator I = Node->op_begin(), E = Node->op_end();
-      I != E; ++I)
-          Ops.push_back(*I);
+  std::vector<SDValue> Ops(Node->op_begin(), Node->op_end());
 
   if (Opcode == AMDGPU::DOT_4) {
     int OperandIdx[] = {
@@ -1930,13 +2238,11 @@ SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
                                   AMDGPU::OpName::clamp);
     if (ClampIdx < 0)
       return Node;
-    std::vector<SDValue> Ops;
-    unsigned NumOp = Src.getNumOperands();
-    for(unsigned i = 0; i < NumOp; ++i)
-          Ops.push_back(Src.getOperand(i));
-    Ops[ClampIdx - 1] = DAG.getTargetConstant(1, MVT::i32);
-    return DAG.getMachineNode(Src.getMachineOpcode(), SDLoc(Node),
-        Node->getVTList(), Ops);
+    SDLoc DL(Node);
+    std::vector<SDValue> Ops(Src->op_begin(), Src->op_end());
+    Ops[ClampIdx - 1] = DAG.getTargetConstant(1, DL, MVT::i32);
+    return DAG.getMachineNode(Src.getMachineOpcode(), DL,
+                              Node->getVTList(), Ops);
   } else {
     if (!TII->hasInstrModifiers(Opcode))
       return Node;