//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#include "SIISelLowering.h"
#include "AMDILIntrinsicInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

SITargetLowering::SITargetLowering(TargetMachine &TM) :
    AMDGPUTargetLowering(TM),
    TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())) {
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
  addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
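
  // i1 condition values live in SCC on the scalar side and in VCC on the
  // vector side, so both classes are registered for MVT::i1.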
  addRegisterClass(MVT::i1, &AMDGPU::SCCRegRegClass);
  addRegisterClass(MVT::i1, &AMDGPU::VCCRegRegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);

  computeRegisterProperties();

  setOperationAction(ISD::AND, MVT::i1, Custom);

  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::ADD, MVT::i32, Legal);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We need to custom lower loads from the USER_SGPR address space, so we can
  // add the SGPRs as livein registers.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i64, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::SETCC);
}
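
/// \brief Expand the SI pseudo instructions that need custom insertion into
/// real machine instructions after instruction selection.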
MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
    MachineInstr * MI, MachineBasicBlock * BB) const {
  const TargetInstrInfo * TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo & MRI = BB->getParent()->getRegInfo();
  MachineBasicBlock::iterator I = MI;

  switch (MI->getOpcode()) {
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  case AMDGPU::BRANCH: return BB;
  case AMDGPU::CLAMP_SI:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
           .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(1))
           // VSRC1-2 are unused, but we still need to fill all the
           // operand slots, so we just reuse the VSRC0 operand
           .addOperand(MI->getOperand(1))
           .addOperand(MI->getOperand(1))
           .addImm(0)  // ABS
           .addImm(1)  // CLAMP
           .addImm(0)  // OMOD
           .addImm(0); // NEG
    MI->eraseFromParent();
    break;

  case AMDGPU::FABS_SI:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
           .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(1))
           // VSRC1-2 are unused, but we still need to fill all the
           // operand slots, so we just reuse the VSRC0 operand
           .addOperand(MI->getOperand(1))
           .addOperand(MI->getOperand(1))
           .addImm(1)  // ABS
           .addImm(0)  // CLAMP
           .addImm(0)  // OMOD
           .addImm(0); // NEG
    MI->eraseFromParent();
    break;

  case AMDGPU::FNEG_SI:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
           .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(1))
           // VSRC1-2 are unused, but we still need to fill all the
           // operand slots, so we just reuse the VSRC0 operand
           .addOperand(MI->getOperand(1))
           .addOperand(MI->getOperand(1))
           .addImm(0)  // ABS
           .addImm(0)  // CLAMP
           .addImm(0)  // OMOD
           .addImm(1); // NEG
    MI->eraseFromParent();
    break;

  case AMDGPU::SHADER_TYPE:
    BB->getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType =
        MI->getOperand(0).getImm();
    MI->eraseFromParent();
    break;

  case AMDGPU::SI_INTERP:
    LowerSI_INTERP(MI, *BB, I, MRI);
    break;
  case AMDGPU::SI_INTERP_CONST:
    LowerSI_INTERP_CONST(MI, *BB, I, MRI);
    break;
  case AMDGPU::SI_WQM:
    LowerSI_WQM(MI, *BB, I, MRI);
    break;
  case AMDGPU::SI_V_CNDLT:
    LowerSI_V_CNDLT(MI, *BB, I, MRI);
    break;
  }
  return BB;
}
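
/// \brief Put the EXEC mask into whole quad mode: S_WQM_B64 enables all four
/// lanes of every 2x2 pixel quad that has at least one active lane, which
/// pixel shaders need before instructions that compute derivatives.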
void SITargetLowering::LowerSI_WQM(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const {
  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_WQM_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC);

  MI->eraseFromParent();
}
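
/// \brief Expand SI_INTERP into a V_INTERP_P1_F32 / V_INTERP_P2_F32 pair that
/// interpolates one attribute channel with the barycentric coordinates i and
/// j. The params operand is copied into M0 first, since the interpolation
/// instructions read it implicitly.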
void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const {
  unsigned tmp = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
  unsigned M0 = MRI.createVirtualRegister(&AMDGPU::M0RegRegClass);
  MachineOperand dst = MI->getOperand(0);
  MachineOperand iReg = MI->getOperand(1);
  MachineOperand jReg = MI->getOperand(2);
  MachineOperand attr_chan = MI->getOperand(3);
  MachineOperand attr = MI->getOperand(4);
  MachineOperand params = MI->getOperand(5);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_MOV_B32), M0)
          .addOperand(params);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P1_F32), tmp)
          .addOperand(iReg)
          .addOperand(attr_chan)
          .addOperand(attr)
          .addReg(M0);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P2_F32))
          .addOperand(dst)
          .addReg(tmp)
          .addOperand(jReg)
          .addOperand(attr_chan)
          .addOperand(attr)
          .addReg(M0);

  MI->eraseFromParent();
}
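
/// \brief Expand SI_INTERP_CONST into V_INTERP_MOV_F32, which reads a
/// constant (flat-shaded) attribute channel without any barycentric
/// interpolation; M0 again carries the params operand.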
void SITargetLowering::LowerSI_INTERP_CONST(MachineInstr *MI,
    MachineBasicBlock &BB, MachineBasicBlock::iterator I,
    MachineRegisterInfo &MRI) const {
  MachineOperand dst = MI->getOperand(0);
  MachineOperand attr_chan = MI->getOperand(1);
  MachineOperand attr = MI->getOperand(2);
  MachineOperand params = MI->getOperand(3);
  unsigned M0 = MRI.createVirtualRegister(&AMDGPU::M0RegRegClass);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_MOV_B32), M0)
          .addOperand(params);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_MOV_F32))
          .addOperand(dst)
          .addOperand(attr_chan)
          .addOperand(attr)
          .addReg(M0);

  MI->eraseFromParent();
}
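
/// \brief Expand SI_V_CNDLT: V_CMP_GT_F32 computes 0.0 > src (i.e. src < 0)
/// into a condition mask, and V_CNDMASK_B32 then uses that mask to select
/// between the two value operands.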
void SITargetLowering::LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const {
  unsigned VCC = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(BB, I, BB.findDebugLoc(I),
          TII->get(AMDGPU::V_CMP_GT_F32_e32),
          VCC)
          .addReg(AMDGPU::SREG_LIT_0)
          .addOperand(MI->getOperand(1));

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_CNDMASK_B32_e32))
          .addOperand(MI->getOperand(0))
          .addOperand(MI->getOperand(3))
          .addOperand(MI->getOperand(2))
          .addReg(VCC);

  MI->eraseFromParent();
}
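
/// SETCC results are i1 values on SI; they are later assigned to either SCC
/// or VCC, the two i1 register classes registered in the constructor.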
EVT SITargetLowering::getSetCCResultType(EVT VT) const {
  return MVT::i1;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::LOAD: return LowerLOAD(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::AND: return Loweri1ContextSwitch(Op, DAG, ISD::AND);
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID =
        cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    EVT VT = Op.getValueType();
    switch (IntrinsicID) {
    case AMDGPUIntrinsic::SI_vs_load_buffer_index:
      return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
                                  AMDGPU::VGPR0, VT);
    default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
    }
  }
  }
  return SDValue();
}

/// \brief Lower i1 operations on the VCC register.
///
/// In the VALU context, VCC is a one-bit register, but in the
/// SALU context VCC is a 64-bit register (one bit per thread). Since only
/// the SALU can perform operations on the VCC register, we need to promote
/// the operand types from i1 to i64 in order for tablegen to be able to match
/// this operation to the correct SALU instruction. We do this promotion by
/// wrapping the operands in SIISD::VCC_BITCAST nodes.
SDValue SITargetLowering::Loweri1ContextSwitch(SDValue Op,
                                               SelectionDAG &DAG,
                                               unsigned VCCNode) const {
  DebugLoc DL = Op.getDebugLoc();

  SDValue OpNode = DAG.getNode(VCCNode, DL, MVT::i64,
                               DAG.getNode(SIISD::VCC_BITCAST, DL, MVT::i64,
                                           Op.getOperand(0)),
                               DAG.getNode(SIISD::VCC_BITCAST, DL, MVT::i64,
                                           Op.getOperand(1)));

  return DAG.getNode(SIISD::VCC_BITCAST, DL, MVT::i1, OpNode);
}

/// \brief Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {
  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {
    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return 0;
}

/// This transforms the control flow intrinsics to get the branch destination
/// as their last parameter, and also switches the branch target with BR if
/// the need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {
  DebugLoc DL = BRCOND.getDebugLoc();

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = 0;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine
    SDNode *SetCC = Intr;
    assert(SetCC->getConstantOperandVal(1) == 1);
    assert(cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
           ISD::SETNE);
    Intr = SetCC->getOperand(0).getNode();
  } else {
    // Get the target from BR if we don't negate the condition
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  assert(Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN);

  // Build the result types of the new intrinsic call
  SmallVector<EVT, 4> Res;
  for (unsigned i = 1, e = Intr->getNumValues(); i != e; ++i)
    Res.push_back(Intr->getValueType(i));

  // Build the operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  Ops.push_back(BRCOND.getOperand(0));
  for (unsigned i = 1, e = Intr->getNumOperands(); i != e; ++i)
    Ops.push_back(Intr->getOperand(i));
  Ops.push_back(Target);

  // Build the new intrinsic call
  SDNode *Result = DAG.getNode(
    Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL,
    DAG.getVTList(Res.data(), Res.size()), Ops.data(), Ops.size()).getNode();

  if (BR) {
    // Give the branch instruction our target
    SDValue BROps[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    DAG.MorphNodeTo(BR, ISD::BR, BR->getVTList(), BROps, 2);
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
      Chain, DL,
      CopyToReg->getOperand(1),
      SDValue(Result, i - 1),
      SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain
  DAG.ReplaceAllUsesOfValueWith(
    SDValue(Intr, Intr->getNumValues() - 1),
    Intr->getOperand(0));

  return Chain;
}
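
/// \brief Loads from the USER_SGPR address space are not real memory
/// operations: the value is already live in an SGPR that was loaded before
/// the shader started, so the load is folded into a reference to that
/// live-in register.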
SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  LoadSDNode *Ptr = dyn_cast<LoadSDNode>(Op);
  assert(Ptr);

  unsigned AddrSpace = Ptr->getPointerInfo().getAddrSpace();

  // We only need to lower USER_SGPR address space loads
  if (AddrSpace != AMDGPUAS::USER_SGPR_ADDRESS) {
    return SDValue();
  }

  // Loads from the USER_SGPR address space can only have constant base
  // pointers.
  ConstantSDNode *BasePtr = dyn_cast<ConstantSDNode>(Ptr->getBasePtr());
  assert(BasePtr);

  unsigned TypeDwordWidth = VT.getSizeInBits() / 32;
  const TargetRegisterClass * dstClass;
  switch (TypeDwordWidth) {
    default:
      assert(!"USER_SGPR value size not implemented");
      return SDValue();
    case 1:
      dstClass = &AMDGPU::SReg_32RegClass;
      break;
    case 2:
      dstClass = &AMDGPU::SReg_64RegClass;
      break;
  }

  uint64_t Index = BasePtr->getZExtValue();
  assert(Index % TypeDwordWidth == 0 && "USER_SGPR not properly aligned");
  unsigned SGPRIndex = Index / TypeDwordWidth;
  unsigned Reg = dstClass->getRegister(SGPRIndex);

  DAG.ReplaceAllUsesOfValueWith(Op, CreateLiveInRegister(DAG, dstClass, Reg,
                                                         VT));
  return SDValue();
}
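
/// \brief Lower SELECT_CC to a separate SETCC + SELECT pair, unless the whole
/// node can be matched as a min/max pattern first.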
SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);
  EVT VT = Op.getValueType();
  DebugLoc DL = Op.getDebugLoc();

  // Possible Min/Max pattern
  SDValue MinMax = LowerMinMax(Op, DAG);
  if (MinMax.getNode()) {
    return MinMax;
  }

  SDValue Cond = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS, CC);
  return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False);
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
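
/// Fold i1 patterns the generic combiner does not handle: an i1 select_cc
/// with all-ones/zero arms is just the comparison itself, and a setcc that
/// compares a sign-extended i1 against zero can test the i1 directly.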
SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc DL = N->getDebugLoc();
  EVT VT = N->getValueType(0);

  switch (N->getOpcode()) {
    default: break;
    case ISD::SELECT_CC: {
      ConstantSDNode *True, *False;
      // i1 selectcc(l, r, -1, 0, cc) -> i1 setcc(l, r, cc)
      if ((True = dyn_cast<ConstantSDNode>(N->getOperand(2)))
          && (False = dyn_cast<ConstantSDNode>(N->getOperand(3)))
          && True->isAllOnesValue()
          && False->isNullValue()
          && VT == MVT::i1) {
        return DAG.getNode(ISD::SETCC, DL, VT, N->getOperand(0),
                           N->getOperand(1), N->getOperand(4));
      }
      break;
    }
    case ISD::SETCC: {
      SDValue Arg0 = N->getOperand(0);
      SDValue Arg1 = N->getOperand(1);
      SDValue CC = N->getOperand(2);
      ConstantSDNode * C = NULL;
      ISD::CondCode CCOp = cast<CondCodeSDNode>(CC)->get();

      // i1 setcc (sext(i1), 0, setne) -> i1 setcc(i1, 0, setne)
      if (VT == MVT::i1
          && Arg0.getOpcode() == ISD::SIGN_EXTEND
          && Arg0.getOperand(0).getValueType() == MVT::i1
          && (C = dyn_cast<ConstantSDNode>(Arg1))
          && C->isNullValue()
          && CCOp == ISD::SETNE) {
        return SimplifySetCC(VT, Arg0.getOperand(0),
                             DAG.getConstant(0, MVT::i1), CCOp, true, DCI, DL);
      }
      break;
    }
  }
  return SDValue();
}

#define NODE_NAME_CASE(node) case SIISD::node: return #node;

const char* SITargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return AMDGPUTargetLowering::getTargetNodeName(Opcode);
  NODE_NAME_CASE(VCC_AND)
  NODE_NAME_CASE(VCC_BITCAST)
  }
}