//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#include "SIISelLowering.h"
#include "AMDIL.h"
#include "AMDILIntrinsicInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

SITargetLowering::SITargetLowering(TargetMachine &TM) :
    AMDGPUTargetLowering(TM),
    TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())) {
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
  addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::i1, &AMDGPU::SCCRegRegClass);
  addRegisterClass(MVT::i1, &AMDGPU::VCCRegRegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);

  computeRegisterProperties();

  setOperationAction(ISD::AND, MVT::i1, Custom);

  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::ADD, MVT::i32, Legal);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We need to custom lower loads from the USER_SGPR address space, so we can
  // add the SGPRs as livein registers.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i64, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setTargetDAGCombine(ISD::SELECT_CC);

  setTargetDAGCombine(ISD::SETCC);
}
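
/// \brief Expand pseudo instructions that require custom insertion into real
/// SI instructions, and append a wait state after any instruction whose
/// TSFlags request one.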
MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
    MachineInstr * MI, MachineBasicBlock * BB) const {
  const TargetInstrInfo * TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo & MRI = BB->getParent()->getRegInfo();
  MachineBasicBlock::iterator I = MI;

  if (TII->get(MI->getOpcode()).TSFlags & SIInstrFlags::NEED_WAIT) {
    AppendS_WAITCNT(MI, *BB, llvm::next(I));
    return BB;
  }

  switch (MI->getOpcode()) {
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  case AMDGPU::BRANCH: return BB;
  case AMDGPU::CLAMP_SI:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
           .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(1))
           // VSRC1-2 are unused, but we still need to fill all the
           // operand slots, so we just reuse the VSRC0 operand
           .addOperand(MI->getOperand(1))
           .addOperand(MI->getOperand(1))
           .addImm(0) // ABS
           .addImm(1) // CLAMP
           .addImm(0) // OMOD
           .addImm(0); // NEG
    MI->eraseFromParent();
    break;

  case AMDGPU::FABS_SI:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
           .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(1))
           // VSRC1-2 are unused, but we still need to fill all the
           // operand slots, so we just reuse the VSRC0 operand
           .addOperand(MI->getOperand(1))
           .addOperand(MI->getOperand(1))
           .addImm(1) // ABS
           .addImm(0) // CLAMP
           .addImm(0) // OMOD
           .addImm(0); // NEG
    MI->eraseFromParent();
    break;

  case AMDGPU::FNEG_SI:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
           .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(1))
           // VSRC1-2 are unused, but we still need to fill all the
           // operand slots, so we just reuse the VSRC0 operand
           .addOperand(MI->getOperand(1))
           .addOperand(MI->getOperand(1))
           .addImm(0) // ABS
           .addImm(0) // CLAMP
           .addImm(0) // OMOD
           .addImm(1); // NEG
    MI->eraseFromParent();
    break;
  case AMDGPU::SHADER_TYPE:
    BB->getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType =
                                        MI->getOperand(0).getImm();
    MI->eraseFromParent();
    break;

  case AMDGPU::SI_INTERP:
    LowerSI_INTERP(MI, *BB, I, MRI);
    break;
  case AMDGPU::SI_INTERP_CONST:
    LowerSI_INTERP_CONST(MI, *BB, I, MRI);
    break;
  case AMDGPU::SI_WQM:
    LowerSI_WQM(MI, *BB, I, MRI);
    break;
  case AMDGPU::SI_V_CNDLT:
    LowerSI_V_CNDLT(MI, *BB, I, MRI);
    break;
  }
  return BB;
}
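
/// Append an S_WAITCNT instruction; the zero immediate waits until all
/// outstanding memory, export and LDS/constant operations have completed.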
void SITargetLowering::AppendS_WAITCNT(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I) const {
  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_WAITCNT))
          .addImm(0);
}
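
/// Enable whole quad mode by setting every EXEC bit whose quad of four lanes
/// contains at least one active lane.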
void SITargetLowering::LowerSI_WQM(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const {
  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_WQM_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC);

  MI->eraseFromParent();
}
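
/// Expand SI_INTERP into the two-stage V_INTERP_P1_F32 / V_INTERP_P2_F32
/// sequence; the interpolation parameters must be loaded into M0 first.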
void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const {
  unsigned tmp = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
  unsigned M0 = MRI.createVirtualRegister(&AMDGPU::M0RegRegClass);
  MachineOperand dst = MI->getOperand(0);
  MachineOperand iReg = MI->getOperand(1);
  MachineOperand jReg = MI->getOperand(2);
  MachineOperand attr_chan = MI->getOperand(3);
  MachineOperand attr = MI->getOperand(4);
  MachineOperand params = MI->getOperand(5);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_MOV_B32), M0)
          .addOperand(params);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P1_F32), tmp)
          .addOperand(iReg)
          .addOperand(attr_chan)
          .addOperand(attr)
          .addReg(M0);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P2_F32))
          .addOperand(dst)
          .addReg(tmp)
          .addOperand(jReg)
          .addOperand(attr_chan)
          .addOperand(attr)
          .addReg(M0);

  MI->eraseFromParent();
}
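
/// Expand SI_INTERP_CONST into a single V_INTERP_MOV_F32, which reads a
/// constant (flat-shaded) attribute and so needs no barycentric coordinates.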
void SITargetLowering::LowerSI_INTERP_CONST(MachineInstr *MI,
    MachineBasicBlock &BB, MachineBasicBlock::iterator I,
    MachineRegisterInfo &MRI) const {
  MachineOperand dst = MI->getOperand(0);
  MachineOperand attr_chan = MI->getOperand(1);
  MachineOperand attr = MI->getOperand(2);
  MachineOperand params = MI->getOperand(3);
  unsigned M0 = MRI.createVirtualRegister(&AMDGPU::M0RegRegClass);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_MOV_B32), M0)
          .addOperand(params);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_MOV_F32))
          .addOperand(dst)
          .addOperand(attr_chan)
          .addOperand(attr)
          .addReg(M0);

  MI->eraseFromParent();
}
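
/// Expand SI_V_CNDLT (select on "src < 0") into a V_CMP_GT_F32 against 0.0
/// followed by a V_CNDMASK_B32 keyed on the resulting VCC mask.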
void SITargetLowering::LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const {
  unsigned VCC = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(BB, I, BB.findDebugLoc(I),
          TII->get(AMDGPU::V_CMP_GT_F32_e32),
          VCC)
          .addReg(AMDGPU::SREG_LIT_0)
          .addOperand(MI->getOperand(1));

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_CNDMASK_B32_e32))
          .addOperand(MI->getOperand(0))
          .addOperand(MI->getOperand(3))
          .addOperand(MI->getOperand(2))
          .addReg(VCC);

  MI->eraseFromParent();
}
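
/// All compare results are lowered to a single i1 condition bit.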
EVT SITargetLowering::getSetCCResultType(EVT VT) const {
  return MVT::i1;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::LOAD: return LowerLOAD(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::AND: return Loweri1ContextSwitch(Op, DAG, ISD::AND);
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID =
                         cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    EVT VT = Op.getValueType();
    switch (IntrinsicID) {
    case AMDGPUIntrinsic::SI_vs_load_buffer_index:
      return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
                                  AMDGPU::VGPR0, VT);
    default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
    }
    break;
  }
  }
  return SDValue();
}

/// \brief Lower i1 operations that are performed on the VCC register.
///
/// In the VALU context, VCC is a one bit register, but in the
/// SALU context VCC is a 64-bit register (1-bit per thread). Since only
/// the SALU can perform operations on the VCC register, we need to promote
/// the operand types from i1 to i64 in order for tablegen to be able to match
/// this operation to the correct SALU instruction. We do this promotion by
/// wrapping the operands in SIISD::VCC_BITCAST nodes.
SDValue SITargetLowering::Loweri1ContextSwitch(SDValue Op,
                                               SelectionDAG &DAG,
                                               unsigned VCCNode) const {
  DebugLoc DL = Op.getDebugLoc();

  SDValue OpNode = DAG.getNode(VCCNode, DL, MVT::i64,
                               DAG.getNode(SIISD::VCC_BITCAST, DL, MVT::i64,
                                           Op.getOperand(0)),
                               DAG.getNode(SIISD::VCC_BITCAST, DL, MVT::i64,
                                           Op.getOperand(1)));

  return DAG.getNode(SIISD::VCC_BITCAST, DL, MVT::i1, OpNode);
}

/// \brief Helper function for LowerBRCOND: find the user of \p Value with the
/// given \p Opcode, or null if there is none.
static SDNode *findUser(SDValue Value, unsigned Opcode) {

  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {

    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return 0;
}

/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter, and also switches the branch target with BR if the
/// need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {

  DebugLoc DL = BRCOND.getDebugLoc();

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = 0;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine
    SDNode *SetCC = Intr;
    assert(SetCC->getConstantOperandVal(1) == 1);
    assert(cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
           ISD::SETNE);
    Intr = SetCC->getOperand(0).getNode();

  } else {
    // Get the target from BR if we don't negate the condition
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  assert(Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN);

  // Build the result and
  SmallVector<EVT, 4> Res;
  for (unsigned i = 1, e = Intr->getNumValues(); i != e; ++i)
    Res.push_back(Intr->getValueType(i));

  // operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  Ops.push_back(BRCOND.getOperand(0));
  for (unsigned i = 1, e = Intr->getNumOperands(); i != e; ++i)
    Ops.push_back(Intr->getOperand(i));
  Ops.push_back(Target);

  // build the new intrinsic call
  SDNode *Result = DAG.getNode(
    Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL,
    DAG.getVTList(Res.data(), Res.size()), Ops.data(), Ops.size()).getNode();

  if (BR) {
    // Give the branch instruction our target
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    DAG.MorphNodeTo(BR, ISD::BR, BR->getVTList(), Ops, 2);
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
      Chain, DL,
      CopyToReg->getOperand(1),
      SDValue(Result, i - 1),
      SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain
  DAG.ReplaceAllUsesOfValueWith(
    SDValue(Intr, Intr->getNumValues() - 1),
    Intr->getOperand(0));

  return Chain;
}
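
/// Loads from the USER_SGPR address space are lowered here: the constant
/// pointer is a dword index selecting the SGPR that already holds the value,
/// which is then marked as a live-in instead of emitting a real load.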
SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  LoadSDNode *Ptr = dyn_cast<LoadSDNode>(Op);

  assert(Ptr);

  unsigned AddrSpace = Ptr->getPointerInfo().getAddrSpace();

  // We only need to lower USER_SGPR address space loads
  if (AddrSpace != AMDGPUAS::USER_SGPR_ADDRESS) {
    return SDValue();
  }

  // Loads from the USER_SGPR address space can only have constant value
  // pointers.
  ConstantSDNode *BasePtr = dyn_cast<ConstantSDNode>(Ptr->getBasePtr());
  assert(BasePtr);

  unsigned TypeDwordWidth = VT.getSizeInBits() / 32;
  const TargetRegisterClass * dstClass;
  switch (TypeDwordWidth) {
    default:
      assert(!"USER_SGPR value size not implemented");
      return SDValue();
    case 1:
      dstClass = &AMDGPU::SReg_32RegClass;
      break;
    case 2:
      dstClass = &AMDGPU::SReg_64RegClass;
      break;
  }
  uint64_t Index = BasePtr->getZExtValue();
  assert(Index % TypeDwordWidth == 0 && "USER_SGPR not properly aligned");
  unsigned SGPRIndex = Index / TypeDwordWidth;
  unsigned Reg = dstClass->getRegister(SGPRIndex);

  DAG.ReplaceAllUsesOfValueWith(Op, CreateLiveInRegister(DAG, dstClass, Reg,
                                                         VT));
  return SDValue();
}
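
/// Lower SELECT_CC by first trying to match a min/max pattern, and otherwise
/// splitting it into a SETCC that produces an i1 condition plus a SELECT.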
SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);
  EVT VT = Op.getValueType();
  DebugLoc DL = Op.getDebugLoc();

  // Possible Min/Max pattern
  SDValue MinMax = LowerMinMax(Op, DAG);
  if (MinMax.getNode()) {
    return MinMax;
  }

  SDValue Cond = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS, CC);
  return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False);
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc DL = N->getDebugLoc();
  EVT VT = N->getValueType(0);

  switch (N->getOpcode()) {
    default: break;
    case ISD::SELECT_CC: {
      ConstantSDNode *True, *False;
      // i1 selectcc(l, r, -1, 0, cc) -> i1 setcc(l, r, cc)
      if ((True = dyn_cast<ConstantSDNode>(N->getOperand(2)))
          && (False = dyn_cast<ConstantSDNode>(N->getOperand(3)))
          && True->isAllOnesValue()
          && False->isNullValue()
          && VT == MVT::i1) {
        return DAG.getNode(ISD::SETCC, DL, VT, N->getOperand(0),
                           N->getOperand(1), N->getOperand(4));
      }
      break;
    }
    case ISD::SETCC: {
      SDValue Arg0 = N->getOperand(0);
      SDValue Arg1 = N->getOperand(1);
      SDValue CC = N->getOperand(2);
      ConstantSDNode * C = NULL;
      ISD::CondCode CCOp = cast<CondCodeSDNode>(CC)->get();

      // i1 setcc (sext(i1), 0, setne) -> i1 setcc(i1, 0, setne)
      if (VT == MVT::i1
          && Arg0.getOpcode() == ISD::SIGN_EXTEND
          && Arg0.getOperand(0).getValueType() == MVT::i1
          && (C = dyn_cast<ConstantSDNode>(Arg1))
          && C->isNullValue()
          && CCOp == ISD::SETNE) {
        return SimplifySetCC(VT, Arg0.getOperand(0),
                             DAG.getConstant(0, MVT::i1), CCOp, true, DCI, DL);
      }
      break;
    }
  }
  return SDValue();
}

#define NODE_NAME_CASE(node) case SIISD::node: return #node;

const char* SITargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return AMDGPUTargetLowering::getTargetNodeName(Opcode);
  NODE_NAME_CASE(VCC_AND)
  NODE_NAME_CASE(VCC_BITCAST)
  }
}