//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *ptr, unsigned int addrspace);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  /// \returns True if the current basic block being selected is at control
  /// flow depth 0.  Meaning that the current block dominates the
  /// exit block.
  bool isCFDepth0() const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  void SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &Offset) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;

  SDNode *SelectADD_SUB_I64(SDNode *N);
  SDNode *SelectDIV_SCALE(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
}  // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
    : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}
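
// Returns true if \p N is an immediate that can be encoded inline in an SI
// instruction, so no extra mov is needed to materialize it.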
bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        TM.getSubtargetImpl()->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return TM.getSubtargetImpl()->getRegisterInfo()->getSubClassWithSubReg(
        SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue& R1, SDValue& R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr;   // Already selected.
  }

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom-lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADD:
  case ISD::SUB: {
    if (N->getValueType(0) != MVT::i64 ||
        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    return SelectADD_SUB_I64(N);
  }
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI = static_cast<const AMDGPURegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());
    const SIRegisterInfo *SIRI = static_cast<const SIRegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
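    // On SI, a BUILD_VECTOR may live in either SGPRs or VGPRs. Scan the users
    // and fall back to an SGPR class if any user expects a scalar operand.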
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
                                                    U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     SDLoc(N), EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }
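
  // A 64-bit immediate that cannot be encoded inline is materialized as two
  // 32-bit S_MOV_B32s stitched back together with a REG_SEQUENCE.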
  case ISD::Constant:
  case ISD::ConstantFP: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                               CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                        CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }

  case AMDGPUISD::REGISTER_LOAD: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64,
                                                    MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    // Transformation function, pack the offset and width of a BFE into
    // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
    // source, bits [5:0] contain the offset and bits [22:16] the width.
    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    uint32_t PackedVal = OffsetVal | WidthVal << 16;

    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                                  SDLoc(N),
                                  MVT::i32,
                                  N->getOperand(0),
                                  PackedOffsetWidth);
  }

  case AMDGPUISD::DIV_SCALE: {
    return SelectDIV_SCALE(N);
  }
  }
  return SelectCode(N);
}

bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
  assert(AS != 0 && "Use checkPrivateAddress instead.");
  if (!Ptr)
    return false;

  return Ptr->getType()->getPointerAddressSpace() == AS;
}

bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
  if (Op->getPseudoValue())
    return true;

  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  return false;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  const Value *MemVal = N->getMemOperand()->getValue();
  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  const Value *MemVal = N->getMemOperand()->getValue();
  if (CbId == -1)
    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);

  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}
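
// Constant-address loads that cannot be done as SI scalar reads (pre-SI
// targets, or loads narrower than 32 bits) are treated as global loads.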
bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkPrivateAddress(N->getMemOperand())) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }

  const Value *MemVal = N->getMemOperand()->getValue();
  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isCFDepth0() const {
  // FIXME: Figure out a way to use DominatorTree analysis here.
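  // Approximation: only the function's entry block and its final block are
  // treated as being outside of any control-flow construct.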
  const BasicBlock *CurBlock = FuncInfo->MBB->getBasicBlock();
  const Function *Fn = FuncInfo->Fn;
  return &Fn->front() == CurBlock || &Fn->back() == CurBlock;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//
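
// Match a constant address and convert the byte offset into the
// dword-granular immediate pointer the patterns expect.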
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue& IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue& BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}
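
// Expand a 64-bit add/sub into two 32-bit halves: the low half produces the
// carry, the high half consumes it via the ADDC/SUBB (or VALU carry) opcodes,
// and the two results are recombined with a REG_SEQUENCE.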
SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  bool IsAdd = (N->getOpcode() == ISD::ADD);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  if (!isCFDepth0()) {
    Opc = IsAdd ? AMDGPU::V_ADD_I32_e32 : AMDGPU::V_SUB_I32_e32;
    CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e32 : AMDGPU::V_SUBB_U32_e32;
  }

  SDNode *AddLo = CurDAG->getMachineNode(Opc, DL, VTList, AddLoArgs);
  SDValue Carry(AddLo, 1);
  SDNode *AddHi
      = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
                               SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  SDValue Args[5] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
    SDValue(AddLo, 0),
    Sub0,
    SDValue(AddHi, 0),
    Sub1,
  };
  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}
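
// DIV_SCALE is selected straight to V_DIV_SCALE_F32/F64; the zero operands
// below leave the VOP3 source-modifier, clamp, and omod fields cleared.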
SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
      = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  const SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
  const SDValue False = CurDAG->getTargetConstant(0, MVT::i1);
  SDValue Ops[] = {
    Zero,             // src0_modifiers
    N->getOperand(0), // src0
    Zero,             // src1_modifiers
    N->getOperand(1), // src1
    Zero,             // src2_modifiers
    N->getOperand(2), // src2
    False,            // clamp
    Zero              // omod
  };

  return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}

bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                                         unsigned OffsetBits) const {
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS)
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}
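
// Match a DS address as a base pointer plus a legal 16-bit immediate offset,
// falling back to a zero offset when the constant cannot be folded.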
bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = N1;
      return true;
    }
  }

  // Default case: no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}
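
// For 64-bit DS ops (e.g. read2/write2) the two 8-bit offsets are in dword
// units, so the byte offset is scaled by 4 before being range checked.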
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    unsigned DWordOffset0 = C1->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;

    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
      // (add n0, c0)
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
      return true;
    }
  }

  // Default case: no offset.
  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, MVT::i8);
  return true;
}

static SDValue wrapAddr64Rsrc(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
  return SDValue(DAG->getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::v4i32,
                                     Ptr), 0);
}

static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
  return isUInt<12>(Imm->getZExtValue());
}
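
// Decompose an address into the MUBUF operand fields: a base pointer for the
// resource, an optional 64-bit VGPR address, and a 12-bit immediate offset,
// with the offen/idxen/addr64 flags describing which fields are in use.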
void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE) const {

  GLC = CurDAG->getTargetConstant(0, MVT::i1);
  SLC = CurDAG->getTargetConstant(0, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, MVT::i32);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {
      if (N0.getOpcode() == ISD::ADD) {
        // (add (add N2, N3), C1) -> addr64
        SDValue N2 = N0.getOperand(0);
        SDValue N3 = N0.getOperand(1);
        Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
        Ptr = N2;
        VAddr = N3;
        Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
        return;
      }

      // (add N0, C1) -> offset
      VAddr = CurDAG->getTargetConstant(0, MVT::i32);
      Ptr = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return;
    }
  }

  if (Addr.getOpcode() == ISD::ADD) {
    // (add N0, N1) -> addr64
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
    Ptr = N0;
    VAddr = N1;
    Offset = CurDAG->getTargetConstant(0, MVT::i16);
    return;
  }

  // default case -> offset
  VAddr = CurDAG->getTargetConstant(0, MVT::i32);
  Ptr = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i16);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr,
                                           SDValue &Offset) const {
  SDValue Ptr, SOffset, Offen, Idxen, Addr64, GLC, SLC, TFE;

  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE);

  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
  if (C->getSExtValue()) {
    SDLoc DL(Addr);
    SRsrc = wrapAddr64Rsrc(CurDAG, DL, Ptr);
    return true;
  }
  return false;
}
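
// Build a v4i32 buffer resource descriptor from a 64-bit base pointer (with
// RsrcDword1 merged into its high half) and the descriptor's last two dwords.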
static SDValue buildRSRC(SelectionDAG *DAG, SDLoc DL, SDValue Ptr,
                         uint32_t RsrcDword1, uint64_t RsrcDword2And3) {

  SDValue PtrLo = DAG->getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG->getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  if (RsrcDword1) {
    PtrHi = SDValue(DAG->getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                    DAG->getConstant(RsrcDword1, MVT::i32)), 0);
  }

  SDValue DataLo = DAG->getTargetConstant(
      RsrcDword2And3 & APInt::getAllOnesValue(32).getZExtValue(), MVT::i32);
  SDValue DataHi = DAG->getTargetConstant(RsrcDword2And3 >> 32, MVT::i32);

  const SDValue Ops[] = { PtrLo, PtrHi, DataLo, DataHi };
  return SDValue(DAG->getMachineNode(AMDGPU::SI_BUFFER_RSRC, DL,
                                     MVT::v4i32, Ops), 0);
}

/// \brief Return a resource descriptor with the 'Add TID' bit enabled
///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
///        of the resource descriptor) to create an offset, which is added to
///        the resource pointer.
static SDValue buildScratchRSRC(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {

  uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
                  0xffffffff; // Size

  return buildRSRC(DAG, DL, Ptr, 0, Rsrc);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
                                            SDValue &VAddr, SDValue &SOffset,
                                            SDValue &ImmOffset) const {

  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

  unsigned ScratchPtrReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
  unsigned ScratchOffsetReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
  Lowering.CreateLiveInRegister(*CurDAG, &AMDGPU::SReg_32RegClass,
                                ScratchOffsetReg, MVT::i32);

  Rsrc = buildScratchRSRC(CurDAG, DL,
      CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
                             MRI.getLiveInVirtReg(ScratchPtrReg), MVT::i64));
  SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
      MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);

  // (add n0, c1)
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {
      VAddr = Addr.getOperand(0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return true;
    }
  }

  // (add FI, n0)
  if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
      isa<FrameIndexSDNode>(Addr.getOperand(0))) {
    VAddr = Addr.getOperand(1);
    ImmOffset = Addr.getOperand(0);
    return true;
  }

  // (FI)
  if (isa<FrameIndexSDNode>(Addr)) {
    VAddr = SDValue(CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
                                           CurDAG->getConstant(0, MVT::i32)), 0);
    ImmOffset = Addr;
    return true;
  }

  // (node)
  VAddr = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;

  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE);

  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
    uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);
    SRsrc = buildRSRC(CurDAG, DL, Ptr, 0, Rsrc);
    return true;
  }
  return false;
}
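
// Fold fneg/fabs into the VOP3 source-modifier bits so they are encoded in
// the instruction instead of being selected as separate operations.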
bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                        SDValue &SrcMods) const {
  unsigned Mods = 0;

  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods |= SISrcMods::NEG;
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::FABS) {
    Mods |= SISrcMods::ABS;
    Src = Src.getOperand(0);
  }

  SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32);

  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, SDValue &Clamp,
                                         SDValue &Omod) const {
  // FIXME: Handle Clamp and Omod
  Clamp = CurDAG->getTargetConstant(0, MVT::i32);
  Omod = CurDAG->getTargetConstant(0, MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}
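
// After selection, run the target's PostISelFolding peepholes over all
// machine nodes until no more changes are made.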
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more.
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}