//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;

public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *Ptr, unsigned AS);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int CbId) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  /// \returns True if the current basic block being selected is at control
  /// flow depth 0, meaning that the current block dominates the exit block.
  bool isCFDepth0() const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  SDNode *SelectADD_I64(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
    : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}
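
// SITargetLowering::analyzeImmediate() returns 0 for constants the hardware
// can encode directly as an inline immediate operand, i.e. ones that need no
// extra literal dword.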
bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}

/// \brief Determine the register class for \p OpNo.
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo, or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return TM.getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(RCID);
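
    // REG_SEQUENCE operands come in (value, subreg-index) pairs after the
    // leading register-class ID operand, so the subreg index constraining
    // operand OpNo is the operand immediately after it.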
    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr;   // Already selected.
  }

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
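  // For example, (add i64 %ptr, 16) feeding a load can then be matched into
  // the load's addressing mode instead of being selected as a separate
  // 64-bit add.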
  case ISD::ADD: {
    if (N->getValueType(0) != MVT::i64 ||
        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    return SelectADD_I64(N);
  }
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI =
        static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
    const SIRegisterInfo *SIRI =
        static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
           U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
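
      // UseVReg now reflects the scan above: VGPRs by default, SGPRs when any
      // use requires an SGPR operand. Pick the class sized to the element
      // count.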
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    //  2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    //  1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
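
    // Resulting layout, e.g. for a 2-element vector:
    //   RegSeqArgs = { RegClassID, Elt0, sub0, Elt1, sub1 }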
    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     SDLoc(N), EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }
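
    // Materialize the 64-bit immediate as two 32-bit S_MOV_B32s and stitch
    // the halves back together with a REG_SEQUENCE.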
    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                              CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                        CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }
  case AMDGPUISD::REGISTER_LOAD: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64,
                                                    MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    // Transformation function: pack the offset and width of a BFE into the
    // format expected by S_BFE_I32 / S_BFE_U32. In the second source, bits
    // [5:0] contain the offset and bits [22:16] the width.

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    uint32_t PackedVal = OffsetVal | WidthVal << 16;
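    // For example, offset = 8 and width = 5 give
    // PackedVal = (5 << 16) | 8 = 0x50008.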

    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                                  SDLoc(N), MVT::i32, N->getOperand(0),
                                  PackedOffsetWidth);
  }
  }

  return SelectCode(N);
}

bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
  assert(AS != 0 && "Use checkPrivateAddress instead.");
  if (!Ptr)
    return false;

  return Ptr->getType()->getPointerAddressSpace() == AS;
}

bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
  if (Op->getPseudoValue())
    return true;

  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  return false;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  const Value *MemVal = N->getMemOperand()->getValue();
  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  const Value *MemVal = N->getMemOperand()->getValue();
  if (CbId == -1)
    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);

  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}
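
// Constant-address loads still count as global loads when the scalar path is
// unavailable: either the target predates Southern Islands or the access is
// narrower than 32 bits.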
bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}
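
// A constant-pool access appears as a private-address operation whose memory
// operand carries a pseudo source value (the constant pool) rather than an IR
// pointer; this matches exactly that case.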
bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkPrivateAddress(N->getMemOperand())) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }

  const Value *MemVal = N->getMemOperand()->getValue();
  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}
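
// Conservative approximation of "control flow depth 0": only the entry and
// final blocks of the function are accepted.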
bool AMDGPUDAGToDAGISel::isCFDepth0() const {
  // FIXME: Figure out a way to use DominatorTree analysis here.
  const BasicBlock *CurBlock = FuncInfo->MBB->getBasicBlock();
  const Function *Fn = FuncInfo->Fn;
  return &Fn->front() == CurBlock || &Fn->back() == CurBlock;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case: no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}
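
// Select a 64-bit ISD::ADD as two 32-bit adds: split each operand into its
// low and high halves with EXTRACT_SUBREG, add the low halves first, and
// thread the carry into the add of the high halves. Scalar ALU opcodes can
// only be used at control flow depth 0; otherwise the VALU forms are needed.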
SDNode *AMDGPUDAGToDAGISel::SelectADD_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
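  // The second result type is glue: AddLo's carry-out is consumed below as
  // the carry-in of the high-half add.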

  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };

  SDNode *AddLo = CurDAG->getMachineNode(
      isCFDepth0() ? AMDGPU::S_ADD_I32 : AMDGPU::V_ADD_I32_e32,
      DL, VTList, AddLoArgs);
  SDValue Carry = SDValue(AddLo, 1);
  SDNode *AddHi = CurDAG->getMachineNode(
      isCFDepth0() ? AMDGPU::S_ADDC_U32 : AMDGPU::V_ADDC_U32_e32,
      DL, MVT::i32, SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  SDValue Args[5] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
    SDValue(AddLo, 0),
    Sub0,
    SDValue(AddHi, 0),
    Sub1,
  };
  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}

void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more.
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}