//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
14 #include "AMDGPUInstrInfo.h"
15 #include "AMDGPUISelLowering.h" // For AMDGPUISD
16 #include "AMDGPURegisterInfo.h"
17 #include "AMDGPUSubtarget.h"
18 #include "R600InstrInfo.h"
19 #include "SIDefines.h"
20 #include "SIISelLowering.h"
21 #include "SIMachineFunctionInfo.h"
22 #include "llvm/CodeGen/FunctionLoweringInfo.h"
23 #include "llvm/CodeGen/PseudoSourceValue.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/SelectionDAG.h"
27 #include "llvm/CodeGen/SelectionDAGISel.h"
28 #include "llvm/IR/Function.h"
//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget *Subtarget;

public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();
  bool runOnMachineFunction(MachineFunction &MF) override;
  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *Ptr, unsigned int AddrSpace);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isFlatStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int CbId) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isFlatLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  SDNode *glueCopyToM0(SDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  void SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &GLC,
                         SDValue &SLC, SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &GLC) const;
  SDNode *SelectAddrSpaceCast(SDNode *N);
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, SDValue &SrcMods,
                            SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
                                 SDValue &Clamp, SDValue &Omod) const;

  SDNode *SelectADD_SUB_I64(SDNode *N);
  SDNode *SelectDIV_SCALE(SDNode *N);

  SDNode *getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
                   uint32_t Offset, uint32_t Width);
  SDNode *SelectS_BFEFromShifts(SDNode *N);
  SDNode *SelectS_BFE(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};

} // end anonymous namespace
/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
    : SelectionDAGISel(TM) {}
bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
  Subtarget = &static_cast<const AMDGPUSubtarget &>(MF.getSubtarget());
  return SelectionDAGISel::runOnMachineFunction(MF);
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}
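// Returns true when N can be encoded as an inline immediate operand.
// (SITargetLowering::analyzeImmediate is assumed to report 0 for values that
// fit the inline-constant encoding.)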
bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}
/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                               SubRegIdx);
  }
  }
}
bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
  }
  return true;
}
bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}
bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
  }
  return true;
}
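// On SI and newer targets, DS (local memory) instructions implicitly read M0.
// Glue a copy that writes the maximum value (-1) to M0 in front of N so the
// copy and the memory operation stay adjacent through scheduling.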
SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N) const {
  if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
      !checkType(cast<MemSDNode>(N)->getMemOperand()->getValue(),
                 AMDGPUAS::LOCAL_ADDRESS))
    return N;

  const SITargetLowering &Lowering =
      *static_cast<const SITargetLowering *>(getTargetLowering());

  // Write max value to m0 before each load operation.

  SDValue M0 = Lowering.copyToM0(*CurDAG, CurDAG->getEntryNode(), SDLoc(N),
                                 CurDAG->getTargetConstant(-1, SDLoc(N),
                                                           MVT::i32));

  SDValue Glue = M0.getValue(1);

  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    Ops.push_back(N->getOperand(i));
  }
  Ops.push_back(Glue);
  CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);

  return N;
}
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr; // Already selected.
  }

  if (isa<AtomicSDNode>(N))
    N = glueCopyToM0(N);

  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADD:
  case ISD::SUB: {
    if (N->getValueType(0) != MVT::i64 ||
        Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    return SelectADD_SUB_I64(N);
  }
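  // Build vectors are lowered to REG_SEQUENCE nodes. On SI+ the register class
  // (SGPR vs. VGPR) is chosen by scanning the already-selected uses: if any
  // use requires an SGPR class, the whole vector is built in SGPRs.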
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
           U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (static_cast<const SIRegisterInfo *>(TRI)->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VGPR_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDLoc DL(N);
    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), DL,
                                    MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     DL, EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), DL,
                                      MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    SDLoc DL(N);
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  DL, N->getValueType(0), Ops);
  }
  case ISD::Constant:
  case ISD::ConstantFP: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDLoc DL(N);
    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                                        CurDAG->getConstant(Imm & 0xFFFFFFFF,
                                                            DL, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                                        CurDAG->getConstant(Imm >> 32, DL,
                                                            MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                  N->getValueType(0), Ops);
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    SDLoc SL(N);
    EVT VT = N->getValueType(0);

    if (VT != MVT::i64 || LD->getExtensionType() != ISD::NON_EXTLOAD) {
      N = glueCopyToM0(N);
      break;
    }

    // To simplify the TableGen patterns, we replace all i64 loads with v2i32
    // loads. Alternatively, we could promote i64 loads to v2i32 during DAG
    // legalization; however, some places in the DAG legalizer (e.g.
    // ExpandUnalignedLoad) assume that if i64 is legal, then so is v2i32, so
    // doing this promotion early can cause problems.

    SDValue NewLoad = CurDAG->getLoad(MVT::v2i32, SDLoc(N), LD->getChain(),
                                      LD->getBasePtr(), LD->getMemOperand());
    SDValue BitCast = CurDAG->getNode(ISD::BITCAST, SL,
                                      MVT::i64, NewLoad);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLoad.getValue(1));
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), BitCast);
    SDNode *Load = glueCopyToM0(NewLoad.getNode());
    SelectCode(Load);
    N = BitCast.getNode();
    break;
  }
  case ISD::STORE: {
    // Handle i64 stores here for the same reason mentioned above for loads.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    SDValue Value = ST->getValue();
    if (Value.getValueType() == MVT::i64 && !ST->isTruncatingStore()) {

      SDValue NewValue = CurDAG->getNode(ISD::BITCAST, SDLoc(N),
                                         MVT::v2i32, Value);
      SDValue NewStore = CurDAG->getStore(ST->getChain(), SDLoc(N), NewValue,
                                          ST->getBasePtr(), ST->getMemOperand());

      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewStore);

      if (NewValue.getOpcode() == ISD::BITCAST) {
        Select(NewStore.getNode());
        return SelectCode(NewValue.getNode());
      }

      // getNode() may fold the bitcast if its input was another bitcast. If
      // that happens we should only select the new store.
      N = NewStore.getNode();
    }

    break;
  }
  case AMDGPUISD::REGISTER_LOAD: {
    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SDLoc DL(N);
    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, DL, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, DL,
                                  CurDAG->getVTList(MVT::i32, MVT::i64,
                                                    MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    SDLoc DL(N);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, DL, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, DL,
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    return getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32, SDLoc(N),
                    N->getOperand(0), OffsetVal, WidthVal);
  }
  case AMDGPUISD::DIV_SCALE: {
    return SelectDIV_SCALE(N);
  }
  case ISD::CopyToReg: {
    const SITargetLowering &Lowering =
        *static_cast<const SITargetLowering *>(getTargetLowering());
    Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::ADDRSPACECAST:
    return SelectAddrSpaceCast(N);
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
    if (N->getValueType(0) != MVT::i32 ||
        Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    return SelectS_BFE(N);
  }

  return SelectCode(N);
}
bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
  assert(AS != 0 && "Use checkPrivateAddress instead.");
  if (!Ptr)
    return false;

  return Ptr->getType()->getPointerAddressSpace() == AS;
}

bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
  if (Op->getPseudoValue())
    return true;

  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  return false;
}
bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  const Value *MemVal = N->getMemOperand()->getValue();
  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isFlatStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  const Value *MemVal = N->getMemOperand()->getValue();
  if (CbId == -1)
    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);

  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS)
    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32))
      return true;

  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isFlatLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkPrivateAddress(N->getMemOperand())) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }

  const Value *MemVal = N->getMemOperand()->getValue();
  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::FLAT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}
const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
                                       true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
    return true;
  }
  return false;
}
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
  return true;
}
bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}
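// Expand a 64-bit add/sub into a 32-bit operation on the low half and a
// carry-consuming operation on the high half (S_ADD_U32 + S_ADDC_U32, or
// S_SUB_U32 + S_SUBB_U32), then recombine the halves with a REG_SEQUENCE.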
SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  bool IsAdd = (N->getOpcode() == ISD::ADD);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  SDNode *AddLo = CurDAG->getMachineNode(Opc, DL, VTList, AddLoArgs);
  SDValue Carry(AddLo, 1);
  SDNode *AddHi
      = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
                               SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  SDValue Args[5] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo, 0),
    Sub0,
    SDValue(AddHi, 0),
    Sub1,
  };
  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}
// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
      = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
  // omod
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(1), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(2), Ops[5], Ops[4]);
  return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}
bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                                         unsigned OffsetBits) const {
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS)
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}
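// Match a DS address of the form (add base, const) where the constant fits in
// the instruction's 16-bit offset field; otherwise try a constant address
// with a zero base, and fall back to a zero offset.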
bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = N1;
      return true;
    }
  }

  SDLoc DL(Addr);

  // If we have a constant address, prefer to put the constant into the
  // offset. This can save moves to load the constant address since multiple
  // operations can share the zero base address register, and enables merging
  // into read2 / write2 instructions.
  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    if (isUInt<16>(CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                      DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = Addr;
      return true;
    }
  }

  // Default case.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  return true;
}
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    unsigned DWordOffset0 = C1->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    // (add n0, c0)
    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  }

  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    assert(4 * DWordOffset0 == CAddr->getZExtValue());

    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero
          = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                   DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  }

  // Default case.
  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
  return true;
}
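// MUBUF instructions encode the immediate offset in a 12-bit unsigned field.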
static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
  return isUInt<12>(Imm->getZExtValue());
}
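// Decompose a generic address into the operand set MUBUF instructions expect:
// either the addr64 form (resource pointer + VGPR address + immediate offset)
// or the offset form (resource pointer + immediate/scalar offset), setting
// the offen/idxen/addr64 enable bits to match.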
void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE) const {
  SDLoc DL(Addr);

  GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (N0.getOpcode() == ISD::ADD) {
      // (add (add N2, N3), C1) -> addr64
      SDValue N2 = N0.getOperand(0);
      SDValue N3 = N0.getOperand(1);
      Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
      Ptr = N2;
      VAddr = N3;
    } else {
      // (add N0, C1) -> offset
      VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
      Ptr = N0;
    }

    if (isLegalMUBUFImmOffset(C1)) {
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return;
    } else if (isUInt<32>(C1->getZExtValue())) {
      // Illegal offset, store it in soffset.
      Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
      SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                   CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
                        0);
      return;
    }
  }

  if (Addr.getOpcode() == ISD::ADD) {
    // (add N0, N1) -> addr64
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
    Ptr = N0;
    VAddr = N1;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
    return;
  }

  // Default case -> offset.
  VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
  Ptr = Addr;
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
}
bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset, SDValue &GLC,
                                           SDValue &SLC, SDValue &TFE) const {
  SDValue Ptr, Offen, Idxen, Addr64;

  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE);

  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
  if (C->getSExtValue()) {
    SDLoc DL(Addr);

    const SITargetLowering &Lowering =
        *static_cast<const SITargetLowering *>(getTargetLowering());

    SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset,
                                           SDValue &SLC) const {
  SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
  SDValue GLC, TFE;

  return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE);
}
bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
                                            SDValue &VAddr, SDValue &SOffset,
                                            SDValue &ImmOffset) const {
  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering &Lowering =
      *static_cast<const SITargetLowering *>(getTargetLowering());

  unsigned ScratchOffsetReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
  Lowering.CreateLiveInRegister(*CurDAG, &AMDGPU::SReg_32RegClass,
                                ScratchOffsetReg, MVT::i32);
  SDValue Sym0 = CurDAG->getExternalSymbol("SCRATCH_RSRC_DWORD0", MVT::i32);
  SDValue ScratchRsrcDword0 =
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym0), 0);

  SDValue Sym1 = CurDAG->getExternalSymbol("SCRATCH_RSRC_DWORD1", MVT::i32);
  SDValue ScratchRsrcDword1 =
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym1), 0);

  const SDValue RsrcOps[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
      ScratchRsrcDword0,
      CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      ScratchRsrcDword1,
      CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
  };
  SDValue ScratchPtr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                      MVT::v2i32, RsrcOps), 0);
  Rsrc = SDValue(Lowering.buildScratchRSRC(*CurDAG, DL, ScratchPtr), 0);
  SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
      MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);

  // (add n0, c1)
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {
      VAddr = Addr.getOperand(0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // (node)
  VAddr = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  return true;
}
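// Match the offset-only MUBUF form: when no VGPR address, index, or addr64
// component is needed, wrap the pointer in a default resource descriptor and
// use the immediate offset directly.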
bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE);

  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
    uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);

    const SITargetLowering &Lowering =
        *static_cast<const SITargetLowering *>(getTargetLowering());

    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset, SDValue &Offset,
                                           SDValue &GLC) const {
  SDValue SLC, TFE;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
}
// FIXME: This is incorrect and only enough to be able to compile.
SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
  AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(N);
  SDLoc DL(N);

  assert(Subtarget->hasFlatAddressSpace() &&
         "addrspacecast only supported with flat address space!");

  assert((ASC->getSrcAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
          ASC->getDestAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS) &&
         "Cannot cast address space to / from constant address!");

  assert((ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
          ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) &&
         "Can only cast to / from flat address space!");

  // The flat instructions read the address as the index of the VGPR holding
  // the address, so casting should just be reinterpreting the base VGPR, so
  // just insert trunc / bitcast / zext.

  SDValue Src = ASC->getOperand(0);
  EVT DestVT = ASC->getValueType(0);
  EVT SrcVT = Src.getValueType();

  unsigned SrcSize = SrcVT.getSizeInBits();
  unsigned DestSize = DestVT.getSizeInBits();

  if (SrcSize > DestSize) {
    assert(SrcSize == 64 && DestSize == 32);
    return CurDAG->getMachineNode(
        TargetOpcode::EXTRACT_SUBREG,
        DL,
        DestVT,
        Src,
        CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32));
  }

  if (DestSize > SrcSize) {
    assert(SrcSize == 32 && DestSize == 64);

    // FIXME: This is probably wrong, we should never be defining
    // a register class with both VGPRs and SGPRs.
    SDValue RC = CurDAG->getTargetConstant(AMDGPU::VS_64RegClassID, DL,
                                           MVT::i32);

    const SDValue Ops[] = {
      RC,
      Src,
      CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                                     CurDAG->getConstant(0, DL, MVT::i32)), 0),
      CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  DL, N->getValueType(0), Ops);
  }

  assert(SrcSize == 64 && DestSize == 64);
  return CurDAG->getNode(ISD::BITCAST, DL, DestVT, Src).getNode();
}
SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
                                     uint32_t Offset, uint32_t Width) {
  // Transformation function, pack the offset and width of a BFE into
  // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
  // source, bits [5:0] contain the offset and bits [22:16] the width.
  uint32_t PackedVal = Offset | (Width << 16);
  SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);

  return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
}
SDNode *AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
  // "((a << b) srl c)" ---> "BFE_U32 a, (c - b), (32 - c)"
  // "((a << b) sra c)" ---> "BFE_I32 a, (c - b), (32 - c)"
  // Predicate: 0 < b <= c < 32

  const SDValue &Shl = N->getOperand(0);
  ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));

  if (B && C) {
    uint32_t BVal = B->getZExtValue();
    uint32_t CVal = C->getZExtValue();

    if (0 < BVal && BVal <= CVal && CVal < 32) {
      bool Signed = N->getOpcode() == ISD::SRA;
      unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;

      return getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0),
                      CVal - BVal, 32 - CVal);
    }
  }
  return SelectCode(N);
}
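// Try to fold AND/SRL/SRA combinations with constant masks and shift amounts
// into a single S_BFE_U32 / S_BFE_I32 bitfield extract.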
SDNode *AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::AND:
    if (N->getOperand(0).getOpcode() == ISD::SRL) {
      // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
      // Predicate: isMask(mask)
      const SDValue &Srl = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue();

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);

          return getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), Srl.getOperand(0),
                          ShiftVal, WidthVal);
        }
      }
    }
    break;
  case ISD::SRL:
    if (N->getOperand(0).getOpcode() == ISD::AND) {
      // "((a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
      // Predicate: isMask(mask >> b)
      const SDValue &And = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);

          return getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), And.getOperand(0),
                          ShiftVal, WidthVal);
        }
      }
    } else if (N->getOperand(0).getOpcode() == ISD::SHL)
      return SelectS_BFEFromShifts(N);
    break;
  case ISD::SRA:
    if (N->getOperand(0).getOpcode() == ISD::SHL)
      return SelectS_BFEFromShifts(N);
    break;
  }

  return SelectCode(N);
}
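// Peel FNEG / FABS off the source operand and record them in the VOP3
// source-modifier bits instead of emitting separate instructions.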
bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                        SDValue &SrcMods) const {

  unsigned Mods = 0;

  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods |= SISrcMods::NEG;
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::FABS) {
    Mods |= SISrcMods::ABS;
    Src = Src.getOperand(0);
  }

  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);

  return true;
}
bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, SDValue &Clamp,
                                         SDValue &Omod) const {
  SDLoc DL(In);
  // FIXME: Handle Clamp and Omod.
  Clamp = CurDAG->getTargetConstant(0, DL, MVT::i32);
  Omod = CurDAG->getTargetConstant(0, DL, MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src,
                                              SDValue &SrcMods,
                                              SDValue &Omod) const {
  // FIXME: Handle Omod.
  Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
                                                   SDValue &SrcMods,
                                                   SDValue &Clamp,
                                                   SDValue &Omod) const {
  Clamp = Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
  return SelectVOP3Mods(In, Src, SrcMods);
}
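// Run the target's post-instruction-selection folding
// (AMDGPUTargetLowering::PostISelFolding) over all machine nodes until no
// further changes are made.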
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering *>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more.
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}