//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"

using namespace llvm;
//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU-specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget *Subtarget;

public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();
  bool runOnMachineFunction(MachineFunction &MF) override;
  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *ptr, unsigned int addrspace);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isFlatStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isFlatLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  void SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &GLC,
                         SDValue &SLC, SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &GLC) const;
  SDNode *SelectAddrSpaceCast(SDNode *N);
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, SDValue &SrcMods,
                            SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
                                 SDValue &Clamp, SDValue &Omod) const;

  SDNode *SelectADD_SUB_I64(SDNode *N);
  SDNode *SelectDIV_SCALE(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace
/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
    : SelectionDAGISel(TM) {}
bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
  Subtarget = &static_cast<const AMDGPUSubtarget &>(MF.getSubtarget());
  return SelectionDAGISel::runOnMachineFunction(MF);
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}
/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                               SubRegIdx);
  }
  }
}
SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}
bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue& R1, SDValue& R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}
bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}
bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr; // Already selected.
  }

  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
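  // For example, (store val, (add i64:%ptr, 16)) can then become a single
  // MUBUF store with the constant folded into its immediate offset field
  // (illustrative; see the MUBUF selection below).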
  case ISD::ADD:
  case ISD::SUB: {
    if (N->getValueType(0) != MVT::i64 ||
        Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    return SelectADD_SUB_I64(N);
  }
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
           U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (static_cast<const SIRegisterInfo *>(TRI)->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VGPR_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // sequence that adds a 128-bit reg copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }
    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }
    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
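    // e.g. a v4i32 BUILD_VECTOR needs 4 * 2 + 1 = 9 operands: the register
    // class ID followed by four (value, subreg index) pairs.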
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     SDLoc(N), EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }
  case ISD::Constant:
  case ISD::ConstantFP: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                        CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }
  case ISD::LOAD: {
    // To simplify the TableGen patterns, we replace all i64 loads with v2i32
    // loads. Alternatively, we could promote i64 loads to v2i32 during DAG
    // legalization; however, some places in the DAG legalizer (e.g.
    // ExpandUnalignedLoad) assume that i64 loads remain legal whenever i64 is
    // a legal type, so doing this promotion that early can cause problems.
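    // i.e. (i64 (load %ptr)) becomes (i64 (bitcast (v2i32 (load %ptr)))).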
    EVT VT = N->getValueType(0);
    LoadSDNode *LD = cast<LoadSDNode>(N);
    if (VT != MVT::i64 || LD->getExtensionType() != ISD::NON_EXTLOAD)
      break;

    SDValue NewLoad = CurDAG->getLoad(MVT::v2i32, SDLoc(N), LD->getChain(),
                                      LD->getBasePtr(), LD->getMemOperand());
    SDValue BitCast = CurDAG->getNode(ISD::BITCAST, SDLoc(N),
                                      MVT::i64, NewLoad);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLoad.getValue(1));
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), BitCast);
    SelectCode(NewLoad.getNode());
    N = BitCast.getNode();
    break;
  }
  case ISD::STORE: {
    // Handle i64 stores here for the same reason mentioned above for loads.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    SDValue Value = ST->getValue();
    if (Value.getValueType() != MVT::i64 || ST->isTruncatingStore())
      break;

    SDValue NewValue = CurDAG->getNode(ISD::BITCAST, SDLoc(N),
                                       MVT::v2i32, Value);
    SDValue NewStore = CurDAG->getStore(ST->getChain(), SDLoc(N), NewValue,
                                        ST->getBasePtr(), ST->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewStore);

    if (NewValue.getOpcode() == ISD::BITCAST) {
      Select(NewStore.getNode());
      return SelectCode(NewValue.getNode());
    }

    // getNode() may fold the bitcast if its input was another bitcast. If that
    // happens we should only select the new store.
    N = NewStore.getNode();
    break;
  }
  case AMDGPUISD::REGISTER_LOAD: {
    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64,
                                                    MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version,
    // which has a separate operand for the offset and width, the scalar
    // version packs the width and offset into a single operand. Try to move to
    // the scalar version if the offsets are constant, so that we can try to
    // keep extended loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    // Transformation function: pack the offset and width of a BFE into
    // the format expected by S_BFE_I32 / S_BFE_U32. In the second
    // source, bits [5:0] contain the offset and bits [22:16] the width.

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    uint32_t PackedVal = OffsetVal | WidthVal << 16;
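    // e.g. with illustrative values offset = 8 and width = 4, the packed
    // operand is (4 << 16) | 8 = 0x40008.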
    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                                  SDLoc(N),
                                  MVT::i32,
                                  N->getOperand(0),
                                  PackedOffsetWidth);
  }
  case AMDGPUISD::DIV_SCALE: {
    return SelectDIV_SCALE(N);
  }
  case ISD::CopyToReg: {
    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());
    Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::ADDRSPACECAST:
    return SelectAddrSpaceCast(N);
  }

  return SelectCode(N);
}
bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
  assert(AS != 0 && "Use checkPrivateAddress instead.");
  if (!Ptr)
    return false;

  return Ptr->getType()->getPointerAddressSpace() == AS;
}
bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
  if (Op->getPseudoValue())
    return true;

  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  return false;
}
bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  const Value *MemVal = N->getMemOperand()->getValue();
  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isFlatStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  const Value *MemVal = N->getMemOperand()->getValue();
  if (CbId == -1)
    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);

  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}
bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS)
    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32))
      return true;

  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isFlatLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}
bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkPrivateAddress(N->getMemOperand())) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }

  const Value *MemVal = N->getMemOperand()->getValue();
  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::FLAT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}
const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}
//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue& IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}
bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue& BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}
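// Split a 64-bit add/sub into 32-bit halves: the low halves go through
// S_ADD_U32 / S_SUB_U32 (producing a carry), the high halves consume it via
// S_ADDC_U32 / S_SUBB_U32, and the two partial results are reassembled into a
// 64-bit register with a REG_SEQUENCE.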
SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  bool IsAdd = (N->getOpcode() == ISD::ADD);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  SDNode *AddLo = CurDAG->getMachineNode(Opc, DL, VTList, AddLoArgs);
  SDValue Carry(AddLo, 1);
  SDNode *AddHi
      = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
                               SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  SDValue Args[5] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
    SDValue(AddLo, 0),
    Sub0,
    SDValue(AddHi, 0),
    Sub1,
  };
  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}
// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
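// V_DIV_SCALE_F32 / V_DIV_SCALE_F64 produce both the scaled result and a
// condition flag, which is why the selection below asks for two result types
// (VT and MVT::i1).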
SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
      = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
  // omod
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(1), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(2), Ops[5], Ops[4]);
  return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}
bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                                         unsigned OffsetBits) const {
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS)
    return true;
  // On Southern Islands, instructions with a negative base value and an offset
  // don't seem to work.
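  // So only fold an offset whose base is provably non-negative; a cleared
  // sign bit is a conservative but sufficient check here.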
  return CurDAG->SignBitIsZero(Base);
}
bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return true;
    }
  }
  // If we have a constant address, prefer to put the constant into the
  // offset. This can save moves to load the constant address since multiple
  // operations can share the zero base address register, and enables merging
  // into read2 / write2 instructions.
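  // For instance, two 4-byte loads at constant addresses 0x100 and 0x104 can
  // share one v_mov_b32 of 0 as their base and later combine into a single
  // ds_read2_b32 (illustrative addresses).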
  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    if (isUInt<16>(CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                      SDLoc(Addr), MVT::i32,
                                                      Zero);
      Base = SDValue(MovZero, 0);
      Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), MVT::i16);
      return true;
    }
  }

  // Default case.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    unsigned DWordOffset0 = C1->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;

    // (add n0, c0)
    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
      return true;
    }
  }

  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    assert(4 * DWordOffset0 == CAddr->getZExtValue());

    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
      SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
      MachineSDNode *MovZero
          = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                   SDLoc(Addr), MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
      return true;
    }
  }

  // Default case.
  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, MVT::i8);
  return true;
}
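// MUBUF instructions encode an unsigned 12-bit immediate offset, so only
// byte offsets in [0, 4095] can be folded directly into the instruction.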
static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
  return isUInt<12>(Imm->getZExtValue());
}
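// Decompose Addr into the components of a MUBUF access: the 64-bit resource
// pointer (Ptr), an optional VGPR address (addr64 mode), a scalar offset
// register (SOffset), an immediate offset, and the various enable and
// cache-control bits.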
void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE) const {
  SDLoc DL(Addr);

  GLC = CurDAG->getTargetConstant(0, MVT::i1);
  SLC = CurDAG->getTargetConstant(0, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, MVT::i32);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (N0.getOpcode() == ISD::ADD) {
      // (add (add N2, N3), C1) -> addr64
      SDValue N2 = N0.getOperand(0);
      SDValue N3 = N0.getOperand(1);
      Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
      Ptr = N2;
      VAddr = N3;
    } else {
      // (add N0, C1) -> offset
      VAddr = CurDAG->getTargetConstant(0, MVT::i32);
      Ptr = N0;
    }

    if (isLegalMUBUFImmOffset(C1)) {
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return;
    } else if (isUInt<32>(C1->getZExtValue())) {
      // Illegal offset, store it in soffset.
      Offset = CurDAG->getTargetConstant(0, MVT::i16);
      SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                   CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i32)), 0);
      return;
    }
  }
  if (Addr.getOpcode() == ISD::ADD) {
    // (add N0, N1) -> addr64
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
    Ptr = N0;
    VAddr = N1;
    Offset = CurDAG->getTargetConstant(0, MVT::i16);
    return;
  }

  // Default case -> offset.
  VAddr = CurDAG->getTargetConstant(0, MVT::i32);
  Ptr = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i16);
}
bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset, SDValue &GLC,
                                           SDValue &SLC, SDValue &TFE) const {
  SDValue Ptr, Offen, Idxen, Addr64;

  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE);

  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
  if (C->getSExtValue()) {
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
    return true;
  }

  return false;
}
bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset,
                                           SDValue &SLC) const {
  SLC = CurDAG->getTargetConstant(0, MVT::i1);
  SDValue GLC, TFE;

  return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE);
}
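// Scratch (private) accesses are selected against a buffer resource built
// from the SCRATCH_RSRC_DWORD0/1 external symbols, with the preloaded scratch
// wave offset register supplying SOffset.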
bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
                                            SDValue &VAddr, SDValue &SOffset,
                                            SDValue &ImmOffset) const {
  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

  unsigned ScratchOffsetReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
  Lowering.CreateLiveInRegister(*CurDAG, &AMDGPU::SReg_32RegClass,
                                ScratchOffsetReg, MVT::i32);
  SDValue Sym0 = CurDAG->getExternalSymbol("SCRATCH_RSRC_DWORD0", MVT::i32);
  SDValue ScratchRsrcDword0 =
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym0), 0);

  SDValue Sym1 = CurDAG->getExternalSymbol("SCRATCH_RSRC_DWORD1", MVT::i32);
  SDValue ScratchRsrcDword1 =
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym1), 0);

  const SDValue RsrcOps[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
    ScratchRsrcDword0,
    CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
    ScratchRsrcDword1,
    CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
  };
  SDValue ScratchPtr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                      MVT::v2i32, RsrcOps), 0);
  Rsrc = SDValue(Lowering.buildScratchRSRC(*CurDAG, DL, ScratchPtr), 0);
  SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
      MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);

  // (add n0, c1)
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {
      VAddr = Addr.getOperand(0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return true;
    }
  }

  // (node)
  VAddr = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}
bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE);

  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
    uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
    return true;
  }
  return false;
}
bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset, SDValue &Offset,
                                           SDValue &GLC) const {
  SDValue SLC, TFE;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
}
// FIXME: This is incorrect and only enough to be able to compile.
SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
  AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(N);
  SDLoc DL(N);

  assert(Subtarget->hasFlatAddressSpace() &&
         "addrspacecast only supported with flat address space!");

  assert((ASC->getSrcAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
          ASC->getDestAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS) &&
         "Cannot cast address space to / from constant address!");

  assert((ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
          ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) &&
         "Can only cast to / from flat address space!");
  // The flat instructions read the address as the index of the VGPR holding
  // the address, so casting just reinterprets the base VGPR; all we need to
  // insert is a trunc, bitcast, or zext.
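  // For example, widening a 32-bit source to a 64-bit flat pointer is done
  // below with a REG_SEQUENCE placing the source in sub0 and an S_MOV_B32 of
  // 0 in sub1 (i.e. a zero-extension).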
  SDValue Src = ASC->getOperand(0);
  EVT DestVT = ASC->getValueType(0);
  EVT SrcVT = Src.getValueType();

  unsigned SrcSize = SrcVT.getSizeInBits();
  unsigned DestSize = DestVT.getSizeInBits();

  if (SrcSize > DestSize) {
    assert(SrcSize == 64 && DestSize == 32);
    return CurDAG->getMachineNode(
        TargetOpcode::EXTRACT_SUBREG,
        DL,
        DestVT,
        Src,
        CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32));
  }
  if (DestSize > SrcSize) {
    assert(SrcSize == 32 && DestSize == 64);

    // FIXME: This is probably wrong; we should never be defining
    // a register class with both VGPRs and SGPRs.
    SDValue RC = CurDAG->getTargetConstant(AMDGPU::VS_64RegClassID, MVT::i32);

    const SDValue Ops[] = {
      RC,
      Src,
      CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                     CurDAG->getConstant(0, MVT::i32)), 0),
      CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  assert(SrcSize == 64 && DestSize == 64);
  return CurDAG->getNode(ISD::BITCAST, DL, DestVT, Src).getNode();
}
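// Strip source modifiers (negation / absolute value) off a VOP3 operand and
// encode them in the SrcMods immediate; e.g. fneg(fabs(x)) selects as x with
// both the NEG and ABS bits set.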
bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                        SDValue &SrcMods) const {
  unsigned Mods = 0;

  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods |= SISrcMods::NEG;
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::FABS) {
    Mods |= SISrcMods::ABS;
    Src = Src.getOperand(0);
  }

  SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32);

  return true;
}
bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, SDValue &Clamp,
                                         SDValue &Omod) const {
  // FIXME: Handle Clamp and Omod.
  Clamp = CurDAG->getTargetConstant(0, MVT::i32);
  Omod = CurDAG->getTargetConstant(0, MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}
bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src,
                                              SDValue &SrcMods,
                                              SDValue &Omod) const {
  // FIXME: Handle Omod.
  Omod = CurDAG->getTargetConstant(0, MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}
bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
                                                   SDValue &SrcMods,
                                                   SDValue &Clamp,
                                                   SDValue &Omod) const {
  Clamp = Omod = CurDAG->getTargetConstant(0, MVT::i32);
  return SelectVOP3Mods(In, Src, SrcMods);
}
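// Run target-specific folding over the selected machine nodes until a fixed
// point is reached, since each replacement can expose further opportunities.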
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more.
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}