//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Compiler.h"

using namespace llvm;
//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N);
  virtual const char *getPassName() const;
  virtual void PostprocessISelDAG();

private:
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
  SDValue SimplifyI24(SDValue &Op);
  bool SelectI24(SDValue Addr, SDValue &Op);
  bool SelectU24(SDValue Addr, SDValue &Op);

  static bool checkType(const Value *ptr, unsigned int addrspace);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr,
                                       SDValue &BaseReg, SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    return NULL;
  }
  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return NULL;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1) {
      return NULL;
    }
    return TM.getRegisterInfo()->getRegClass(RegClass);
  }
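  // REG_SEQUENCE nodes carry the target register class ID in operand 0,
  // followed by (value, subreg index) pairs, so the subreg index that
  // corresponds to operand OpNo is found at operand OpNo + 1.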
  case AMDGPU::REG_SEQUENCE: {
    const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(
                      cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
    unsigned SubRegIdx =
            cast<ConstantSDNode>(N->getOperand(OpNo + 1))->getZExtValue();
    return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    return NULL; // Already selected.
  }
  switch (Opc) {
  default: break;
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    const AMDGPURegisterInfo *TRI =
        static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
    const SIRegisterInfo *SIRI =
        static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
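      // If any user of the vector requires its operand in an SGPR register
      // class, build the vector in scalar registers; otherwise default to
      // vector registers.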
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
                                                    U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
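      // Pick the narrowest SGPR/VGPR register class that holds the whole
      // vector; each element is 32 bits.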
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG,
      // which adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }
    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS,
                                  VT.getVectorElementType(),
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SDValue RegSeqArgs[16 * 2 + 1];

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    for (unsigned i = 0; i < N->getNumOperands(); i++) {
      // XXX: Why is this here?
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }
    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs, 2 * N->getNumOperands() + 1);
  }
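  // On SI and later, lower a 64-bit or 128-bit BUILD_PAIR into a REG_SEQUENCE
  // of the two halves tagged with the appropriate subregister indices.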
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }
  }
  return SelectCode(N);
}

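// Address-space predicates: classify loads and stores by the address space of
// the pointer value attached to the memory operand.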
bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
  if (!ptr) {
    return false;
  }
  Type *ptrType = ptr->getType();
  return cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  if (CbId == -1) {
    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
  }
  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    // Treat constant-address loads as global on targets that cannot select
    // them directly (pre-SI), or when the type is smaller than 32 bits.
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    if (MMO) {
      const Value *V = MMO->getValue();
      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }
  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

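// VTX_READ addressing: try to split the address into a base register plus a
// 16-bit immediate offset; a purely constant address is folded entirely into
// the offset field with the zero register as base.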
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

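// Repeatedly ask the target-independent simplifier to shrink Op given that
// only its low 24 bits are demanded, rewriting the DAG in place until no
// further simplification is possible.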
SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
  APInt Demanded = APInt(32, 0x00FFFFFF);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
  const TargetLowering *TLI = getTargetLowering();
  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
    CurDAG->ReplaceAllUsesWith(Op, TLO.New);
    CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
    return SimplifyI24(TLO.New);
  }
  return Op;
}

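// A 32-bit value fits in a signed 24-bit operand when its top 9 bits
// (32 - 24 + 1) are all copies of the sign bit.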
bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {

  assert(Op.getValueType() == MVT::i32);

  if (CurDAG->ComputeNumSignBits(Op) == 9) {
    I24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

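// A 32-bit value is a legal unsigned 24-bit operand when its top 8 bits are
// known to be zero; the extend cases are handled separately below.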
bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
  APInt KnownZero;
  APInt KnownOne;
  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);

  assert(Op.getValueType() == MVT::i32);

  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
  // i32. These smaller types are legal to use with the i24 instructions.
  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
      Op.getOpcode() == ISD::ANY_EXTEND ||
      ISD::isEXTLoad(Op.getNode())) {
    U24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

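// Run the target's post-instruction-selection folding over every machine node
// in the DAG, repeating until a whole pass makes no further changes.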
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}