//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Compiler.h"
#include <list>
#include <queue>

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N);
  virtual const char *getPassName() const;
  virtual void PostprocessISelDAG();

private:
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
  SDValue SimplifyI24(SDValue &Op);
  bool SelectI24(SDValue Addr, SDValue &Op);
  bool SelectU24(SDValue Addr, SDValue &Op);

  static bool checkType(const Value *ptr, unsigned int addrspace);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr,
                                       SDValue &BaseReg, SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
}  // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    return NULL;
  }
  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
    int RegClass = Desc.OpInfo[Desc.getNumDefs() + OpNo].RegClass;
    if (RegClass == -1) {
      return NULL;
    }
    return TM.getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(
                      cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
    unsigned SubRegIdx =
            dyn_cast<ConstantSDNode>(N->getOperand(OpNo + 1))->getZExtValue();
    return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}
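
// Split a parameter address into a (base, offset) pair: a frame index or a
// plain pointer is paired with a zero offset, while an ISD::ADD is split into
// its two operands.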
bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}
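
// Select() is the main entry point of the selector. Most nodes fall through
// to the TableGen-generated matcher (SelectCode); the cases below intercept
// nodes that need target-specific handling first: constant-buffer reads,
// BUILD_VECTOR, BUILD_PAIR, and (on R600) immediate operands.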
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  const R600InstrInfo *TII =
                      static_cast<const R600InstrInfo*>(TM.getInstrInfo());
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    return NULL;   // Already selected.
  }
  switch (Opc) {
  default: break;
  case AMDGPUISD::CONST_ADDRESS: {
    for (SDNode::use_iterator I = N->use_begin(), Next = llvm::next(I);
                              I != SDNode::use_end(); I = Next) {
      Next = llvm::next(I);
      if (!I->isMachineOpcode()) {
        continue;
      }
      unsigned Opcode = I->getMachineOpcode();
      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
      int SrcIdx = I.getOperandNo();
      int SelIdx;
      // Unlike MachineInstrs, SDNodes do not have results in their operand
      // list, so we need to increment the SrcIdx, since
      // R600InstrInfo::getOperandIdx is based on the MachineInstr indices.
      if (HasDst) {
        SrcIdx++;
      }

      SelIdx = TII->getSelIdx(I->getMachineOpcode(), SrcIdx);
      if (SelIdx < 0) {
        continue;
      }

      SDValue CstOffset;
      if (N->getValueType(0).isVector() ||
          !SelectGlobalValueConstantOffset(N->getOperand(0), CstOffset))
        continue;

      // Gather constant values
      int SrcIndices[] = {
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
      };
      std::vector<unsigned> Consts;
      for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
        int OtherSrcIdx = SrcIndices[i];
        int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
        if (OtherSrcIdx < 0 || OtherSelIdx < 0) {
          continue;
        }
        if (HasDst) {
          OtherSrcIdx--;
          OtherSelIdx--;
        }
        if (RegisterSDNode *Reg =
                         dyn_cast<RegisterSDNode>(I->getOperand(OtherSrcIdx))) {
          if (Reg->getReg() == AMDGPU::ALU_CONST) {
            ConstantSDNode *Cst =
                dyn_cast<ConstantSDNode>(I->getOperand(OtherSelIdx));
            Consts.push_back(Cst->getZExtValue());
          }
        }
      }

      ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
      Consts.push_back(Cst->getZExtValue());
      if (!TII->fitsConstReadLimitations(Consts))
        continue;

      // Convert back to SDNode indices
      if (HasDst) {
        SrcIdx--;
        SelIdx--;
      }
      std::vector<SDValue> Ops;
      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (i == SrcIdx) {
          Ops.push_back(CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32));
        } else if (i == SelIdx) {
          Ops.push_back(CstOffset);
        } else {
          Ops.push_back(I->getOperand(i));
        }
      }
      CurDAG->UpdateNodeOperands(*I, Ops.data(), Ops.size());
    }
    break;
  }
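  // A BUILD_VECTOR is selected to a single REG_SEQUENCE. On SI the register
  // class depends on the users: if any user requires an SGPR class, the
  // vector is built in SGPRs, otherwise in VGPRs. Pre-SI targets always use
  // the R600 64/128-bit vector register classes.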
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    const AMDGPURegisterInfo *TRI =
                   static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
    const SIRegisterInfo *SIRI =
                   static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
                                U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS,
                                  VT.getVectorElementType(),
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SDValue RegSeqArgs[16 * 2 + 1];

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    for (unsigned i = 0; i < N->getNumOperands(); i++) {
      // XXX: Why is this here?
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
              CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }
    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs, 2 * N->getNumOperands() + 1);
  }
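  // On SI, a BUILD_PAIR becomes a REG_SEQUENCE that places the two halves
  // into adjacent subregisters of the wider register.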
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }
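
  // On R600, fold this constant into its uses where possible: 0.0, 0.5 and
  // 1.0 (and integer 0 and 1) map to the dedicated inline-constant registers
  // ZERO, HALF, ONE and ONE_INT; any other value goes through the
  // ALU_LITERAL_X slot of an ALU instruction.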
  case ISD::ConstantFP:
  case ISD::Constant: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    // XXX: Custom immediate lowering not implemented yet. Instead we use
    // pseudo instructions defined in SIInstructions.td
    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }

    uint64_t ImmValue = 0;
    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;

    if (N->getOpcode() == ISD::ConstantFP) {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::f64);

      ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N);
      APFloat Value = C->getValueAPF();
      float FloatValue = Value.convertToFloat();
      if (FloatValue == 0.0) {
        ImmReg = AMDGPU::ZERO;
      } else if (FloatValue == 0.5) {
        ImmReg = AMDGPU::HALF;
      } else if (FloatValue == 1.0) {
        ImmReg = AMDGPU::ONE;
      } else {
        ImmValue = Value.bitcastToAPInt().getZExtValue();
      }
    } else {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::i64);

      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
      if (C->getZExtValue() == 0) {
        ImmReg = AMDGPU::ZERO;
      } else if (C->getZExtValue() == 1) {
        ImmReg = AMDGPU::ONE_INT;
      } else {
        ImmValue = C->getZExtValue();
      }
    }

    for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
                              Use != SDNode::use_end(); Use = Next) {
      Next = llvm::next(Use);
      std::vector<SDValue> Ops;
      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
        Ops.push_back(Use->getOperand(i));
      }

      if (!Use->isMachineOpcode()) {
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          // We can only use literal constants (e.g. AMDGPU::ZERO,
          // AMDGPU::ONE, etc) in machine opcodes.
          continue;
        }
      } else {
        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
            (TII->get(Use->getMachineOpcode()).TSFlags &
             R600_InstFlag::VECTOR)) {
          continue;
        }

        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
                                        AMDGPU::OpName::literal);
        if (ImmIdx == -1) {
          continue;
        }

        if (TII->getOperandIdx(Use->getMachineOpcode(),
                               AMDGPU::OpName::dst) != -1) {
          // Subtract one from ImmIdx, because the DST operand is usually index
          // 0 for MachineInstrs, but we have no DST in the Ops vector.
          ImmIdx--;
        }

        // Check that we aren't already using an immediate.
        // XXX: It's possible for an instruction to have more than one
        // immediate operand, but this is not supported yet.
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
          assert(C);

          if (C->getZExtValue() != 0) {
            // This instruction is already using an immediate.
            continue;
          }

          // Set the immediate value
          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
        }
      }
      // Set the immediate register
      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);

      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
    }
    break;
  }
  }
  SDNode *Result = SelectCode(N);

  // Fold operands of selected node

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    const R600InstrInfo *TII =
        static_cast<const R600InstrInfo*>(TM.getInstrInfo());
    if (Result && Result->isMachineOpcode() &&
        Result->getMachineOpcode() == AMDGPU::DOT_4) {
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldDotOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);
    }
    if (Result && Result->isMachineOpcode() &&
        !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
        && TII->hasInstrModifiers(Result->getMachineOpcode())) {
      // Fold FNEG/FABS
      // TODO: Isel can generate multiple MachineInsts, we need to recursively
      // parse Result
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);

      // If the node has a single use which is CLAMP_R600, fold it.
      if (Result->hasOneUse() && Result->isMachineOpcode()) {
        SDNode *PotentialClamp = *Result->use_begin();
        if (PotentialClamp->isMachineOpcode() &&
            PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
          unsigned ClampIdx =
              TII->getOperandIdx(Result->getMachineOpcode(),
                                 AMDGPU::OpName::clamp);
          std::vector<SDValue> Ops;
          unsigned NumOp = Result->getNumOperands();
          for (unsigned i = 0; i < NumOp; ++i) {
            Ops.push_back(Result->getOperand(i));
          }
          Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
          Result = CurDAG->SelectNodeTo(PotentialClamp,
              Result->getMachineOpcode(), PotentialClamp->getVTList(),
              Ops.data(), NumOp);
        }
      }
    }
  }

  return Result;
}
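
// Fold an FNEG, FABS, or BITCAST feeding a source operand into the
// instruction's neg/abs source modifiers. Returns true if a modifier was
// folded; Src is rewritten to the modifier's input.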
bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
                                     SDValue &Abs, const R600InstrInfo *TII) {
  switch (Src.getOpcode()) {
  case ISD::FNEG:
    Src = Src.getOperand(0);
    Neg = CurDAG->getTargetConstant(1, MVT::i32);
    return true;
  case ISD::FABS:
    if (!Abs.getNode())
      return false;
    Src = Src.getOperand(0);
    Abs = CurDAG->getTargetConstant(1, MVT::i32);
    return true;
  case ISD::BITCAST:
    Src = Src.getOperand(0);
    return true;
  default:
    return false;
  }
}
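
// Try to fold a source modifier into any of the (up to three) sources of a
// scalar ALU instruction. Note the "- 1" when indexing into Ops: the operand
// indices from R600InstrInfo count the MachineInstr dst operand, which SDNode
// operand lists do not have.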
bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
    -1
  };

  for (unsigned i = 0; i < 3; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue &Src = Ops[OperandIdx[i] - 1];
    SDValue &Sel = Ops[SelIdx[i] - 1];
    SDValue &Neg = Ops[NegIdx[i] - 1];
    SDValue FakeAbs;
    SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
    if (FoldOperand(Src, Sel, Neg, Abs, TII))
      return true;
  }
  return false;
}
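
// Same as FoldOperands, but for the eight per-channel sources (src0/src1 for
// each of X, Y, Z, W) of a DOT_4 instruction.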
bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
  };

  for (unsigned i = 0; i < 8; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue &Src = Ops[OperandIdx[i] - 1];
    SDValue &Sel = Ops[SelIdx[i] - 1];
    SDValue &Neg = Ops[NegIdx[i] - 1];
    SDValue &Abs = Ops[AbsIdx[i] - 1];
    if (FoldOperand(Src, Sel, Neg, Abs, TII))
      return true;
  }
  return false;
}
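
// Address-space predicates used by the load/store selection patterns below.
// checkType reports whether a memory operation's source pointer lives in the
// given address space.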
bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
  if (!ptr) {
    return false;
  }
  Type *ptrType = ptr->getType();
  return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  if (CbId == -1) {
    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
  }
  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    if (MMO) {
      const Value *V = MMO->getValue();
      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }
  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!dyn_cast<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}
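
// Repeatedly run SimplifyDemandedBits on Op with only the low 24 bits
// demanded, so that nodes feeding the 24-bit operations matched by SelectI24
// and SelectU24 are simplified as far as possible.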
SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
  APInt Demanded = APInt(32, 0x00FFFFFF);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
  const TargetLowering *TLI = getTargetLowering();
  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
    CurDAG->ReplaceAllUsesWith(Op, TLO.New);
    CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
    return SimplifyI24(TLO.New);
  }
  return Op;
}
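
// Match a 32-bit value whose top nine bits are known to be copies of the
// sign bit, i.e. one that fits in 24 bits as a signed value.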
bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {

  assert(Op.getValueType() == MVT::i32);

  if (CurDAG->ComputeNumSignBits(Op) == 9) {
    I24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
  APInt KnownZero;
  APInt KnownOne;
  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);

  assert(Op.getValueType() == MVT::i32);

  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
  // i32. These smaller types are legal to use with the i24 instructions.
  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
      Op.getOpcode() == ISD::ANY_EXTEND ||
      ISD::isEXTLoad(Op.getNode())) {
    U24 = SimplifyI24(Op);
    return true;
  }
  return false;
}
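
// After selection, give the target lowering a chance to fold the selected
// machine nodes further (see AMDGPUTargetLowering::PostISelFolding).
// Generations before Southern Islands skip this step.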
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {

  if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    return;
  }

  // Go over all selected nodes and try to fold them a bit more
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ++I) {

    SDNode *Node = I;

    MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
    if (!MachineNode)
      continue;

    SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
    if (ResNode != Node) {
      ReplaceUses(Node, ResNode);
    }
  }
}