1 //===-- AMDILISelLowering.cpp - AMDIL DAG Lowering Implementation ---------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //==-----------------------------------------------------------------------===//
11 /// \brief TargetLowering functions borrowed from AMDIL.
13 //===----------------------------------------------------------------------===//
15 #include "AMDGPUISelLowering.h"
16 #include "AMDGPURegisterInfo.h"
17 #include "AMDGPUSubtarget.h"
18 #include "AMDILIntrinsicInfo.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/CodeGen/PseudoSourceValue.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/SelectionDAGNodes.h"
24 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
25 #include "llvm/IR/CallingConv.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Instructions.h"
28 #include "llvm/IR/Intrinsics.h"
29 #include "llvm/Support/raw_ostream.h"
30 #include "llvm/Target/TargetInstrInfo.h"
31 #include "llvm/Target/TargetOptions.h"
34 //===----------------------------------------------------------------------===//
35 // TargetLowering Implementation Help Functions End
36 //===----------------------------------------------------------------------===//
38 //===----------------------------------------------------------------------===//
39 // TargetLowering Class Implementation Begins
40 //===----------------------------------------------------------------------===//
// Set up the AMDIL-specific parts of AMDGPUTargetLowering: declare, per
// value type, which ISD operations are Legal / Custom-lowered / Expanded,
// then tune scheduling preference and memory-op expansion thresholds.
// NOTE(review): the type tables below are elided in this view — confirm
// their contents against the full file before relying on them.
41 void AMDGPUTargetLowering::InitAMDILLowering() {
42 static const int types[] = {
61 static const int IntTypes[] = {
68 static const int FloatTypes[] = {
73 static const int VectorTypes[] = {
85 const size_t NumTypes = array_lengthof(types);
86 const size_t NumFloatTypes = array_lengthof(FloatTypes);
87 const size_t NumIntTypes = array_lengthof(IntTypes);
88 const size_t NumVectorTypes = array_lengthof(VectorTypes);
90 const AMDGPUSubtarget &STM = getTargetMachine().getSubtarget<AMDGPUSubtarget>();
91 // These are the current register classes that are
// Actions applied to every type in the combined 'types' table.
94 for (unsigned int x = 0; x < NumTypes; ++x) {
95 MVT::SimpleValueType VT = (MVT::SimpleValueType)types[x];
97 //FIXME: SIGN_EXTEND_INREG is not meaningful for floating point types
98 // We cannot sextinreg, expand to shifts
99 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
// No carry/borrow-chained arithmetic in IL; let the legalizer expand it.
100 setOperationAction(ISD::SUBE, VT, Expand);
101 setOperationAction(ISD::SUBC, VT, Expand);
102 setOperationAction(ISD::ADDE, VT, Expand);
103 setOperationAction(ISD::ADDC, VT, Expand);
// Branches: conditional branch is custom (see LowerBRCOND); jump tables
// and indirect branches are not supported.
104 setOperationAction(ISD::BRCOND, VT, Custom);
105 setOperationAction(ISD::BR_JT, VT, Expand);
106 setOperationAction(ISD::BRIND, VT, Expand);
107 // TODO: Implement custom UREM/SREM routines
108 setOperationAction(ISD::SREM, VT, Expand);
109 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
110 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
// Custom SDIV lowering (LowerSDIV) handles everything except 64-bit.
111 if (VT != MVT::i64 && VT != MVT::v2i64) {
112 setOperationAction(ISD::SDIV, VT, Custom);
// Floating-point-only actions.
115 for (unsigned int x = 0; x < NumFloatTypes; ++x) {
116 MVT::SimpleValueType VT = (MVT::SimpleValueType)FloatTypes[x];
118 // IL does not have these operations for floating point types
119 setOperationAction(ISD::FP_ROUND_INREG, VT, Expand);
120 setOperationAction(ISD::SETOLT, VT, Expand);
121 setOperationAction(ISD::SETOGE, VT, Expand);
122 setOperationAction(ISD::SETOGT, VT, Expand);
123 setOperationAction(ISD::SETOLE, VT, Expand);
124 setOperationAction(ISD::SETULT, VT, Expand);
125 setOperationAction(ISD::SETUGE, VT, Expand);
126 setOperationAction(ISD::SETUGT, VT, Expand);
127 setOperationAction(ISD::SETULE, VT, Expand);
// Integer-only actions.
130 for (unsigned int x = 0; x < NumIntTypes; ++x) {
131 MVT::SimpleValueType VT = (MVT::SimpleValueType)IntTypes[x];
133 // GPU also does not have divrem function for signed or unsigned
134 setOperationAction(ISD::SDIVREM, VT, Expand);
136 // GPU does not have [S|U]MUL_LOHI functions as a single instruction
137 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
138 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
140 setOperationAction(ISD::BSWAP, VT, Expand);
142 // GPU doesn't have any counting operators
143 setOperationAction(ISD::CTPOP, VT, Expand);
144 setOperationAction(ISD::CTTZ, VT, Expand);
145 setOperationAction(ISD::CTLZ, VT, Expand);
// Vector-only actions: expand into scalar/element-wise forms.
148 for (unsigned int ii = 0; ii < NumVectorTypes; ++ii) {
149 MVT::SimpleValueType VT = (MVT::SimpleValueType)VectorTypes[ii];
151 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
152 setOperationAction(ISD::SDIVREM, VT, Expand);
153 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
154 // setOperationAction(ISD::VSETCC, VT, Expand);
155 setOperationAction(ISD::SELECT_CC, VT, Expand);
// 64-bit integer ops the hardware cannot do directly.
158 setOperationAction(ISD::MULHU, MVT::i64, Expand);
159 setOperationAction(ISD::MULHU, MVT::v2i64, Expand);
160 setOperationAction(ISD::MULHS, MVT::i64, Expand);
161 setOperationAction(ISD::MULHS, MVT::v2i64, Expand);
162 setOperationAction(ISD::ADD, MVT::v2i64, Expand);
163 setOperationAction(ISD::SREM, MVT::v2i64, Expand);
164 setOperationAction(ISD::Constant          , MVT::i64  , Legal);
165 setOperationAction(ISD::SDIV, MVT::v2i64, Expand);
166 setOperationAction(ISD::TRUNCATE, MVT::v2i64, Expand);
167 setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Expand);
168 setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Expand);
169 setOperationAction(ISD::ANY_EXTEND, MVT::v2i64, Expand);
// Double-precision actions, only when the subtarget has hardware FP64.
170 if (STM.hasHWFP64()) {
171 // we support loading/storing v2f64 but not operations on the type
172 setOperationAction(ISD::FADD, MVT::v2f64, Expand);
173 setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
174 setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
175 setOperationAction(ISD::FP_ROUND_INREG, MVT::v2f64, Expand);
176 setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);
177 setOperationAction(ISD::ConstantFP        , MVT::f64  , Legal);
178 // We want to expand vector conversions into their scalar
180 setOperationAction(ISD::TRUNCATE, MVT::v2f64, Expand);
181 setOperationAction(ISD::SIGN_EXTEND, MVT::v2f64, Expand);
182 setOperationAction(ISD::ZERO_EXTEND, MVT::v2f64, Expand);
183 setOperationAction(ISD::ANY_EXTEND, MVT::v2f64, Expand);
184 setOperationAction(ISD::FABS, MVT::f64, Expand);
185 setOperationAction(ISD::FABS, MVT::v2f64, Expand);
187 // TODO: Fix the UDIV24 algorithm so it works for these
188 // types correctly. This needs vector comparisons
189 // for this to work correctly.
190 setOperationAction(ISD::UDIV, MVT::v2i8, Expand);
191 setOperationAction(ISD::UDIV, MVT::v4i8, Expand);
192 setOperationAction(ISD::UDIV, MVT::v2i16, Expand);
193 setOperationAction(ISD::UDIV, MVT::v4i16, Expand);
194 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
195 setOperationAction(ISD::SUBC, MVT::Other, Expand);
196 setOperationAction(ISD::ADDE, MVT::Other, Expand);
197 setOperationAction(ISD::ADDC, MVT::Other, Expand);
198 setOperationAction(ISD::BRCOND, MVT::Other, Custom);
199 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
200 setOperationAction(ISD::BRIND, MVT::Other, Expand);
201 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand);
204 // Use the default implementation.
205 setOperationAction(ISD::ConstantFP        , MVT::f32    , Legal);
206 setOperationAction(ISD::Constant          , MVT::i32    , Legal);
// Schedule for register pressure; selects and branches are costly on
// this target, so tell the optimizer to avoid creating them.
208 setSchedulingPreference(Sched::RegPressure);
209 setPow2DivIsCheap(false);
210 setSelectIsExpensive(true);
211 setJumpIsExpensive(true);
// Very high inline-expansion thresholds: prefer long store sequences
// over library calls for memcpy/memmove/memset.
213 MaxStoresPerMemcpy  = 4096;
214 MaxStoresPerMemmove = 4096;
215 MaxStoresPerMemset  = 4096;
// TargetLowering hook: describe a target memory intrinsic (fills Info and
// returns true when 'Intrinsic' touches memory, per the TargetLowering
// contract). NOTE(review): the body is elided in this view — confirm
// which intrinsics are handled against the full file.
220 AMDGPUTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
221                                          const CallInst &I, unsigned Intrinsic) const {
225 // The backend supports 32 and 64 bit floating point immediates
// Legal FP immediates are exactly those whose scalar element type is
// f32 or f64; all other FP types are materialized some other way.
227 AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
228   if (VT.getScalarType().getSimpleVT().SimpleTy == MVT::f32
229       || VT.getScalarType().getSimpleVT().SimpleTy == MVT::f64) {
// Whether an FP constant of type VT should be shrunk to a smaller FP
// type in the constant pool; keyed off the same f32/f64 scalar-type
// test as isFPImmLegal above. NOTE(review): returned value for each
// branch is elided in this view — confirm against the full file.
237 AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
238   if (VT.getScalarType().getSimpleVT().SimpleTy == MVT::f32
239       || VT.getScalarType().getSimpleVT().SimpleTy == MVT::f64) {
247 // isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
248 // be zero. Op is expected to be a target specific node. Used by DAG
// Computes known-zero/known-one bits for target-specific nodes: the
// known bits of both operands are computed recursively and intersected,
// so a bit is only "known" if it is known on both the LHS and the RHS.
252 AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
256                                                      const SelectionDAG &DAG,
257                                                      unsigned Depth) const {
// Start from "nothing known"; the switch below refines this per opcode.
260   KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything
261   switch (Op.getOpcode()) {
264       DAG.ComputeMaskedBits(
270       DAG.ComputeMaskedBits(
// Sanity: a bit can never be simultaneously known-one and known-zero.
275       assert((KnownZero & KnownOne) == 0
276              && "Bits known to be one AND zero?");
277       assert((KnownZero2 & KnownOne2) == 0
278              && "Bits known to be one AND zero?");
279       // Only known if known in both the LHS and RHS
280       KnownOne &= KnownOne2;
281       KnownZero &= KnownZero2;
286 //===----------------------------------------------------------------------===//
287 // Other Lowering Hooks
288 //===----------------------------------------------------------------------===//
// Custom lowering for ISD::SDIV: dispatch on the scalar element width.
// i64 -> LowerSDIV64 (currently a no-op stub), i32 -> LowerSDIV32,
// i16/i8 -> LowerSDIV24 (float-assisted small division); anything else
// is returned unchanged.
291 AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
292   EVT OVT = Op.getValueType();
294   if (OVT.getScalarType() == MVT::i64) {
295     DST = LowerSDIV64(Op, DAG);
296   } else if (OVT.getScalarType() == MVT::i32) {
297     DST = LowerSDIV32(Op, DAG);
298   } else if (OVT.getScalarType() == MVT::i16
299       || OVT.getScalarType() == MVT::i8) {
300     DST = LowerSDIV24(Op, DAG);
// Fallback: leave the node as-is for the legalizer.
302     DST = SDValue(Op.getNode(), 0);
// Custom lowering for ISD::SREM: dispatch on the scalar element width to
// the matching width-specific helper; unknown widths are returned
// unchanged (same pattern as LowerSDIV above).
308 AMDGPUTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const {
309   EVT OVT = Op.getValueType();
311   if (OVT.getScalarType() == MVT::i64) {
312     DST = LowerSREM64(Op, DAG);
313   } else if (OVT.getScalarType() == MVT::i32) {
314     DST = LowerSREM32(Op, DAG);
315   } else if (OVT.getScalarType() == MVT::i16) {
316     DST = LowerSREM16(Op, DAG);
317   } else if (OVT.getScalarType() == MVT::i8) {
318     DST = LowerSREM8(Op, DAG);
// Fallback: leave the node as-is for the legalizer.
320     DST = SDValue(Op.getNode(), 0);
// Custom lowering for SIGN_EXTEND_INREG: implemented as the classic
// (x << shift) >>a shift pair, where shift = width(x) - width(base type).
// Sub-32-bit sources are first zero-extended to a 32-bit (vector) type so
// the shifts operate on valid upper bits, then the result is converted
// back to the original type.
326 AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const {
327   SDValue Data = Op.getOperand(0);
328   VTSDNode *BaseType = cast<VTSDNode>(Op.getOperand(1));
330   EVT DVT = Data.getValueType();
331   EVT BVT = BaseType->getVT();
// Number of high bits to clear and re-sign: source width minus the
// width being extended from.
332   unsigned baseBits = BVT.getScalarType().getSizeInBits();
333   unsigned srcBits = DVT.isSimple() ? DVT.getScalarType().getSizeInBits() : 1;
334   unsigned shiftBits = srcBits - baseBits;
336     // If the op is less than 32 bits, then it needs to extend to 32bits
337     // so it can properly keep the upper bits valid.
338     EVT IVT = genIntType(32, DVT.isVector() ? DVT.getVectorNumElements() : 1);
339     Data = DAG.getNode(ISD::ZERO_EXTEND, DL, IVT, Data);
340     shiftBits = 32 - baseBits;
343   SDValue Shift = DAG.getConstant(shiftBits, DVT);
344   // Shift left by 'Shift' bits.
345   Data = DAG.getNode(ISD::SHL, DL, DVT, Data, Shift);
346   // Signed shift Right by 'Shift' bits.
347   Data = DAG.getNode(ISD::SRA, DL, DVT, Data, Shift);
349     // Once the sign extension is done, the op needs to be converted to
350     // its original type.
351     Data = DAG.getSExtOrTrunc(Data, DL, Op.getOperand(0).getValueType());
// Build an integer EVT of 'size' bits (32 or 64) with 'numEle' elements:
// returns the scalar MVT when the computed element count is 1, otherwise
// a vector of i32/i64. vEle = total bits / element width (>>5 for 32-bit,
// >>6 for 64-bit elements).
356 AMDGPUTargetLowering::genIntType(uint32_t size, uint32_t numEle) const {
357   int iSize = (size * numEle);
358   int vEle = (iSize >> ((size == 64) ? 6 : 5));
364       return EVT(MVT::i64);
366       return EVT(MVT::getVectorVT(MVT::i64, vEle));
370       return EVT(MVT::i32);
372       return EVT(MVT::getVectorVT(MVT::i32, vEle));
// Custom lowering for ISD::BRCOND: rebuild the conditional branch as the
// target-specific AMDGPUISD::BRANCH_COND node from the (chain, condition,
// destination) operands.
378 AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
379   SDValue Chain = Op.getOperand(0);
380   SDValue Cond  = Op.getOperand(1);
381   SDValue Jump  = Op.getOperand(2);
383   Result = DAG.getNode(
384       AMDGPUISD::BRANCH_COND,
// Lower 8/16-bit signed division via single-precision floating point:
// compute q = (float)a / (float)b, truncate toward zero, then add a
// correction term jq (the sign of a^b, i.e. +/-1) when the remainder
// magnitude |fr| >= |fb| indicates the truncated quotient is one off.
// Valid only while the operands fit losslessly in an f32 mantissa
// (hence "24": up to 24 significant bits).
392 AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
394   EVT OVT = Op.getValueType();
395   SDValue LHS = Op.getOperand(0);
396   SDValue RHS = Op.getOperand(1);
// Pick scalar or 2/4-element INTTY/FLTTY matching the operand shape
// (the selected types are set in branches elided from this view).
399   if (!OVT.isVector()) {
402   } else if (OVT.getVectorNumElements() == 2) {
405   } else if (OVT.getVectorNumElements() == 4) {
409   unsigned bitsize = OVT.getScalarType().getSizeInBits();
410   // char|short jq = ia ^ ib;
411   SDValue jq = DAG.getNode(ISD::XOR, DL, OVT, LHS, RHS);
413   // jq = jq >> (bitsize - 2)
414   jq = DAG.getNode(ISD::SRA, DL, OVT, jq, DAG.getConstant(bitsize - 2, OVT));
// jq is now 0 or -2 (sign-of-quotient mask); OR with 1 makes it +1/-1.
417   jq = DAG.getNode(ISD::OR, DL, OVT, jq, DAG.getConstant(1, OVT));
420   jq = DAG.getSExtOrTrunc(jq, DL, INTTY);
422   // int ia = (int)LHS;
423   SDValue ia = DAG.getSExtOrTrunc(LHS, DL, INTTY);
426   SDValue ib = DAG.getSExtOrTrunc(RHS, DL, INTTY);
428   // float fa = (float)ia;
429   SDValue fa = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ia);
431   // float fb = (float)ib;
432   SDValue fb = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ib);
434   // float fq = native_divide(fa, fb);
435   SDValue fq = DAG.getNode(AMDGPUISD::DIV_INF, DL, FLTTY, fa, fb);
// Truncate toward zero to get the candidate integer quotient.
438   fq = DAG.getNode(ISD::FTRUNC, DL, FLTTY, fq);
440   // float fqneg = -fq;
441   SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FLTTY, fq);
443   // float fr = mad(fqneg, fb, fa);
444   SDValue fr = DAG.getNode(ISD::FADD, DL, FLTTY,
445       DAG.getNode(ISD::MUL, DL, FLTTY, fqneg, fb), fa);
// iq: the truncated quotient as an integer.
448   SDValue iq = DAG.getNode(ISD::FP_TO_SINT, DL, INTTY, fq);
451   fr = DAG.getNode(ISD::FABS, DL, FLTTY, fr);
454   fb = DAG.getNode(ISD::FABS, DL, FLTTY, fb);
456   // int cv = fr >= fb;
// NOTE(review): both branches emit the identical SETCC — the vector
// branch presumably differed in the original; confirm in the full file.
458   if (INTTY == MVT::i32) {
459     cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);
461     cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);
463   // jq = (cv ? jq : 0);
464   jq = DAG.getNode(ISD::SELECT, DL, OVT, cv, jq,
465       DAG.getConstant(0, OVT));
// Final result: truncated quotient plus the +/-1 correction.
467   iq = DAG.getSExtOrTrunc(iq, DL, OVT);
468   iq = DAG.getNode(ISD::ADD, DL, OVT, iq, jq);
// Lower 32-bit signed division by reduction to unsigned division:
// take |LHS| and |RHS| via the mask trick (x + m) ^ m where m is the
// all-ones/zero sign mask, do UDIV, then re-apply the quotient's sign
// (sign(LHS) ^ sign(RHS)) with the same add/xor trick.
473 AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const {
475   EVT OVT = Op.getValueType();
476   SDValue LHS = Op.getOperand(0);
477   SDValue RHS = Op.getOperand(1);
478   // The LowerSDIV32 function generates equivalent to the following IL.
488   // ixor      r10, r10, r11
// r10 = -1 if LHS < 0 else 0 (sign mask of the dividend).
499   SDValue r10 = DAG.getSelectCC(DL,
500       r0, DAG.getConstant(0, OVT),
501       DAG.getConstant(-1, MVT::i32),
502       DAG.getConstant(0, MVT::i32),
// r11 = -1 if RHS < 0 else 0 (sign mask of the divisor).
506   SDValue r11 = DAG.getSelectCC(DL,
507       r1, DAG.getConstant(0, OVT),
508       DAG.getConstant(-1, MVT::i32),
509       DAG.getConstant(0, MVT::i32),
// |r0| = (r0 + mask) ^ mask (two's-complement absolute value).
513   r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
516   r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
519   r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
522   r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
// Unsigned divide of the magnitudes.
525   r0 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);
527   // ixor      r10, r10, r11
// Quotient sign mask = sign(LHS) ^ sign(RHS); apply it back.
528   r10 = DAG.getNode(ISD::XOR, DL, OVT, r10, r11);
531   r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
534   SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
// 64-bit signed division is not custom-lowered yet: return the original
// node unchanged (InitAMDILLowering never marks i64 SDIV as Custom).
539 AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const {
540   return SDValue(Op.getNode(), 0);
// Lower 8-bit signed remainder by promotion: sign-extend both operands
// to (a vector of) i32, perform SREM at 32 bits, then truncate back.
// INTTY is widened to the matching vector type for v2i8/v4i8 inputs
// (assignments elided in this view).
544 AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const {
546   EVT OVT = Op.getValueType();
547   MVT INTTY = MVT::i32;
548   if (OVT == MVT::v2i8) {
550   } else if (OVT == MVT::v4i8) {
553   SDValue LHS = DAG.getSExtOrTrunc(Op.getOperand(0), DL, INTTY);
554   SDValue RHS = DAG.getSExtOrTrunc(Op.getOperand(1), DL, INTTY);
555   LHS = DAG.getNode(ISD::SREM, DL, INTTY, LHS, RHS);
556   LHS = DAG.getSExtOrTrunc(LHS, DL, OVT);
// Lower 16-bit signed remainder by promotion to i32 — identical scheme
// to LowerSREM8: sign-extend, 32-bit SREM, truncate back. INTTY is
// widened to a vector type for v2i16/v4i16 (assignments elided in this
// view).
561 AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const {
563   EVT OVT = Op.getValueType();
564   MVT INTTY = MVT::i32;
565   if (OVT == MVT::v2i16) {
567   } else if (OVT == MVT::v4i16) {
570   SDValue LHS = DAG.getSExtOrTrunc(Op.getOperand(0), DL, INTTY);
571   SDValue RHS = DAG.getSExtOrTrunc(Op.getOperand(1), DL, INTTY);
572   LHS = DAG.getNode(ISD::SREM, DL, INTTY, LHS, RHS);
573   LHS = DAG.getSExtOrTrunc(LHS, DL, OVT);
// Lower 32-bit signed remainder via unsigned remainder: take |LHS| and
// |RHS| using sign masks (x + m) ^ m, compute UREM of the magnitudes
// via r0 - (r0 u% r1 ... reconstructed with UMUL), then re-apply the
// dividend's sign — matching C semantics where the remainder takes the
// sign of the dividend.
578 AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const {
580   EVT OVT = Op.getValueType();
581   SDValue LHS = Op.getOperand(0);
582   SDValue RHS = Op.getOperand(1);
583   // The LowerSREM32 function generates equivalent to the following IL.
// r10/r11 = all-ones when the respective operand is negative, else 0.
605   SDValue r10 = DAG.getSetCC(DL, OVT, r0, DAG.getConstant(0, OVT), ISD::SETLT);
608   SDValue r11 = DAG.getSetCC(DL, OVT, r1, DAG.getConstant(0, OVT), ISD::SETLT);
// Absolute values via (x + mask) ^ mask.
611   r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
614   r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
617   r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
620   r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
// Unsigned remainder of the magnitudes, then fold back through the
// multiply/subtract identity using the target's UMUL node.
623   SDValue r20 = DAG.getNode(ISD::UREM, DL, OVT, r0, r1);
626   r20 = DAG.getNode(AMDGPUISD::UMUL, DL, OVT, r20, r1);
629   r0 = DAG.getNode(ISD::SUB, DL, OVT, r0, r20);
// Re-apply the dividend's sign mask r10.
632   r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
635   SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
640 AMDGPUTargetLowering::LowerSREM64(SDValue Op, SelectionDAG &DAG) const {
641 return SDValue(Op.getNode(), 0);