//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"

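// Helper for the generated calling-convention table: allocates the value's
// slot on the stack and records the location. With 4-byte alignment two i32
// arguments would, for example, land at offsets 0 and 4.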
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  // Initialize target lowering borrowed from AMDIL
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL
  setOperationAction(ISD::ROTL, MVT::i32, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
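  // In effect, an f32 store is selected as if it were an i32 store of the
  // bitcast value, so only the integer store patterns need to be written.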
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
  setOperationAction(ISD::FNEG, MVT::v4f32, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };
  const size_t NumIntTypes = array_lengthof(IntTypes);

  for (unsigned int x = 0; x < NumIntTypes; ++x) {
    MVT::SimpleValueType VT = IntTypes[x];
    // Expand the following operations for the current type by default
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
  }

  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };
  const size_t NumFloatTypes = array_lengthof(FloatTypes);

  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
    MVT::SimpleValueType VT = FloatTypes[x];
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
  }
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();
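
  // The cast is only unprofitable when it would recast a load of 32-bit (or
  // wider) scalars as sub-dword elements, which may later be split into
  // multiple small loads.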
  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
    const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                 TD->getPrefTypeAlignment(CI->getType()));
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                 TD->getPrefTypeAlignment(CFP->getType()));
  } else if (Init->getType()->isAggregateType()) {
    EVT PtrVT = InitPtr.getValueType();
    unsigned NumElements = Init->getType()->getArrayNumElements();
    SmallVector<SDValue, 8> Chains;
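    // Recurse on each aggregate element, storing it at the base pointer plus
    // its element offset; the per-element chains are merged with a
    // TokenFactor below.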
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
                           Init->getType()->getArrayElementType()), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
      Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
                                                GV, Ptr, Chain, DAG));
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0],
                       Chains.size());
  } else {
    Init->dump();
    llvm_unreachable("Unhandled constant initializer");
  }
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    const GlobalVariable *Var = cast<GlobalVariable>(GV);
    const Constant *Init = Var->getInitializer();
    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD &&
          I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr,
                                             DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, &Ops[0], Ops.size());
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
        getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
  }
  }
}

void AMDGPUTargetLowering::ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
                                                 SmallVectorImpl<SDValue> &Args,
                                                 unsigned Start,
                                                 unsigned Count) const {
  EVT VT = Op.getValueType();
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
                               VT.getVectorElementType(),
                               Op, DAG.getConstant(i, MVT::i32)));
  }
}

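// CONCAT_VECTORS is lowered by extracting every element of both source
// vectors and rebuilding the result with a single BUILD_VECTOR node.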
SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  ExtractVectorElements(A, DAG, Args, 0,
                        A.getValueType().getVectorNumElements());
  ExtractVectorElements(B, DAG, Args, 0,
                        B.getValueType().getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     &Args[0], Args.size());
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  EVT VT = Op.getValueType();
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  ExtractVectorElements(Op.getOperand(0), DAG, Args, Start,
                        VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     &Args[0], Args.size());
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
   static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDIL_abs:
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_exp:
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_fraction:
      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDIL_max:
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDIL_min:
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDIL_round_nearest:
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
    SelectionDAG &DAG) const {

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                              Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
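/// This expands to a * b + (1 - a) * c; for example, LRP(0.25, 8.0, 4.0)
/// evaluates to 0.25 * 8.0 + 0.75 * 4.0 = 5.0.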
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
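/// Matches an f32 select_cc whose operands form a min/max idiom, e.g.
/// (select_cc lt, a, b, a, b) becomes FMIN(a, b); anything else is returned
/// unchanged.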
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return Op;
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return Op;
}

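// Split a vector load into one scalar extending load per element; used for
// address spaces that cannot handle a full-width vector load directly.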
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                      Load->getChain(), Ptr,
                      MachinePointerInfo(Load->getMemOperand()->getValue()),
                      MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                      Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(), &Loads[0],
                     Loads.size());
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack a
  // 32-bit vector truncating store into a single i32 store.
  // XXX: We could also optimize other vector bitwidths.
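  // For example, a truncating store of v4i32 to v4i8 becomes one i32 store
  // of (e0 & 0xFF) | ((e1 & 0xFF) << 8) | ((e2 & 0xFF) << 16)
  // | ((e3 & 0xFF) << 24).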
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  const SDValue &Value = Store->getValue();
  EVT VT = Value.getValueType();
  const SDValue &Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
  SDValue Mask;
  switch(MemEltBits) {
  case 8:
    Mask = DAG.getConstant(0xFF, PackedVT);
    break;
  case 16:
    Mask = DAG.getConstant(0xFFFF, PackedVT);
    break;
  default:
    llvm_unreachable("Cannot lower this vector store");
  }
  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    EVT ElemVT = VT.getVectorElementType();
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
    Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
    SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
    Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
    }
  }
  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      MachinePointerInfo(Store->getMemOperand()->getValue()),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

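// Scalarize a vector store into one truncating store per element at the
// appropriate byte offsets, merged with a TokenFactor.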
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                              PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                       MachinePointerInfo(Store->getMemOperand()->getValue()),
                       MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                       Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, &Chains[0], NumElts);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();

  // Lower loads from the constant address space that refer to global
  // variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(GetUnderlyingObject(Load->getPointerInfo().V))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();
  unsigned Mask = 0;
  if (Load->getMemoryVT() == MVT::i8) {
    Mask = 0xff;
  } else if (Load->getMemoryVT() == MVT::i16) {
    Mask = 0xffff;
  }
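
  // Private memory is only dword-addressable: load the containing i32 and
  // extract the sub-dword element. A byte at address A lives in dword A >> 2
  // at bit offset (A & 3) * 8.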
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));
  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);
  Ret = DAG.getNode(ISD::AND, DL, MVT::i32, Ret,
                    DAG.getConstant(Mask, MVT::i32));
  if (ExtType == ISD::SEXTLOAD) {
    SDValue SExtShift = DAG.getConstant(
        VT.getSizeInBits() - MemVT.getSizeInBits(), MVT::i32);
    Ret = DAG.getNode(ISD::SHL, DL, MVT::i32, Ret, SExtShift);
    Ret = DAG.getNode(ISD::SRA, DL, MVT::i32, Ret, SExtShift);
  }

  return Ret;
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      Store->getMemoryVT().bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
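
    // Sub-dword private stores are read-modify-write on the containing
    // dword: load the i32, clear the destination lane with DstMask, OR in
    // the shifted value, and write the dword back.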
    SDValue TruncPtr = DAG.getZExtOrTrunc(Store->getBasePtr(), DL, MVT::i32);
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));
    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, TruncPtr,
                                  DAG.getConstant(0x3, MVT::i32));
    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));
    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());
    SDValue MaskedValue = DAG.getNode(ISD::AND, DL, MVT::i32, SExtValue,
                                      DAG.getConstant(Mask, MVT::i32));
    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);
    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                  DAG.getConstant(Mask, MVT::i32), ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }

  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  SmallVector<SDValue, 8> Results;
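
  // The sequence below performs 32-bit unsigned division without a hardware
  // divider: URECIP produces a fixed-point reciprocal 2^32 / Den with
  // rounding error e, the quotient estimate is refined with MULHU, and the
  // final selects correct the estimate by at most one in either direction.
  // Illustratively, for Num = 100 and Den = 7 the corrected result is
  // Quotient = 14 and Remainder = 100 - 14 * 7 = 2.
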
  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO, ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E, ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);

  SDValue Ops[2];
  Ops[0] = Div;
  Ops[1] = Rem;
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
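  // Convert the two i32 halves separately and recombine:
  // result = (float)Lo + (float)Hi * 2^32. For 2^32 itself, Lo = 0 and
  // Hi = 1, giving 0.0f + 1.0f * 4294967296.0f.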
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

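// On this hardware a floating-point "true" is exactly 1.0 and an integer
// "true" is all ones.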
bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}

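// Return an SDValue for the virtual register that carries the given physical
// live-in register, creating the virtual register and registering the
// live-in on first use.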
SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                  const TargetRegisterClass *RC,
                                                  unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}