//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"

using namespace llvm;
namespace {

/// Diagnostic information for unimplemented or unsupported feature reporting.
class DiagnosticInfoUnsupported : public DiagnosticInfo {
private:
  const Twine &Description;
  const Function &Fn;

  static int KindID;

  static int getKindID() {
    if (KindID == 0)
      KindID = llvm::getNextAvailablePluginDiagnosticKind();
    return KindID;
  }

public:
  DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
                            DiagnosticSeverity Severity = DS_Error)
    : DiagnosticInfo(getKindID(), Severity),
      Description(Desc),
      Fn(Fn) { }

  const Function &getFunction() const { return Fn; }
  const Twine &getDescription() const { return Description; }

  void print(DiagnosticPrinter &DP) const override {
    DP << "unsupported " << getDescription() << " in " << Fn.getName();
  }

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == getKindID();
  }
};

int DiagnosticInfoUnsupported::KindID = 0;

} // end anonymous namespace
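
// Typical usage, as in LowerCall further below: construct the diagnostic with
// the offending function and a description, then report it through the
// LLVMContext:
//
//   DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
//   DAG.getContext()->diagnose(NoCalls);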

static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
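
// For example, under the rules above a 16-bit v2i8 access would use i16,
// while a 128-bit v4f32 access would use v4i32.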

// Type for a vector that will be loaded to.
EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, 32);

  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Custom);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  if (!Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  if (!Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    // TODO: Implement custom UREM / SREM routines.
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3).
  setHasFloatingPointExceptions(false);

  setSelectIsExpensive(false);
  PredictableSelectIsExpensive = false;

  // There are no integer divide instructions, and these expand to a pretty
  // large sequence of instructions.
  setIntDivIsCheap(false);
  setPow2DivIsCheap(false);

  // TODO: Investigate this when 64-bit divides are implemented.
  addBypassSlowDiv(64, 32);

  // FIXME: Need to really handle these.
  MaxStoresPerMemcpy = 4096;
  MaxStoresPerMemmove = 4096;
  MaxStoresPerMemset = 4096;
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  case ISD::LOAD: {
    SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
    if (!Node)
      return;

    Results.push_back(SDValue(Node, 0));
    Results.push_back(SDValue(Node, 1));
    // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
    // function
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(Node, 1));
    break;
  }
  case ISD::STORE: {
    SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG);
    if (Lowered.getNode())
      Results.push_back(Lowered);
    break;
  }
  default:
    break;
  }
}

// FIXME: This implements accesses to initialized globals in the constant
// address space by copying them to private and accessing that. It does not
// properly handle illegal types or vectors. The private vector loads are not
// scalarized, and the illegal scalars hit an assertion. This technique will not
// work well with large initializers, and this should eventually be
// removed. Initialized globals should be placed into a data section that the
// runtime will load into a buffer before the kernel is executed. Uses of the
// global need to be replaced with a pointer loaded from an implicit kernel
// argument into this buffer holding the copy of the data, which will remove the
// need for any of this.
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getSubtargetImpl()->getDataLayout();
  SDLoc DL(InitPtr);
  Type *InitTy = Init->getType();

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(InitTy));
  }

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CFP->getType()));
  }

  if (StructType *ST = dyn_cast<StructType>(InitTy)) {
    const StructLayout *SL = TD->getStructLayout(ST);

    EVT PtrVT = InitPtr.getValueType();
    SmallVector<SDValue, 8> Chains;

    for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
      SDValue Offset = DAG.getConstant(SL->getElementOffset(I), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(I);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
    EVT PtrVT = InitPtr.getValueType();

    unsigned NumElements;
    if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
      NumElements = AT->getNumElements();
    else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
      NumElements = VT->getNumElements();
    else
      llvm_unreachable("Unexpected type");

    unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * EltSize, PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(i);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (isa<UndefValue>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(InitTy));
  }

  llvm_unreachable("Unhandled constant initializer");
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getSubtargetImpl()->getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(AMDGPUAS::LOCAL_ADDRESS));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    MVT PrivPtrVT = getPointerTy(AMDGPUAS::PRIVATE_ADDRESS);
    MVT ConstPtrVT = getPointerTy(AMDGPUAS::CONSTANT_ADDRESS);

    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);

    const GlobalVariable *Var = cast<GlobalVariable>(GV);
    if (!Var->hasInitializer()) {
      // This has no use, but bugpoint will hit it.
      return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
    }

    const Constant *Init = Var->getInitializer();
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops);
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
  }
  }
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  DAG.ExtractVectorElements(A, Args);
  DAG.ExtractVectorElements(B, Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
      getTargetMachine().getSubtargetImpl()->getFrameLowering());

  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDGPU_abs:
    case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);
    case AMDGPUIntrinsic::AMDGPU_fract:
    case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_clamp:
    case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
      return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_div_scale: {
      // 3rd parameter required to be a constant.
      const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
      if (!Param)
        return DAG.getUNDEF(VT);

      // Translate to the operands expected by the machine instruction. The
      // first source operand must be the one selected by the constant: the
      // numerator when it is all-ones, otherwise the denominator.
      SDValue Numerator = Op.getOperand(1);
      SDValue Denominator = Op.getOperand(2);
      SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

      return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                         Denominator, Numerator);
    }

    case Intrinsic::AMDGPU_div_fmas:
      return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_div_fixup:
      return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_trig_preop:
      return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case Intrinsic::AMDGPU_rcp:
      return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_rsq:
      return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_legacy_rsq:
      return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_rsq_clamped:
      return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umul24:
      return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_imul24:
      return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umad24:
      return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_imad24:
      return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_bfe_i32:
      return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfe_u32:
      return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfi:
      return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfm:
      return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_brev:
      return DAG.getNode(AMDGPUISD::BREV, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_trunc: // Legacy name.
      return DAG.getNode(ISD::FTRUNC, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::CombineMinMax(SDNode *N,
                                            SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue True = N->getOperand(2);
  SDValue False = N->getOperand(3);
  SDValue CC = N->getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    unsigned Opc = (LHS == True) ? AMDGPUISD::FMIN : AMDGPUISD::FMAX;
    return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    unsigned Opc = (LHS == True) ? AMDGPUISD::FMAX : AMDGPUISD::FMIN;
    return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

SDValue AMDGPUTargetLowering::ScalarizeVectorLoad(const SDValue Op,
                                                  SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT MemVT = Load->getMemoryVT();
  EVT MemEltVT = MemVT.getVectorElementType();

  EVT LoadVT = Op.getValueType();
  EVT EltVT = LoadVT.getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();

  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SmallVector<SDValue, 8> Chains;

  SDLoc SL(Op);
  unsigned MemEltSize = MemEltVT.getStoreSize();
  MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());

  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                              DAG.getConstant(i * MemEltSize, PtrVT));

    SDValue NewLoad
      = DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                       Load->getChain(), Ptr,
                       SrcValue.getWithOffset(i * MemEltSize),
                       MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                       Load->isInvariant(), Load->getAlignment());
    Loads.push_back(NewLoad.getValue(0));
    Chains.push_back(NewLoad.getValue(1));
  }

  SDValue Ops[] = {
    DAG.getNode(ISD::BUILD_VECTOR, SL, LoadVT, Loads),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains)
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return ScalarizeVectorLoad(Op, DAG);

  LoadSDNode *Load = cast<LoadSDNode>(Op);
  SDValue BasePtr = Load->getBasePtr();
  EVT PtrVT = BasePtr.getValueType();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);
  MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  SDValue LoLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                     Load->getChain(), BasePtr,
                     SrcValue,
                     LoMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), Load->getAlignment());

  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(LoMemVT.getStoreSize(), PtrVT));

  SDValue HiLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
                     Load->getChain(), HiPtr,
                     SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), Load->getAlignment());

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
  // truncating store into an i32 store.
  // XXX: We could also optimize other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);
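
  // The loop below ORs each truncated element into a single i32. A rough
  // sketch of the result for a v4i8 truncating store (illustrative only):
  //   packed = (e0 & 0xff)
  //          | ((e1 & 0xff) << 8)
  //          | ((e2 & 0xff) << 16)
  //          | ((e3 & 0xff) << 24)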
  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

SDValue AMDGPUTargetLowering::ScalarizeVectorStore(SDValue Op,
                                                   SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  unsigned EltSize = MemEltVT.getStoreSize();
  MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(),
                              DAG.getConstant(i, MVT::i32));

    SDValue Offset = DAG.getConstant(i * MemEltVT.getStoreSize(), PtrVT);
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Store->getBasePtr(), Offset);
    SDValue NewStore =
      DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                        SrcValue.getWithOffset(i * EltSize),
                        MemEltVT, Store->isNonTemporal(), Store->isVolatile(),
                        Store->getAlignment());
    Chains.push_back(NewStore);
  }

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return ScalarizeVectorStore(Op, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  EVT PtrVT = BasePtr.getValueType();
  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(LoMemVT.getStoreSize(), PtrVT));

  MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());
  SDValue LoStore
    = DAG.getTruncStore(Chain, SL, Lo,
                        BasePtr,
                        SrcValue,
                        LoMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        Store->getAlignment());
  SDValue HiStore
    = DAG.getTruncStore(Chain, SL, Hi,
                        HiPtr,
                        SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                        HiMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        Store->getAlignment());

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());

    SDValue Ops[] = {
      DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32),
      ExtLoad32.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS ||
      Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();
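
  // Private (scratch) memory is modeled here as a dword-addressed register
  // file, so a sub-dword extload is emulated by loading the containing dword
  // and shifting the addressed byte down. Sketch of the idea (not the literal
  // DAG built below):
  //   dword = REGISTER_LOAD(ptr >> 2)
  //   value = dword >> ((ptr & 3) * 8)
  // followed by the sign- or zero-extension handled at the end.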
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);

    SDValue Ops[] = {
      DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode),
      Load->getChain()
    };

    return DAG.getMergeValues(Ops, DL);
  }

  SDValue Ops[] = {
    DAG.getZeroExtendInReg(Ret, DL, MemEltVT),
    Load->getChain()
  };

  return DAG.getMergeValues(Ops, DL);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return ScalarizeVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
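
    // Sub-dword private stores are emulated with a read-modify-write of the
    // containing dword; sketch of the idea (not the literal DAG built below):
    //   old   = REGISTER_LOAD(ptr >> 2)
    //   shift = (ptr & 3) * 8
    //   new   = (old & ~(mask << shift)) | ((value & mask) << shift)
    //   REGISTER_STORE(ptr >> 2, new)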
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}

// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit integer.
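//
// (For context: an f32 significand has 24 bits, i.e. 23 stored fraction bits
// plus the implicit leading one, so any integer of magnitude below 2^24
// converts to f32 and back without rounding error; that is what makes this
// shortcut exact for 24-bit operands.)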
SDValue AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  if (VT.isVector()) {
    unsigned NElts = VT.getVectorNumElements();
    IntVT = MVT::getVectorVT(MVT::i32, NElts);
    FltVT = MVT::getVectorVT(MVT::f32, NElts);
  }

  unsigned BitSize = VT.getScalarType().getSizeInBits();

  // char|short jq = ia ^ ib;
  SDValue jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

  // jq = jq >> (bitsize - 2)
  jq = DAG.getNode(ISD::SRA, DL, VT, jq, DAG.getConstant(BitSize - 2, VT));

  // jq = jq | 0x1
  jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, VT));

  // jq = (int)jq
  jq = DAG.getSExtOrTrunc(jq, DL, IntVT);

  // int ia = (int)LHS;
  SDValue ia = DAG.getSExtOrTrunc(LHS, DL, IntVT);

  // int ib = (int)RHS;
  SDValue ib = DAG.getSExtOrTrunc(RHS, DL, IntVT);

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ISD::SINT_TO_FP, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ISD::SINT_TO_FP, DL, FltVT, ib);

  // float fq = native_divide(fa, fb);
  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);

  // float fr = mad(fqneg, fb, fa);
  SDValue fr = DAG.getNode(ISD::FADD, DL, FltVT,
                           DAG.getNode(ISD::FMUL, DL, FltVT, fqneg, fb), fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), VT);

  // int cv = fr >= fb;
  SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, VT));

  // dst = iq + jq;
  iq = DAG.getSExtOrTrunc(iq, DL, VT);
  return DAG.getNode(ISD::ADD, DL, VT, iq, jq);
}

SDValue AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT OVT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
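
  // Strategy: reduce signed division to unsigned division. In two's
  // complement, (x + s) ^ s computes abs(x) when s is (x < 0 ? -1 : 0), and
  // the quotient's sign is restored afterwards with the same add/xor trick
  // using the xor of the two sign masks.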
  // The LowerSDIV32 function generates equivalent to the following IL:
  // mov r0, LHS
  // mov r1, RHS
  // ilt r10, r0, 0
  // ilt r11, r1, 0
  // iadd r0, r0, r10
  // iadd r1, r1, r11
  // ixor r0, r0, r10
  // ixor r1, r1, r11
  // udiv r0, r0, r1
  // ixor r10, r10, r11
  // iadd r0, r0, r10
  // ixor DST, r0, r10
  SDValue r0 = LHS;
  SDValue r1 = RHS;

  SDValue r10 = DAG.getSelectCC(DL,
      r0, DAG.getConstant(0, OVT),
      DAG.getConstant(-1, OVT),
      DAG.getConstant(0, OVT),
      ISD::SETLT);

  SDValue r11 = DAG.getSelectCC(DL,
      r1, DAG.getConstant(0, OVT),
      DAG.getConstant(-1, OVT),
      DAG.getConstant(0, OVT),
      ISD::SETLT);

  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
  r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
  r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
  r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
  r0 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);

  // ixor r10, r10, r11
  r10 = DAG.getNode(ISD::XOR, DL, OVT, r10, r11);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // ixor DST, r0, r10
  SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
  return DST;
}

SDValue AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const {
  return SDValue(Op.getNode(), 0);
}

SDValue AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT OVT = Op.getValueType().getScalarType();

  if (OVT == MVT::i32) {
    if (DAG.ComputeNumSignBits(Op.getOperand(0)) > 8 &&
        DAG.ComputeNumSignBits(Op.getOperand(1)) > 8) {
      // TODO: We technically could do this for i64, but shouldn't that just be
      // handled by something generally reducing 64-bit division on 32-bit
      // values to 32-bit?
      return LowerSDIV24(Op, DAG);
    }

    return LowerSDIV32(Op, DAG);
  }

  assert(OVT == MVT::i64);
  return LowerSDIV64(Op, DAG);
}

SDValue AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT OVT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  // The LowerSREM32 function generates equivalent to the following IL:
  // mov r0, LHS
  // mov r1, RHS
  // ilt r10, r0, 0
  // ilt r11, r1, 0
  // iadd r0, r0, r10
  // iadd r1, r1, r11
  // ixor r0, r0, r10
  // ixor r1, r1, r11
  // urem r20, r0, r1
  // umul r20, r20, r1
  // isub r0, r0, r20
  // iadd r0, r0, r10
  // ixor DST, r0, r10
  SDValue r0 = LHS;
  SDValue r1 = RHS;

  // ilt r10, r0, 0
  SDValue r10 = DAG.getSetCC(DL, OVT, r0, DAG.getConstant(0, OVT), ISD::SETLT);

  // ilt r11, r1, 0
  SDValue r11 = DAG.getSetCC(DL, OVT, r1, DAG.getConstant(0, OVT), ISD::SETLT);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // iadd r1, r1, r11
  r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);

  // ixor r0, r0, r10
  r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);

  // ixor r1, r1, r11
  r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);

  // urem r20, r0, r1
  SDValue r20 = DAG.getNode(ISD::UREM, DL, OVT, r0, r1);

  // umul r20, r20, r1
  r20 = DAG.getNode(AMDGPUISD::UMUL, DL, OVT, r20, r1);

  // isub r0, r0, r20
  r0 = DAG.getNode(ISD::SUB, DL, OVT, r0, r20);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // ixor DST, r0, r10
  SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
  return DST;
}

SDValue AMDGPUTargetLowering::LowerSREM64(SDValue Op, SelectionDAG &DAG) const {
  return SDValue(Op.getNode(), 0);
}

SDValue AMDGPUTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const {
  EVT OVT = Op.getValueType();

  if (OVT.getScalarType() == MVT::i64)
    return LowerSREM64(Op, DAG);

  if (OVT.getScalarType() == MVT::i32)
    return LowerSREM32(Op, DAG);

  return SDValue(Op.getNode(), 0);
}

SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);
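
  // Overview of the expansion: start from the hardware reciprocal
  // approximation RCP ~= 2^32 / Den, estimate its rounding error, correct RCP
  // by that error, form a candidate quotient with a high-half multiply against
  // Num, and then fix up the quotient and remainder by at most one unit in
  // either direction.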

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);

  SDValue Ops[] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, DL);
}

SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Zero = DAG.getConstant(0, VT);
  SDValue NegOne = DAG.getConstant(-1, VT);

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
  SDValue RSign = LHSign; // Remainder sign is the same as LHS
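
  // (x + sign) ^ sign negates x when sign is -1 and leaves it unchanged when
  // sign is 0, so the ADD/XOR pairs below take absolute values; UDIVREM then
  // does the unsigned work, and the XOR/SUB pairs afterwards re-apply the
  // signs (the inverse of the same trick).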
1754 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
1755 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
1757 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
1758 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
1760 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
1761 SDValue Rem = Div.getValue(1);
1763 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
1764 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
1766 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
1767 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
1773 return DAG.getMergeValues(Res, DL);
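
// Illustrative identity (not part of the lowering): with s = (x < 0 ? -1 : 0),
// (x + s) ^ s is |x| in two's complement; e.g. x = -7 gives (-7 + -1) ^ -1 =
// -8 ^ -1 = 7. The inverse, (v ^ s) - s, restores the sign afterwards, which
// is why signed divrem reduces to UDIVREM on the absolute values here.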

SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
  const SDValue One = DAG.getConstantFP(1.0, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);

  SDValue Gt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Gt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
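
// Illustrative example (not part of the lowering): ceil(2.5) computes
// trunc(2.5) = 2.0, sees src > 0.0 and src != trunc, and adds 1.0 to give
// 3.0. ceil(-2.5) keeps trunc(-2.5) = -2.0 because the src > 0.0 test fails.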

SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  const SDValue Zero = DAG.getConstant(0, MVT::i32);
  const SDValue One = DAG.getConstant(1, MVT::i32);

  SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);

  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;

  // Extract the exponent.
  SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_I32, SL, MVT::i32,
                                Hi,
                                DAG.getConstant(FractBits - 32, MVT::i32),
                                DAG.getConstant(ExpBits, MVT::i32));
  SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
                            DAG.getConstant(1023, MVT::i32));

  // Extract the sign bit.
  const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, MVT::i32);
  SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);

  // Extend back to 64 bits.
  SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                  Zero, SignBit);
  SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);

  SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
  const SDValue FractMask
    = DAG.getConstant((UINT64_C(1) << FractBits) - 1, MVT::i64);

  SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
  SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);

  const SDValue FiftyOne = DAG.getConstant(FractBits - 1, MVT::i32);

  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);

  SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);

  return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
}
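
// Illustrative example (not part of the lowering): src = 2.75 is
// 0x4006000000000000 with unbiased exponent Exp = 1, so FractMask >> 1 leaves
// the low 51 bits set, and clearing them zeroes every fraction bit worth less
// than 1.0, giving 2.0 (0x4000000000000000). Exp < 0 means |src| < 1.0 and
// only the sign survives; Exp > 51 means there are no fractional bits left to
// clear, so src passes through unchanged.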

SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
  SDValue C1 = DAG.getConstantFP(C1Val, MVT::f64);
  SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);

  SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
  SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);

  SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);

  APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
  SDValue C2 = DAG.getConstantFP(C2Val, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
  SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);

  return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
}
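
// Illustrative example (not part of the lowering): doubles carry 52 fraction
// bits, so adding copysign(2^52, src) forces rounding at the units place;
// 2.6 + 2^52 rounds to 2^52 + 3, and subtracting 2^52 back leaves 3.0.
// Magnitudes above C2 = 2^52 - 0.5 are already integral, so the final select
// returns src unchanged for them.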

SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op,
                                              SelectionDAG &DAG) const {
  // FNEARBYINT and FRINT are the same, except in their handling of FP
  // exceptions. Those aren't really meaningful for us, and OpenCL only has
  // rint, so just treat them as equivalent.
  return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(),
                     Op.getOperand(0));
}

SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src)
  // if (src < 0.0 && src != result)
  //   result += -1.0

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
  const SDValue NegOne = DAG.getConstantFP(-1.0, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
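
// Illustrative example (not part of the lowering): floor(-2.5) becomes
// trunc(-2.5) + -1.0 = -3.0, while floor(2.5) stays at trunc(2.5) = 2.0
// because the src < 0.0 test fails and 0.0 is added instead.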

SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}
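
// Illustrative example (not part of the lowering): for S0 = 0x100000005,
// Lo = 5 and Hi = 1, so the result is 5.0f + 1.0f * 2^32. At f32 precision
// the small addend is absorbed by rounding, leaving 2^32 as the nearest
// representable value.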

SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Signed shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}
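
// Illustrative example (not part of the expansion): sign-extending an i8
// held in an i32 uses BitsDiff = 24: 0x000000FF << 24 = 0xFF000000, and the
// arithmetic shift right by 24 smears the sign bit back down, producing
// 0xFFFFFFFF (-1).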

SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}

static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}
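
// Illustrative example (not part of the check): a value known to be
// sext(i16) within an i32 has at least 17 sign bits, so 32 - 17 = 15 < 24
// and it qualifies as a signed 24-bit multiply operand; isU24 reasons the
// same way from known-zero leading bits.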

static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}

template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
                               uint32_t Offset, uint32_t Width) {
  if (Width + Offset < 32) {
    IntTy Result = (Src0 << (32 - Offset - Width)) >> (32 - Width);
    return DAG.getConstant(Result, MVT::i32);
  }

  return DAG.getConstant(Src0 >> Offset, MVT::i32);
}
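
// Illustrative example (not part of the fold): extracting Offset = 4,
// Width = 8 from Src0 = 0x00ABCDEF as an unsigned field computes
// (0x00ABCDEF << 20) >> 24 = 0xDE, the byte spanning bits [4, 12). For the
// int32_t instantiation the arithmetic right shift also replicates the
// field's sign bit.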

static bool usesAllNormalStores(SDNode *LoadVal) {
  for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
    if (!ISD::isNormalStore(*I))
      return false;
  }

  return true;
}

// If we have a copy of an illegal type, replace it with a load / store of an
// equivalently sized legal type. This avoids intermediate bit pack / unpack
// instructions emitted when handling extloads and truncstores. Ideally we
// could recognize the pack / unpack pattern to eliminate it.
SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  if (!DCI.isBeforeLegalize())
    return SDValue();

  StoreSDNode *SN = cast<StoreSDNode>(N);
  SDValue Value = SN->getValue();
  EVT VT = Value.getValueType();

  if (isTypeLegal(VT) || SN->isVolatile() ||
      !ISD::isNormalLoad(Value.getNode()))
    return SDValue();

  LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
  if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
    return SDValue();

  EVT MemVT = LoadVal->getMemoryVT();

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;
  EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);

  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
                                LoadVT, SL,
                                LoadVal->getChain(),
                                LoadVal->getBasePtr(),
                                LoadVal->getOffset(),
                                LoadVT,
                                LoadVal->getMemOperand());

  SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
  DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);

  return DAG.getStore(SN->getChain(), SL, NewLoad,
                      SN->getBasePtr(), SN->getMemOperand());
}
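
// Illustrative example (not part of the combine): a v8i8 value (illegal
// here) loaded and then stored would otherwise be unpacked into scalars and
// repacked by the extload / truncstore handling; rewriting it as a v2i32
// load feeding a v2i32 store keeps it a single 64-bit memory round trip.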

SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (VT.isVector() || VT.getSizeInBits() > 32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue Mul;

  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
    N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
    Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
  } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
    N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
    Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
  } else {
    return SDValue();
  }

  // We need to use sext even for MUL_U24, because MUL_U24 is used
  // for signed multiplies of 8 and 16-bit types.
  return DAG.getSExtOrTrunc(Mul, DL, VT);
}
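
// Illustrative example (not part of the combine): multiplying two values
// zero-extended from i16 leaves at least 16 leading known-zero bits, so both
// operands pass isU24 and the 32-bit ISD::MUL becomes MUL_U24, which maps to
// the hardware's 24-bit integer multiply.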

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch (N->getOpcode()) {
  default:
    break;
  case ISD::MUL:
    return performMulCombine(N, DCI);
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    simplifyI24(N0, DCI);
    simplifyI24(N1, DCI);
    return SDValue();
  }
  case ISD::SELECT_CC: {
    return CombineMinMax(N, DAG);
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of
        // existing DAG combines. If not eliminated, we will match back to BFE
        // during selection.

        // TODO: The sext_inreg of extended types ends up here, although we
        // could handle them in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }
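
    // Illustrative example (not part of the combine): (BFE_I32 x, 0, 24)
    // needs 32 - 24 + 1 = 9 sign bits; if ComputeNumSignBits already proves
    // x has that many, the BFE folds to x itself. Otherwise it becomes
    // sign_extend_inreg i24, which re-matches to BFE during selection if
    // nothing else folds it.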

    if (ConstantSDNode *Val = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        Val->getSExtValue(),
                                        OffsetVal,
                                        WidthVal);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       Val->getZExtValue(),
                                       OffsetVal,
                                       WidthVal);
    }

    APInt Demanded = APInt::getBitsSet(32,
                                       OffsetVal,
                                       OffsetVal + WidthVal);

    if ((OffsetVal + WidthVal) >= 32) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
        TLI.SimplifyDemandedBits(BitsFrom, Demanded, KnownZero, KnownOne, TLO)) {
      DCI.CommitTargetLoweringOpt(TLO);
    }

    break;
  }

  case ISD::STORE:
    return performStoreCombine(N, DCI);
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}
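
// Illustrative example (not part of the rewrite): a v4f32 argument that the
// calling convention scalarized arrives as four InputArgs with ArgVT = v4f32
// but VT = f32; the first branch restores the element type f32 as the
// original type recorded for each piece.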

bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(TRIG_PREOP)
  NODE_NAME_CASE(RSQ_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMPED)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BREV)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(CONST_DATA_PTR)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}

static void computeKnownBitsForMinMax(const SDValue Op0,
                                      const SDValue Op1,
                                      APInt &KnownZero,
                                      APInt &KnownOne,
                                      const SelectionDAG &DAG,
                                      unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
  DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}
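
// The intersection is conservatively correct for all four min / max variants:
// the result is always one of the two operands, so a bit is only known if it
// is known to be the same in both inputs.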

void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op,
    APInt &KnownZero,
    APInt &KnownOne,
    const SelectionDAG &DAG,
    unsigned Depth) const {

  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::SMAX:
  case AMDGPUISD::UMAX:
  case AMDGPUISD::SMIN:
  case AMDGPUISD::UMIN:
    computeKnownBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
                              KnownZero, KnownOne, DAG, Depth);
    break;

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    unsigned BitWidth = 32;
    uint32_t Width = CWidth->getZExtValue() & 0x1f;
    if (Width == 0) {
      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getNullValue(BitWidth);
      return;
    }

    // FIXME: This could do a lot more. If offset is 0, should be the same as
    // sign_extend_inreg implementation, but that involves duplicating it.
    if (Opc == AMDGPUISD::BFE_U32)
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
    // For BFE_I32 the high bits are copies of the extracted field's sign bit,
    // so nothing definite is known about them here.

    break;
  }
  }
}
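
// Illustrative example (not part of the analysis): (BFE_U32 x, o, 8) always
// has its top 24 bits known zero regardless of x or the offset, which lets
// later combines treat byte extracts as already zero-extended values.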

unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op,
    const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Offset || !Offset->isNullValue())
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
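
    // Illustrative example (not part of the computation): Width = 8 yields
    // 32 - 8 = 24 known sign bits, matching the zero-extension of the
    // extracted byte.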