//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUISelLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"

using namespace llvm;
namespace {

/// Diagnostic information for unimplemented or unsupported feature reporting.
class DiagnosticInfoUnsupported : public DiagnosticInfo {
  const Twine &Description;
  const Function &Fn;

  static int KindID;

  static int getKindID() {
    if (KindID == 0)
      KindID = llvm::getNextAvailablePluginDiagnosticKind();
    return KindID;
  }

public:
  DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
                            DiagnosticSeverity Severity = DS_Error)
    : DiagnosticInfo(getKindID(), Severity), Description(Desc), Fn(Fn) {}

  const Function &getFunction() const { return Fn; }
  const Twine &getDescription() const { return Description; }

  void print(DiagnosticPrinter &DP) const override {
    DP << "unsupported " << getDescription() << " in " << Fn.getName();
  }

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == getKindID();
  }
};

int DiagnosticInfoUnsupported::KindID = 0;

} // end anonymous namespace
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}
#include "AMDGPUGenCallingConv.inc"
// Find a larger type to do a load / store of a vector with.
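// For example, a <2 x i8> value (16 bits in memory) is handled as an i16,
// while a <4 x float> value (128 bits) becomes <4 x i32>.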
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
// Type for the register a vector will be loaded into.
EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, 32);

  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // We need to custom lower some of the intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
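  // For example, an f32 store is selected as an i32 store of the bitcast
  // value, so only the integer store patterns are needed.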
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);
  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);

    // The GPU does not have a divrem instruction for signed or unsigned
    // operands.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // The GPU does not have [S|U]MUL_LOHI as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }
  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  if (!Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  if (!Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    // TODO: Implement custom UREM / SREM routines.
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }
  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }
  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  setSelectIsExpensive(false);
  PredictableSelectIsExpensive = false;

  // There are no integer divide instructions, and these expand to a pretty
  // large sequence of instructions.
  setIntDivIsCheap(false);
  setPow2DivIsCheap(false);

  // TODO: Investigate this when 64-bit divides are implemented.
  addBypassSlowDiv(64, 32);

  // FIXME: Need to really handle these.
  MaxStoresPerMemcpy  = 4096;
  MaxStoresPerMemmove = 4096;
  MaxStoresPerMemset  = 4096;
}
//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}
// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}
//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}
bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}
//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {
  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//
SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}
SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}
void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  case ISD::LOAD: {
    SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
    if (!Node)
      return;

    Results.push_back(SDValue(Node, 0));
    Results.push_back(SDValue(Node, 1));
    // XXX: LLVM seems not to replace the Chain value inside
    // CustomWidenLowerNode.
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(Node, 1));
    break;
  }
  case ISD::STORE: {
    SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG);
    if (Lowered.getNode())
      Results.push_back(Lowered);
    return;
  }
  default:
    return;
  }
}
// FIXME: This implements accesses to initialized globals in the constant
// address space by copying them to private and accessing that. It does not
// properly handle illegal types or vectors. The private vector loads are not
// scalarized, and the illegal scalars hit an assertion. This technique will not
// work well with large initializers, and this should eventually be
// removed. Initialized globals should be placed into a data section that the
// runtime will load into a buffer before the kernel is executed. Uses of the
// global need to be replaced with a pointer loaded from an implicit kernel
// argument into this buffer holding the copy of the data, which will remove the
// need for any of this.
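// Roughly, for an initialized global such as
//
//   __constant int arr[2] = {1, 2};
//
// a private stack object is created, a chain of element stores fills it in,
// and existing loads hanging off the entry node are rewired to depend on that
// store chain (see LowerGlobalAddress below).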
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  Type *InitTy = Init->getType();

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(InitTy));
  }

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CFP->getType()));
  }

  if (StructType *ST = dyn_cast<StructType>(InitTy)) {
    const StructLayout *SL = TD->getStructLayout(ST);

    EVT PtrVT = InitPtr.getValueType();
    SmallVector<SDValue, 8> Chains;

    for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
      SDValue Offset = DAG.getConstant(SL->getElementOffset(I), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(I);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
    EVT PtrVT = InitPtr.getValueType();

    unsigned NumElements;
    if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
      NumElements = AT->getNumElements();
    else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
      NumElements = VT->getNumElements();
    else
      llvm_unreachable("Unexpected type");

    unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * EltSize, PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(i);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (isa<UndefValue>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(InitTy));
  }

  llvm_unreachable("Unhandled constant initializer");
}
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    MVT PrivPtrVT = getPointerTy(AMDGPUAS::PRIVATE_ADDRESS);
    MVT ConstPtrVT = getPointerTy(AMDGPUAS::CONSTANT_ADDRESS);

    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);

    const GlobalVariable *Var = cast<GlobalVariable>(GV);
    if (!Var->hasInitializer()) {
      // This has no use, but bugpoint will hit it.
      return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
    }

    const Constant *Init = Var->getInitializer();
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;

      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops);
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
  }
  }
}
SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  DAG.ExtractVectorElements(A, Args);
  DAG.ExtractVectorElements(B, Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}
SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}
SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
   static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}
SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDGPU_abs:
    case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);
    case AMDGPUIntrinsic::AMDGPU_fract:
    case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_clamp:
    case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
      return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_div_scale: {
      // 3rd parameter required to be a constant.
      const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
      if (!Param)
        return DAG.getUNDEF(VT);

      // Translate to the operands expected by the machine instruction. The
      // first source operand is either the numerator or the denominator,
      // selected by the constant parameter.
      SDValue Numerator = Op.getOperand(1);
      SDValue Denominator = Op.getOperand(2);
      SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

      return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, VT,
                         Src0, Denominator, Numerator);
    }

    case Intrinsic::AMDGPU_div_fmas:
      return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_div_fixup:
      return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_trig_preop:
      return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case Intrinsic::AMDGPU_rcp:
      return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_rsq:
      return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_legacy_rsq:
      return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_rsq_clamped:
      return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umul24:
      return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_imul24:
      return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umad24:
      return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_imad24:
      return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_bfe_i32:
      return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfe_u32:
      return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfi:
      return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfm:
      return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_brev:
      return DAG.getNode(AMDGPUISD::BREV, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_trunc: // Legacy name.
      return DAG.getNode(ISD::FTRUNC, DL, VT, Op.getOperand(1));
  }
}
/// IABS(a) = SMAX(sub(0, a), a)
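/// For example, IABS(-5) = SMAX(0 - (-5), -5) = SMAX(5, -5) = 5.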
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}
/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
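/// For example, LRP(0.25, 8.0, 4.0) = 0.25 * 8.0 + 0.75 * 4.0 = 5.0; the
/// first operand blends between c (a = 0) and b (a = 1).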
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}
/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::CombineMinMax(SDNode *N,
                                            SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue True = N->getOperand(2);
  SDValue False = N->getOperand(3);
  SDValue CC = N->getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    unsigned Opc = (LHS == True) ? AMDGPUISD::FMIN : AMDGPUISD::FMAX;
    return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    unsigned Opc = (LHS == True) ? AMDGPUISD::FMAX : AMDGPUISD::FMIN;
    return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT LoadVT = Op.getValueType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();

  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SmallVector<SDValue, 8> Chains;

  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));

    SDValue NewLoad
      = DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                       Load->getChain(), Ptr,
                       MachinePointerInfo(Load->getMemOperand()->getValue()),
                       MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                       Load->getAlignment());
    Loads.push_back(NewLoad.getValue(0));
    Chains.push_back(NewLoad.getValue(1));
  }

  SDValue Ops[] = {
    DAG.getNode(ISD::BUILD_VECTOR, SL, LoadVT, Loads),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains)
  };

  return DAG.getMergeValues(Ops, SL);
}
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack a 32-bit or
  // narrower vector truncating store into a single i32 store.
  // XXX: We could also optimize other vector bitwidths.
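  // For example, a truncating store of a <4 x i32> value to v4i8 memory packs
  // the four low bytes into one i32, elt0 | (elt1 << 8) | (elt2 << 16) |
  // (elt3 << 24), and emits a single 32-bit store.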
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                              PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                         MachinePointerInfo(Store->getMemOperand()->getValue()),
                         MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                         Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
}
SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());

    SDValue Ops[] = {
      DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32),
      ExtLoad32.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  // Lower loads of constant-address-space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(
          GetUnderlyingObject(Load->getMemOperand()->getValue()))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
        DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op->getVTList(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);
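  // At this point Ret holds the loaded byte or halfword in its low bits. For
  // example, an extending i8 load from private address 0x...7 loads the word
  // at 0x...4, computes ByteIdx = 3 and ShiftAmt = 24, and shifts the wanted
  // byte down to bits [7:0] before the sign / zero extension below.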
  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);

    SDValue Ops[] = {
      DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode),
      Load->getChain()
    };

    return DAG.getMergeValues(Ops, DL);
  }

  SDValue Ops[] = {
    DAG.getZeroExtendInReg(Ret, DL, MemEltVT),
    Load->getChain()
  };

  return DAG.getMergeValues(Ops, DL);
}
SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }

    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                  DAG.getConstant(Mask, MVT::i32), ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
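    // This is a read-modify-write of the containing word. For example, an i8
    // store of 0xAB at private address 0x...1 loads the word at 0x...0, clears
    // bits [15:8] with DstMask, and ORs in 0xAB << 8 before writing back.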
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }

  return SDValue();
}
SDValue AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT OVT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT INTTY;
  MVT FLTTY;
  if (!OVT.isVector()) {
    INTTY = MVT::i32;
    FLTTY = MVT::f32;
  } else if (OVT.getVectorNumElements() == 2) {
    INTTY = MVT::v2i32;
    FLTTY = MVT::v2f32;
  } else if (OVT.getVectorNumElements() == 4) {
    INTTY = MVT::v4i32;
    FLTTY = MVT::v4f32;
  }
  unsigned bitsize = OVT.getScalarType().getSizeInBits();
  // char|short jq = ia ^ ib;
  SDValue jq = DAG.getNode(ISD::XOR, DL, OVT, LHS, RHS);

  // jq = jq >> (bitsize - 2)
  jq = DAG.getNode(ISD::SRA, DL, OVT, jq, DAG.getConstant(bitsize - 2, OVT));

  // jq = jq | 0x1
  jq = DAG.getNode(ISD::OR, DL, OVT, jq, DAG.getConstant(1, OVT));
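  // jq is now +1 when the signs of the operands agree and -1 when they
  // differ; it is the rounding correction conditionally added to the
  // truncated float quotient below.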
  // jq = (int)jq
  jq = DAG.getSExtOrTrunc(jq, DL, INTTY);

  // int ia = (int)LHS;
  SDValue ia = DAG.getSExtOrTrunc(LHS, DL, INTTY);

  // int ib = (int)RHS;
  SDValue ib = DAG.getSExtOrTrunc(RHS, DL, INTTY);

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ib);

  // float fq = native_divide(fa, fb);
  SDValue fq = DAG.getNode(ISD::FMUL, DL, FLTTY,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FLTTY, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FLTTY, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FLTTY, fq);

  // float fr = mad(fqneg, fb, fa);
  SDValue fr = DAG.getNode(ISD::FADD, DL, FLTTY,
                           DAG.getNode(ISD::FMUL, DL, FLTTY, fqneg, fb), fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ISD::FP_TO_SINT, DL, INTTY, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FLTTY, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FLTTY, fb);

  // int cv = fr >= fb;
  SDValue cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, OVT, cv, jq,
                   DAG.getConstant(0, OVT));

  // dst = iq + jq;
  iq = DAG.getSExtOrTrunc(iq, DL, OVT);
  iq = DAG.getNode(ISD::ADD, DL, OVT, iq, jq);
  return iq;
}
SDValue AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT OVT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  // The LowerSDIV32 function generates equivalent to the following IL:
  // mov r0, LHS
  // mov r1, RHS
  // ilt r10, r0, 0
  // ilt r11, r1, 0
  // iadd r0, r0, r10
  // iadd r1, r1, r11
  // ixor r0, r0, r10
  // ixor r1, r1, r11
  // udiv r0, r0, r1
  // ixor r10, r10, r11
  // iadd r0, r0, r10
  // ixor DST, r0, r10
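  // In other words: compute a sign mask (-1 or 0) for each operand, take
  // absolute values via the add/xor pairs, do an unsigned divide, then restore
  // the sign. For example, -7 / 2: r10 = -1, r11 = 0, |r0| = 7, |r1| = 2,
  // udiv gives 3, and the final iadd/ixor with r10 ^ r11 = -1 yields -3.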
  SDValue r0 = LHS;
  SDValue r1 = RHS;

  // ilt r10, r0, 0
  SDValue r10 = DAG.getSelectCC(DL,
      r0, DAG.getConstant(0, OVT),
      DAG.getConstant(-1, OVT),
      DAG.getConstant(0, OVT),
      ISD::SETLT);

  // ilt r11, r1, 0
  SDValue r11 = DAG.getSelectCC(DL,
      r1, DAG.getConstant(0, OVT),
      DAG.getConstant(-1, OVT),
      DAG.getConstant(0, OVT),
      ISD::SETLT);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // iadd r1, r1, r11
  r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);

  // ixor r0, r0, r10
  r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);

  // ixor r1, r1, r11
  r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);

  // udiv r0, r0, r1
  r0 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);

  // ixor r10, r10, r11
  r10 = DAG.getNode(ISD::XOR, DL, OVT, r10, r11);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // ixor DST, r0, r10
  SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
  return DST;
}
SDValue AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const {
  return SDValue(Op.getNode(), 0);
}
SDValue AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT OVT = Op.getValueType().getScalarType();

  if (OVT == MVT::i64)
    return LowerSDIV64(Op, DAG);

  if (OVT.getScalarType() == MVT::i32)
    return LowerSDIV32(Op, DAG);

  if (OVT == MVT::i16 || OVT == MVT::i8) {
    // FIXME: We should be checking for the masked bits. This isn't reached
    // because i8 and i16 are not legal types.
    return LowerSDIV24(Op, DAG);
  }

  return SDValue(Op.getNode(), 0);
}
SDValue AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT OVT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  // The LowerSREM32 function generates equivalent to the following IL:
  // mov r0, LHS
  // mov r1, RHS
  // ilt r10, r0, 0
  // ilt r11, r1, 0
  // iadd r0, r0, r10
  // iadd r1, r1, r11
  // ixor r0, r0, r10
  // ixor r1, r1, r11
  // udiv r20, r0, r1
  // umul r20, r20, r1
  // isub r0, r0, r20
  // iadd r0, r0, r10
  // ixor DST, r0, r10
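  // That is: take absolute values, compute the unsigned remainder as
  // |r0| - (|r0| / |r1|) * |r1|, then give the result the sign of LHS. For
  // example, -7 % 2: |r0| = 7, |r1| = 2, 7 - 3 * 2 = 1, re-signed to -1.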
  SDValue r0 = LHS;
  SDValue r1 = RHS;

  // ilt r10, r0, 0
  SDValue r10 = DAG.getSetCC(DL, OVT, r0, DAG.getConstant(0, OVT), ISD::SETLT);

  // ilt r11, r1, 0
  SDValue r11 = DAG.getSetCC(DL, OVT, r1, DAG.getConstant(0, OVT), ISD::SETLT);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // iadd r1, r1, r11
  r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);

  // ixor r0, r0, r10
  r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);

  // ixor r1, r1, r11
  r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);

  // udiv r20, r0, r1
  SDValue r20 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);

  // umul r20, r20, r1
  r20 = DAG.getNode(AMDGPUISD::UMUL, DL, OVT, r20, r1);

  // isub r0, r0, r20
  r0 = DAG.getNode(ISD::SUB, DL, OVT, r0, r20);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // ixor DST, r0, r10
  SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
  return DST;
}
SDValue AMDGPUTargetLowering::LowerSREM64(SDValue Op, SelectionDAG &DAG) const {
  return SDValue(Op.getNode(), 0);
}
SDValue AMDGPUTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const {
  EVT OVT = Op.getValueType();

  if (OVT.getScalarType() == MVT::i64)
    return LowerSREM64(Op, DAG);

  if (OVT.getScalarType() == MVT::i32)
    return LowerSREM32(Op, DAG);

  return SDValue(Op.getNode(), 0);
}
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
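  // Roughly: the quotient is approximated as mulhu(Num, RCP) and then
  // corrected by at most one in either direction, since the reciprocal error e
  // can push the estimate one step high or low. The selects below implement
  // that correction for both the quotient and the remainder.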
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                                     RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                           NEG_RCP_LO, RCP_LO,
                                           ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                     RCP_A_E, RCP_S_E, ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                                               Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);

  SDValue Ops[] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, DL);
}
SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Zero = DAG.getConstant(0, VT);
  SDValue NegOne = DAG.getConstant(-1, VT);

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
  SDValue RSign = LHSign; // Remainder sign is the same as LHS

  LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
  RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);

  LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
  RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
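  // (x + sign) ^ sign is a branchless absolute value: for x = -7 and
  // sign = -1, (-7 + -1) ^ -1 = -8 ^ -1 = 7. The inverse, (x ^ sign) - sign,
  // re-applies the sign to the unsigned results below.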
  SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
  SDValue Rem = Div.getValue(1);

  Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
  Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);

  Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
  Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);

  SDValue Res[2] = {
    Div,
    Rem
  };

  return DAG.getMergeValues(Res, DL);
}
SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0
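  // For example, ceil(2.3): trunc gives 2.0, and since 2.3 > 0 and 2.3 != 2.0
  // the result is 3.0; ceil(-2.3) keeps trunc's -2.0.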
  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
  const SDValue One = DAG.getConstantFP(1.0, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  const SDValue Zero = DAG.getConstant(0, MVT::i32);
  const SDValue One = DAG.getConstant(1, MVT::i32);

  SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);

  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;

  // Extract the exponent.
  SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_I32, SL, MVT::i32,
                                Hi,
                                DAG.getConstant(FractBits - 32, MVT::i32),
                                DAG.getConstant(ExpBits, MVT::i32));
  SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
                            DAG.getConstant(1023, MVT::i32));
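  // Exp is now the unbiased exponent: 0 for values in [1.0, 2.0), negative
  // for values below 1.0, and greater than 51 once every fraction bit already
  // belongs to the integral part.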
  // Extract the sign bit.
  const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, MVT::i32);
  SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);

  // Extend back to 64 bits.
  SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                  Zero, SignBit);
  SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);

  SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
  const SDValue FractMask
    = DAG.getConstant((UINT64_C(1) << FractBits) - 1, MVT::i64);

  SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
  SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);

  const SDValue FiftyOne = DAG.getConstant(FractBits - 1, MVT::i32);

  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);

  SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);

  return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
}
SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
  SDValue C1 = DAG.getConstantFP(C1Val, MVT::f64);
  SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);

  SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
  SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
1764 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
1766 APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
1767 SDValue C2 = DAG.getConstantFP(C2Val, MVT::f64);
1769 EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
1770 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
1772 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
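
// Illustrative note: for |X| < 2^52, adding and then subtracting 2^52
// (copysigned to match X) leaves no fraction bits in the intermediate sum, so
// the FADD itself performs the round-to-integer, e.g.
//   (3.7 + 0x1.0p+52) - 0x1.0p+52 == 4.0   // rounded to nearest
// Values with |X| > 0x1.fffffffffffffp+51 are already integral and are passed
// through unchanged by the final select.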
SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
  // FNEARBYINT and FRINT are the same, except in their handling of FP
  // exceptions. Those aren't really meaningful for us, and OpenCL only has
  // rint, so just treat them as equivalent.
  return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
}
SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src);
  // if (src < 0.0 && src != result)
  //   result += -1.0

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
  const SDValue NegOne = DAG.getConstantFP(-1.0, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
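
// A scalar model of the expansion above (illustrative sketch, assuming
// trunc() from <cmath> matches ISD::FTRUNC semantics):
//   double floorModel(double X) {
//     double T = trunc(X);
//     return T + ((X < 0.0 && X != T) ? -1.0 : 0.0);
//   }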
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  SDLoc DL(Op);

  // f32 uint_to_fp i64
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}
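
// Illustrative note: the i64 input is decomposed as X == Hi * 2^32 + Lo with
// each half converted from i32, e.g. X = 0x100000002 gives Hi = 1, Lo = 2 and
// the result 1.0f * 4294967296.0f + 2.0f.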
SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Arithmetic shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}
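
// Illustrative example: sign-extending from bit 8 of an i32 uses
// BitsDiff = 24:
//   int32_t sext8(int32_t X) { return (X << 24) >> 24; }  // sext8(0xff) == -1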
SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
}
//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}
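
// Illustrative note: countLeadingOnes() on KnownZero is the number of high
// bits proven zero. E.g. an operand produced by (x & 0xffff) has at least 16
// leading known-zero bits, so 32 - 16 = 16 <= 24 and it qualifies as an
// unsigned 24-bit multiply operand.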
static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}
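
// Illustrative note: ComputeNumSignBits counts how many top bits equal the
// sign bit, so a 32-bit value qualifies when it has more than 8 sign bits;
// e.g. a value produced by sext from i16 has 17 sign bits, and 32 - 17 < 24.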
static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}
template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
                               uint32_t Offset, uint32_t Width) {
  if (Width + Offset < 32) {
    IntTy Result = (Src0 << (32 - Offset - Width)) >> (32 - Width);
    return DAG.getConstant(Result, MVT::i32);
  }

  return DAG.getConstant(Src0 >> Offset, MVT::i32);
}
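
// Illustrative example: for Src0 = 0xff00, Offset = 8, Width = 8 the shifts
// are (0xff00 << 16) >> 24, which yields 0xff with IntTy = uint32_t and -1
// with IntTy = int32_t (the arithmetic shift replicates the extracted sign
// bit into the high bits).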
static bool usesAllNormalStores(SDNode *LoadVal) {
  for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
    if (!ISD::isNormalStore(*I))
      return false;
  }

  return true;
}
// If we have a copy of an illegal type, replace it with a load / store of an
// equivalently sized legal type. This avoids intermediate bit pack / unpack
// instructions emitted when handling extloads and truncstores. Ideally we could
// recognize the pack / unpack pattern to eliminate it.
SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  if (!DCI.isBeforeLegalize())
    return SDValue();

  StoreSDNode *SN = cast<StoreSDNode>(N);
  SDValue Value = SN->getValue();
  EVT VT = Value.getValueType();

  if (isTypeLegal(VT) || SN->isVolatile() || !ISD::isNormalLoad(Value.getNode()))
    return SDValue();

  LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
  if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
    return SDValue();

  EVT MemVT = LoadVal->getMemoryVT();

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;
  EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);

  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
                                LoadVT, SL,
                                LoadVal->getChain(),
                                LoadVal->getBasePtr(),
                                LoadVal->getOffset(),
                                LoadVT,
                                LoadVal->getMemOperand());

  SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
  DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);

  return DAG.getStore(SN->getChain(), SL, NewLoad,
                      SN->getBasePtr(), SN->getMemOperand());
}
SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (VT.isVector() || VT.getSizeInBits() > 32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue Mul;

  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
    N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
    Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
  } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
    N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
    Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
  } else {
    return SDValue();
  }

  // We need to use sext even for MUL_U24, because MUL_U24 is used
  // for signed multiply of 8 and 16-bit types.
  return DAG.getSExtOrTrunc(Mul, DL, VT);
}
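
// Illustrative note: MUL_U24 is safe even for signed 8/16-bit sources because
// only the low bits of the product survive the final getSExtOrTrunc. E.g. in
// i16: (-2) * 3 = -6, and the u24 product 0xfffe * 0x3 = 0x2fffa truncates to
// 0xfffa, which is -6 as i16.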
SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch (N->getOpcode()) {
  default:
    break;
  case ISD::MUL:
    return performMulCombine(N, DCI);
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    simplifyI24(N0, DCI);
    simplifyI24(N1, DCI);
    return SDValue();
  }
  case ISD::SELECT_CC: {
    return CombineMinMax(N, DAG);
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of
        // existing DAG combines. If not eliminated, we will match back to BFE
        // during selection.

        // TODO: The sext_inreg of extended types is not combined here,
        // although we could handle them in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }

    if (ConstantSDNode *Val = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        Val->getSExtValue(),
                                        OffsetVal,
                                        WidthVal);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       Val->getZExtValue(),
                                       OffsetVal,
                                       WidthVal);
    }

    APInt Demanded = APInt::getBitsSet(32,
                                       OffsetVal,
                                       OffsetVal + WidthVal);

    if ((OffsetVal + WidthVal) >= 32) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
        TLI.SimplifyDemandedBits(BitsFrom, Demanded, KnownZero, KnownOne, TLO)) {
      DCI.CommitTargetLoweringOpt(TLO);
    }

    break;
  }

  case ISD::STORE:
    return performStoreCombine(N, DCI);
  }

  return SDValue();
}
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}
bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}
SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}
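
// Illustrative usage (the register and register class below are hypothetical;
// the real callers live in the R600/SI lowering code):
//   SDValue Ptr = CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass,
//                                      InputPtrReg, MVT::i64);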
#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(TRIG_PREOP)
  NODE_NAME_CASE(RSQ_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMPED)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BREV)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}
static void computeKnownBitsForMinMax(const SDValue Op0,
                                      const SDValue Op1,
                                      APInt &KnownZero,
                                      APInt &KnownOne,
                                      const SelectionDAG &DAG,
                                      unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
  DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}
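
// Illustrative note: min/max returns one of its two operands, so a bit is
// known only if both inputs agree on it; e.g. if both operands have their top
// 8 bits known zero, so does the min/max result, hence the AND of the
// known-bit masks.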
void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
                                                     const SDValue Op,
                                                     APInt &KnownZero,
                                                     APInt &KnownOne,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::SMAX:
  case AMDGPUISD::UMAX:
  case AMDGPUISD::SMIN:
  case AMDGPUISD::UMIN:
    computeKnownBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
                              KnownZero, KnownOne, DAG, Depth);
    break;

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    unsigned BitWidth = 32;
    uint32_t Width = CWidth->getZExtValue() & 0x1f;
    if (Width == 0) {
      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getNullValue(BitWidth);
      return;
    }

    // FIXME: This could do a lot more. If offset is 0, should be the same as
    // sign_extend_inreg implementation, but that involves duplicating it.
    if (Opc == AMDGPUISD::BFE_I32)
      KnownOne = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
    else
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);

    break;
  }
  }
}
unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
                                                     SDValue Op,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Offset || !Offset->isNullValue())
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;