2 // The LLVM Compiler Infrastructure
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
7 //===----------------------------------------------------------------------===//
9 // This file defines the interfaces that NVPTX uses to lower LLVM code into a
12 //===----------------------------------------------------------------------===//
14 #include "NVPTXISelLowering.h"
16 #include "NVPTXTargetMachine.h"
17 #include "NVPTXTargetObjectFile.h"
18 #include "NVPTXUtilities.h"
19 #include "llvm/CodeGen/Analysis.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/GlobalValue.h"
29 #include "llvm/IR/IntrinsicInst.h"
30 #include "llvm/IR/Intrinsics.h"
31 #include "llvm/IR/Module.h"
32 #include "llvm/MC/MCSectionELF.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/raw_ostream.h"
40 #define DEBUG_TYPE "nvptx-lower"
// File-scope counter that gives each lowered call site a unique id. It is
// embedded in the emitted prototype symbol name ("prototype_<n>", see
// getPrototype) and passed to CALLSEQ_START in LowerCall.
static unsigned int uniqueCallSite = 0;
46 static cl::opt<bool> sched4reg(
48 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
50 static bool IsPTXVectorType(MVT VT) {
51 switch (VT.SimpleTy) {
70 /// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
71 /// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
72 /// into their primitive components.
73 /// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
74 /// same number of types as the Ins/Outs arrays in LowerFormalArguments,
75 /// LowerCall, and LowerReturn.
76 static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
77 SmallVectorImpl<EVT> &ValueVTs,
78 SmallVectorImpl<uint64_t> *Offsets = nullptr,
79 uint64_t StartingOffset = 0) {
80 SmallVector<EVT, 16> TempVTs;
81 SmallVector<uint64_t, 16> TempOffsets;
83 ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
84 for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
86 uint64_t Off = TempOffsets[i];
88 for (unsigned j = 0, je = VT.getVectorNumElements(); j != je; ++j) {
89 ValueVTs.push_back(VT.getVectorElementType());
91 Offsets->push_back(Off+j*VT.getVectorElementType().getStoreSize());
94 ValueVTs.push_back(VT);
96 Offsets->push_back(Off);
101 // NVPTXTargetLowering Constructor.
102 NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
103 : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM),
104 nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
106 // always lower memset, memcpy, and memmove intrinsics to load/store
107 // instructions, rather
108 // then generating calls to memset, mempcy or memmove.
109 MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
110 MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
111 MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
113 setBooleanContents(ZeroOrNegativeOneBooleanContent);
115 // Jump is Expensive. Don't create extra control flow for 'and', 'or'
116 // condition branches.
117 setJumpIsExpensive(true);
119 // By default, use the Source scheduling
121 setSchedulingPreference(Sched::RegPressure);
123 setSchedulingPreference(Sched::Source);
125 addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
126 addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
127 addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
128 addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
129 addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
130 addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
132 // Operations not directly supported by NVPTX.
133 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
134 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
135 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
136 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
137 setOperationAction(ISD::BR_CC, MVT::i8, Expand);
138 setOperationAction(ISD::BR_CC, MVT::i16, Expand);
139 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
140 setOperationAction(ISD::BR_CC, MVT::i64, Expand);
141 // Some SIGN_EXTEND_INREG can be done using cvt instruction.
142 // For others we will expand to a SHL/SRA pair.
143 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
144 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
145 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
146 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
147 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
149 if (nvptxSubtarget.hasROT64()) {
150 setOperationAction(ISD::ROTL, MVT::i64, Legal);
151 setOperationAction(ISD::ROTR, MVT::i64, Legal);
153 setOperationAction(ISD::ROTL, MVT::i64, Expand);
154 setOperationAction(ISD::ROTR, MVT::i64, Expand);
156 if (nvptxSubtarget.hasROT32()) {
157 setOperationAction(ISD::ROTL, MVT::i32, Legal);
158 setOperationAction(ISD::ROTR, MVT::i32, Legal);
160 setOperationAction(ISD::ROTL, MVT::i32, Expand);
161 setOperationAction(ISD::ROTR, MVT::i32, Expand);
164 setOperationAction(ISD::ROTL, MVT::i16, Expand);
165 setOperationAction(ISD::ROTR, MVT::i16, Expand);
166 setOperationAction(ISD::ROTL, MVT::i8, Expand);
167 setOperationAction(ISD::ROTR, MVT::i8, Expand);
168 setOperationAction(ISD::BSWAP, MVT::i16, Expand);
169 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
170 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
172 // Indirect branch is not supported.
173 // This also disables Jump Table creation.
174 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
175 setOperationAction(ISD::BRIND, MVT::Other, Expand);
177 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
178 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
180 // We want to legalize constant related memmove and memcopy
182 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
184 // Turn FP extload into load/fextend
185 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
186 // Turn FP truncstore into trunc + store.
187 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
189 // PTX does not support load / store predicate registers
190 setOperationAction(ISD::LOAD, MVT::i1, Custom);
191 setOperationAction(ISD::STORE, MVT::i1, Custom);
193 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
194 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
195 setTruncStoreAction(MVT::i64, MVT::i1, Expand);
196 setTruncStoreAction(MVT::i32, MVT::i1, Expand);
197 setTruncStoreAction(MVT::i16, MVT::i1, Expand);
198 setTruncStoreAction(MVT::i8, MVT::i1, Expand);
200 // This is legal in NVPTX
201 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
202 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
204 // TRAP can be lowered to PTX trap
205 setOperationAction(ISD::TRAP, MVT::Other, Legal);
207 setOperationAction(ISD::ADDC, MVT::i64, Expand);
208 setOperationAction(ISD::ADDE, MVT::i64, Expand);
210 // Register custom handling for vector loads/stores
211 for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
213 MVT VT = (MVT::SimpleValueType) i;
214 if (IsPTXVectorType(VT)) {
215 setOperationAction(ISD::LOAD, VT, Custom);
216 setOperationAction(ISD::STORE, VT, Custom);
217 setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
221 // Custom handling for i8 intrinsics
222 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
224 setOperationAction(ISD::CTLZ, MVT::i16, Legal);
225 setOperationAction(ISD::CTLZ, MVT::i32, Legal);
226 setOperationAction(ISD::CTLZ, MVT::i64, Legal);
227 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal);
228 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal);
229 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal);
230 setOperationAction(ISD::CTTZ, MVT::i16, Expand);
231 setOperationAction(ISD::CTTZ, MVT::i32, Expand);
232 setOperationAction(ISD::CTTZ, MVT::i64, Expand);
233 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
234 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
235 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
236 setOperationAction(ISD::CTPOP, MVT::i16, Legal);
237 setOperationAction(ISD::CTPOP, MVT::i32, Legal);
238 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
240 // Now deduce the information based on the above mentioned
242 computeRegisterProperties();
245 const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
250 return "NVPTXISD::CALL";
251 case NVPTXISD::RET_FLAG:
252 return "NVPTXISD::RET_FLAG";
253 case NVPTXISD::Wrapper:
254 return "NVPTXISD::Wrapper";
255 case NVPTXISD::DeclareParam:
256 return "NVPTXISD::DeclareParam";
257 case NVPTXISD::DeclareScalarParam:
258 return "NVPTXISD::DeclareScalarParam";
259 case NVPTXISD::DeclareRet:
260 return "NVPTXISD::DeclareRet";
261 case NVPTXISD::DeclareRetParam:
262 return "NVPTXISD::DeclareRetParam";
263 case NVPTXISD::PrintCall:
264 return "NVPTXISD::PrintCall";
265 case NVPTXISD::LoadParam:
266 return "NVPTXISD::LoadParam";
267 case NVPTXISD::LoadParamV2:
268 return "NVPTXISD::LoadParamV2";
269 case NVPTXISD::LoadParamV4:
270 return "NVPTXISD::LoadParamV4";
271 case NVPTXISD::StoreParam:
272 return "NVPTXISD::StoreParam";
273 case NVPTXISD::StoreParamV2:
274 return "NVPTXISD::StoreParamV2";
275 case NVPTXISD::StoreParamV4:
276 return "NVPTXISD::StoreParamV4";
277 case NVPTXISD::StoreParamS32:
278 return "NVPTXISD::StoreParamS32";
279 case NVPTXISD::StoreParamU32:
280 return "NVPTXISD::StoreParamU32";
281 case NVPTXISD::CallArgBegin:
282 return "NVPTXISD::CallArgBegin";
283 case NVPTXISD::CallArg:
284 return "NVPTXISD::CallArg";
285 case NVPTXISD::LastCallArg:
286 return "NVPTXISD::LastCallArg";
287 case NVPTXISD::CallArgEnd:
288 return "NVPTXISD::CallArgEnd";
289 case NVPTXISD::CallVoid:
290 return "NVPTXISD::CallVoid";
291 case NVPTXISD::CallVal:
292 return "NVPTXISD::CallVal";
293 case NVPTXISD::CallSymbol:
294 return "NVPTXISD::CallSymbol";
295 case NVPTXISD::Prototype:
296 return "NVPTXISD::Prototype";
297 case NVPTXISD::MoveParam:
298 return "NVPTXISD::MoveParam";
299 case NVPTXISD::StoreRetval:
300 return "NVPTXISD::StoreRetval";
301 case NVPTXISD::StoreRetvalV2:
302 return "NVPTXISD::StoreRetvalV2";
303 case NVPTXISD::StoreRetvalV4:
304 return "NVPTXISD::StoreRetvalV4";
305 case NVPTXISD::PseudoUseParam:
306 return "NVPTXISD::PseudoUseParam";
307 case NVPTXISD::RETURN:
308 return "NVPTXISD::RETURN";
309 case NVPTXISD::CallSeqBegin:
310 return "NVPTXISD::CallSeqBegin";
311 case NVPTXISD::CallSeqEnd:
312 return "NVPTXISD::CallSeqEnd";
313 case NVPTXISD::CallPrototype:
314 return "NVPTXISD::CallPrototype";
315 case NVPTXISD::LoadV2:
316 return "NVPTXISD::LoadV2";
317 case NVPTXISD::LoadV4:
318 return "NVPTXISD::LoadV4";
319 case NVPTXISD::LDGV2:
320 return "NVPTXISD::LDGV2";
321 case NVPTXISD::LDGV4:
322 return "NVPTXISD::LDGV4";
323 case NVPTXISD::LDUV2:
324 return "NVPTXISD::LDUV2";
325 case NVPTXISD::LDUV4:
326 return "NVPTXISD::LDUV4";
327 case NVPTXISD::StoreV2:
328 return "NVPTXISD::StoreV2";
329 case NVPTXISD::StoreV4:
330 return "NVPTXISD::StoreV4";
331 case NVPTXISD::Tex1DFloatI32: return "NVPTXISD::Tex1DFloatI32";
332 case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
333 case NVPTXISD::Tex1DFloatFloatLevel:
334 return "NVPTXISD::Tex1DFloatFloatLevel";
335 case NVPTXISD::Tex1DFloatFloatGrad:
336 return "NVPTXISD::Tex1DFloatFloatGrad";
337 case NVPTXISD::Tex1DI32I32: return "NVPTXISD::Tex1DI32I32";
338 case NVPTXISD::Tex1DI32Float: return "NVPTXISD::Tex1DI32Float";
339 case NVPTXISD::Tex1DI32FloatLevel:
340 return "NVPTXISD::Tex1DI32FloatLevel";
341 case NVPTXISD::Tex1DI32FloatGrad:
342 return "NVPTXISD::Tex1DI32FloatGrad";
343 case NVPTXISD::Tex1DArrayFloatI32: return "NVPTXISD::Tex2DArrayFloatI32";
344 case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
345 case NVPTXISD::Tex1DArrayFloatFloatLevel:
346 return "NVPTXISD::Tex2DArrayFloatFloatLevel";
347 case NVPTXISD::Tex1DArrayFloatFloatGrad:
348 return "NVPTXISD::Tex2DArrayFloatFloatGrad";
349 case NVPTXISD::Tex1DArrayI32I32: return "NVPTXISD::Tex2DArrayI32I32";
350 case NVPTXISD::Tex1DArrayI32Float: return "NVPTXISD::Tex2DArrayI32Float";
351 case NVPTXISD::Tex1DArrayI32FloatLevel:
352 return "NVPTXISD::Tex2DArrayI32FloatLevel";
353 case NVPTXISD::Tex1DArrayI32FloatGrad:
354 return "NVPTXISD::Tex2DArrayI32FloatGrad";
355 case NVPTXISD::Tex2DFloatI32: return "NVPTXISD::Tex2DFloatI32";
356 case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
357 case NVPTXISD::Tex2DFloatFloatLevel:
358 return "NVPTXISD::Tex2DFloatFloatLevel";
359 case NVPTXISD::Tex2DFloatFloatGrad:
360 return "NVPTXISD::Tex2DFloatFloatGrad";
361 case NVPTXISD::Tex2DI32I32: return "NVPTXISD::Tex2DI32I32";
362 case NVPTXISD::Tex2DI32Float: return "NVPTXISD::Tex2DI32Float";
363 case NVPTXISD::Tex2DI32FloatLevel:
364 return "NVPTXISD::Tex2DI32FloatLevel";
365 case NVPTXISD::Tex2DI32FloatGrad:
366 return "NVPTXISD::Tex2DI32FloatGrad";
367 case NVPTXISD::Tex2DArrayFloatI32: return "NVPTXISD::Tex2DArrayFloatI32";
368 case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
369 case NVPTXISD::Tex2DArrayFloatFloatLevel:
370 return "NVPTXISD::Tex2DArrayFloatFloatLevel";
371 case NVPTXISD::Tex2DArrayFloatFloatGrad:
372 return "NVPTXISD::Tex2DArrayFloatFloatGrad";
373 case NVPTXISD::Tex2DArrayI32I32: return "NVPTXISD::Tex2DArrayI32I32";
374 case NVPTXISD::Tex2DArrayI32Float: return "NVPTXISD::Tex2DArrayI32Float";
375 case NVPTXISD::Tex2DArrayI32FloatLevel:
376 return "NVPTXISD::Tex2DArrayI32FloatLevel";
377 case NVPTXISD::Tex2DArrayI32FloatGrad:
378 return "NVPTXISD::Tex2DArrayI32FloatGrad";
379 case NVPTXISD::Tex3DFloatI32: return "NVPTXISD::Tex3DFloatI32";
380 case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
381 case NVPTXISD::Tex3DFloatFloatLevel:
382 return "NVPTXISD::Tex3DFloatFloatLevel";
383 case NVPTXISD::Tex3DFloatFloatGrad:
384 return "NVPTXISD::Tex3DFloatFloatGrad";
385 case NVPTXISD::Tex3DI32I32: return "NVPTXISD::Tex3DI32I32";
386 case NVPTXISD::Tex3DI32Float: return "NVPTXISD::Tex3DI32Float";
387 case NVPTXISD::Tex3DI32FloatLevel:
388 return "NVPTXISD::Tex3DI32FloatLevel";
389 case NVPTXISD::Tex3DI32FloatGrad:
390 return "NVPTXISD::Tex3DI32FloatGrad";
392 case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
393 case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
394 case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
395 case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
396 case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
397 case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
398 case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
399 case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
400 case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
402 case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
403 case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
404 case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
405 case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
406 case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
407 case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
408 case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
409 case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
410 case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
412 case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
413 case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
414 case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
415 case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
416 case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
417 case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
418 case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
419 case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
420 case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
422 case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
423 case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
424 case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
425 case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
426 case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
427 case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
428 case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
429 case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
430 case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
432 case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
433 case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
434 case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
435 case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
436 case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
437 case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
438 case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
439 case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
440 case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
444 bool NVPTXTargetLowering::shouldSplitVectorType(EVT VT) const {
445 return VT.getScalarType() == MVT::i1;
449 NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
451 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
452 Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
453 return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
457 NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
458 const SmallVectorImpl<ISD::OutputArg> &Outs,
459 unsigned retAlignment,
460 const ImmutableCallSite *CS) const {
462 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
463 assert(isABI && "Non-ABI compilation is not supported");
468 O << "prototype_" << uniqueCallSite << " : .callprototype ";
470 if (retTy->getTypeID() == Type::VoidTyID) {
474 if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
476 if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
477 size = ITy->getBitWidth();
481 assert(retTy->isFloatingPointTy() &&
482 "Floating point type expected here");
483 size = retTy->getPrimitiveSizeInBits();
486 O << ".param .b" << size << " _";
487 } else if (isa<PointerType>(retTy)) {
488 O << ".param .b" << getPointerTy().getSizeInBits() << " _";
490 if ((retTy->getTypeID() == Type::StructTyID) || isa<VectorType>(retTy)) {
491 SmallVector<EVT, 16> vtparts;
492 ComputeValueVTs(*this, retTy, vtparts);
493 unsigned totalsz = 0;
494 for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
496 EVT elemtype = vtparts[i];
497 if (vtparts[i].isVector()) {
498 elems = vtparts[i].getVectorNumElements();
499 elemtype = vtparts[i].getVectorElementType();
501 // TODO: no need to loop
502 for (unsigned j = 0, je = elems; j != je; ++j) {
503 unsigned sz = elemtype.getSizeInBits();
504 if (elemtype.isInteger() && (sz < 8))
509 O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]";
511 assert(false && "Unknown return type");
519 MVT thePointerTy = getPointerTy();
522 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
523 Type *Ty = Args[i].Ty;
529 if (Outs[OIdx].Flags.isByVal() == false) {
530 if (Ty->isAggregateType() || Ty->isVectorTy()) {
532 const CallInst *CallI = cast<CallInst>(CS->getInstruction());
533 const DataLayout *TD = getDataLayout();
534 // +1 because index 0 is reserved for return type alignment
535 if (!llvm::getAlign(*CallI, i + 1, align))
536 align = TD->getABITypeAlignment(Ty);
537 unsigned sz = TD->getTypeAllocSize(Ty);
538 O << ".param .align " << align << " .b8 ";
540 O << "[" << sz << "]";
541 // update the index for Outs
542 SmallVector<EVT, 16> vtparts;
543 ComputeValueVTs(*this, Ty, vtparts);
544 if (unsigned len = vtparts.size())
548 // i8 types in IR will be i16 types in SDAG
549 assert((getValueType(Ty) == Outs[OIdx].VT ||
550 (getValueType(Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
551 "type mismatch between callee prototype and arguments");
554 if (isa<IntegerType>(Ty)) {
555 sz = cast<IntegerType>(Ty)->getBitWidth();
558 } else if (isa<PointerType>(Ty))
559 sz = thePointerTy.getSizeInBits();
561 sz = Ty->getPrimitiveSizeInBits();
562 O << ".param .b" << sz << " ";
566 const PointerType *PTy = dyn_cast<PointerType>(Ty);
567 assert(PTy && "Param with byval attribute should be a pointer type");
568 Type *ETy = PTy->getElementType();
570 unsigned align = Outs[OIdx].Flags.getByValAlign();
571 unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
572 O << ".param .align " << align << " .b8 ";
574 O << "[" << sz << "]";
581 NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
582 const ImmutableCallSite *CS,
584 unsigned Idx) const {
585 const DataLayout *TD = getDataLayout();
587 const Value *DirectCallee = CS->getCalledFunction();
590 // We don't have a direct function symbol, but that may be because of
591 // constant cast instructions in the call.
592 const Instruction *CalleeI = CS->getInstruction();
593 assert(CalleeI && "Call target is not a function or derived value?");
595 // With bitcast'd call targets, the instruction will be the call
596 if (isa<CallInst>(CalleeI)) {
597 // Check if we have call alignment metadata
598 if (llvm::getAlign(*cast<CallInst>(CalleeI), Idx, Align))
601 const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
602 // Ignore any bitcast instructions
603 while(isa<ConstantExpr>(CalleeV)) {
604 const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
607 // Look through the bitcast
608 CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
611 // We have now looked past all of the bitcasts. Do we finally have a
613 if (isa<Function>(CalleeV))
614 DirectCallee = CalleeV;
618 // Check for function alignment information if we found that the
619 // ultimate target is a Function
621 if (llvm::getAlign(*cast<Function>(DirectCallee), Idx, Align))
624 // Call is indirect or alignment information is not available, fall back to
625 // the ABI type alignment
626 return TD->getABITypeAlignment(Ty);
629 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
630 SmallVectorImpl<SDValue> &InVals) const {
631 SelectionDAG &DAG = CLI.DAG;
633 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
634 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
635 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
636 SDValue Chain = CLI.Chain;
637 SDValue Callee = CLI.Callee;
638 bool &isTailCall = CLI.IsTailCall;
639 ArgListTy &Args = CLI.Args;
640 Type *retTy = CLI.RetTy;
641 ImmutableCallSite *CS = CLI.CS;
643 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
644 assert(isABI && "Non-ABI compilation is not supported");
647 const DataLayout *TD = getDataLayout();
648 MachineFunction &MF = DAG.getMachineFunction();
649 const Function *F = MF.getFunction();
651 SDValue tempChain = Chain;
653 DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
655 SDValue InFlag = Chain.getValue(1);
657 unsigned paramCount = 0;
658 // Args.size() and Outs.size() need not match.
659 // Outs.size() will be larger
660 // * if there is an aggregate argument with multiple fields (each field
661 // showing up separately in Outs)
662 // * if there is a vector argument with more than typical vector-length
663 // elements (generally if more than 4) where each vector element is
664 // individually present in Outs.
665 // So a different index should be used for indexing into Outs/OutVals.
666 // See similar issue in LowerFormalArguments.
668 // Declare the .params or .reg need to pass values
670 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
671 EVT VT = Outs[OIdx].VT;
672 Type *Ty = Args[i].Ty;
674 if (Outs[OIdx].Flags.isByVal() == false) {
675 if (Ty->isAggregateType()) {
677 SmallVector<EVT, 16> vtparts;
678 ComputeValueVTs(*this, Ty, vtparts);
680 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
681 // declare .param .align <align> .b8 .param<n>[<size>];
682 unsigned sz = TD->getTypeAllocSize(Ty);
683 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
684 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
685 DAG.getConstant(paramCount, MVT::i32),
686 DAG.getConstant(sz, MVT::i32), InFlag };
687 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
689 InFlag = Chain.getValue(1);
690 unsigned curOffset = 0;
691 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
693 EVT elemtype = vtparts[j];
694 if (vtparts[j].isVector()) {
695 elems = vtparts[j].getVectorNumElements();
696 elemtype = vtparts[j].getVectorElementType();
698 for (unsigned k = 0, ke = elems; k != ke; ++k) {
699 unsigned sz = elemtype.getSizeInBits();
700 if (elemtype.isInteger() && (sz < 8))
702 SDValue StVal = OutVals[OIdx];
703 if (elemtype.getSizeInBits() < 16) {
704 StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
706 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
707 SDValue CopyParamOps[] = { Chain,
708 DAG.getConstant(paramCount, MVT::i32),
709 DAG.getConstant(curOffset, MVT::i32),
711 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
712 CopyParamVTs, &CopyParamOps[0], 5,
713 elemtype, MachinePointerInfo());
714 InFlag = Chain.getValue(1);
719 if (vtparts.size() > 0)
724 if (Ty->isVectorTy()) {
725 EVT ObjectVT = getValueType(Ty);
726 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
727 // declare .param .align <align> .b8 .param<n>[<size>];
728 unsigned sz = TD->getTypeAllocSize(Ty);
729 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
730 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
731 DAG.getConstant(paramCount, MVT::i32),
732 DAG.getConstant(sz, MVT::i32), InFlag };
733 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
735 InFlag = Chain.getValue(1);
736 unsigned NumElts = ObjectVT.getVectorNumElements();
737 EVT EltVT = ObjectVT.getVectorElementType();
739 bool NeedExtend = false;
740 if (EltVT.getSizeInBits() < 16) {
747 SDValue Elt = OutVals[OIdx++];
749 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt);
751 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
752 SDValue CopyParamOps[] = { Chain,
753 DAG.getConstant(paramCount, MVT::i32),
754 DAG.getConstant(0, MVT::i32), Elt,
756 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
757 CopyParamVTs, &CopyParamOps[0], 5,
758 MemVT, MachinePointerInfo());
759 InFlag = Chain.getValue(1);
760 } else if (NumElts == 2) {
761 SDValue Elt0 = OutVals[OIdx++];
762 SDValue Elt1 = OutVals[OIdx++];
764 Elt0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt0);
765 Elt1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt1);
768 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
769 SDValue CopyParamOps[] = { Chain,
770 DAG.getConstant(paramCount, MVT::i32),
771 DAG.getConstant(0, MVT::i32), Elt0, Elt1,
773 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
774 CopyParamVTs, &CopyParamOps[0], 6,
775 MemVT, MachinePointerInfo());
776 InFlag = Chain.getValue(1);
778 unsigned curOffset = 0;
780 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
782 // vector will be expanded to a power of 2 elements, so we know we can
783 // always round up to the next multiple of 4 when creating the vector
785 // e.g. 4 elem => 1 st.v4
788 // 11 elem => 3 st.v4
789 unsigned VecSize = 4;
790 if (EltVT.getSizeInBits() == 64)
793 // This is potentially only part of a vector, so assume all elements
794 // are packed together.
795 unsigned PerStoreOffset = MemVT.getStoreSizeInBits() / 8 * VecSize;
797 for (unsigned i = 0; i < NumElts; i += VecSize) {
800 SmallVector<SDValue, 8> Ops;
801 Ops.push_back(Chain);
802 Ops.push_back(DAG.getConstant(paramCount, MVT::i32));
803 Ops.push_back(DAG.getConstant(curOffset, MVT::i32));
805 unsigned Opc = NVPTXISD::StoreParamV2;
807 StoreVal = OutVals[OIdx++];
809 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
810 Ops.push_back(StoreVal);
812 if (i + 1 < NumElts) {
813 StoreVal = OutVals[OIdx++];
816 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
818 StoreVal = DAG.getUNDEF(EltVT);
820 Ops.push_back(StoreVal);
823 Opc = NVPTXISD::StoreParamV4;
824 if (i + 2 < NumElts) {
825 StoreVal = OutVals[OIdx++];
828 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
830 StoreVal = DAG.getUNDEF(EltVT);
832 Ops.push_back(StoreVal);
834 if (i + 3 < NumElts) {
835 StoreVal = OutVals[OIdx++];
838 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
840 StoreVal = DAG.getUNDEF(EltVT);
842 Ops.push_back(StoreVal);
845 Ops.push_back(InFlag);
847 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
848 Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, &Ops[0],
850 MachinePointerInfo());
851 InFlag = Chain.getValue(1);
852 curOffset += PerStoreOffset;
860 // for ABI, declare .param .b<size> .param<n>;
861 unsigned sz = VT.getSizeInBits();
862 bool needExtend = false;
863 if (VT.isInteger()) {
869 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
870 SDValue DeclareParamOps[] = { Chain,
871 DAG.getConstant(paramCount, MVT::i32),
872 DAG.getConstant(sz, MVT::i32),
873 DAG.getConstant(0, MVT::i32), InFlag };
874 Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
876 InFlag = Chain.getValue(1);
877 SDValue OutV = OutVals[OIdx];
879 // zext/sext i1 to i16
880 unsigned opc = ISD::ZERO_EXTEND;
881 if (Outs[OIdx].Flags.isSExt())
882 opc = ISD::SIGN_EXTEND;
883 OutV = DAG.getNode(opc, dl, MVT::i16, OutV);
885 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
886 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
887 DAG.getConstant(0, MVT::i32), OutV, InFlag };
889 unsigned opcode = NVPTXISD::StoreParam;
890 if (Outs[OIdx].Flags.isZExt())
891 opcode = NVPTXISD::StoreParamU32;
892 else if (Outs[OIdx].Flags.isSExt())
893 opcode = NVPTXISD::StoreParamS32;
894 Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps, 5,
895 VT, MachinePointerInfo());
897 InFlag = Chain.getValue(1);
902 SmallVector<EVT, 16> vtparts;
903 const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
904 assert(PTy && "Type of a byval parameter should be pointer");
905 ComputeValueVTs(*this, PTy->getElementType(), vtparts);
907 // declare .param .align <align> .b8 .param<n>[<size>];
908 unsigned sz = Outs[OIdx].Flags.getByValSize();
909 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
910 // The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
911 // so we don't need to worry about natural alignment or not.
912 // See TargetLowering::LowerCallTo().
913 SDValue DeclareParamOps[] = {
914 Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32),
915 DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
918 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
920 InFlag = Chain.getValue(1);
921 unsigned curOffset = 0;
922 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
924 EVT elemtype = vtparts[j];
925 if (vtparts[j].isVector()) {
926 elems = vtparts[j].getVectorNumElements();
927 elemtype = vtparts[j].getVectorElementType();
929 for (unsigned k = 0, ke = elems; k != ke; ++k) {
930 unsigned sz = elemtype.getSizeInBits();
931 if (elemtype.isInteger() && (sz < 8))
934 DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
935 DAG.getConstant(curOffset, getPointerTy()));
936 SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
937 MachinePointerInfo(), false, false, false,
939 if (elemtype.getSizeInBits() < 16) {
940 theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
942 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
943 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
944 DAG.getConstant(curOffset, MVT::i32), theVal,
946 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
947 CopyParamOps, 5, elemtype,
948 MachinePointerInfo());
950 InFlag = Chain.getValue(1);
957 GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
958 unsigned retAlignment = 0;
961 if (Ins.size() > 0) {
962 SmallVector<EVT, 16> resvtparts;
963 ComputeValueVTs(*this, retTy, resvtparts);
966 // .param .align 16 .b8 retval0[<size-in-bytes>], or
967 // .param .b<size-in-bits> retval0
968 unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
969 if (retTy->isSingleValueType()) {
970 // Scalar needs to be at least 32bit wide
973 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
974 SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
975 DAG.getConstant(resultsz, MVT::i32),
976 DAG.getConstant(0, MVT::i32), InFlag };
977 Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
979 InFlag = Chain.getValue(1);
981 retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
982 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
983 SDValue DeclareRetOps[] = { Chain,
984 DAG.getConstant(retAlignment, MVT::i32),
985 DAG.getConstant(resultsz / 8, MVT::i32),
986 DAG.getConstant(0, MVT::i32), InFlag };
987 Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
989 InFlag = Chain.getValue(1);
994 // This is indirect function call case : PTX requires a prototype of the
996 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
997 // to be emitted, and the label has to used as the last arg of call
999 // The prototype is embedded in a string and put as the operand for a
1000 // CallPrototype SDNode which will print out to the value of the string.
1001 SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1002 std::string Proto = getPrototype(retTy, Args, Outs, retAlignment, CS);
1003 const char *ProtoStr =
1004 nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
1005 SDValue ProtoOps[] = {
1006 Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
1008 Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
1009 InFlag = Chain.getValue(1);
1011 // Op to just print "call"
1012 SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1013 SDValue PrintCallOps[] = {
1014 Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
1016 Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
1017 dl, PrintCallVTs, PrintCallOps);
1018 InFlag = Chain.getValue(1);
1020 // Ops to print out the function name
1021 SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1022 SDValue CallVoidOps[] = { Chain, Callee, InFlag };
1023 Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
1024 InFlag = Chain.getValue(1);
1026 // Ops to print out the param list
1027 SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1028 SDValue CallArgBeginOps[] = { Chain, InFlag };
1029 Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
1031 InFlag = Chain.getValue(1);
1033 for (unsigned i = 0, e = paramCount; i != e; ++i) {
1036 opcode = NVPTXISD::LastCallArg;
1038 opcode = NVPTXISD::CallArg;
1039 SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1040 SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
1041 DAG.getConstant(i, MVT::i32), InFlag };
1042 Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
1043 InFlag = Chain.getValue(1);
1045 SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1046 SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
1048 Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
1049 InFlag = Chain.getValue(1);
1052 SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1053 SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
1055 Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
1056 InFlag = Chain.getValue(1);
1059 // Generate loads from param memory/moves from registers for result
1060 if (Ins.size() > 0) {
1061 unsigned resoffset = 0;
1062 if (retTy && retTy->isVectorTy()) {
1063 EVT ObjectVT = getValueType(retTy);
1064 unsigned NumElts = ObjectVT.getVectorNumElements();
1065 EVT EltVT = ObjectVT.getVectorElementType();
1066 assert(nvTM->getTargetLowering()->getNumRegisters(F->getContext(),
1067 ObjectVT) == NumElts &&
1068 "Vector was not scalarized");
1069 unsigned sz = EltVT.getSizeInBits();
1070 bool needTruncate = sz < 16 ? true : false;
1073 // Just a simple load
1074 std::vector<EVT> LoadRetVTs;
1076 // If loading i1 result, generate
1079 LoadRetVTs.push_back(MVT::i16);
1081 LoadRetVTs.push_back(EltVT);
1082 LoadRetVTs.push_back(MVT::Other);
1083 LoadRetVTs.push_back(MVT::Glue);
1084 std::vector<SDValue> LoadRetOps;
1085 LoadRetOps.push_back(Chain);
1086 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1087 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
1088 LoadRetOps.push_back(InFlag);
1089 SDValue retval = DAG.getMemIntrinsicNode(
1090 NVPTXISD::LoadParam, dl,
1091 DAG.getVTList(LoadRetVTs), &LoadRetOps[0],
1092 LoadRetOps.size(), EltVT, MachinePointerInfo());
1093 Chain = retval.getValue(1);
1094 InFlag = retval.getValue(2);
1095 SDValue Ret0 = retval;
1097 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Ret0);
1098 InVals.push_back(Ret0);
1099 } else if (NumElts == 2) {
1101 std::vector<EVT> LoadRetVTs;
1103 // If loading i1 result, generate
1106 LoadRetVTs.push_back(MVT::i16);
1107 LoadRetVTs.push_back(MVT::i16);
1109 LoadRetVTs.push_back(EltVT);
1110 LoadRetVTs.push_back(EltVT);
1112 LoadRetVTs.push_back(MVT::Other);
1113 LoadRetVTs.push_back(MVT::Glue);
1114 std::vector<SDValue> LoadRetOps;
1115 LoadRetOps.push_back(Chain);
1116 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1117 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
1118 LoadRetOps.push_back(InFlag);
1119 SDValue retval = DAG.getMemIntrinsicNode(
1120 NVPTXISD::LoadParamV2, dl,
1121 DAG.getVTList(LoadRetVTs), &LoadRetOps[0],
1122 LoadRetOps.size(), EltVT, MachinePointerInfo());
1123 Chain = retval.getValue(2);
1124 InFlag = retval.getValue(3);
1125 SDValue Ret0 = retval.getValue(0);
1126 SDValue Ret1 = retval.getValue(1);
1128 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret0);
1129 InVals.push_back(Ret0);
1130 Ret1 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret1);
1131 InVals.push_back(Ret1);
1133 InVals.push_back(Ret0);
1134 InVals.push_back(Ret1);
1137 // Split into N LoadV4
1139 unsigned VecSize = 4;
1140 unsigned Opc = NVPTXISD::LoadParamV4;
1141 if (EltVT.getSizeInBits() == 64) {
1143 Opc = NVPTXISD::LoadParamV2;
1145 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1146 for (unsigned i = 0; i < NumElts; i += VecSize) {
1147 SmallVector<EVT, 8> LoadRetVTs;
1149 // If loading i1 result, generate
1152 for (unsigned j = 0; j < VecSize; ++j)
1153 LoadRetVTs.push_back(MVT::i16);
1155 for (unsigned j = 0; j < VecSize; ++j)
1156 LoadRetVTs.push_back(EltVT);
1158 LoadRetVTs.push_back(MVT::Other);
1159 LoadRetVTs.push_back(MVT::Glue);
1160 SmallVector<SDValue, 4> LoadRetOps;
1161 LoadRetOps.push_back(Chain);
1162 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1163 LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
1164 LoadRetOps.push_back(InFlag);
1165 SDValue retval = DAG.getMemIntrinsicNode(
1166 Opc, dl, DAG.getVTList(LoadRetVTs),
1167 &LoadRetOps[0], LoadRetOps.size(), EltVT, MachinePointerInfo());
1169 Chain = retval.getValue(2);
1170 InFlag = retval.getValue(3);
1172 Chain = retval.getValue(4);
1173 InFlag = retval.getValue(5);
1176 for (unsigned j = 0; j < VecSize; ++j) {
1177 if (i + j >= NumElts)
1179 SDValue Elt = retval.getValue(j);
1181 Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
1182 InVals.push_back(Elt);
1184 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1188 SmallVector<EVT, 16> VTs;
1189 ComputePTXValueVTs(*this, retTy, VTs);
1190 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1191 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
1192 unsigned sz = VTs[i].getSizeInBits();
1193 bool needTruncate = sz < 8 ? true : false;
1194 if (VTs[i].isInteger() && (sz < 8))
1197 SmallVector<EVT, 4> LoadRetVTs;
1198 EVT TheLoadType = VTs[i];
1199 if (retTy->isIntegerTy() &&
1200 TD->getTypeAllocSizeInBits(retTy) < 32) {
1201 // This is for integer types only, and specifically not for
1203 LoadRetVTs.push_back(MVT::i32);
1204 TheLoadType = MVT::i32;
1205 } else if (sz < 16) {
1206 // If loading i1/i8 result, generate
1208 // trunc i16 to i1/i8
1209 LoadRetVTs.push_back(MVT::i16);
1211 LoadRetVTs.push_back(Ins[i].VT);
1212 LoadRetVTs.push_back(MVT::Other);
1213 LoadRetVTs.push_back(MVT::Glue);
1215 SmallVector<SDValue, 4> LoadRetOps;
1216 LoadRetOps.push_back(Chain);
1217 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1218 LoadRetOps.push_back(DAG.getConstant(resoffset, MVT::i32));
1219 LoadRetOps.push_back(InFlag);
1220 SDValue retval = DAG.getMemIntrinsicNode(
1221 NVPTXISD::LoadParam, dl,
1222 DAG.getVTList(LoadRetVTs), &LoadRetOps[0],
1223 LoadRetOps.size(), TheLoadType, MachinePointerInfo());
1224 Chain = retval.getValue(1);
1225 InFlag = retval.getValue(2);
1226 SDValue Ret0 = retval.getValue(0);
1228 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0);
1229 InVals.push_back(Ret0);
1230 resoffset += sz / 8;
1235 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
1236 DAG.getIntPtrConstant(uniqueCallSite + 1, true),
1240 // set isTailCall to false for now, until we figure out how to express
1241 // tail call optimization in PTX
1246 // By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1247 // (see LegalizeDAG.cpp). This is slow and uses local memory.
1248 // We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5
// Lower CONCAT_VECTORS by extracting every scalar element of every operand
// vector and rebuilding the full result with a single BUILD_VECTOR, avoiding
// the default stack-based expansion (see comment above).
NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  // Accumulates one SDValue per scalar element of the concatenated result.
  SmallVector<SDValue, 8> Ops;
  unsigned NumOperands = Node->getNumOperands();
  for (unsigned i = 0; i < NumOperands; ++i) {
    SDValue SubOp = Node->getOperand(i);
    EVT VVT = SubOp.getNode()->getValueType(0);
    EVT EltVT = VVT.getVectorElementType();
    unsigned NumSubElem = VVT.getVectorNumElements();
    // Pull each element out of this sub-vector individually.
    for (unsigned j = 0; j < NumSubElem; ++j) {
      Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
                                DAG.getIntPtrConstant(j)));
  // Reassemble all extracted elements into the concatenated vector type.
  return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Ops);
// Central dispatch for all operations marked Custom for this target:
// routes each opcode to its dedicated Lower* helper.
NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::RETURNADDR:
  case ISD::FRAMEADDR:
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::BUILD_VECTOR:
  case ISD::EXTRACT_SUBVECTOR:
  case ISD::CONCAT_VECTORS:
    return LowerCONCAT_VECTORS(Op, DAG);
    return LowerSTORE(Op, DAG);
    return LowerLOAD(Op, DAG);
  // Any opcode reaching here was marked Custom without a handler above.
  llvm_unreachable("Custom lowering not defined for operation");
// Custom-lower loads: only i1 loads need special handling (widened to i8
// memory accesses by LowerLOADi1 below).
SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType() == MVT::i1)
    return LowerLOADi1(Op, DAG);
// Lower an i1 load as:
// v1 = ld i8* addr (-> i16)
// v = trunc i16 to i1
SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  LoadSDNode *LD = cast<LoadSDNode>(Node);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
  assert(Node->getValueType(0) == MVT::i1 &&
         "Custom lowering for i1 load only");
      // Load as i16, preserving the original load's volatility, temporality,
      // invariance and alignment.
      DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
                  LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
                  LD->isInvariant(), LD->getAlignment());
  SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
  // The legalizer (the caller) is expecting two values from the legalized
  // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
  // in LegalizeDAG.cpp which also uses MergeValues.
  SDValue Ops[] = { result, LD->getChain() };
  return DAG.getMergeValues(Ops, 2, dl);
// Custom-lower stores: i1 stores are widened (LowerSTOREi1) and vector
// stores are turned into target StoreV2/StoreV4 nodes (LowerSTOREVector).
SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  EVT ValVT = Op.getOperand(1).getValueType();
  if (ValVT == MVT::i1)
    return LowerSTOREi1(Op, DAG);
  else if (ValVT.isVector())
    return LowerSTOREVector(Op, DAG);
// Lower a vector store into a single NVPTX StoreV2/StoreV4 memory intrinsic
// node carrying the scalar elements as separate operands.
NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDValue Val = N->getOperand(1);
  EVT ValVT = Val.getValueType();
  if (ValVT.isVector()) {
    // We only handle "native" vector sizes for now, e.g. <4 x double> is not
    // legal. We can (and should) split that into 2 stores of <2 x double> here
    // but I'm leaving that as a TODO for now.
    if (!ValVT.isSimple())
    switch (ValVT.getSimpleVT().SimpleTy) {
    // This is a "native" vector type
    unsigned Opcode = 0;
    EVT EltVT = ValVT.getVectorElementType();
    unsigned NumElts = ValVT.getVectorNumElements();
    // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
    // Therefore, we must ensure the type is legal. For i1 and i8, we set the
    // stored type to i16 and propagate the "real" type as the memory type.
    bool NeedExt = false;
    if (EltVT.getSizeInBits() < 16)
      Opcode = NVPTXISD::StoreV2;
      Opcode = NVPTXISD::StoreV4;
    SmallVector<SDValue, 8> Ops;
    // First is the chain
    Ops.push_back(N->getOperand(0));
    // Then the split values
    for (unsigned i = 0; i < NumElts; ++i) {
      SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
                                   DAG.getIntPtrConstant(i));
        // Sub-16-bit elements are widened to i16 for the target node; the
        // real element type is kept as the memory VT below.
        ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
      Ops.push_back(ExtVal);
    // Then any remaining arguments
    for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
      Ops.push_back(N->getOperand(i));
    MemSDNode *MemSD = cast<MemSDNode>(N);
    // Preserve the original store's memory VT and memory operand.
    SDValue NewSt = DAG.getMemIntrinsicNode(
        Opcode, DL, DAG.getVTList(MVT::Other), &Ops[0], Ops.size(),
        MemSD->getMemoryVT(), MemSD->getMemOperand());
    //return DCI.CombineTo(N, NewSt, true);
// Lower an i1 store as:
// v1 = zxt v to i16
// st.u8 (truncating store of the low byte)
SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  StoreSDNode *ST = cast<StoreSDNode>(Node);
  SDValue Tmp1 = ST->getChain();
  SDValue Tmp2 = ST->getBasePtr();
  SDValue Tmp3 = ST->getValue();
  assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  bool isNonTemporal = ST->isNonTemporal();
  // Widen the stored i1 to i16, then emit a truncating store of MVT::i8,
  // keeping the original store's volatility/temporality/alignment.
  Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
  SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
                                     ST->getPointerInfo(), MVT::i8, isNonTemporal,
                                     isVolatile, Alignment);
// Build a TargetExternalSymbol node for name "<inname><suffix>", where the
// suffix is derived from \p idx. The string is interned in the target
// machine's managed string pool so the symbol's storage outlives this call.
SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
                                        int idx, EVT v) const {
  std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
  std::stringstream suffix;
  *name += suffix.str();
  return DAG.getTargetExternalSymbol(name->c_str(), v);
// Build the external symbol "<function-name>_param_<idx>" used to address
// formal parameter \p idx of the current function in PTX.
NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
  std::string ParamSym;
  raw_string_ostream ParamStr(ParamSym);
  ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
  // Intern the name so the TargetExternalSymbol's C-string stays valid.
  std::string *SavedStr =
      nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
  return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
// Return the ".HLPPARAM<idx>" helper-parameter symbol (see getExtSymb).
SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
  return getExtSymb(DAG, ".HLPPARAM", idx);
// Check to see if the kernel argument is image*_t or sampler_t:
// returns true iff \p arg is a pointer to a named (non-literal) struct whose
// name matches one of the OpenCL special types listed below.
bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
  static const char *const specialTypes[] = { "struct._image2d_t",
                                              "struct._image3d_t",
                                              "struct._sampler_t" };
  const Type *Ty = arg->getType();
  const PointerType *PTy = dyn_cast<PointerType>(Ty);
  // Literal (unnamed) structs can never match, so they yield an empty name.
  const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
  const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
  for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
    if (TypeName == specialTypes[i])
// Lower the incoming (formal) arguments of the current function. Arguments
// are loaded from their ".param" symbols (see getParamSymbol) in the PTX
// parameter address space; one SDValue is pushed onto InVals for every entry
// in Ins. ByVal arguments are instead materialized via MoveParam.
SDValue NVPTXTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const DataLayout *TD = getDataLayout();
  const Function *F = MF.getFunction();
  const AttributeSet &PAL = F->getAttributes();
  const TargetLowering *TLI = nvTM->getTargetLowering();
  SDValue Root = DAG.getRoot();
  std::vector<SDValue> OutChains;
  bool isKernel = llvm::isKernelFunction(*F);
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");
  // Collect the IR arguments and their types up front so they can be
  // indexed independently of Ins (see the note below).
  std::vector<Type *> argTypes;
  std::vector<const Argument *> theArgs;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
    theArgs.push_back(I);
    argTypes.push_back(I->getType());
  // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
  // Ins.size() will be larger
  //   * if there is an aggregate argument with multiple fields (each field
  //     showing up separately in Ins)
  //   * if there is a vector argument with more than typical vector-length
  //     elements (generally if more than 4) where each vector element is
  //     individually present in Ins.
  // So a different index should be used for indexing into Ins.
  // See similar issue in LowerCall.
  unsigned InsIdx = 0;
  for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
    Type *Ty = argTypes[i];

    // If the kernel argument is image*_t or sampler_t, convert it to
    // a i32 constant holding the parameter position. This can later
    // matched in the AsmPrinter to output the correct mangled name.
    if (isImageOrSamplerVal(
            (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
      assert(isKernel && "Only kernels can have image/sampler params");
      InVals.push_back(DAG.getConstant(i + 1, MVT::i32));

    // Unused arguments only need UNDEF placeholders of the right types so
    // that InVals stays in sync with Ins.
    if (theArgs[i]->use_empty()) {
      if (Ty->isAggregateType()) {
        SmallVector<EVT, 16> vtparts;
        ComputePTXValueVTs(*this, Ty, vtparts);
        assert(vtparts.size() > 0 && "empty aggregate type not expected");
        for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
          EVT partVT = vtparts[parti];
          InVals.push_back(DAG.getNode(ISD::UNDEF, dl, partVT));
        if (vtparts.size() > 0)
      if (Ty->isVectorTy()) {
        EVT ObjectVT = getValueType(Ty);
        unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
        for (unsigned parti = 0; parti < NumRegs; ++parti) {
          InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
      InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));

    // In the following cases, assign a node order of "idx+1"
    // to newly created nodes. The SDNodes for params have to
    // appear in the same order as their order of appearance
    // in the original function. "idx+1" holds that order.
    if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) {
      if (Ty->isAggregateType()) {
        SmallVector<EVT, 16> vtparts;
        SmallVector<uint64_t, 16> offsets;
        // NOTE: Here, we lose the ability to issue vector loads for vectors
        // that are a part of a struct. This should be investigated in the
        ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
        assert(vtparts.size() > 0 && "empty aggregate type not expected");
        bool aggregateIsPacked = false;
        if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
          aggregateIsPacked = STy->isPacked();

        SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
        // Load each scalar part of the aggregate from its byte offset
        // within the param.
        for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
          EVT partVT = vtparts[parti];
          Value *srcValue = Constant::getNullValue(
              PointerType::get(partVT.getTypeForEVT(F->getContext()),
                               llvm::ADDRESS_SPACE_PARAM));
              DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                          DAG.getConstant(offsets[parti], getPointerTy()));
          // Packed aggregates guarantee only 1-byte alignment per part.
          unsigned partAlign =
              aggregateIsPacked ? 1
                                : TD->getABITypeAlignment(
                                      partVT.getTypeForEVT(F->getContext()));
          // If the register type is wider than the in-memory part, use an
          // extending load honoring the caller-requested sext/zext.
          if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
            ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
                                     ISD::SEXTLOAD : ISD::ZEXTLOAD;
            p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
                               MachinePointerInfo(srcValue), partVT, false,
            p = DAG.getLoad(partVT, dl, Root, srcAddr,
                            MachinePointerInfo(srcValue), false, false, false,
            // Keep param SDNodes in source order (see comment above).
            p.getNode()->setIROrder(idx + 1);
          InVals.push_back(p);
        if (vtparts.size() > 0)
      if (Ty->isVectorTy()) {
        EVT ObjectVT = getValueType(Ty);
        SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
        unsigned NumElts = ObjectVT.getVectorNumElements();
        assert(TLI->getNumRegisters(F->getContext(), ObjectVT) == NumElts &&
               "Vector was not scalarized");
        EVT EltVT = ObjectVT.getVectorElementType();
          // We only have one element, so just directly load it
          Value *SrcValue = Constant::getNullValue(PointerType::get(
              EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
          SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                                        DAG.getConstant(Ofst, getPointerTy()));
          SDValue P = DAG.getLoad(
              EltVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
              TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
            P.getNode()->setIROrder(idx + 1);
          if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
            P = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, P);
          InVals.push_back(P);
          Ofst += TD->getTypeAllocSize(EltVT.getTypeForEVT(F->getContext()));
        } else if (NumElts == 2) {
          // Load both elements as a single <2 x elt> vector, then split:
          // f32,f32 = load ...
          EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, 2);
          Value *SrcValue = Constant::getNullValue(PointerType::get(
              VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
          SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                                        DAG.getConstant(Ofst, getPointerTy()));
          SDValue P = DAG.getLoad(
              VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
              TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
            P.getNode()->setIROrder(idx + 1);

          SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
                                     DAG.getIntPtrConstant(0));
          SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
                                     DAG.getIntPtrConstant(1));
          if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
            Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0);
            Elt1 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt1);
          InVals.push_back(Elt0);
          InVals.push_back(Elt1);
          Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
          // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
          // vector will be expanded to a power of 2 elements, so we know we can
          // always round up to the next multiple of 4 when creating the vector
          // e.g.  4 elem => 1 ld.v4
          //       6 elem => 2 ld.v4
          //       8 elem => 2 ld.v4
          //      11 elem => 3 ld.v4
          unsigned VecSize = 4;
          if (EltVT.getSizeInBits() == 64) {
          EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
          for (unsigned i = 0; i < NumElts; i += VecSize) {
            Value *SrcValue = Constant::getNullValue(
                PointerType::get(VecVT.getTypeForEVT(F->getContext()),
                                 llvm::ADDRESS_SPACE_PARAM));
                DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                            DAG.getConstant(Ofst, getPointerTy()));
            SDValue P = DAG.getLoad(
                VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
                TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
              P.getNode()->setIROrder(idx + 1);

            // Extract only the elements that actually exist; the tail of the
            // last vector load may be padding.
            for (unsigned j = 0; j < VecSize; ++j) {
              if (i + j >= NumElts)
              SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
                                        DAG.getIntPtrConstant(j));
              if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
                Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
              InVals.push_back(Elt);
            Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
      EVT ObjectVT = getValueType(Ty);
      // If ABI, load from the param symbol
      SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
      Value *srcValue = Constant::getNullValue(PointerType::get(
          ObjectVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
      // Scalar case: extend-load when the register VT is wider than the
      // in-memory object VT, otherwise a plain load.
      if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
        ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
                                 ISD::SEXTLOAD : ISD::ZEXTLOAD;
        p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
                           MachinePointerInfo(srcValue), ObjectVT, false, false,
        TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
        p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
                        MachinePointerInfo(srcValue), false, false, false,
        TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
        p.getNode()->setIROrder(idx + 1);
      InVals.push_back(p);

    // Param has ByVal attribute
    // Return MoveParam(param symbol).
    // Ideally, the param symbol can be returned directly,
    // but when SDNode builder decides to use it in a CopyToReg(),
    // machine instruction fails because TargetExternalSymbol
    // (not lowered) is target dependent, and CopyToReg assumes
    // the source is lowered.
    EVT ObjectVT = getValueType(Ty);
    assert(ObjectVT == Ins[InsIdx].VT &&
           "Ins type did not match function type");
    SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
    SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
      p.getNode()->setIROrder(idx + 1);
    InVals.push_back(p);
      // NOTE(review): this branch converts the param pointer to the generic
      // address space via nvvm_ptr_local_to_gen — confirm the guarding
      // condition (not visible here) before relying on it.
      SDValue p2 = DAG.getNode(
          ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
          DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
      InVals.push_back(p2);

  // Clang will check explicit VarArg and issue error if any. However, Clang
  // will let code with
  // implicit var arg like f() pass. See bug 617733.
  // We treat this case as if the arg list is empty.
  // if (F.isVarArg()) {
  // assert(0 && "VarArg not supported yet!");

  if (!OutChains.empty())
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
// Lower outgoing return values: a scalarized vector return is recombined into
// StoreRetval/StoreRetvalV2/StoreRetvalV4 memory-intrinsic nodes; any other
// return is stored piecewise via StoreRetval at increasing byte offsets.
// Ends by emitting the NVPTXISD::RET_FLAG node.
NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function *F = MF.getFunction();
  Type *RetTy = F->getReturnType();
  const DataLayout *TD = getDataLayout();
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");

  if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) {
    // If we have a vector type, the OutVals array will be the scalarized
    // components and we have combine them into 1 or more vector stores.
    unsigned NumElts = VTy->getNumElements();
    assert(NumElts == Outs.size() && "Bad scalarization of return value");

    // const_cast can be removed in later LLVM versions
    EVT EltVT = getValueType(RetTy).getVectorElementType();
    // Sub-16-bit elements must be widened to i16 before being stored.
    bool NeedExtend = false;
    if (EltVT.getSizeInBits() < 16)
      SDValue StoreVal = OutVals[0];
      // We only have one element, so just directly store it
        StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
      SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
      Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
                                      DAG.getVTList(MVT::Other), &Ops[0], 3,
                                      EltVT, MachinePointerInfo());
    } else if (NumElts == 2) {
      // Two elements fit in a single v2 store at offset 0.
      SDValue StoreVal0 = OutVals[0];
      SDValue StoreVal1 = OutVals[1];
        StoreVal0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal0);
        StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1);
      SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
      Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
                                      DAG.getVTList(MVT::Other), &Ops[0], 4,
                                      EltVT, MachinePointerInfo());
      // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
      // vector will be expanded to a power of 2 elements, so we know we can
      // always round up to the next multiple of 4 when creating the vector
      // e.g.  4 elem => 1 st.v4
      //       6 elem => 2 st.v4
      //       8 elem => 2 st.v4
      //      11 elem => 3 st.v4
      unsigned VecSize = 4;
      if (OutVals[0].getValueType().getSizeInBits() == 64)
      unsigned Offset = 0;
          EVT::getVectorVT(F->getContext(), OutVals[0].getValueType(), VecSize);
      unsigned PerStoreOffset =
          TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));

      for (unsigned i = 0; i < NumElts; i += VecSize) {
        // Build one StoreRetvalV2/V4 per group; slots beyond NumElts are
        // padded with UNDEF.
        SmallVector<SDValue, 8> Ops;
        Ops.push_back(Chain);
        Ops.push_back(DAG.getConstant(Offset, MVT::i32));
        unsigned Opc = NVPTXISD::StoreRetvalV2;
        EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType();
        StoreVal = OutVals[i];
          StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
        Ops.push_back(StoreVal);
        if (i + 1 < NumElts) {
          StoreVal = OutVals[i + 1];
            StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
          StoreVal = DAG.getUNDEF(ExtendedVT);
        Ops.push_back(StoreVal);
          Opc = NVPTXISD::StoreRetvalV4;
          if (i + 2 < NumElts) {
            StoreVal = OutVals[i + 2];
                DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
            StoreVal = DAG.getUNDEF(ExtendedVT);
          Ops.push_back(StoreVal);
          if (i + 3 < NumElts) {
            StoreVal = OutVals[i + 3];
                DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
            StoreVal = DAG.getUNDEF(ExtendedVT);
          Ops.push_back(StoreVal);
        // Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size());
            DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), &Ops[0],
                                    Ops.size(), EltVT, MachinePointerInfo());
        Offset += PerStoreOffset;
    SmallVector<EVT, 16> ValVTs;
    // const_cast is necessary since we are still using an LLVM version from
    // before the type system re-write.
    ComputePTXValueVTs(*this, RetTy, ValVTs);
    assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");

    // Non-vector path: store each decomposed part at its running byte offset.
    unsigned SizeSoFar = 0;
    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
      SDValue theVal = OutVals[i];
      EVT TheValType = theVal.getValueType();
      unsigned numElems = 1;
      if (TheValType.isVector())
        numElems = TheValType.getVectorNumElements();
      for (unsigned j = 0, je = numElems; j != je; ++j) {
        SDValue TmpVal = theVal;
        if (TheValType.isVector())
          TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                               TheValType.getVectorElementType(), TmpVal,
                               DAG.getIntPtrConstant(j));
        EVT TheStoreType = ValVTs[i];
        if (RetTy->isIntegerTy() &&
            TD->getTypeAllocSizeInBits(RetTy) < 32) {
          // The following zero-extension is for integer types only, and
          // specifically not for aggregates.
          TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);
          TheStoreType = MVT::i32;
        else if (TmpVal.getValueType().getSizeInBits() < 16)
          TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);

        SDValue Ops[] = { Chain, DAG.getConstant(SizeSoFar, MVT::i32), TmpVal };
        Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
                                        DAG.getVTList(MVT::Other), &Ops[0],
                                        MachinePointerInfo());
        // Advance by the element store size for vectors, otherwise by the
        // whole part's store size.
        if(TheValType.isVector())
            TheStoreType.getVectorElementType().getStoreSizeInBits() / 8;
          SizeSoFar += TheStoreType.getStoreSizeInBits()/8;

  return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
// Lower one operand of an inline-asm constraint into Ops. NVPTX defines no
// target-specific operand constraints of its own: multi-letter constraints
// are rejected and single-letter ones are delegated to the generic
// TargetLowering implementation.
// NOTE(review): lines 1982-1983 are elided in this listing — presumably the
// early return for the multi-letter case; confirm against the full source.
1978 void NVPTXTargetLowering::LowerAsmOperandForConstraint(
1979 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
1980 SelectionDAG &DAG) const {
// Constraints longer than one character are not handled here.
1981 if (Constraint.length() > 1)
// Fall back to the common constraint-lowering logic.
1984 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
1987 // NVPTX supports vectors of legal types of any length in intrinsics because
1988 // the NVPTX-specific type legalizer
1989 // will legalize them to a PTX-supported length.
// Return true if VT may appear in an NVPTX intrinsic signature.
// NOTE(review): the return statements are elided in this listing; the visible
// structure accepts legal scalar types, and vector types whose element type
// is legal (the NVPTX type legalizer later splits them to a supported width).
1990 bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
// Already-legal scalar types are trivially supported.
1991 if (isTypeLegal(VT))
// Vectors are checked element-wise: a legal element type is sufficient.
1993 if (VT.isVector()) {
1994 MVT eVT = VT.getVectorElementType();
1995 if (isTypeLegal(eVT))
// Map an NVVM texture-read intrinsic ID to the corresponding NVPTXISD
// texture opcode. The intrinsic/opcode names encode the texture geometry
// (1D/2D/3D, optionally array), the sampling variant (plain, explicit mip
// level, explicit gradient), and the result/coordinate types (v4f32/v4i32
// results, i32 or f32 coordinates).
2001 static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
2002 switch (Intrinsic) {
// 1D textures.
2006 case Intrinsic::nvvm_tex_1d_v4f32_i32:
2007 return NVPTXISD::Tex1DFloatI32;
2008 case Intrinsic::nvvm_tex_1d_v4f32_f32:
2009 return NVPTXISD::Tex1DFloatFloat;
2010 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
2011 return NVPTXISD::Tex1DFloatFloatLevel;
2012 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
2013 return NVPTXISD::Tex1DFloatFloatGrad;
2014 case Intrinsic::nvvm_tex_1d_v4i32_i32:
2015 return NVPTXISD::Tex1DI32I32;
2016 case Intrinsic::nvvm_tex_1d_v4i32_f32:
2017 return NVPTXISD::Tex1DI32Float;
2018 case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
2019 return NVPTXISD::Tex1DI32FloatLevel;
2020 case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
2021 return NVPTXISD::Tex1DI32FloatGrad;
// 1D array textures.
2023 case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
2024 return NVPTXISD::Tex1DArrayFloatI32;
2025 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
2026 return NVPTXISD::Tex1DArrayFloatFloat;
2027 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
2028 return NVPTXISD::Tex1DArrayFloatFloatLevel;
2029 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
2030 return NVPTXISD::Tex1DArrayFloatFloatGrad;
2031 case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
2032 return NVPTXISD::Tex1DArrayI32I32;
2033 case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
2034 return NVPTXISD::Tex1DArrayI32Float;
2035 case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
2036 return NVPTXISD::Tex1DArrayI32FloatLevel;
2037 case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
2038 return NVPTXISD::Tex1DArrayI32FloatGrad;
// 2D textures.
2040 case Intrinsic::nvvm_tex_2d_v4f32_i32:
2041 return NVPTXISD::Tex2DFloatI32;
2042 case Intrinsic::nvvm_tex_2d_v4f32_f32:
2043 return NVPTXISD::Tex2DFloatFloat;
2044 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
2045 return NVPTXISD::Tex2DFloatFloatLevel;
2046 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
2047 return NVPTXISD::Tex2DFloatFloatGrad;
2048 case Intrinsic::nvvm_tex_2d_v4i32_i32:
2049 return NVPTXISD::Tex2DI32I32;
2050 case Intrinsic::nvvm_tex_2d_v4i32_f32:
2051 return NVPTXISD::Tex2DI32Float;
2052 case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
2053 return NVPTXISD::Tex2DI32FloatLevel;
2054 case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
2055 return NVPTXISD::Tex2DI32FloatGrad;
// 2D array textures.
2057 case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
2058 return NVPTXISD::Tex2DArrayFloatI32;
2059 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
2060 return NVPTXISD::Tex2DArrayFloatFloat;
2061 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
2062 return NVPTXISD::Tex2DArrayFloatFloatLevel;
2063 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
2064 return NVPTXISD::Tex2DArrayFloatFloatGrad;
2065 case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
2066 return NVPTXISD::Tex2DArrayI32I32;
2067 case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
2068 return NVPTXISD::Tex2DArrayI32Float;
2069 case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
2070 return NVPTXISD::Tex2DArrayI32FloatLevel;
2071 case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
2072 return NVPTXISD::Tex2DArrayI32FloatGrad;
// 3D textures.
2074 case Intrinsic::nvvm_tex_3d_v4f32_i32:
2075 return NVPTXISD::Tex3DFloatI32;
2076 case Intrinsic::nvvm_tex_3d_v4f32_f32:
2077 return NVPTXISD::Tex3DFloatFloat;
2078 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
2079 return NVPTXISD::Tex3DFloatFloatLevel;
2080 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
2081 return NVPTXISD::Tex3DFloatFloatGrad;
2082 case Intrinsic::nvvm_tex_3d_v4i32_i32:
2083 return NVPTXISD::Tex3DI32I32;
2084 case Intrinsic::nvvm_tex_3d_v4i32_f32:
2085 return NVPTXISD::Tex3DI32Float;
2086 case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
2087 return NVPTXISD::Tex3DI32FloatLevel;
2088 case Intrinsic::nvvm_tex_3d_grad_v4i32_f32:
2089 return NVPTXISD::Tex3DI32FloatGrad;
// Map an NVVM surface-load (suld, trap clamp mode) intrinsic ID to the
// corresponding NVPTXISD surface opcode. Names encode the surface geometry
// (1D/2D/3D, optionally array), element width (i8/i16/i32) and vector
// arity (scalar, v2, v4).
2093 static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
2094 switch (Intrinsic) {
// 1D surfaces.
2097 case Intrinsic::nvvm_suld_1d_i8_trap:
2098 return NVPTXISD::Suld1DI8Trap;
2099 case Intrinsic::nvvm_suld_1d_i16_trap:
2100 return NVPTXISD::Suld1DI16Trap;
2101 case Intrinsic::nvvm_suld_1d_i32_trap:
2102 return NVPTXISD::Suld1DI32Trap;
2103 case Intrinsic::nvvm_suld_1d_v2i8_trap:
2104 return NVPTXISD::Suld1DV2I8Trap;
2105 case Intrinsic::nvvm_suld_1d_v2i16_trap:
2106 return NVPTXISD::Suld1DV2I16Trap;
2107 case Intrinsic::nvvm_suld_1d_v2i32_trap:
2108 return NVPTXISD::Suld1DV2I32Trap;
2109 case Intrinsic::nvvm_suld_1d_v4i8_trap:
2110 return NVPTXISD::Suld1DV4I8Trap;
2111 case Intrinsic::nvvm_suld_1d_v4i16_trap:
2112 return NVPTXISD::Suld1DV4I16Trap;
2113 case Intrinsic::nvvm_suld_1d_v4i32_trap:
2114 return NVPTXISD::Suld1DV4I32Trap;
// 1D array surfaces.
2115 case Intrinsic::nvvm_suld_1d_array_i8_trap:
2116 return NVPTXISD::Suld1DArrayI8Trap;
2117 case Intrinsic::nvvm_suld_1d_array_i16_trap:
2118 return NVPTXISD::Suld1DArrayI16Trap;
2119 case Intrinsic::nvvm_suld_1d_array_i32_trap:
2120 return NVPTXISD::Suld1DArrayI32Trap;
2121 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
2122 return NVPTXISD::Suld1DArrayV2I8Trap;
2123 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
2124 return NVPTXISD::Suld1DArrayV2I16Trap;
2125 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
2126 return NVPTXISD::Suld1DArrayV2I32Trap;
2127 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
2128 return NVPTXISD::Suld1DArrayV4I8Trap;
2129 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
2130 return NVPTXISD::Suld1DArrayV4I16Trap;
2131 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
2132 return NVPTXISD::Suld1DArrayV4I32Trap;
// 2D surfaces.
2133 case Intrinsic::nvvm_suld_2d_i8_trap:
2134 return NVPTXISD::Suld2DI8Trap;
2135 case Intrinsic::nvvm_suld_2d_i16_trap:
2136 return NVPTXISD::Suld2DI16Trap;
2137 case Intrinsic::nvvm_suld_2d_i32_trap:
2138 return NVPTXISD::Suld2DI32Trap;
2139 case Intrinsic::nvvm_suld_2d_v2i8_trap:
2140 return NVPTXISD::Suld2DV2I8Trap;
2141 case Intrinsic::nvvm_suld_2d_v2i16_trap:
2142 return NVPTXISD::Suld2DV2I16Trap;
2143 case Intrinsic::nvvm_suld_2d_v2i32_trap:
2144 return NVPTXISD::Suld2DV2I32Trap;
2145 case Intrinsic::nvvm_suld_2d_v4i8_trap:
2146 return NVPTXISD::Suld2DV4I8Trap;
2147 case Intrinsic::nvvm_suld_2d_v4i16_trap:
2148 return NVPTXISD::Suld2DV4I16Trap;
2149 case Intrinsic::nvvm_suld_2d_v4i32_trap:
2150 return NVPTXISD::Suld2DV4I32Trap;
// 2D array surfaces.
2151 case Intrinsic::nvvm_suld_2d_array_i8_trap:
2152 return NVPTXISD::Suld2DArrayI8Trap;
2153 case Intrinsic::nvvm_suld_2d_array_i16_trap:
2154 return NVPTXISD::Suld2DArrayI16Trap;
2155 case Intrinsic::nvvm_suld_2d_array_i32_trap:
2156 return NVPTXISD::Suld2DArrayI32Trap;
2157 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
2158 return NVPTXISD::Suld2DArrayV2I8Trap;
2159 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
2160 return NVPTXISD::Suld2DArrayV2I16Trap;
2161 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
2162 return NVPTXISD::Suld2DArrayV2I32Trap;
2163 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
2164 return NVPTXISD::Suld2DArrayV4I8Trap;
2165 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
2166 return NVPTXISD::Suld2DArrayV4I16Trap;
2167 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
2168 return NVPTXISD::Suld2DArrayV4I32Trap;
// 3D surfaces.
2169 case Intrinsic::nvvm_suld_3d_i8_trap:
2170 return NVPTXISD::Suld3DI8Trap;
2171 case Intrinsic::nvvm_suld_3d_i16_trap:
2172 return NVPTXISD::Suld3DI16Trap;
2173 case Intrinsic::nvvm_suld_3d_i32_trap:
2174 return NVPTXISD::Suld3DI32Trap;
2175 case Intrinsic::nvvm_suld_3d_v2i8_trap:
2176 return NVPTXISD::Suld3DV2I8Trap;
2177 case Intrinsic::nvvm_suld_3d_v2i16_trap:
2178 return NVPTXISD::Suld3DV2I16Trap;
2179 case Intrinsic::nvvm_suld_3d_v2i32_trap:
2180 return NVPTXISD::Suld3DV2I32Trap;
2181 case Intrinsic::nvvm_suld_3d_v4i8_trap:
2182 return NVPTXISD::Suld3DV4I8Trap;
2183 case Intrinsic::nvvm_suld_3d_v4i16_trap:
2184 return NVPTXISD::Suld3DV4I16Trap;
2185 case Intrinsic::nvvm_suld_3d_v4i32_trap:
2186 return NVPTXISD::Suld3DV4I32Trap;
2190 // llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
2192 // because we need the information that is only available in the "Value" type
2194 // pointer. In particular, the address space information.
// Fill in Info (opcode, memory VT, pointer operand, read/write flags) for
// intrinsics that touch memory, so the SelectionDAG builder can attach a
// MachineMemOperand. Returns true when Intrinsic is handled.
// NOTE(review): several lines (e.g. the Info.offset/vol/align assignments
// between ptrVal and readMem, and the return statements) are elided in this
// listing — confirm against the full source.
2195 bool NVPTXTargetLowering::getTgtMemIntrinsic(
2196 IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
2197 switch (Intrinsic) {
// Atomic float add: reads and writes the pointed-to f32.
2201 case Intrinsic::nvvm_atomic_load_add_f32:
2202 Info.opc = ISD::INTRINSIC_W_CHAIN;
2203 Info.memVT = MVT::f32;
2204 Info.ptrVal = I.getArgOperand(0);
2207 Info.readMem = true;
2208 Info.writeMem = true;
// Atomic inc/dec: read-modify-write of the pointed-to i32.
2212 case Intrinsic::nvvm_atomic_load_inc_32:
2213 case Intrinsic::nvvm_atomic_load_dec_32:
2214 Info.opc = ISD::INTRINSIC_W_CHAIN;
2215 Info.memVT = MVT::i32;
2216 Info.ptrVal = I.getArgOperand(0);
2219 Info.readMem = true;
2220 Info.writeMem = true;
// ldu (load-uniform) from global memory: read-only.
2224 case Intrinsic::nvvm_ldu_global_i:
2225 case Intrinsic::nvvm_ldu_global_f:
2226 case Intrinsic::nvvm_ldu_global_p:
2228 Info.opc = ISD::INTRINSIC_W_CHAIN;
// Integer and pointer variants take the memory VT from the call's
// result type; the float variant defaults to f32 below.
2229 if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
2230 Info.memVT = getValueType(I.getType());
2231 else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
2232 Info.memVT = getValueType(I.getType());
2234 Info.memVT = MVT::f32;
2235 Info.ptrVal = I.getArgOperand(0);
2238 Info.readMem = true;
2239 Info.writeMem = false;
// Texture reads returning v4f32: read-only, no IR-level pointer operand
// (the texture handle is not a generic pointer).
2243 case Intrinsic::nvvm_tex_1d_v4f32_i32:
2244 case Intrinsic::nvvm_tex_1d_v4f32_f32:
2245 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
2246 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
2247 case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
2248 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
2249 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
2250 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
2251 case Intrinsic::nvvm_tex_2d_v4f32_i32:
2252 case Intrinsic::nvvm_tex_2d_v4f32_f32:
2253 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
2254 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
2255 case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
2256 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
2257 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
2258 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
2259 case Intrinsic::nvvm_tex_3d_v4f32_i32:
2260 case Intrinsic::nvvm_tex_3d_v4f32_f32:
2261 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
2262 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32: {
2263 Info.opc = getOpcForTextureInstr(Intrinsic);
2264 Info.memVT = MVT::f32;
2265 Info.ptrVal = nullptr;
2268 Info.readMem = true;
2269 Info.writeMem = false;
// Texture reads returning v4i32: same shape as above with i32 memVT.
2273 case Intrinsic::nvvm_tex_1d_v4i32_i32:
2274 case Intrinsic::nvvm_tex_1d_v4i32_f32:
2275 case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
2276 case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
2277 case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
2278 case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
2279 case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
2280 case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
2281 case Intrinsic::nvvm_tex_2d_v4i32_i32:
2282 case Intrinsic::nvvm_tex_2d_v4i32_f32:
2283 case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
2284 case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
2285 case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
2286 case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
2287 case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
2288 case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
2289 case Intrinsic::nvvm_tex_3d_v4i32_i32:
2290 case Intrinsic::nvvm_tex_3d_v4i32_f32:
2291 case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
2292 case Intrinsic::nvvm_tex_3d_grad_v4i32_f32: {
2293 Info.opc = getOpcForTextureInstr(Intrinsic);
2294 Info.memVT = MVT::i32;
2295 Info.ptrVal = nullptr;
2298 Info.readMem = true;
2299 Info.writeMem = false;
// Surface loads of i8 elements: read-only.
2303 case Intrinsic::nvvm_suld_1d_i8_trap:
2304 case Intrinsic::nvvm_suld_1d_v2i8_trap:
2305 case Intrinsic::nvvm_suld_1d_v4i8_trap:
2306 case Intrinsic::nvvm_suld_1d_array_i8_trap:
2307 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
2308 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
2309 case Intrinsic::nvvm_suld_2d_i8_trap:
2310 case Intrinsic::nvvm_suld_2d_v2i8_trap:
2311 case Intrinsic::nvvm_suld_2d_v4i8_trap:
2312 case Intrinsic::nvvm_suld_2d_array_i8_trap:
2313 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
2314 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
2315 case Intrinsic::nvvm_suld_3d_i8_trap:
2316 case Intrinsic::nvvm_suld_3d_v2i8_trap:
2317 case Intrinsic::nvvm_suld_3d_v4i8_trap: {
2318 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2319 Info.memVT = MVT::i8;
2320 Info.ptrVal = nullptr;
2323 Info.readMem = true;
2324 Info.writeMem = false;
// Surface loads of i16 elements: read-only.
2328 case Intrinsic::nvvm_suld_1d_i16_trap:
2329 case Intrinsic::nvvm_suld_1d_v2i16_trap:
2330 case Intrinsic::nvvm_suld_1d_v4i16_trap:
2331 case Intrinsic::nvvm_suld_1d_array_i16_trap:
2332 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
2333 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
2334 case Intrinsic::nvvm_suld_2d_i16_trap:
2335 case Intrinsic::nvvm_suld_2d_v2i16_trap:
2336 case Intrinsic::nvvm_suld_2d_v4i16_trap:
2337 case Intrinsic::nvvm_suld_2d_array_i16_trap:
2338 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
2339 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
2340 case Intrinsic::nvvm_suld_3d_i16_trap:
2341 case Intrinsic::nvvm_suld_3d_v2i16_trap:
2342 case Intrinsic::nvvm_suld_3d_v4i16_trap: {
2343 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2344 Info.memVT = MVT::i16;
2345 Info.ptrVal = nullptr;
2348 Info.readMem = true;
2349 Info.writeMem = false;
// Surface loads of i32 elements: read-only.
2353 case Intrinsic::nvvm_suld_1d_i32_trap:
2354 case Intrinsic::nvvm_suld_1d_v2i32_trap:
2355 case Intrinsic::nvvm_suld_1d_v4i32_trap:
2356 case Intrinsic::nvvm_suld_1d_array_i32_trap:
2357 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
2358 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
2359 case Intrinsic::nvvm_suld_2d_i32_trap:
2360 case Intrinsic::nvvm_suld_2d_v2i32_trap:
2361 case Intrinsic::nvvm_suld_2d_v4i32_trap:
2362 case Intrinsic::nvvm_suld_2d_array_i32_trap:
2363 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
2364 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
2365 case Intrinsic::nvvm_suld_3d_i32_trap:
2366 case Intrinsic::nvvm_suld_3d_v2i32_trap:
2367 case Intrinsic::nvvm_suld_3d_v4i32_trap: {
2368 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2369 Info.memVT = MVT::i32;
2370 Info.ptrVal = nullptr;
2373 Info.readMem = true;
2374 Info.writeMem = false;
2383 /// isLegalAddressingMode - Return true if the addressing mode represented
2384 /// by AM is legal for this target, for a load/store of the specified type.
2385 /// Used to guide target specific optimizations, like loop strength reduction
2386 /// (LoopStrengthReduce.cpp) and memory optimization for address mode
2387 /// (CodeGenPrepare.cpp)
// NOTE(review): the second parameter (the memory access type) and several
// return statements are elided in this listing — confirm against the full
// source. PTX addressing only supports [reg], [reg+imm] and [imm].
2388 bool NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
2391 // AddrMode - This represents an addressing mode of:
2392 // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2394 // The legal address modes are
// A global base may not be combined with an offset, register or scale.
2401 if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)
2407 case 0: // "r", "r+i" or "i" is allowed
2410 if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
2412 // Otherwise we have r+i.
2415 // No scale > 1 is allowed
2421 //===----------------------------------------------------------------------===//
2422 // NVPTX Inline Assembly Support
2423 //===----------------------------------------------------------------------===//
2425 /// getConstraintType - Given a constraint letter, return the type of
2426 /// constraint it is for this target.
// NOTE(review): the individual case labels are elided in this listing;
// the visible structure classifies NVPTX's single-letter register
// constraints as C_RegisterClass and defers everything else to the base.
2427 NVPTXTargetLowering::ConstraintType
2428 NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
2429 if (Constraint.size() == 1) {
2430 switch (Constraint[0]) {
2441 return C_RegisterClass;
// Unknown or multi-letter constraints: common handling.
2444 return TargetLowering::getConstraintType(Constraint);
// Map a single-letter inline-asm register constraint to an NVPTX register
// class (register number 0 means "any register in the class").
// NOTE(review): the case labels are elided in this listing; each visible
// return pairs one constraint letter with one register class — confirm the
// letter-to-class mapping against the full source.
2447 std::pair<unsigned, const TargetRegisterClass *>
2448 NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
2450 if (Constraint.size() == 1) {
2451 switch (Constraint[0]) {
2453 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
2455 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
2457 return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
2460 return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
2462 return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
2464 return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
// Anything unrecognized falls back to the generic implementation.
2467 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
2470 /// getFunctionAlignment - Return the Log2 alignment of this function.
// The alignment is independent of the particular function (parameter is
// unnamed). NOTE(review): the returned constant is elided in this listing.
2471 unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
2475 /// ReplaceLoadVector - Convert vector loads into multi-output scalar loads.
// Replaces a vector LOAD node with a single NVPTX LoadV2/LoadV4 target node
// that produces one scalar result per element (plus a chain), then rebuilds
// the original vector value with BUILD_VECTOR. Pushes the vector and the
// chain into Results for the legalizer.
2476 static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
2477 SmallVectorImpl<SDValue> &Results) {
2478 EVT ResVT = N->getValueType(0);
2481 assert(ResVT.isVector() && "Vector load must have vector type");
2483 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
2484 // legal. We can (and should) split that into 2 loads of <2 x double> here
2485 // but I'm leaving that as a TODO for now.
2486 assert(ResVT.isSimple() && "Can only handle simple types");
2487 switch (ResVT.getSimpleVT().SimpleTy) {
2500 // This is a "native" vector type
2504 EVT EltVT = ResVT.getVectorElementType();
2505 unsigned NumElts = ResVT.getVectorNumElements();
2507 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
2508 // Therefore, we must ensure the type is legal.  For i1 and i8, we set the
2509 // loaded type to i16 and propagate the "real" type as the memory type.
2510 bool NeedTrunc = false;
2511 if (EltVT.getSizeInBits() < 16) {
// Select LoadV2 or LoadV4 and build the matching result-VT list
// (NumElts scalar results followed by the chain).
2516 unsigned Opcode = 0;
2523 Opcode = NVPTXISD::LoadV2;
2524 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
2527 Opcode = NVPTXISD::LoadV4;
2528 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
2529 LdResVTs = DAG.getVTList(ListVTs);
2534 SmallVector<SDValue, 8> OtherOps;
2536 // Copy regular operands
2537 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
2538 OtherOps.push_back(N->getOperand(i));
2540 LoadSDNode *LD = cast<LoadSDNode>(N);
2542 // The select routine does not have access to the LoadSDNode instance, so
2543 // pass along the extension information
2544 OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));
// Create the target load; the memory VT keeps the original element type.
2546 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, &OtherOps[0],
2547 OtherOps.size(), LD->getMemoryVT(),
2548 LD->getMemOperand());
// Collect the per-element results, truncating back to the real element
// type when we widened i1/i8 to i16 above.
2550 SmallVector<SDValue, 4> ScalarRes;
2552 for (unsigned i = 0; i < NumElts; ++i) {
2553 SDValue Res = NewLD.getValue(i);
2555 Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
2556 ScalarRes.push_back(Res);
// Result NumElts of the new node is the chain.
2559 SDValue LoadChain = NewLD.getValue(NumElts);
2561 SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
2563 Results.push_back(BuildVec);
2564 Results.push_back(LoadChain);
// Custom-legalize INTRINSIC_W_CHAIN nodes for the ldg/ldu global-load
// intrinsics. Vector results become an LDGV2/LDGV4 or LDUV2/LDUV4 target
// node with per-element scalar results rebuilt via BUILD_VECTOR; scalar i8
// results are widened to i16 (with the memory type kept as i8) and
// truncated back. Pushes the value and chain into Results.
2567 static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
2568 SmallVectorImpl<SDValue> &Results) {
2569 SDValue Chain = N->getOperand(0);
2570 SDValue Intrin = N->getOperand(1);
2573 // Get the intrinsic ID
2574 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
2578 case Intrinsic::nvvm_ldg_global_i:
2579 case Intrinsic::nvvm_ldg_global_f:
2580 case Intrinsic::nvvm_ldg_global_p:
2581 case Intrinsic::nvvm_ldu_global_i:
2582 case Intrinsic::nvvm_ldu_global_f:
2583 case Intrinsic::nvvm_ldu_global_p: {
2584 EVT ResVT = N->getValueType(0);
2586 if (ResVT.isVector()) {
2589 unsigned NumElts = ResVT.getVectorNumElements();
2590 EVT EltVT = ResVT.getVectorElementType();
2592 // Since LDU/LDG are target nodes, we cannot rely on DAG type
2594 // Therefore, we must ensure the type is legal.  For i1 and i8, we set the
2595 // loaded type to i16 and propagate the "real" type as the memory type.
2596 bool NeedTrunc = false;
2597 if (EltVT.getSizeInBits() < 16) {
// Pick the V2 or V4 flavor of LDG/LDU based on the element count and
// whether the source intrinsic was ldg or ldu.
2602 unsigned Opcode = 0;
2612 case Intrinsic::nvvm_ldg_global_i:
2613 case Intrinsic::nvvm_ldg_global_f:
2614 case Intrinsic::nvvm_ldg_global_p:
2615 Opcode = NVPTXISD::LDGV2;
2617 case Intrinsic::nvvm_ldu_global_i:
2618 case Intrinsic::nvvm_ldu_global_f:
2619 case Intrinsic::nvvm_ldu_global_p:
2620 Opcode = NVPTXISD::LDUV2;
2623 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
2629 case Intrinsic::nvvm_ldg_global_i:
2630 case Intrinsic::nvvm_ldg_global_f:
2631 case Intrinsic::nvvm_ldg_global_p:
2632 Opcode = NVPTXISD::LDGV4;
2634 case Intrinsic::nvvm_ldu_global_i:
2635 case Intrinsic::nvvm_ldu_global_f:
2636 case Intrinsic::nvvm_ldu_global_p:
2637 Opcode = NVPTXISD::LDUV4;
2640 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
2641 LdResVTs = DAG.getVTList(ListVTs);
2646 SmallVector<SDValue, 8> OtherOps;
2648 // Copy regular operands
2650 OtherOps.push_back(Chain); // Chain
2651 // Skip operand 1 (intrinsic ID)
2653 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
2654 OtherOps.push_back(N->getOperand(i));
2656 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
// Build the target load, preserving the original memory VT and operand.
2658 SDValue NewLD = DAG.getMemIntrinsicNode(
2659 Opcode, DL, LdResVTs, &OtherOps[0], OtherOps.size(),
2660 MemSD->getMemoryVT(), MemSD->getMemOperand());
// Gather element results, truncating back when we widened above.
2662 SmallVector<SDValue, 4> ScalarRes;
2664 for (unsigned i = 0; i < NumElts; ++i) {
2665 SDValue Res = NewLD.getValue(i);
2668 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
2669 ScalarRes.push_back(Res);
2672 SDValue LoadChain = NewLD.getValue(NumElts);
2675 DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
2677 Results.push_back(BuildVec);
2678 Results.push_back(LoadChain);
// Scalar path: only i8 needs custom handling here.
2681 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
2682 "Custom handling of non-i8 ldu/ldg?");
2684 // Just copy all operands as-is
2685 SmallVector<SDValue, 4> Ops;
2686 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
2687 Ops.push_back(N->getOperand(i));
2689 // Force output to i16
2690 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
2692 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
2694 // We make sure the memory type is i8, which will be used during isel
2695 // to select the proper instruction.
2697 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, &Ops[0],
2698 Ops.size(), MVT::i8, MemSD->getMemOperand());
// Truncate the widened i16 result back to i8 for the users.
2700 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
2701 NewLD.getValue(0)));
2702 Results.push_back(NewLD.getValue(1));
// Entry point for custom result-type legalization: dispatch to the helper
// for the node kinds NVPTX marks as Custom (vector loads and the
// ldg/ldu intrinsic chains); anything else is a fatal error.
2708 void NVPTXTargetLowering::ReplaceNodeResults(
2709 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
2710 switch (N->getOpcode()) {
// Default: nothing should reach here without a handler below.
2712 report_fatal_error("Unhandled custom legalization");
2714 ReplaceLoadVector(N, DAG, Results);
2716 case ISD::INTRINSIC_W_CHAIN:
2717 ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
2722 // Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
// Out-of-line definition of a virtual method anchors vtable emission to
// this translation unit instead of duplicating it in every user.
2723 void NVPTXSection::anchor() {}
2725 NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
2729 delete ReadOnlySection;
2731 delete StaticCtorSection;
2732 delete StaticDtorSection;
2734 delete EHFrameSection;
2735 delete DwarfAbbrevSection;
2736 delete DwarfInfoSection;
2737 delete DwarfLineSection;
2738 delete DwarfFrameSection;
2739 delete DwarfPubTypesSection;
2740 delete DwarfDebugInlineSection;
2741 delete DwarfStrSection;
2742 delete DwarfLocSection;
2743 delete DwarfARangesSection;
2744 delete DwarfRangesSection;
2745 delete DwarfMacroInfoSection;