2 // The LLVM Compiler Infrastructure
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
7 //===----------------------------------------------------------------------===//
9 // This file defines the interfaces that NVPTX uses to lower LLVM code into a
12 //===----------------------------------------------------------------------===//
14 #include "NVPTXISelLowering.h"
16 #include "NVPTXTargetMachine.h"
17 #include "NVPTXTargetObjectFile.h"
18 #include "NVPTXUtilities.h"
19 #include "llvm/CodeGen/Analysis.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/GlobalValue.h"
28 #include "llvm/IR/IntrinsicInst.h"
29 #include "llvm/IR/Intrinsics.h"
30 #include "llvm/IR/Module.h"
31 #include "llvm/MC/MCSectionELF.h"
32 #include "llvm/Support/CallSite.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/raw_ostream.h"
40 #define DEBUG_TYPE "nvptx-lower"
// Monotonically increasing counter giving each lowered call site a unique
// index; used below (getPrototype/LowerCall) to name the per-call
// "prototype_<N>" .callprototype label required for indirect calls.
44 static unsigned int uniqueCallSite = 0;
// Command-line flag selecting register-pressure scheduling instead of the
// default source-order scheduling (consumed in the constructor below).
// NOTE(review): "pressue" in the help string is a typo for "pressure" in the
// upstream source; it is user-visible text, left untouched here.
46 static cl::opt<bool> sched4reg(
48 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
// Predicate used when registering custom load/store handling in the
// constructor: presumably returns true for the fixed set of vector MVTs that
// PTX can load/store directly (the switch body is elided in this excerpt —
// TODO confirm against the full file).
50 static bool IsPTXVectorType(MVT VT) {
51 switch (VT.SimpleTy) {
70 /// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
71 /// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
72 /// into their primitive components.
73 /// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
74 /// same number of types as the Ins/Outs arrays in LowerFormalArguments,
75 /// LowerCall, and LowerReturn.
76 static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
77 SmallVectorImpl<EVT> &ValueVTs,
78 SmallVectorImpl<uint64_t> *Offsets = 0,
79 uint64_t StartingOffset = 0) {
// First gather the coarse value types and byte offsets via the generic
// helper, then flatten any vector entries below.
80 SmallVector<EVT, 16> TempVTs;
81 SmallVector<uint64_t, 16> TempOffsets;
83 ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
84 for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
86 uint64_t Off = TempOffsets[i];
// Vector entry: emit one EVT per element, with each element's offset
// spaced by the element's store size from the entry's base offset.
88 for (unsigned j = 0, je = VT.getVectorNumElements(); j != je; ++j) {
89 ValueVTs.push_back(VT.getVectorElementType());
// Offsets is an optional out-parameter (may be null).
91 Offsets->push_back(Off+j*VT.getVectorElementType().getStoreSize());
// Scalar entry: passed through unchanged.
94 ValueVTs.push_back(VT);
96 Offsets->push_back(Off);
101 // NVPTXTargetLowering Constructor.
// Sets up type legality, operation actions, and register classes for the
// NVPTX target. Subtarget-dependent choices (rotates) key off nvptxSubtarget.
102 NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
103 : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM),
104 nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
106 // always lower memset, memcpy, and memmove intrinsics to load/store
107 // instructions, rather
108 // than generating calls to memset, memcpy or memmove
// Effectively-unbounded store counts force inline expansion of the mem*
// intrinsics (PTX has no libc to call into).
109 MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
110 MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
111 MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
113 setBooleanContents(ZeroOrNegativeOneBooleanContent);
115 // Jump is Expensive. Don't create extra control flow for 'and', 'or'
116 // condition branches.
117 setJumpIsExpensive(true);
119 // By default, use the Source scheduling
// (the register-pressure preference is selected by the sched4reg
// command-line flag — the guarding 'if' is elided in this excerpt).
121 setSchedulingPreference(Sched::RegPressure);
123 setSchedulingPreference(Sched::Source);
// Map each legal scalar type to its NVPTX virtual register class.
125 addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
126 addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
127 addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
128 addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
129 addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
130 addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
132 // Operations not directly supported by NVPTX.
133 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
134 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
135 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
136 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
137 setOperationAction(ISD::BR_CC, MVT::i8, Expand);
138 setOperationAction(ISD::BR_CC, MVT::i16, Expand);
139 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
140 setOperationAction(ISD::BR_CC, MVT::i64, Expand);
141 // Some SIGN_EXTEND_INREG can be done using cvt instruction.
142 // For others we will expand to a SHL/SRA pair.
143 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
144 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
145 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
146 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
147 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
// 64/32-bit rotates are legal only when the subtarget provides the
// corresponding funnel-shift/rotate support; otherwise expand.
149 if (nvptxSubtarget.hasROT64()) {
150 setOperationAction(ISD::ROTL, MVT::i64, Legal);
151 setOperationAction(ISD::ROTR, MVT::i64, Legal);
153 setOperationAction(ISD::ROTL, MVT::i64, Expand);
154 setOperationAction(ISD::ROTR, MVT::i64, Expand);
156 if (nvptxSubtarget.hasROT32()) {
157 setOperationAction(ISD::ROTL, MVT::i32, Legal);
158 setOperationAction(ISD::ROTR, MVT::i32, Legal);
160 setOperationAction(ISD::ROTL, MVT::i32, Expand);
161 setOperationAction(ISD::ROTR, MVT::i32, Expand);
// Sub-32-bit rotates and all byte swaps are always expanded.
164 setOperationAction(ISD::ROTL, MVT::i16, Expand);
165 setOperationAction(ISD::ROTR, MVT::i16, Expand);
166 setOperationAction(ISD::ROTL, MVT::i8, Expand);
167 setOperationAction(ISD::ROTR, MVT::i8, Expand);
168 setOperationAction(ISD::BSWAP, MVT::i16, Expand);
169 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
170 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
172 // Indirect branch is not supported.
173 // This also disables Jump Table creation.
174 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
175 setOperationAction(ISD::BRIND, MVT::Other, Expand);
177 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
178 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
180 // We want to legalize constant related memmove and memcopy
182 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
184 // Turn FP extload into load/fextend
185 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
186 // Turn FP truncstore into trunc + store.
187 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
189 // PTX does not support load / store predicate registers
190 setOperationAction(ISD::LOAD, MVT::i1, Custom);
191 setOperationAction(ISD::STORE, MVT::i1, Custom);
// i1 extending loads are promoted (loaded as a wider type), and all
// truncating stores to i1 are expanded.
193 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
194 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
195 setTruncStoreAction(MVT::i64, MVT::i1, Expand);
196 setTruncStoreAction(MVT::i32, MVT::i1, Expand);
197 setTruncStoreAction(MVT::i16, MVT::i1, Expand);
198 setTruncStoreAction(MVT::i8, MVT::i1, Expand);
200 // This is legal in NVPTX
201 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
202 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
204 // TRAP can be lowered to PTX trap
205 setOperationAction(ISD::TRAP, MVT::Other, Legal);
207 setOperationAction(ISD::ADDC, MVT::i64, Expand);
208 setOperationAction(ISD::ADDE, MVT::i64, Expand);
210 // Register custom handling for vector loads/stores
// Walk every vector MVT; only those IsPTXVectorType accepts get custom
// lowering (see LowerLOAD/LowerSTORE elsewhere in this file).
211 for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
213 MVT VT = (MVT::SimpleValueType) i;
214 if (IsPTXVectorType(VT)) {
215 setOperationAction(ISD::LOAD, VT, Custom);
216 setOperationAction(ISD::STORE, VT, Custom);
217 setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
221 // Custom handling for i8 intrinsics
222 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
// Bit-counting: clz/popc map to PTX instructions; cttz has no PTX
// equivalent and is expanded.
224 setOperationAction(ISD::CTLZ, MVT::i16, Legal);
225 setOperationAction(ISD::CTLZ, MVT::i32, Legal);
226 setOperationAction(ISD::CTLZ, MVT::i64, Legal);
227 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal);
228 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal);
229 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal);
230 setOperationAction(ISD::CTTZ, MVT::i16, Expand);
231 setOperationAction(ISD::CTTZ, MVT::i32, Expand);
232 setOperationAction(ISD::CTTZ, MVT::i64, Expand);
233 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
234 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
235 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
236 setOperationAction(ISD::CTPOP, MVT::i16, Legal);
237 setOperationAction(ISD::CTPOP, MVT::i32, Legal);
238 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
240 // Now deduce the information based on the above mentioned
242 computeRegisterProperties();
// Map an NVPTXISD opcode to a printable name for SelectionDAG debug dumps.
// One case per custom node; the switch header/default are elided in this
// excerpt.
245 const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
250 return "NVPTXISD::CALL";
251 case NVPTXISD::RET_FLAG:
252 return "NVPTXISD::RET_FLAG";
253 case NVPTXISD::Wrapper:
254 return "NVPTXISD::Wrapper";
255 case NVPTXISD::DeclareParam:
256 return "NVPTXISD::DeclareParam";
257 case NVPTXISD::DeclareScalarParam:
258 return "NVPTXISD::DeclareScalarParam";
259 case NVPTXISD::DeclareRet:
260 return "NVPTXISD::DeclareRet";
261 case NVPTXISD::DeclareRetParam:
262 return "NVPTXISD::DeclareRetParam";
263 case NVPTXISD::PrintCall:
264 return "NVPTXISD::PrintCall";
265 case NVPTXISD::LoadParam:
266 return "NVPTXISD::LoadParam";
267 case NVPTXISD::LoadParamV2:
268 return "NVPTXISD::LoadParamV2";
269 case NVPTXISD::LoadParamV4:
270 return "NVPTXISD::LoadParamV4";
271 case NVPTXISD::StoreParam:
272 return "NVPTXISD::StoreParam";
273 case NVPTXISD::StoreParamV2:
274 return "NVPTXISD::StoreParamV2";
275 case NVPTXISD::StoreParamV4:
276 return "NVPTXISD::StoreParamV4";
277 case NVPTXISD::StoreParamS32:
278 return "NVPTXISD::StoreParamS32";
279 case NVPTXISD::StoreParamU32:
280 return "NVPTXISD::StoreParamU32";
281 case NVPTXISD::CallArgBegin:
282 return "NVPTXISD::CallArgBegin";
283 case NVPTXISD::CallArg:
284 return "NVPTXISD::CallArg";
285 case NVPTXISD::LastCallArg:
286 return "NVPTXISD::LastCallArg";
287 case NVPTXISD::CallArgEnd:
288 return "NVPTXISD::CallArgEnd";
289 case NVPTXISD::CallVoid:
290 return "NVPTXISD::CallVoid";
291 case NVPTXISD::CallVal:
292 return "NVPTXISD::CallVal";
293 case NVPTXISD::CallSymbol:
294 return "NVPTXISD::CallSymbol";
295 case NVPTXISD::Prototype:
296 return "NVPTXISD::Prototype";
297 case NVPTXISD::MoveParam:
298 return "NVPTXISD::MoveParam";
299 case NVPTXISD::StoreRetval:
300 return "NVPTXISD::StoreRetval";
301 case NVPTXISD::StoreRetvalV2:
302 return "NVPTXISD::StoreRetvalV2";
303 case NVPTXISD::StoreRetvalV4:
304 return "NVPTXISD::StoreRetvalV4";
305 case NVPTXISD::PseudoUseParam:
306 return "NVPTXISD::PseudoUseParam";
307 case NVPTXISD::RETURN:
308 return "NVPTXISD::RETURN";
309 case NVPTXISD::CallSeqBegin:
310 return "NVPTXISD::CallSeqBegin";
311 case NVPTXISD::CallSeqEnd:
312 return "NVPTXISD::CallSeqEnd";
313 case NVPTXISD::LoadV2:
314 return "NVPTXISD::LoadV2";
315 case NVPTXISD::LoadV4:
316 return "NVPTXISD::LoadV4";
317 case NVPTXISD::LDGV2:
318 return "NVPTXISD::LDGV2";
319 case NVPTXISD::LDGV4:
320 return "NVPTXISD::LDGV4";
321 case NVPTXISD::LDUV2:
322 return "NVPTXISD::LDUV2";
323 case NVPTXISD::LDUV4:
324 return "NVPTXISD::LDUV4";
325 case NVPTXISD::StoreV2:
326 return "NVPTXISD::StoreV2";
327 case NVPTXISD::StoreV4:
328 return "NVPTXISD::StoreV4";
// Only i1 vector elements should be split apart during legalization; all
// other element types are handled as-is.
332 bool NVPTXTargetLowering::shouldSplitVectorElementType(EVT VT) const {
333 return VT == MVT::i1;
// Lower a GlobalAddress node by wrapping the target global address in an
// NVPTXISD::Wrapper so the address is materialized with pointer-sized type.
337 NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
339 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
340 Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
341 return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
// Build the textual PTX ".callprototype" string for an (indirect) call:
// return-value declaration first, then one ".param" declaration per
// argument. The string is later emitted via an INLINEASM node in LowerCall.
345 NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
346 const SmallVectorImpl<ISD::OutputArg> &Outs,
347 unsigned retAlignment,
348 const ImmutableCallSite *CS) const {
// Pre-sm_20 (non-ABI) compilation is not supported by this path.
350 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
351 assert(isABI && "Non-ABI compilation is not supported");
// The label is keyed by the global uniqueCallSite counter.
356 O << "prototype_" << uniqueCallSite << " : .callprototype ";
358 if (retTy->getTypeID() == Type::VoidTyID) {
// Scalar (integer/FP) returns become ".param .b<bits> _".
362 if (retTy->isPrimitiveType() || retTy->isIntegerTy()) {
364 if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
365 size = ITy->getBitWidth();
369 assert(retTy->isFloatingPointTy() &&
370 "Floating point type expected here");
371 size = retTy->getPrimitiveSizeInBits();
374 O << ".param .b" << size << " _";
375 } else if (isa<PointerType>(retTy)) {
376 O << ".param .b" << getPointerTy().getSizeInBits() << " _";
// Aggregate/vector returns become a byte array ".param .align A .b8 _[N]",
// with N accumulated from the component types.
378 if ((retTy->getTypeID() == Type::StructTyID) || isa<VectorType>(retTy)) {
379 SmallVector<EVT, 16> vtparts;
380 ComputeValueVTs(*this, retTy, vtparts);
381 unsigned totalsz = 0;
382 for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
384 EVT elemtype = vtparts[i];
385 if (vtparts[i].isVector()) {
386 elems = vtparts[i].getVectorNumElements();
387 elemtype = vtparts[i].getVectorElementType();
389 // TODO: no need to loop
390 for (unsigned j = 0, je = elems; j != je; ++j) {
391 unsigned sz = elemtype.getSizeInBits();
// Sub-byte integers are widened to at least 8 bits (widening code
// elided in this excerpt).
392 if (elemtype.isInteger() && (sz < 8))
397 O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]";
399 assert(false && "Unknown return type");
407 MVT thePointerTy = getPointerTy();
// Walk the IR-level arguments; OIdx tracks the (possibly larger) Outs
// array in parallel.
410 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
411 Type *Ty = Args[i].Ty;
417 if (Outs[OIdx].Flags.isByVal() == false) {
// Aggregates/vectors are declared as aligned byte arrays; the alignment
// comes from call-site metadata when present, else the ABI alignment.
418 if (Ty->isAggregateType() || Ty->isVectorTy()) {
420 const CallInst *CallI = cast<CallInst>(CS->getInstruction());
421 const DataLayout *TD = getDataLayout();
422 // +1 because index 0 is reserved for return type alignment
423 if (!llvm::getAlign(*CallI, i + 1, align))
424 align = TD->getABITypeAlignment(Ty);
425 unsigned sz = TD->getTypeAllocSize(Ty);
426 O << ".param .align " << align << " .b8 ";
428 O << "[" << sz << "]";
429 // update the index for Outs
430 SmallVector<EVT, 16> vtparts;
431 ComputeValueVTs(*this, Ty, vtparts);
432 if (unsigned len = vtparts.size())
436 // i8 types in IR will be i16 types in SDAG
437 assert((getValueType(Ty) == Outs[OIdx].VT ||
438 (getValueType(Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
439 "type mismatch between callee prototype and arguments");
// Plain scalars: ".param .b<bits>" sized by the integer width, pointer
// width, or primitive size as appropriate.
442 if (isa<IntegerType>(Ty)) {
443 sz = cast<IntegerType>(Ty)->getBitWidth();
446 } else if (isa<PointerType>(Ty))
447 sz = thePointerTy.getSizeInBits();
449 sz = Ty->getPrimitiveSizeInBits();
450 O << ".param .b" << sz << " ";
// byval pointer arguments: declare the pointee as an aligned byte array
// using the byval alignment recorded in the Outs flags.
454 const PointerType *PTy = dyn_cast<PointerType>(Ty);
455 assert(PTy && "Param with byval attribute should be a pointer type");
456 Type *ETy = PTy->getElementType();
458 unsigned align = Outs[OIdx].Flags.getByValAlign();
459 unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
460 O << ".param .align " << align << " .b8 ";
462 O << "[" << sz << "]";
// Determine the alignment to use for call argument/return slot Idx
// (Idx 0 is the return value). Prefers alignment metadata attached to the
// callee (direct call) or the call instruction (indirect call); falls back
// to the type's ABI alignment from the DataLayout.
469 NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
470 const ImmutableCallSite *CS,
472 unsigned Idx) const {
473 const DataLayout *TD = getDataLayout();
// A GlobalAddress callee means this is a direct call.
475 GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
477 if (Func) { // direct call
478 assert(CS->getCalledFunction() &&
479 "direct call cannot find callee");
480 if (!llvm::getAlign(*(CS->getCalledFunction()), Idx, align))
481 align = TD->getABITypeAlignment(Ty);
483 else { // indirect call
484 const CallInst *CallI = dyn_cast<CallInst>(CS->getInstruction());
485 if (!llvm::getAlign(*CallI, Idx, align))
486 align = TD->getABITypeAlignment(Ty);
492 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
493 SmallVectorImpl<SDValue> &InVals) const {
494 SelectionDAG &DAG = CLI.DAG;
496 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
497 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
498 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
499 SDValue Chain = CLI.Chain;
500 SDValue Callee = CLI.Callee;
501 bool &isTailCall = CLI.IsTailCall;
502 ArgListTy &Args = CLI.Args;
503 Type *retTy = CLI.RetTy;
504 ImmutableCallSite *CS = CLI.CS;
506 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
507 assert(isABI && "Non-ABI compilation is not supported");
510 const DataLayout *TD = getDataLayout();
511 MachineFunction &MF = DAG.getMachineFunction();
512 const Function *F = MF.getFunction();
514 SDValue tempChain = Chain;
516 DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
518 SDValue InFlag = Chain.getValue(1);
520 unsigned paramCount = 0;
521 // Args.size() and Outs.size() need not match.
522 // Outs.size() will be larger
523 // * if there is an aggregate argument with multiple fields (each field
524 // showing up separately in Outs)
525 // * if there is a vector argument with more than typical vector-length
526 // elements (generally if more than 4) where each vector element is
527 // individually present in Outs.
528 // So a different index should be used for indexing into Outs/OutVals.
529 // See similar issue in LowerFormalArguments.
531 // Declare the .params or .reg need to pass values
533 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
534 EVT VT = Outs[OIdx].VT;
535 Type *Ty = Args[i].Ty;
537 if (Outs[OIdx].Flags.isByVal() == false) {
538 if (Ty->isAggregateType()) {
540 SmallVector<EVT, 16> vtparts;
541 ComputeValueVTs(*this, Ty, vtparts);
543 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
544 // declare .param .align <align> .b8 .param<n>[<size>];
545 unsigned sz = TD->getTypeAllocSize(Ty);
546 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
547 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
548 DAG.getConstant(paramCount, MVT::i32),
549 DAG.getConstant(sz, MVT::i32), InFlag };
550 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
552 InFlag = Chain.getValue(1);
553 unsigned curOffset = 0;
554 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
556 EVT elemtype = vtparts[j];
557 if (vtparts[j].isVector()) {
558 elems = vtparts[j].getVectorNumElements();
559 elemtype = vtparts[j].getVectorElementType();
561 for (unsigned k = 0, ke = elems; k != ke; ++k) {
562 unsigned sz = elemtype.getSizeInBits();
563 if (elemtype.isInteger() && (sz < 8))
565 SDValue StVal = OutVals[OIdx];
566 if (elemtype.getSizeInBits() < 16) {
567 StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
569 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
570 SDValue CopyParamOps[] = { Chain,
571 DAG.getConstant(paramCount, MVT::i32),
572 DAG.getConstant(curOffset, MVT::i32),
574 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
575 CopyParamVTs, &CopyParamOps[0], 5,
576 elemtype, MachinePointerInfo());
577 InFlag = Chain.getValue(1);
582 if (vtparts.size() > 0)
587 if (Ty->isVectorTy()) {
588 EVT ObjectVT = getValueType(Ty);
589 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
590 // declare .param .align <align> .b8 .param<n>[<size>];
591 unsigned sz = TD->getTypeAllocSize(Ty);
592 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
593 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
594 DAG.getConstant(paramCount, MVT::i32),
595 DAG.getConstant(sz, MVT::i32), InFlag };
596 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
598 InFlag = Chain.getValue(1);
599 unsigned NumElts = ObjectVT.getVectorNumElements();
600 EVT EltVT = ObjectVT.getVectorElementType();
602 bool NeedExtend = false;
603 if (EltVT.getSizeInBits() < 16) {
610 SDValue Elt = OutVals[OIdx++];
612 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt);
614 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
615 SDValue CopyParamOps[] = { Chain,
616 DAG.getConstant(paramCount, MVT::i32),
617 DAG.getConstant(0, MVT::i32), Elt,
619 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
620 CopyParamVTs, &CopyParamOps[0], 5,
621 MemVT, MachinePointerInfo());
622 InFlag = Chain.getValue(1);
623 } else if (NumElts == 2) {
624 SDValue Elt0 = OutVals[OIdx++];
625 SDValue Elt1 = OutVals[OIdx++];
627 Elt0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt0);
628 Elt1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt1);
631 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
632 SDValue CopyParamOps[] = { Chain,
633 DAG.getConstant(paramCount, MVT::i32),
634 DAG.getConstant(0, MVT::i32), Elt0, Elt1,
636 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
637 CopyParamVTs, &CopyParamOps[0], 6,
638 MemVT, MachinePointerInfo());
639 InFlag = Chain.getValue(1);
641 unsigned curOffset = 0;
643 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
645 // vector will be expanded to a power of 2 elements, so we know we can
646 // always round up to the next multiple of 4 when creating the vector
648 // e.g. 4 elem => 1 st.v4
651 // 11 elem => 3 st.v4
652 unsigned VecSize = 4;
653 if (EltVT.getSizeInBits() == 64)
656 // This is potentially only part of a vector, so assume all elements
657 // are packed together.
658 unsigned PerStoreOffset = MemVT.getStoreSizeInBits() / 8 * VecSize;
660 for (unsigned i = 0; i < NumElts; i += VecSize) {
663 SmallVector<SDValue, 8> Ops;
664 Ops.push_back(Chain);
665 Ops.push_back(DAG.getConstant(paramCount, MVT::i32));
666 Ops.push_back(DAG.getConstant(curOffset, MVT::i32));
668 unsigned Opc = NVPTXISD::StoreParamV2;
670 StoreVal = OutVals[OIdx++];
672 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
673 Ops.push_back(StoreVal);
675 if (i + 1 < NumElts) {
676 StoreVal = OutVals[OIdx++];
679 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
681 StoreVal = DAG.getUNDEF(EltVT);
683 Ops.push_back(StoreVal);
686 Opc = NVPTXISD::StoreParamV4;
687 if (i + 2 < NumElts) {
688 StoreVal = OutVals[OIdx++];
691 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
693 StoreVal = DAG.getUNDEF(EltVT);
695 Ops.push_back(StoreVal);
697 if (i + 3 < NumElts) {
698 StoreVal = OutVals[OIdx++];
701 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
703 StoreVal = DAG.getUNDEF(EltVT);
705 Ops.push_back(StoreVal);
708 Ops.push_back(InFlag);
710 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
711 Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, &Ops[0],
713 MachinePointerInfo());
714 InFlag = Chain.getValue(1);
715 curOffset += PerStoreOffset;
723 // for ABI, declare .param .b<size> .param<n>;
724 unsigned sz = VT.getSizeInBits();
725 bool needExtend = false;
726 if (VT.isInteger()) {
732 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
733 SDValue DeclareParamOps[] = { Chain,
734 DAG.getConstant(paramCount, MVT::i32),
735 DAG.getConstant(sz, MVT::i32),
736 DAG.getConstant(0, MVT::i32), InFlag };
737 Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
739 InFlag = Chain.getValue(1);
740 SDValue OutV = OutVals[OIdx];
742 // zext/sext i1 to i16
743 unsigned opc = ISD::ZERO_EXTEND;
744 if (Outs[OIdx].Flags.isSExt())
745 opc = ISD::SIGN_EXTEND;
746 OutV = DAG.getNode(opc, dl, MVT::i16, OutV);
748 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
749 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
750 DAG.getConstant(0, MVT::i32), OutV, InFlag };
752 unsigned opcode = NVPTXISD::StoreParam;
753 if (Outs[OIdx].Flags.isZExt())
754 opcode = NVPTXISD::StoreParamU32;
755 else if (Outs[OIdx].Flags.isSExt())
756 opcode = NVPTXISD::StoreParamS32;
757 Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps, 5,
758 VT, MachinePointerInfo());
760 InFlag = Chain.getValue(1);
765 SmallVector<EVT, 16> vtparts;
766 const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
767 assert(PTy && "Type of a byval parameter should be pointer");
768 ComputeValueVTs(*this, PTy->getElementType(), vtparts);
770 // declare .param .align <align> .b8 .param<n>[<size>];
771 unsigned sz = Outs[OIdx].Flags.getByValSize();
772 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
773 // The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
774 // so we don't need to worry about natural alignment or not.
775 // See TargetLowering::LowerCallTo().
776 SDValue DeclareParamOps[] = {
777 Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32),
778 DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
781 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
783 InFlag = Chain.getValue(1);
784 unsigned curOffset = 0;
785 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
787 EVT elemtype = vtparts[j];
788 if (vtparts[j].isVector()) {
789 elems = vtparts[j].getVectorNumElements();
790 elemtype = vtparts[j].getVectorElementType();
792 for (unsigned k = 0, ke = elems; k != ke; ++k) {
793 unsigned sz = elemtype.getSizeInBits();
794 if (elemtype.isInteger() && (sz < 8))
797 DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
798 DAG.getConstant(curOffset, getPointerTy()));
799 SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
800 MachinePointerInfo(), false, false, false,
802 if (elemtype.getSizeInBits() < 16) {
803 theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
805 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
806 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
807 DAG.getConstant(curOffset, MVT::i32), theVal,
809 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
810 CopyParamOps, 5, elemtype,
811 MachinePointerInfo());
813 InFlag = Chain.getValue(1);
820 GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
821 unsigned retAlignment = 0;
824 if (Ins.size() > 0) {
825 SmallVector<EVT, 16> resvtparts;
826 ComputeValueVTs(*this, retTy, resvtparts);
829 // .param .align 16 .b8 retval0[<size-in-bytes>], or
830 // .param .b<size-in-bits> retval0
831 unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
832 if (retTy->isPrimitiveType() || retTy->isIntegerTy() ||
833 retTy->isPointerTy()) {
834 // Scalar needs to be at least 32bit wide
837 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
838 SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
839 DAG.getConstant(resultsz, MVT::i32),
840 DAG.getConstant(0, MVT::i32), InFlag };
841 Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
843 InFlag = Chain.getValue(1);
845 retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
846 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
847 SDValue DeclareRetOps[] = { Chain,
848 DAG.getConstant(retAlignment, MVT::i32),
849 DAG.getConstant(resultsz / 8, MVT::i32),
850 DAG.getConstant(0, MVT::i32), InFlag };
851 Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
853 InFlag = Chain.getValue(1);
858 // This is indirect function call case : PTX requires a prototype of the
860 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
861 // to be emitted, and the label has to used as the last arg of call
863 // The prototype is embedded in a string and put as the operand for an
865 SDVTList InlineAsmVTs = DAG.getVTList(MVT::Other, MVT::Glue);
866 std::string proto_string =
867 getPrototype(retTy, Args, Outs, retAlignment, CS);
868 const char *asmstr = nvTM->getManagedStrPool()
869 ->getManagedString(proto_string.c_str())->c_str();
870 SDValue InlineAsmOps[] = {
871 Chain, DAG.getTargetExternalSymbol(asmstr, getPointerTy()),
872 DAG.getMDNode(0), DAG.getTargetConstant(0, MVT::i32), InFlag
874 Chain = DAG.getNode(ISD::INLINEASM, dl, InlineAsmVTs, InlineAsmOps, 5);
875 InFlag = Chain.getValue(1);
877 // Op to just print "call"
878 SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
879 SDValue PrintCallOps[] = {
880 Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
882 Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
883 dl, PrintCallVTs, PrintCallOps, 3);
884 InFlag = Chain.getValue(1);
886 // Ops to print out the function name
887 SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
888 SDValue CallVoidOps[] = { Chain, Callee, InFlag };
889 Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps, 3);
890 InFlag = Chain.getValue(1);
892 // Ops to print out the param list
893 SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
894 SDValue CallArgBeginOps[] = { Chain, InFlag };
895 Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
897 InFlag = Chain.getValue(1);
899 for (unsigned i = 0, e = paramCount; i != e; ++i) {
902 opcode = NVPTXISD::LastCallArg;
904 opcode = NVPTXISD::CallArg;
905 SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
906 SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
907 DAG.getConstant(i, MVT::i32), InFlag };
908 Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps, 4);
909 InFlag = Chain.getValue(1);
911 SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
912 SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
915 DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps, 3);
916 InFlag = Chain.getValue(1);
919 SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
920 SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
922 Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps, 3);
923 InFlag = Chain.getValue(1);
926 // Generate loads from param memory/moves from registers for result
927 if (Ins.size() > 0) {
928 unsigned resoffset = 0;
929 if (retTy && retTy->isVectorTy()) {
930 EVT ObjectVT = getValueType(retTy);
931 unsigned NumElts = ObjectVT.getVectorNumElements();
932 EVT EltVT = ObjectVT.getVectorElementType();
933 assert(nvTM->getTargetLowering()->getNumRegisters(F->getContext(),
934 ObjectVT) == NumElts &&
935 "Vector was not scalarized");
936 unsigned sz = EltVT.getSizeInBits();
937 bool needTruncate = sz < 16 ? true : false;
940 // Just a simple load
941 std::vector<EVT> LoadRetVTs;
943 // If loading i1 result, generate
946 LoadRetVTs.push_back(MVT::i16);
948 LoadRetVTs.push_back(EltVT);
949 LoadRetVTs.push_back(MVT::Other);
950 LoadRetVTs.push_back(MVT::Glue);
951 std::vector<SDValue> LoadRetOps;
952 LoadRetOps.push_back(Chain);
953 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
954 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
955 LoadRetOps.push_back(InFlag);
956 SDValue retval = DAG.getMemIntrinsicNode(
957 NVPTXISD::LoadParam, dl,
958 DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
959 LoadRetOps.size(), EltVT, MachinePointerInfo());
960 Chain = retval.getValue(1);
961 InFlag = retval.getValue(2);
962 SDValue Ret0 = retval;
964 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Ret0);
965 InVals.push_back(Ret0);
966 } else if (NumElts == 2) {
968 std::vector<EVT> LoadRetVTs;
970 // If loading i1 result, generate
973 LoadRetVTs.push_back(MVT::i16);
974 LoadRetVTs.push_back(MVT::i16);
976 LoadRetVTs.push_back(EltVT);
977 LoadRetVTs.push_back(EltVT);
979 LoadRetVTs.push_back(MVT::Other);
980 LoadRetVTs.push_back(MVT::Glue);
981 std::vector<SDValue> LoadRetOps;
982 LoadRetOps.push_back(Chain);
983 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
984 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
985 LoadRetOps.push_back(InFlag);
986 SDValue retval = DAG.getMemIntrinsicNode(
987 NVPTXISD::LoadParamV2, dl,
988 DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
989 LoadRetOps.size(), EltVT, MachinePointerInfo());
990 Chain = retval.getValue(2);
991 InFlag = retval.getValue(3);
992 SDValue Ret0 = retval.getValue(0);
993 SDValue Ret1 = retval.getValue(1);
995 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret0);
996 InVals.push_back(Ret0);
997 Ret1 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret1);
998 InVals.push_back(Ret1);
1000 InVals.push_back(Ret0);
1001 InVals.push_back(Ret1);
1004 // Split into N LoadV4
1006 unsigned VecSize = 4;
1007 unsigned Opc = NVPTXISD::LoadParamV4;
1008 if (EltVT.getSizeInBits() == 64) {
1010 Opc = NVPTXISD::LoadParamV2;
1012 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1013 for (unsigned i = 0; i < NumElts; i += VecSize) {
1014 SmallVector<EVT, 8> LoadRetVTs;
1016 // If loading i1 result, generate
1019 for (unsigned j = 0; j < VecSize; ++j)
1020 LoadRetVTs.push_back(MVT::i16);
1022 for (unsigned j = 0; j < VecSize; ++j)
1023 LoadRetVTs.push_back(EltVT);
1025 LoadRetVTs.push_back(MVT::Other);
1026 LoadRetVTs.push_back(MVT::Glue);
1027 SmallVector<SDValue, 4> LoadRetOps;
1028 LoadRetOps.push_back(Chain);
1029 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1030 LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
1031 LoadRetOps.push_back(InFlag);
1032 SDValue retval = DAG.getMemIntrinsicNode(
1033 Opc, dl, DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()),
1034 &LoadRetOps[0], LoadRetOps.size(), EltVT, MachinePointerInfo());
1036 Chain = retval.getValue(2);
1037 InFlag = retval.getValue(3);
1039 Chain = retval.getValue(4);
1040 InFlag = retval.getValue(5);
1043 for (unsigned j = 0; j < VecSize; ++j) {
1044 if (i + j >= NumElts)
1046 SDValue Elt = retval.getValue(j);
1048 Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
1049 InVals.push_back(Elt);
1051 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1055 SmallVector<EVT, 16> VTs;
1056 ComputePTXValueVTs(*this, retTy, VTs);
1057 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1058 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
1059 unsigned sz = VTs[i].getSizeInBits();
1060 bool needTruncate = sz < 8 ? true : false;
1061 if (VTs[i].isInteger() && (sz < 8))
1064 SmallVector<EVT, 4> LoadRetVTs;
1065 EVT TheLoadType = VTs[i];
1066 if (retTy->isIntegerTy() &&
1067 TD->getTypeAllocSizeInBits(retTy) < 32) {
1068 // This is for integer types only, and specifically not for
1070 LoadRetVTs.push_back(MVT::i32);
1071 TheLoadType = MVT::i32;
1072 } else if (sz < 16) {
1073 // If loading i1/i8 result, generate
1075 // trunc i16 to i1/i8
1076 LoadRetVTs.push_back(MVT::i16);
1078 LoadRetVTs.push_back(Ins[i].VT);
1079 LoadRetVTs.push_back(MVT::Other);
1080 LoadRetVTs.push_back(MVT::Glue);
1082 SmallVector<SDValue, 4> LoadRetOps;
1083 LoadRetOps.push_back(Chain);
1084 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1085 LoadRetOps.push_back(DAG.getConstant(resoffset, MVT::i32));
1086 LoadRetOps.push_back(InFlag);
1087 SDValue retval = DAG.getMemIntrinsicNode(
1088 NVPTXISD::LoadParam, dl,
1089 DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
1090 LoadRetOps.size(), TheLoadType, MachinePointerInfo());
1091 Chain = retval.getValue(1);
1092 InFlag = retval.getValue(2);
1093 SDValue Ret0 = retval.getValue(0);
1095 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0);
1096 InVals.push_back(Ret0);
1097 resoffset += sz / 8;
1102 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
1103 DAG.getIntPtrConstant(uniqueCallSite + 1, true),
1107 // set isTailCall to false for now, until we figure out how to express
1108 // tail call optimization in PTX
1113 // By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1114 // (see LegalizeDAG.cpp). This is slow and uses local memory.
1115 // We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5
// Lowers CONCAT_VECTORS by pulling every scalar element out of every input
// vector operand (EXTRACT_VECTOR_ELT) and rebuilding the full-width result
// with a single BUILD_VECTOR, avoiding the stack-based default expansion.
// NOTE(review): this file contains stale embedded line numbers and elided
// lines (e.g. the loop-closing braces are not visible here) — restore from
// upstream before building.
1117 NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1118 SDNode *Node = Op.getNode();
1120 SmallVector<SDValue, 8> Ops;
1121 unsigned NumOperands = Node->getNumOperands();
// Flatten each input sub-vector into individual scalar extracts.
1122 for (unsigned i = 0; i < NumOperands; ++i) {
1123 SDValue SubOp = Node->getOperand(i);
1124 EVT VVT = SubOp.getNode()->getValueType(0);
1125 EVT EltVT = VVT.getVectorElementType();
1126 unsigned NumSubElem = VVT.getVectorNumElements();
1127 for (unsigned j = 0; j < NumSubElem; ++j) {
1128 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1129 DAG.getIntPtrConstant(j)));
// Reassemble all extracted scalars into the concatenated result vector.
1132 return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), &Ops[0],
// Central dispatcher for operations marked "Custom" in the NVPTX lowering
// tables; routes each opcode to its dedicated Lower* helper.
// NOTE(review): several lines are elided here (the return statements for
// RETURNADDR/FRAMEADDR/INTRINSIC_W_CHAIN/BUILD_VECTOR/EXTRACT_SUBVECTOR and
// the STORE/LOAD case labels are missing) — restore from upstream.
1137 NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
1138 switch (Op.getOpcode()) {
1139 case ISD::RETURNADDR:
1141 case ISD::FRAMEADDR:
1143 case ISD::GlobalAddress:
1144 return LowerGlobalAddress(Op, DAG);
1145 case ISD::INTRINSIC_W_CHAIN:
1147 case ISD::BUILD_VECTOR:
1148 case ISD::EXTRACT_SUBVECTOR:
1150 case ISD::CONCAT_VECTORS:
1151 return LowerCONCAT_VECTORS(Op, DAG);
1153 return LowerSTORE(Op, DAG);
1155 return LowerLOAD(Op, DAG);
// Any other opcode reaching here means the lowering tables and this switch
// are out of sync.
1157 llvm_unreachable("Custom lowering not defined for operation");
// Custom LOAD lowering: only i1 loads need special handling (delegated to
// LowerLOADi1); the fall-through return for other types is elided from this
// chunk — presumably returns SDValue()/default handling, confirm upstream.
1161 SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
1162 if (Op.getValueType() == MVT::i1)
1163 return LowerLOADi1(Op, DAG);
1170 // v1 = ld i8* addr (-> i16)
1171 // v = trunc i16 to i1
// Lowers an i1 (non-extending) load by loading an i16 from the same address
// and truncating the result back to i1, since PTX has no 1-bit loads.
1172 SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
1173 SDNode *Node = Op.getNode();
1174 LoadSDNode *LD = cast<LoadSDNode>(Node);
1176 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
1177 assert(Node->getValueType(0) == MVT::i1 &&
1178 "Custom lowering for i1 load only");
// Re-issue the load at i16 width, preserving the original load's memory
// attributes (volatility, temporality, invariance, alignment).
1180 DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
1181 LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
1182 LD->isInvariant(), LD->getAlignment())
1183 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
1184 // The legalizer (the caller) is expecting two values from the legalized
1185 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
1186 // in LegalizeDAG.cpp which also uses MergeValues.
1187 SDValue Ops[] = { result, LD->getChain() };
1188 return DAG.getMergeValues(Ops, 2, dl);
// Custom STORE lowering dispatcher: i1 stores and vector stores get dedicated
// handling; the default return for all other types is elided from this chunk.
1191 SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
1192 EVT ValVT = Op.getOperand(1).getValueType();
1193 if (ValVT == MVT::i1)
1194 return LowerSTOREi1(Op, DAG);
1195 else if (ValVT.isVector())
1196 return LowerSTOREVector(Op, DAG);
// Lowers a store of a "native"-sized vector into a single NVPTX StoreV2 or
// StoreV4 target node whose operands are the individually extracted scalar
// elements (extended to i16 for sub-16-bit element types).
// NOTE(review): the switch arms listing the supported simple vector types
// (lines between the switch and "native vector type" comment) are elided
// from this chunk.
1202 NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
1203 SDNode *N = Op.getNode();
1204 SDValue Val = N->getOperand(1);
1206 EVT ValVT = Val.getValueType();
1208 if (ValVT.isVector()) {
1209 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
1210 // legal. We can (and should) split that into 2 stores of <2 x double> here
1211 // but I'm leaving that as a TODO for now.
1212 if (!ValVT.isSimple())
1214 switch (ValVT.getSimpleVT().SimpleTy) {
1227 // This is a "native" vector type
1231 unsigned Opcode = 0;
1232 EVT EltVT = ValVT.getVectorElementType();
1233 unsigned NumElts = ValVT.getVectorNumElements();
1235 // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
1236 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
1237 // stored type to i16 and propogate the "real" type as the memory type.
1238 bool NeedExt = false;
1239 if (EltVT.getSizeInBits() < 16)
// Pick the vector-store flavor by element count (2 -> v2, 4 -> v4).
1246 Opcode = NVPTXISD::StoreV2;
1249 Opcode = NVPTXISD::StoreV4;
1254 SmallVector<SDValue, 8> Ops;
1256 // First is the chain
1257 Ops.push_back(N->getOperand(0));
1259 // Then the split values
1260 for (unsigned i = 0; i < NumElts; ++i) {
1261 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
1262 DAG.getIntPtrConstant(i));
1264 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
1265 Ops.push_back(ExtVal);
1268 // Then any remaining arguments
1269 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
1270 Ops.push_back(N->getOperand(i));
1273 MemSDNode *MemSD = cast<MemSDNode>(N);
// Build the target memory-intrinsic node; the original memory VT and
// operand are propagated so the "real" (possibly narrow) type survives.
1275 SDValue NewSt = DAG.getMemIntrinsicNode(
1276 Opcode, DL, DAG.getVTList(MVT::Other), &Ops[0], Ops.size(),
1277 MemSD->getMemoryVT(), MemSD->getMemOperand());
1279 //return DCI.CombineTo(N, NewSt, true);
1288 // v1 = zxt v to i16
// Lowers an i1 store: zero-extend the value to i16, then emit a truncating
// store with an i8 memory type (PTX has no 1-bit stores).
1290 SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
1291 SDNode *Node = Op.getNode();
1293 StoreSDNode *ST = cast<StoreSDNode>(Node);
1294 SDValue Tmp1 = ST->getChain();
1295 SDValue Tmp2 = ST->getBasePtr();
1296 SDValue Tmp3 = ST->getValue();
1297 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
// Preserve the original store's memory attributes on the replacement store.
1298 unsigned Alignment = ST->getAlignment();
1299 bool isVolatile = ST->isVolatile();
1300 bool isNonTemporal = ST->isNonTemporal();
1301 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
1302 SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
1303 ST->getPointerInfo(), MVT::i8, isNonTemporal,
1304 isVolatile, Alignment);
// Builds a TargetExternalSymbol named "<inname><suffix>" (the suffix is
// produced from idx via the stringstream; the line appending idx to the
// stream is elided here). The string is interned in the target machine's
// managed string pool so the symbol's c_str() stays alive.
1308 SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
1309 int idx, EVT v) const {
1310 std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
1311 std::stringstream suffix;
1313 *name += suffix.str();
1314 return DAG.getTargetExternalSymbol(name->c_str(), v);
// Convenience wrapper: external symbol for formal parameter #idx, named
// with the ".PARAM" prefix.
1318 NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
1319 return getExtSymb(DAG, ".PARAM", idx, v);
// Convenience wrapper: helper-parameter symbol with the ".HLPPARAM" prefix.
// Note getExtSymb is called without an explicit EVT here, so its declared
// default EVT applies (declaration not visible in this chunk).
1322 SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
1323 return getExtSymb(DAG, ".HLPPARAM", idx);
1326 // Check to see if the kernel argument is image*_t or sampler_t
// Returns true when `arg` is a pointer to one of the OpenCL special struct
// types (image2d/image3d/sampler), matched by struct name. The early-return
// lines for non-pointer args and the final return are elided in this chunk.
1328 bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
1329 static const char *const specialTypes[] = { "struct._image2d_t",
1330 "struct._image3d_t",
1331 "struct._sampler_t" };
1333 const Type *Ty = arg->getType();
1334 const PointerType *PTy = dyn_cast<PointerType>(Ty);
// Literal (unnamed) structs can never match a named special type.
1342 const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
1343 const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
1345 for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
1346 if (TypeName == specialTypes[i])
// LowerFormalArguments - Materialize the incoming formal arguments of the
// current function as SDValues in InVals. Each argument is loaded from its
// ".PARAM" symbol in the PTX param address space. Handling varies by case:
// image/sampler kernel params become i32 position constants; unused args
// become UNDEF; aggregates are split per ComputePTXValueVTs; vectors are
// loaded scalar-by-scalar, as a v2 pair, or in ld.v4-sized chunks; ByVal
// params are passed through NVPTXISD::MoveParam. Newly created nodes get
// IR order "idx+1" so params keep their source order.
// NOTE(review): this chunk has elided lines throughout (loop/if closers,
// `continue`s, some declarations such as `idx`, `Ofst`, `p`, `srcAddr`);
// restore from upstream before modifying logic.
1352 SDValue NVPTXTargetLowering::LowerFormalArguments(
1353 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1354 const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
1355 SmallVectorImpl<SDValue> &InVals) const {
1356 MachineFunction &MF = DAG.getMachineFunction();
1357 const DataLayout *TD = getDataLayout();
1359 const Function *F = MF.getFunction();
1360 const AttributeSet &PAL = F->getAttributes();
1361 const TargetLowering *TLI = nvTM->getTargetLowering();
1363 SDValue Root = DAG.getRoot();
1364 std::vector<SDValue> OutChains;
1366 bool isKernel = llvm::isKernelFunction(*F);
1367 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1368 assert(isABI && "Non-ABI compilation is not supported");
// Collect IR-level arguments and their types up front so the main loop can
// index them positionally.
1372 std::vector<Type *> argTypes;
1373 std::vector<const Argument *> theArgs;
1374 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
1376 theArgs.push_back(I);
1377 argTypes.push_back(I->getType());
1379 // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
1380 // Ins.size() will be larger
1381 // * if there is an aggregate argument with multiple fields (each field
1382 // showing up separately in Ins)
1383 // * if there is a vector argument with more than typical vector-length
1384 // elements (generally if more than 4) where each vector element is
1385 // individually present in Ins.
1386 // So a different index should be used for indexing into Ins.
1387 // See similar issue in LowerCall.
1388 unsigned InsIdx = 0;
1391 for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
1392 Type *Ty = argTypes[i];
1394 // If the kernel argument is image*_t or sampler_t, convert it to
1395 // a i32 constant holding the parameter position. This can later
1396 // matched in the AsmPrinter to output the correct mangled name.
1397 if (isImageOrSamplerVal(
1399 (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
1401 assert(isKernel && "Only kernels can have image/sampler params");
1402 InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
// Unused arguments still need placeholder values so InVals stays in sync
// with Ins; UNDEF nodes are pushed per decomposed part.
1406 if (theArgs[i]->use_empty()) {
1408 if (Ty->isAggregateType()) {
1409 SmallVector<EVT, 16> vtparts;
1411 ComputePTXValueVTs(*this, Ty, vtparts);
1412 assert(vtparts.size() > 0 && "empty aggregate type not expected");
1413 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
1415 EVT partVT = vtparts[parti];
1416 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, partVT));
1419 if (vtparts.size() > 0)
1423 if (Ty->isVectorTy()) {
1424 EVT ObjectVT = getValueType(Ty);
1425 unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
1426 for (unsigned parti = 0; parti < NumRegs; ++parti) {
1427 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1434 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1438 // In the following cases, assign a node order of "idx+1"
1439 // to newly created nodes. The SDNodes for params have to
1440 // appear in the same order as their order of appearance
1441 // in the original function. "idx+1" holds that order.
1442 if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) {
1443 if (Ty->isAggregateType()) {
1444 SmallVector<EVT, 16> vtparts;
1445 SmallVector<uint64_t, 16> offsets;
1447 // NOTE: Here, we lose the ability to issue vector loads for vectors
1448 // that are a part of a struct. This should be investigated in the
1450 ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
1451 assert(vtparts.size() > 0 && "empty aggregate type not expected");
// Packed structs force byte alignment on each part's load.
1452 bool aggregateIsPacked = false;
1453 if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
1454 aggregateIsPacked = STy->isPacked();
1456 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1457 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
1459 EVT partVT = vtparts[parti];
1460 Value *srcValue = Constant::getNullValue(
1461 PointerType::get(partVT.getTypeForEVT(F->getContext()),
1462 llvm::ADDRESS_SPACE_PARAM));
1464 DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1465 DAG.getConstant(offsets[parti], getPointerTy()));
1466 unsigned partAlign =
1467 aggregateIsPacked ? 1
1468 : TD->getABITypeAlignment(
1469 partVT.getTypeForEVT(F->getContext()));
// Widen with an extending load when the Ins slot is wider than the part,
// choosing sext/zext from the argument's flags.
1471 if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
1472 ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
1473 ISD::SEXTLOAD : ISD::ZEXTLOAD;
1474 p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
1475 MachinePointerInfo(srcValue), partVT, false,
1478 p = DAG.getLoad(partVT, dl, Root, srcAddr,
1479 MachinePointerInfo(srcValue), false, false, false,
1483 p.getNode()->setIROrder(idx + 1);
1484 InVals.push_back(p);
1487 if (vtparts.size() > 0)
1491 if (Ty->isVectorTy()) {
1492 EVT ObjectVT = getValueType(Ty);
1493 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1494 unsigned NumElts = ObjectVT.getVectorNumElements();
1495 assert(TLI->getNumRegisters(F->getContext(), ObjectVT) == NumElts &&
1496 "Vector was not scalarized");
1498 EVT EltVT = ObjectVT.getVectorElementType();
1503 // We only have one element, so just directly load it
1504 Value *SrcValue = Constant::getNullValue(PointerType::get(
1505 EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1506 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1507 DAG.getConstant(Ofst, getPointerTy()));
1508 SDValue P = DAG.getLoad(
1509 EltVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1511 TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
1513 P.getNode()->setIROrder(idx + 1);
1515 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1516 P = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, P);
1517 InVals.push_back(P);
1518 Ofst += TD->getTypeAllocSize(EltVT.getTypeForEVT(F->getContext()));
1520 } else if (NumElts == 2) {
// Two-element vectors: one v2 load, then extract both lanes.
1522 // f32,f32 = load ...
1523 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, 2);
1524 Value *SrcValue = Constant::getNullValue(PointerType::get(
1525 VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1526 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1527 DAG.getConstant(Ofst, getPointerTy()));
1528 SDValue P = DAG.getLoad(
1529 VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1531 TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1533 P.getNode()->setIROrder(idx + 1);
1535 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1536 DAG.getIntPtrConstant(0));
1537 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1538 DAG.getIntPtrConstant(1));
1540 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
1541 Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0);
1542 Elt1 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt1);
1545 InVals.push_back(Elt0);
1546 InVals.push_back(Elt1);
1547 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1551 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
1553 // vector will be expanded to a power of 2 elements, so we know we can
1554 // always round up to the next multiple of 4 when creating the vector
1556 // e.g. 4 elem => 1 ld.v4
1557 // 6 elem => 2 ld.v4
1558 // 8 elem => 2 ld.v4
1559 // 11 elem => 3 ld.v4
1560 unsigned VecSize = 4;
// 64-bit elements use v2 chunks (v4 of 64-bit exceeds the 128-bit
// vector-load width).
1561 if (EltVT.getSizeInBits() == 64) {
1564 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1565 for (unsigned i = 0; i < NumElts; i += VecSize) {
1566 Value *SrcValue = Constant::getNullValue(
1567 PointerType::get(VecVT.getTypeForEVT(F->getContext()),
1568 llvm::ADDRESS_SPACE_PARAM));
1570 DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1571 DAG.getConstant(Ofst, getPointerTy()));
1572 SDValue P = DAG.getLoad(
1573 VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1575 TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1577 P.getNode()->setIROrder(idx + 1);
// Extract only the lanes that correspond to real elements; the tail of
// the last chunk may be padding.
1579 for (unsigned j = 0; j < VecSize; ++j) {
1580 if (i + j >= NumElts)
1582 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1583 DAG.getIntPtrConstant(j));
1584 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1585 Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
1586 InVals.push_back(Elt);
1588 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
// Scalar (non-aggregate, non-vector) argument: single load from the param
// symbol, extending if the Ins slot is wider than the IR type.
1598 EVT ObjectVT = getValueType(Ty);
1599 // If ABI, load from the param symbol
1600 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1601 Value *srcValue = Constant::getNullValue(PointerType::get(
1602 ObjectVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1604 if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
1605 ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
1606 ISD::SEXTLOAD : ISD::ZEXTLOAD;
1607 p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
1608 MachinePointerInfo(srcValue), ObjectVT, false, false,
1609 TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
1611 p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
1612 MachinePointerInfo(srcValue), false, false, false,
1613 TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
1616 p.getNode()->setIROrder(idx + 1);
1617 InVals.push_back(p);
1621 // Param has ByVal attribute
1622 // Return MoveParam(param symbol).
1623 // Ideally, the param symbol can be returned directly,
1624 // but when SDNode builder decides to use it in a CopyToReg(),
1625 // machine instruction fails because TargetExternalSymbol
1626 // (not lowered) is target dependent, and CopyToReg assumes
1627 // the source is lowered.
1628 EVT ObjectVT = getValueType(Ty);
1629 assert(ObjectVT == Ins[InsIdx].VT &&
1630 "Ins type did not match function type");
1631 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1632 SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
1634 p.getNode()->setIROrder(idx + 1);
1636 InVals.push_back(p);
// Alternate ByVal path: convert the local param pointer to generic space
// via the nvvm intrinsic (branch condition elided in this chunk).
1638 SDValue p2 = DAG.getNode(
1639 ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
1640 DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
1641 InVals.push_back(p2);
1645 // Clang will check explicit VarArg and issue error if any. However, Clang
1646 // will let code with
1647 // implicit var arg like f() pass. See bug 617733.
1648 // We treat this case as if the arg list is empty.
1649 // if (F.isVarArg()) {
1650 // assert(0 && "VarArg not supported yet!");
// Merge any accumulated chains back into the DAG root.
1653 if (!OutChains.empty())
1654 DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &OutChains[0],
// LowerReturn - Emit NVPTXISD::StoreRetval{,V2,V4} nodes that store the
// function's return value(s) into the return-value param space, then end
// with NVPTXISD::RET_FLAG. Vector returns are recombined from scalarized
// OutVals into v2/v4 stores; scalar/aggregate returns are stored piecewise
// at accumulated byte offsets, with sub-32-bit integers widened to i32 and
// sub-16-bit values widened to i16 first.
// NOTE(review): this chunk has elided lines (closers, NumElts==1 condition,
// some StoreVal/Chain assignments); restore from upstream before editing.
1662 NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1664 const SmallVectorImpl<ISD::OutputArg> &Outs,
1665 const SmallVectorImpl<SDValue> &OutVals,
1666 SDLoc dl, SelectionDAG &DAG) const {
1667 MachineFunction &MF = DAG.getMachineFunction();
1668 const Function *F = MF.getFunction();
1669 Type *RetTy = F->getReturnType();
1670 const DataLayout *TD = getDataLayout();
1672 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1673 assert(isABI && "Non-ABI compilation is not supported");
1677 if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) {
1678 // If we have a vector type, the OutVals array will be the scalarized
1679 // components and we have combine them into 1 or more vector stores.
1680 unsigned NumElts = VTy->getNumElements();
1681 assert(NumElts == Outs.size() && "Bad scalarization of return value");
1683 // const_cast can be removed in later LLVM versions
1684 EVT EltVT = getValueType(RetTy).getVectorElementType();
// i1/i8 results must be widened to i16 before StoreRetval, since those
// widths are not directly storable by the retval nodes.
1685 bool NeedExtend = false;
1686 if (EltVT.getSizeInBits() < 16)
1691 SDValue StoreVal = OutVals[0];
1692 // We only have one element, so just directly store it
1694 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
1695 SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
1696 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
1697 DAG.getVTList(MVT::Other), &Ops[0], 3,
1698 EltVT, MachinePointerInfo());
1700 } else if (NumElts == 2) {
// Two elements: a single StoreRetvalV2 at offset 0.
1702 SDValue StoreVal0 = OutVals[0];
1703 SDValue StoreVal1 = OutVals[1];
1706 StoreVal0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal0);
1707 StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1);
1710 SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
1712 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
1713 DAG.getVTList(MVT::Other), &Ops[0], 4,
1714 EltVT, MachinePointerInfo());
1717 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
1718 // vector will be expanded to a power of 2 elements, so we know we can
1719 // always round up to the next multiple of 4 when creating the vector
1721 // e.g. 4 elem => 1 st.v4
1722 // 6 elem => 2 st.v4
1723 // 8 elem => 2 st.v4
1724 // 11 elem => 3 st.v4
1726 unsigned VecSize = 4;
// 64-bit elements use v2 chunks instead of v4.
1727 if (OutVals[0].getValueType().getSizeInBits() == 64)
1730 unsigned Offset = 0;
1733 EVT::getVectorVT(F->getContext(), OutVals[0].getValueType(), VecSize);
1734 unsigned PerStoreOffset =
1735 TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1737 for (unsigned i = 0; i < NumElts; i += VecSize) {
// Build one StoreRetvalV2/V4's operand list: chain, byte offset, then up
// to VecSize values (UNDEF-padded past NumElts).
1740 SmallVector<SDValue, 8> Ops;
1741 Ops.push_back(Chain);
1742 Ops.push_back(DAG.getConstant(Offset, MVT::i32));
1743 unsigned Opc = NVPTXISD::StoreRetvalV2;
1744 EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType();
1746 StoreVal = OutVals[i];
1748 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1749 Ops.push_back(StoreVal);
1751 if (i + 1 < NumElts) {
1752 StoreVal = OutVals[i + 1];
1754 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1756 StoreVal = DAG.getUNDEF(ExtendedVT);
1758 Ops.push_back(StoreVal);
// Upgrade to a v4 store when this chunk carries more than 2 values.
1761 Opc = NVPTXISD::StoreRetvalV4;
1762 if (i + 2 < NumElts) {
1763 StoreVal = OutVals[i + 2];
1766 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1768 StoreVal = DAG.getUNDEF(ExtendedVT);
1770 Ops.push_back(StoreVal);
1772 if (i + 3 < NumElts) {
1773 StoreVal = OutVals[i + 3];
1776 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1778 StoreVal = DAG.getUNDEF(ExtendedVT);
1780 Ops.push_back(StoreVal);
1783 // Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size());
1785 DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), &Ops[0],
1786 Ops.size(), EltVT, MachinePointerInfo());
1787 Offset += PerStoreOffset;
// Non-vector return: decompose RetTy into primitive parts and store each
// one at its accumulated byte offset.
1791 SmallVector<EVT, 16> ValVTs;
1792 // const_cast is necessary since we are still using an LLVM version from
1793 // before the type system re-write.
1794 ComputePTXValueVTs(*this, RetTy, ValVTs);
1795 assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");
1797 unsigned SizeSoFar = 0;
1798 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1799 SDValue theVal = OutVals[i];
1800 EVT TheValType = theVal.getValueType();
1801 unsigned numElems = 1;
1802 if (TheValType.isVector())
1803 numElems = TheValType.getVectorNumElements();
1804 for (unsigned j = 0, je = numElems; j != je; ++j) {
1805 SDValue TmpVal = theVal;
1806 if (TheValType.isVector())
1807 TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
1808 TheValType.getVectorElementType(), TmpVal,
1809 DAG.getIntPtrConstant(j));
1810 EVT TheStoreType = ValVTs[i];
1811 if (RetTy->isIntegerTy() &&
1812 TD->getTypeAllocSizeInBits(RetTy) < 32) {
1813 // The following zero-extension is for integer types only, and
1814 // specifically not for aggregates.
1815 TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);
1816 TheStoreType = MVT::i32;
1818 else if (TmpVal.getValueType().getSizeInBits() < 16)
1819 TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);
1821 SDValue Ops[] = { Chain, DAG.getConstant(SizeSoFar, MVT::i32), TmpVal };
1822 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
1823 DAG.getVTList(MVT::Other), &Ops[0],
1825 MachinePointerInfo());
// Advance the offset by the element size for vectors, or the whole
// store type's size for scalars.
1826 if(TheValType.isVector())
1828 TheStoreType.getVectorElementType().getStoreSizeInBits() / 8;
1830 SizeSoFar += TheStoreType.getStoreSizeInBits()/8;
1835 return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
// NVPTX does not support multi-character inline-asm constraints: bail out
// early for them, otherwise defer to the generic TargetLowering handling.
1839 void NVPTXTargetLowering::LowerAsmOperandForConstraint(
1840 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
1841 SelectionDAG &DAG) const {
1842 if (Constraint.length() > 1)
1845 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
1848 // NVPTX suuport vector of legal types of any length in Intrinsics because the
1849 // NVPTX specific type legalizer
1850 // will legalize them to the PTX supported length.
// Accepts any legal scalar type, and any vector whose element type is legal
// regardless of vector length (the target legalizer splits long vectors).
// The `return true` lines after each isTypeLegal check and the final return
// are elided from this chunk.
1851 bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
1852 if (isTypeLegal(VT))
1854 if (VT.isVector()) {
1855 MVT eVT = VT.getVectorElementType();
1856 if (isTypeLegal(eVT))
1862 // llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
1864 // because we need the information that is only available in the "Value" type
1866 // pointer. In particular, the address space information.
// Describes the memory behavior of NVPTX intrinsics that touch memory so the
// DAG builder can attach MachineMemOperands: fills in opcode, memory VT,
// pointer value and read/write flags per intrinsic. (Alignment/vol fields
// and returns are on lines elided from this chunk.)
1867 bool NVPTXTargetLowering::getTgtMemIntrinsic(
1868 IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
1869 switch (Intrinsic) {
// Atomic float add: reads and writes a 32-bit float at the pointer arg.
1873 case Intrinsic::nvvm_atomic_load_add_f32:
1874 Info.opc = ISD::INTRINSIC_W_CHAIN;
1875 Info.memVT = MVT::f32;
1876 Info.ptrVal = I.getArgOperand(0);
1879 Info.readMem = true;
1880 Info.writeMem = true;
// Atomic inc/dec: read-modify-write of a 32-bit integer.
1884 case Intrinsic::nvvm_atomic_load_inc_32:
1885 case Intrinsic::nvvm_atomic_load_dec_32:
1886 Info.opc = ISD::INTRINSIC_W_CHAIN;
1887 Info.memVT = MVT::i32;
1888 Info.ptrVal = I.getArgOperand(0);
1891 Info.readMem = true;
1892 Info.writeMem = true;
// ldu (load via non-coherent cache): read-only; memVT comes from the call's
// result type for integer/pointer flavors, f32 otherwise.
1896 case Intrinsic::nvvm_ldu_global_i:
1897 case Intrinsic::nvvm_ldu_global_f:
1898 case Intrinsic::nvvm_ldu_global_p:
1900 Info.opc = ISD::INTRINSIC_W_CHAIN;
1901 if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
1902 Info.memVT = getValueType(I.getType());
1903 else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
1904 Info.memVT = getValueType(I.getType());
1906 Info.memVT = MVT::f32;
1907 Info.ptrVal = I.getArgOperand(0);
1910 Info.readMem = true;
1911 Info.writeMem = false;
1919 /// isLegalAddressingMode - Return true if the addressing mode represented
1920 /// by AM is legal for this target, for a load/store of the specified type.
1921 /// Used to guide target specific optimizations, like loop strength reduction
1922 /// (LoopStrengthReduce.cpp) and memory optimization for address mode
1923 /// (CodeGenPrepare.cpp)
// PTX accepts only: immediate, register, register+immediate, or plain
// global-symbol addresses. Anything with a scale or an extra base register
// on top of a global is rejected.
1924 bool NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
1927 // AddrMode - This represents an addressing mode of:
1928 // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1930 // The legal address modes are
// A global base cannot be combined with offsets, registers or scaling.
1937 if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)
1943 case 0: // "r", "r+i" or "i" is allowed
1946 if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
1948 // Otherwise we have r+i.
1951 // No scale > 1 is allowed
1957 //===----------------------------------------------------------------------===//
1958 // NVPTX Inline Assembly Support
1959 //===----------------------------------------------------------------------===//
1961 /// getConstraintType - Given a constraint letter, return the type of
1962 /// constraint it is for this target.
// Single-letter NVPTX register constraints map to C_RegisterClass; the case
// labels for the individual letters are on lines elided from this chunk.
// Everything else is delegated to the generic implementation.
1963 NVPTXTargetLowering::ConstraintType
1964 NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
1965 if (Constraint.size() == 1) {
1966 switch (Constraint[0]) {
1977 return C_RegisterClass;
1980 return TargetLowering::getConstraintType(Constraint);
// Maps single-letter inline-asm constraints to NVPTX register classes
// (i16 / i32 / i64 / f32 / f64 register files). The `case` labels for each
// letter are on lines elided from this chunk; unknown constraints fall
// through to the generic TargetLowering lookup.
1983 std::pair<unsigned, const TargetRegisterClass *>
1984 NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
1986 if (Constraint.size() == 1) {
1987 switch (Constraint[0]) {
1989 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
1991 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
1993 return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
1996 return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
1998 return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
2000 return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
2003 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
2006 /// getFunctionAlignment - Return the Log2 alignment of this function.
// NOTE(review): the body's return statement is on a line elided from this
// chunk; restore from upstream.
2007 unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
2011 /// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
// Replaces a legal ("native"-sized) vector LOAD with an NVPTX LoadV2/LoadV4
// target node producing one scalar result per element plus a chain, then
// rebuilds the vector value with BUILD_VECTOR. Sub-16-bit elements are
// loaded as i16 and truncated back. Results pushed: [vector, chain].
// NOTE(review): the switch arms listing supported simple types and several
// closing braces are on lines elided from this chunk.
2012 static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
2013 SmallVectorImpl<SDValue> &Results) {
2014 EVT ResVT = N->getValueType(0);
2017 assert(ResVT.isVector() && "Vector load must have vector type");
2019 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
2020 // legal. We can (and should) split that into 2 loads of <2 x double> here
2021 // but I'm leaving that as a TODO for now.
2022 assert(ResVT.isSimple() && "Can only handle simple types");
2023 switch (ResVT.getSimpleVT().SimpleTy) {
2036 // This is a "native" vector type
2040 EVT EltVT = ResVT.getVectorElementType();
2041 unsigned NumElts = ResVT.getVectorNumElements();
2043 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
2044 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
2045 // loaded type to i16 and propogate the "real" type as the memory type.
2046 bool NeedTrunc = false;
2047 if (EltVT.getSizeInBits() < 16) {
2052 unsigned Opcode = 0;
// Choose the load flavor and matching result-VT list by element count.
2059 Opcode = NVPTXISD::LoadV2;
2060 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
2063 Opcode = NVPTXISD::LoadV4;
2064 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
2065 LdResVTs = DAG.getVTList(ListVTs, 5);
2070 SmallVector<SDValue, 8> OtherOps;
2072 // Copy regular operands
2073 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
2074 OtherOps.push_back(N->getOperand(i));
2076 LoadSDNode *LD = cast<LoadSDNode>(N);
2078 // The select routine does not have access to the LoadSDNode instance, so
2079 // pass along the extension information
2080 OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));
2082 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, &OtherOps[0],
2083 OtherOps.size(), LD->getMemoryVT(),
2084 LD->getMemOperand());
2086 SmallVector<SDValue, 4> ScalarRes;
// Collect per-element results, truncating widened i16 lanes back to the
// real element type when needed.
2088 for (unsigned i = 0; i < NumElts; ++i) {
2089 SDValue Res = NewLD.getValue(i);
2091 Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
2092 ScalarRes.push_back(Res);
// The chain is the last result value of the new load node.
2095 SDValue LoadChain = NewLD.getValue(NumElts);
2098 DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);
2100 Results.push_back(BuildVec);
2101 Results.push_back(LoadChain);
// ReplaceINTRINSIC_W_CHAIN - Custom result legalization for the NVVM
// ldg/ldu global-load intrinsics. Vector results are lowered to
// LDGV2/LDUV2/LDGV4/LDUV4 target nodes; the remaining (scalar i8) case is
// re-emitted with an i16 result type and an i8 memory type.
// NOTE(review): several interior lines (switch headers, case labels, breaks,
// closing braces) are elided from this listing — numbering gaps mark them.
2104 static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
2105 SmallVectorImpl<SDValue> &Results) {
// Operand 0 is the chain; operand 1 is the intrinsic ID constant.
2106 SDValue Chain = N->getOperand(0);
2107 SDValue Intrin = N->getOperand(1);
2110 // Get the intrinsic ID
2111 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
// Only the ldg/ldu global intrinsics receive custom handling here.
2115 case Intrinsic::nvvm_ldg_global_i:
2116 case Intrinsic::nvvm_ldg_global_f:
2117 case Intrinsic::nvvm_ldg_global_p:
2118 case Intrinsic::nvvm_ldu_global_i:
2119 case Intrinsic::nvvm_ldu_global_f:
2120 case Intrinsic::nvvm_ldu_global_p: {
2121 EVT ResVT = N->getValueType(0);
// --- Vector results: split into a vectorized LDG/LDU target node. ---
2123 if (ResVT.isVector()) {
2126 unsigned NumElts = ResVT.getVectorNumElements();
2127 EVT EltVT = ResVT.getVectorElementType();
2129 // Since LDU/LDG are target nodes, we cannot rely on DAG type
2131 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
2132 // loaded type to i16 and propagate the "real" type as the memory type.
2133 bool NeedTrunc = false;
// Widen sub-16-bit elements; remember to truncate the results back.
2134 if (EltVT.getSizeInBits() < 16) {
2139 unsigned Opcode = 0;
// 2-element loads: pick the V2 opcode matching ldg vs. ldu.
2149 case Intrinsic::nvvm_ldg_global_i:
2150 case Intrinsic::nvvm_ldg_global_f:
2151 case Intrinsic::nvvm_ldg_global_p:
2152 Opcode = NVPTXISD::LDGV2;
2154 case Intrinsic::nvvm_ldu_global_i:
2155 case Intrinsic::nvvm_ldu_global_f:
2156 case Intrinsic::nvvm_ldu_global_p:
2157 Opcode = NVPTXISD::LDUV2;
// Two element results plus the chain.
2160 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
// 4-element loads: pick the V4 opcode matching ldg vs. ldu.
2166 case Intrinsic::nvvm_ldg_global_i:
2167 case Intrinsic::nvvm_ldg_global_f:
2168 case Intrinsic::nvvm_ldg_global_p:
2169 Opcode = NVPTXISD::LDGV4;
2171 case Intrinsic::nvvm_ldu_global_i:
2172 case Intrinsic::nvvm_ldu_global_f:
2173 case Intrinsic::nvvm_ldu_global_p:
2174 Opcode = NVPTXISD::LDUV4;
// Four element results plus the chain (5 VTs total).
2177 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
2178 LdResVTs = DAG.getVTList(ListVTs, 5);
// Rebuild the operand list: keep the chain, drop the intrinsic-ID operand
// (the opcode now encodes it), keep everything else.
2183 SmallVector<SDValue, 8> OtherOps;
2185 // Copy regular operands
2187 OtherOps.push_back(Chain); // Chain
2188 // Skip operand 1 (intrinsic ID)
2190 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
2191 OtherOps.push_back(N->getOperand(i));
2193 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
// Reuse the original node's memory VT and MachineMemOperand so the "real"
// in-memory type and aliasing information are preserved.
2195 SDValue NewLD = DAG.getMemIntrinsicNode(
2196 Opcode, DL, LdResVTs, &OtherOps[0], OtherOps.size(),
2197 MemSD->getMemoryVT(), MemSD->getMemOperand());
// Collect scalar results, truncating back to the original element type where
// widening occurred (the NeedTrunc guard is elided in this listing).
2199 SmallVector<SDValue, 4> ScalarRes;
2201 for (unsigned i = 0; i < NumElts; ++i) {
2202 SDValue Res = NewLD.getValue(i);
2205 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
2206 ScalarRes.push_back(Res);
// Result value NumElts is the output chain.
2209 SDValue LoadChain = NewLD.getValue(NumElts);
// Reassemble into the requested vector, then report vector + chain.
2212 DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);
2214 Results.push_back(BuildVec);
2215 Results.push_back(LoadChain);
// --- Scalar results: only i8 needs custom handling (i1 is presumably
// promoted elsewhere — confirm against the legalizer). ---
2218 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
2219 "Custom handling of non-i8 ldu/ldg?");
2221 // Just copy all operands as-is
2222 SmallVector<SDValue, 4> Ops;
2223 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
2224 Ops.push_back(N->getOperand(i));
2226 // Force output to i16
2227 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
2229 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
2231 // We make sure the memory type is i8, which will be used during isel
2232 // to select the proper instruction.
2234 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, &Ops[0],
2235 Ops.size(), MVT::i8, MemSD->getMemOperand());
// Truncate the widened i16 result back to i8 and report value + chain.
2237 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
2238 NewLD.getValue(0)));
2239 Results.push_back(NewLD.getValue(1));
// ReplaceNodeResults - TargetLowering hook: produce legalized replacement
// values for nodes whose results need custom handling, dispatching on opcode
// to the static helpers above. Unhandled opcodes are a fatal error.
// NOTE(review): the switch continues past the end of this listing.
2245 void NVPTXTargetLowering::ReplaceNodeResults(
2246 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
2247 switch (N->getOpcode()) {
// default case: reaching here means a node was marked Custom without a
// matching handler below.
2249 report_fatal_error("Unhandled custom legalization");
// ISD::LOAD (vector) -> ReplaceLoadVector.
2251 ReplaceLoadVector(N, DAG, Results);
// ldg/ldu intrinsics -> ReplaceINTRINSIC_W_CHAIN.
2253 case ISD::INTRINSIC_W_CHAIN:
2254 ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);