1 //===-- NVPTXAsmPrinter.cpp - NVPTX LLVM assembly writer ------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains a printer that converts from our internal representation
11 // of machine-dependent LLVM code to NVPTX assembly language.
13 //===----------------------------------------------------------------------===//
15 #include "NVPTXAsmPrinter.h"
16 #include "InstPrinter/NVPTXInstPrinter.h"
17 #include "MCTargetDesc/NVPTXMCAsmInfo.h"
19 #include "NVPTXInstrInfo.h"
20 #include "NVPTXMachineFunctionInfo.h"
21 #include "NVPTXMCExpr.h"
22 #include "NVPTXRegisterInfo.h"
23 #include "NVPTXTargetMachine.h"
24 #include "NVPTXUtilities.h"
25 #include "cl_common_defines.h"
26 #include "llvm/ADT/StringExtras.h"
27 #include "llvm/Analysis/ConstantFolding.h"
28 #include "llvm/CodeGen/Analysis.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineModuleInfo.h"
31 #include "llvm/CodeGen/MachineRegisterInfo.h"
32 #include "llvm/IR/DebugInfo.h"
33 #include "llvm/IR/DerivedTypes.h"
34 #include "llvm/IR/Function.h"
35 #include "llvm/IR/GlobalVariable.h"
36 #include "llvm/IR/Mangler.h"
37 #include "llvm/IR/Module.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/MC/MCStreamer.h"
40 #include "llvm/MC/MCSymbol.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/FormattedStream.h"
44 #include "llvm/Support/Path.h"
45 #include "llvm/Support/TargetRegistry.h"
46 #include "llvm/Support/TimeValue.h"
47 #include "llvm/Target/TargetLoweringObjectFile.h"
51 #define DEPOTNAME "__local_depot"
54 EmitLineNumbers("nvptx-emit-line-numbers", cl::Hidden,
55 cl::desc("NVPTX Specific: Emit Line numbers even without -G"),
59 InterleaveSrc("nvptx-emit-src", cl::ZeroOrMore, cl::Hidden,
60 cl::desc("NVPTX Specific: Emit source line in ptx file"),
64 /// DiscoverDependentGlobals - Return a set of GlobalVariables on which \p V
// Recursively walks the operand tree rooted at \p V and collects every
// GlobalVariable reachable from it into \p Globals. Used so globals can be
// emitted in def-before-use order (ptxas rejects forward references).
66 void DiscoverDependentGlobals(const Value *V,
67 DenseSet<const GlobalVariable *> &Globals) {
// A global variable terminates this branch of the walk (its handling body is
// elided in this view); otherwise any User is traversed operand-by-operand.
68 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
71 if (const User *U = dyn_cast<User>(V)) {
72 for (unsigned i = 0, e = U->getNumOperands(); i != e; ++i) {
// Recurse into each operand; constants/expressions may nest arbitrarily.
73 DiscoverDependentGlobals(U->getOperand(i), Globals);
79 /// VisitGlobalVariableForEmission - Add \p GV to the list of GlobalVariable
80 /// instances to be emitted, but only after any dependents have been added
// Depth-first post-order visit: \p Order receives globals such that every
// global appears after all globals it references. \p Visited holds finished
// nodes; \p Visiting holds the current DFS path for cycle detection.
82 void VisitGlobalVariableForEmission(
83 const GlobalVariable *GV, SmallVectorImpl<const GlobalVariable *> &Order,
84 DenseSet<const GlobalVariable *> &Visited,
85 DenseSet<const GlobalVariable *> &Visiting) {
86 // Have we already visited this one?
87 if (Visited.count(GV))
90 // Do we have a circular dependency?
// insert().second is false when GV is already on the active DFS path,
// i.e. a cycle, which cannot be emitted in def-before-use order.
91 if (!Visiting.insert(GV).second)
92 report_fatal_error("Circular dependency found in global variable set");
94 // Make sure we visit all dependents first
95 DenseSet<const GlobalVariable *> Others;
96 for (unsigned i = 0, e = GV->getNumOperands(); i != e; ++i)
97 DiscoverDependentGlobals(GV->getOperand(i), Others);
99 for (DenseSet<const GlobalVariable *>::iterator I = Others.begin(),
102 VisitGlobalVariableForEmission(*I, Order, Visited, Visiting);
104 // Now we can visit ourself
111 // @TODO: This is a copy from AsmPrinter.cpp. The function is static, so we
112 // cannot just link to the existing version.
113 /// LowerConstant - Lower the specified LLVM Constant to an MCExpr.
115 using namespace nvptx;
// Translates an IR Constant (including constant expressions) into the MC
// expression used for static initializers. Mirrors the static helper in
// AsmPrinter.cpp; report_fatal_error is raised for unsupported forms.
116 const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
117 MCContext &Ctx = AP.OutContext;
// Null/undef initializers lower to the literal 0.
119 if (CV->isNullValue() || isa<UndefValue>(CV))
120 return MCConstantExpr::Create(0, Ctx);
122 if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV))
123 return MCConstantExpr::Create(CI->getZExtValue(), Ctx);
// Global values and block addresses lower to symbol references.
125 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
126 return MCSymbolRefExpr::Create(AP.getSymbol(GV), Ctx);
128 if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV))
129 return MCSymbolRefExpr::Create(AP.GetBlockAddressSymbol(BA), Ctx);
// Everything else must be a ConstantExpr handled by opcode below.
131 const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV);
133 llvm_unreachable("Unknown constant value to lower!");
135 switch (CE->getOpcode()) {
137 // If the code isn't optimized, there may be outstanding folding
138 // opportunities. Attempt to fold the expression using DataLayout as a
139 // last resort before giving up.
140 if (Constant *C = ConstantFoldConstantExpression(
141 CE, AP.TM.getSubtargetImpl()->getDataLayout()))
// NOTE(review): a guard against C == CE appears to be elided here in this
// view; as written, folding to self could recurse — confirm upstream.
143 return LowerConstant(C, AP);
145 // Otherwise report the problem to the user.
148 raw_string_ostream OS(S);
149 OS << "Unsupported expression in static initializer: ";
150 CE->printAsOperand(OS, /*PrintType=*/ false,
151 !AP.MF ? nullptr : AP.MF->getFunction()->getParent());
152 report_fatal_error(OS.str());
154 case Instruction::AddrSpaceCast: {
155 // Strip any addrspace(1)->addrspace(0) addrspace casts. These will be
156 // handled by the generic() logic in the MCExpr printer
157 PointerType *DstTy = cast<PointerType>(CE->getType());
158 PointerType *SrcTy = cast<PointerType>(CE->getOperand(0)->getType());
159 if (SrcTy->getAddressSpace() == 1 && DstTy->getAddressSpace() == 0) {
160 return LowerConstant(cast<const Constant>(CE->getOperand(0)), AP);
// Any other address-space combination is unsupported in an initializer.
163 raw_string_ostream OS(S);
164 OS << "Unsupported expression in static initializer: ";
165 CE->printAsOperand(OS, /*PrintType=*/ false,
166 !AP.MF ? nullptr : AP.MF->getFunction()->getParent());
167 report_fatal_error(OS.str());
169 case Instruction::GetElementPtr: {
170 const DataLayout &TD = *AP.TM.getSubtargetImpl()->getDataLayout();
171 // Generate a symbolic expression for the byte address
// Constant GEPs are flattened to base-symbol + constant byte offset.
172 APInt OffsetAI(TD.getPointerSizeInBits(), 0);
173 cast<GEPOperator>(CE)->accumulateConstantOffset(TD, OffsetAI);
175 const MCExpr *Base = LowerConstant(CE->getOperand(0), AP);
179 int64_t Offset = OffsetAI.getSExtValue();
180 return MCBinaryExpr::CreateAdd(Base, MCConstantExpr::Create(Offset, Ctx),
184 case Instruction::Trunc:
185 // We emit the value and depend on the assembler to truncate the generated
186 // expression properly. This is important for differences between
187 // blockaddress labels. Since the two labels are in the same function, it
188 // is reasonable to treat their delta as a 32-bit value.
190 case Instruction::BitCast:
// Bitcasts are value-preserving; lower the operand directly.
191 return LowerConstant(CE->getOperand(0), AP);
193 case Instruction::IntToPtr: {
194 const DataLayout &TD = *AP.TM.getSubtargetImpl()->getDataLayout();
195 // Handle casts to pointers by changing them into casts to the appropriate
196 // integer type. This promotes constant folding and simplifies this code.
197 Constant *Op = CE->getOperand(0);
198 Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
200 return LowerConstant(Op, AP);
203 case Instruction::PtrToInt: {
204 const DataLayout &TD = *AP.TM.getSubtargetImpl()->getDataLayout();
205 // Support only foldable casts to/from pointers that can be eliminated by
206 // changing the pointer to the appropriately sized integer type.
207 Constant *Op = CE->getOperand(0);
208 Type *Ty = CE->getType();
210 const MCExpr *OpExpr = LowerConstant(Op, AP);
212 // We can emit the pointer value into this slot if the slot is an
213 // integer slot equal to the size of the pointer.
214 if (TD.getTypeAllocSize(Ty) == TD.getTypeAllocSize(Op->getType()))
217 // Otherwise the pointer is smaller than the resultant integer, mask off
218 // the high bits so we are sure to get a proper truncation if the input is
// Build a mask of InBits low ones; relies on InBits <= 64.
220 unsigned InBits = TD.getTypeAllocSizeInBits(Op->getType());
221 const MCExpr *MaskExpr =
222 MCConstantExpr::Create(~0ULL >> (64 - InBits), Ctx);
223 return MCBinaryExpr::CreateAnd(OpExpr, MaskExpr, Ctx);
226 // The MC library also has a right-shift operator, but it isn't consistently
227 // signed or unsigned between different targets.
228 case Instruction::Add:
229 case Instruction::Sub:
230 case Instruction::Mul:
231 case Instruction::SDiv:
232 case Instruction::SRem:
233 case Instruction::Shl:
234 case Instruction::And:
235 case Instruction::Or:
// Binary constant expressions map 1:1 onto MCBinaryExpr operators.
236 case Instruction::Xor: {
237 const MCExpr *LHS = LowerConstant(CE->getOperand(0), AP);
238 const MCExpr *RHS = LowerConstant(CE->getOperand(1), AP);
239 switch (CE->getOpcode()) {
241 llvm_unreachable("Unknown binary operator constant cast expr");
242 case Instruction::Add:
243 return MCBinaryExpr::CreateAdd(LHS, RHS, Ctx);
244 case Instruction::Sub:
245 return MCBinaryExpr::CreateSub(LHS, RHS, Ctx);
246 case Instruction::Mul:
247 return MCBinaryExpr::CreateMul(LHS, RHS, Ctx);
248 case Instruction::SDiv:
249 return MCBinaryExpr::CreateDiv(LHS, RHS, Ctx);
250 case Instruction::SRem:
251 return MCBinaryExpr::CreateMod(LHS, RHS, Ctx);
252 case Instruction::Shl:
253 return MCBinaryExpr::CreateShl(LHS, RHS, Ctx);
254 case Instruction::And:
255 return MCBinaryExpr::CreateAnd(LHS, RHS, Ctx);
256 case Instruction::Or:
257 return MCBinaryExpr::CreateOr(LHS, RHS, Ctx);
258 case Instruction::Xor:
259 return MCBinaryExpr::CreateXor(LHS, RHS, Ctx);
// Emits a ".loc file line col" directive for MI's debug location, skipping
// duplicates of the previous location. Controlled by -nvptx-emit-line-numbers.
265 void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) {
266 if (!EmitLineNumbers)
271 DebugLoc curLoc = MI.getDebugLoc();
// Nothing to emit when neither the previous nor the current location is known.
273 if (prevDebugLoc.isUnknown() && curLoc.isUnknown())
// Suppress a .loc when the location has not changed since the last one.
276 if (prevDebugLoc == curLoc)
279 prevDebugLoc = curLoc;
281 if (curLoc.isUnknown())
284 const MachineFunction *MF = MI.getParent()->getParent();
285 //const TargetMachine &TM = MF->getTarget();
287 const LLVMContext &ctx = MF->getFunction()->getContext();
288 DIScope Scope(curLoc.getScope(ctx));
290 assert((!Scope || Scope.isScope()) &&
291 "Scope of a DebugLoc should be null or a DIScope.");
295 StringRef fileName(Scope.getFilename());
296 StringRef dirName(Scope.getDirectory());
297 SmallString<128> FullPathName = dirName;
// Join directory + file when the filename is relative, matching the path
// construction used in recordAndEmitFilenames.
298 if (!dirName.empty() && !sys::path::is_absolute(fileName)) {
299 sys::path::append(FullPathName, fileName);
300 fileName = FullPathName.str();
// Only files previously registered in filenameMap get a .loc.
303 if (filenameMap.find(fileName.str()) == filenameMap.end())
306 // Emit the line from the source file.
// Optionally interleave the original source line into the PTX output.
308 this->emitSrcInText(fileName.str(), curLoc.getLine());
310 std::stringstream temp;
311 temp << "\t.loc " << filenameMap[fileName.str()] << " " << curLoc.getLine()
312 << " " << curLoc.getCol();
313 OutStreamer.EmitRawText(Twine(temp.str().c_str()));
// Lowers one MachineInstr to an MCInst and streams it out; for the CUDA
// driver interface a .loc directive is emitted first.
316 void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) {
317 SmallString<128> Str;
318 raw_svector_ostream OS(Str);
319 if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)
320 emitLineNumberAsDotLoc(*MI);
// Inst is declared on a line elided from this view; lowerToMCInst fills it.
323 lowerToMCInst(MI, Inst);
324 EmitToStreamer(OutStreamer, Inst);
327 // Handle symbol backtracking for targets that do not support image handles
// Returns true (via the elided return paths) when operand OpNo of MI is an
// image-handle index that was rewritten into a symbol reference in MCOp.
// Which operand holds the texref/surfref depends on the instruction class,
// encoded in the TSFlags of its MCInstrDesc.
328 bool NVPTXAsmPrinter::lowerImageHandleOperand(const MachineInstr *MI,
329 unsigned OpNo, MCOperand &MCOp) {
330 const MachineOperand &MO = MI->getOperand(OpNo);
331 const MCInstrDesc &MCID = MI->getDesc();
333 if (MCID.TSFlags & NVPTXII::IsTexFlag) {
334 // This is a texture fetch, so operand 4 is a texref and operand 5 is
336 if (OpNo == 4 && MO.isImm()) {
337 lowerImageHandleSymbol(MO.getImm(), MCOp);
// Operand 5 (the sampler) is only a handle in non-unified texture mode.
340 if (OpNo == 5 && MO.isImm() && !(MCID.TSFlags & NVPTXII::IsTexModeUnifiedFlag)) {
341 lowerImageHandleSymbol(MO.getImm(), MCOp);
346 } else if (MCID.TSFlags & NVPTXII::IsSuldMask) {
// Decode the vector size (1/2/4) from the suld mask bits in TSFlags.
348 1 << (((MCID.TSFlags & NVPTXII::IsSuldMask) >> NVPTXII::IsSuldShift) - 1);
350 // For a surface load of vector size N, the Nth operand will be the surfref
351 if (OpNo == VecSize && MO.isImm()) {
352 lowerImageHandleSymbol(MO.getImm(), MCOp);
357 } else if (MCID.TSFlags & NVPTXII::IsSustFlag) {
358 // This is a surface store, so operand 0 is a surfref
359 if (OpNo == 0 && MO.isImm()) {
360 lowerImageHandleSymbol(MO.getImm(), MCOp);
365 } else if (MCID.TSFlags & NVPTXII::IsSurfTexQueryFlag) {
366 // This is a query, so operand 1 is a surfref/texref
367 if (OpNo == 1 && MO.isImm()) {
368 lowerImageHandleSymbol(MO.getImm(), MCOp);
// Converts an image-handle index into an MCOperand that references the
// named texref/surfref symbol recorded in the machine function info.
378 void NVPTXAsmPrinter::lowerImageHandleSymbol(unsigned Index, MCOperand &MCOp) {
// const_cast is needed to reach the managed string pool on the TM.
380 TargetMachine &TM = const_cast<TargetMachine&>(MF->getTarget());
381 NVPTXTargetMachine &nvTM = static_cast<NVPTXTargetMachine&>(TM);
382 const NVPTXMachineFunctionInfo *MFI = MF->getInfo<NVPTXMachineFunctionInfo>();
383 const char *Sym = MFI->getImageHandleSymbol(Index);
// The pool owns the string so the MCSymbol's name outlives this call.
384 std::string *SymNamePtr =
385 nvTM.getManagedStrPool()->getManagedString(Sym);
386 MCOp = GetSymbolRef(OutContext.GetOrCreateSymbol(
387 StringRef(SymNamePtr->c_str())));
// Translates MI into OutMI operand-by-operand, with two special cases:
// CALL_PROTOTYPE symbols bypass mangling, and image-handle operands are
// rewritten to symbols on subtargets without image-handle support.
390 void NVPTXAsmPrinter::lowerToMCInst(const MachineInstr *MI, MCInst &OutMI) {
391 OutMI.setOpcode(MI->getOpcode());
392 const NVPTXSubtarget &ST = TM.getSubtarget<NVPTXSubtarget>();
394 // Special: Do not mangle symbol operand of CALL_PROTOTYPE
395 if (MI->getOpcode() == NVPTX::CALL_PROTOTYPE) {
396 const MachineOperand &MO = MI->getOperand(0);
397 OutMI.addOperand(GetSymbolRef(
398 OutContext.GetOrCreateSymbol(Twine(MO.getSymbolName()))));
402 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
403 const MachineOperand &MO = MI->getOperand(i);
// Without hardware image handles, try the image-handle rewrite first;
// if it applies, skip the generic lowering for this operand.
406 if (!ST.hasImageHandles()) {
407 if (lowerImageHandleOperand(MI, i, MCOp)) {
408 OutMI.addOperand(MCOp);
413 if (lowerOperand(MO, MCOp))
414 OutMI.addOperand(MCOp);
// Lowers a single MachineOperand to an MCOperand; returns true on success
// (the elided lines carry the break/return statements for each case).
418 bool NVPTXAsmPrinter::lowerOperand(const MachineOperand &MO,
420 switch (MO.getType()) {
421 default: llvm_unreachable("unknown operand type");
422 case MachineOperand::MO_Register:
// Virtual registers are re-encoded with the register class in the top bits.
423 MCOp = MCOperand::CreateReg(encodeVirtualRegister(MO.getReg()));
425 case MachineOperand::MO_Immediate:
426 MCOp = MCOperand::CreateImm(MO.getImm());
428 case MachineOperand::MO_MachineBasicBlock:
429 MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
430 MO.getMBB()->getSymbol(), OutContext));
432 case MachineOperand::MO_ExternalSymbol:
433 MCOp = GetSymbolRef(GetExternalSymbolSymbol(MO.getSymbolName()));
435 case MachineOperand::MO_GlobalAddress:
436 MCOp = GetSymbolRef(getSymbol(MO.getGlobal()));
438 case MachineOperand::MO_FPImmediate: {
439 const ConstantFP *Cnt = MO.getFPImm();
440 APFloat Val = Cnt->getValueAPF();
// Only f32/f64 immediates are representable; others are fatal.
442 switch (Cnt->getType()->getTypeID()) {
443 default: report_fatal_error("Unsupported FP type"); break;
444 case Type::FloatTyID:
445 MCOp = MCOperand::CreateExpr(
446 NVPTXFloatMCExpr::CreateConstantFPSingle(Val, OutContext));
448 case Type::DoubleTyID:
449 MCOp = MCOperand::CreateExpr(
450 NVPTXFloatMCExpr::CreateConstantFPDouble(Val, OutContext));
// Packs a virtual register into a 32-bit encoding: register-class tag in the
// upper 4 bits (assignments elided in this view), per-class sequence number
// in the low 28 bits. Physical registers pass through with a zero class tag.
459 unsigned NVPTXAsmPrinter::encodeVirtualRegister(unsigned Reg) {
460 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
461 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
// RegMap was populated in setAndEmitFunctionVirtualRegisters; operator[]
// here looks up the per-class number previously assigned to Reg.
463 DenseMap<unsigned, unsigned> &RegMap = VRegMapping[RC];
464 unsigned RegNum = RegMap[Reg];
466 // Encode the register class in the upper 4 bits
467 // Must be kept in sync with NVPTXInstPrinter::printRegName
469 if (RC == &NVPTX::Int1RegsRegClass) {
471 } else if (RC == &NVPTX::Int16RegsRegClass) {
473 } else if (RC == &NVPTX::Int32RegsRegClass) {
475 } else if (RC == &NVPTX::Int64RegsRegClass) {
477 } else if (RC == &NVPTX::Float32RegsRegClass) {
479 } else if (RC == &NVPTX::Float64RegsRegClass) {
482 report_fatal_error("Bad register class");
485 // Insert the vreg number
486 Ret |= (RegNum & 0x0FFFFFFF);
489 // Some special-use registers are actually physical registers.
490 // Encode this as the register class ID of 0 and the real register ID.
491 return Reg & 0x0FFFFFFF;
// Wraps an MCSymbol in a plain (VK_None) symbol-ref expression operand.
495 MCOperand NVPTXAsmPrinter::GetSymbolRef(const MCSymbol *Symbol) {
497 Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None,
499 return MCOperand::CreateExpr(Expr);
// Prints the PTX return-value declaration ("func_retval0") for F into O.
// With ABI (sm >= 20): scalars become ".param .b<size>", pointers use the
// target pointer width, aggregates/vectors become aligned .b8 arrays.
// The non-ABI path (partially elided here) emits ".reg" declarations per
// scalar part instead.
502 void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
503 const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
504 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
506 Type *Ty = F->getReturnType();
508 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
// void returns produce no declaration at all.
510 if (Ty->getTypeID() == Type::VoidTyID)
516 if (Ty->isFloatingPointTy() || Ty->isIntegerTy()) {
// Scalar types: size is the integer bit width, or the FP primitive size.
518 if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
519 size = ITy->getBitWidth();
523 assert(Ty->isFloatingPointTy() && "Floating point type expected here");
524 size = Ty->getPrimitiveSizeInBits();
527 O << ".param .b" << size << " func_retval0";
528 } else if (isa<PointerType>(Ty)) {
529 O << ".param .b" << TLI->getPointerTy().getSizeInBits()
532 if ((Ty->getTypeID() == Type::StructTyID) || isa<VectorType>(Ty)) {
533 unsigned totalsz = TD->getTypeAllocSize(Ty);
534 unsigned retAlignment = 0;
// Use an explicit "align" annotation if present, else ABI alignment.
535 if (!llvm::getAlign(*F, 0, retAlignment))
536 retAlignment = TD->getABITypeAlignment(Ty);
537 O << ".param .align " << retAlignment << " .b8 func_retval0[" << totalsz
540 assert(false && "Unknown return type");
// Non-ABI path: split the return type into its legal value parts.
543 SmallVector<EVT, 16> vtparts;
544 ComputeValueVTs(*TLI, Ty, vtparts);
546 for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
548 EVT elemtype = vtparts[i];
549 if (vtparts[i].isVector()) {
550 elems = vtparts[i].getVectorNumElements();
551 elemtype = vtparts[i].getVectorElementType();
554 for (unsigned j = 0, je = elems; j != je; ++j) {
555 unsigned sz = elemtype.getSizeInBits();
// PTX registers are at least 32 bits wide; narrow ints are widened
// (the widening statement is elided in this view).
556 if (elemtype.isInteger() && (sz < 32))
558 O << ".reg .b" << sz << " func_retval" << idx;
// Convenience overload: prints the return-value string for the IR function
// underlying \p MF.
571 void NVPTXAsmPrinter::printReturnValStr(const MachineFunction &MF,
573 const Function *F = MF.getFunction();
574 printReturnValStr(F, O);
// Emits the PTX function header (linkage, .entry/.func, return value,
// parameter list, kernel directives) instead of a conventional label.
// Also triggers one-time emission of module globals before the first function.
577 void NVPTXAsmPrinter::EmitFunctionEntryLabel() {
578 SmallString<128> Str;
579 raw_svector_ostream O(Str);
// Globals are emitted lazily before the first function body, because
// doInitialization cannot (declarations may depend on function order).
581 if (!GlobalsEmitted) {
582 emitGlobals(*MF->getFunction()->getParent());
583 GlobalsEmitted = true;
587 MRI = &MF->getRegInfo();
588 F = MF->getFunction();
589 emitLinkageDirective(F, O);
590 if (llvm::isKernelFunction(*F))
594 printReturnValStr(*MF, O);
599 emitFunctionParamList(*MF, O);
601 if (llvm::isKernelFunction(*F))
602 emitKernelFunctionDirectives(*F, O);
604 OutStreamer.EmitRawText(O.str());
// Reset per-function .loc tracking (see emitLineNumberAsDotLoc).
606 prevDebugLoc = DebugLoc();
// Opens the function body: emits "{", declares virtual registers, and prints
// declarations for globals demoted into this function's scope.
609 void NVPTXAsmPrinter::EmitFunctionBodyStart() {
611 OutStreamer.EmitRawText(StringRef("{\n"));
612 setAndEmitFunctionVirtualRegisters(*MF);
614 SmallString<128> Str;
615 raw_svector_ostream O(Str);
// Shared variables used by only this function are emitted here, locally.
616 emitDemotedVars(MF->getFunction(), O);
617 OutStreamer.EmitRawText(O.str());
// Closes the function body brace opened in EmitFunctionBodyStart.
620 void NVPTXAsmPrinter::EmitFunctionBodyEnd() {
621 OutStreamer.EmitRawText(StringRef("}\n"));
// Emits an "implicit-def" assembly comment naming the defined register,
// using the PTX virtual-register name for vregs and the target register
// name for physical registers.
625 void NVPTXAsmPrinter::emitImplicitDef(const MachineInstr *MI) const {
626 unsigned RegNo = MI->getOperand(0).getReg();
627 const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
628 if (TRI->isVirtualRegister(RegNo)) {
629 OutStreamer.AddComment(Twine("implicit-def: ") +
630 getVirtualRegisterName(RegNo));
632 OutStreamer.AddComment(
633 Twine("implicit-def: ") +
634 TM.getSubtargetImpl()->getRegisterInfo()->getName(RegNo));
636 OutStreamer.AddBlankLine();
// Emits per-kernel performance directives (.reqntid, .maxntid, .minnctapersm)
// derived from NVVM metadata on F. For reqntid/maxntid, any dimension that
// is present forces the directive; unspecified dimensions default to 1
// (the defaulting assignments are elided in this view).
639 void NVPTXAsmPrinter::emitKernelFunctionDirectives(const Function &F,
640 raw_ostream &O) const {
641 // If the NVVM IR has some of reqntid* specified, then output
642 // the reqntid directive, and set the unspecified ones to 1.
643 // If none of reqntid* is specified, don't output reqntid directive.
644 unsigned reqntidx, reqntidy, reqntidz;
645 bool specified = false;
646 if (llvm::getReqNTIDx(F, reqntidx) == false)
650 if (llvm::getReqNTIDy(F, reqntidy) == false)
654 if (llvm::getReqNTIDz(F, reqntidz) == false)
660 O << ".reqntid " << reqntidx << ", " << reqntidy << ", " << reqntidz
663 // If the NVVM IR has some of maxntid* specified, then output
664 // the maxntid directive, and set the unspecified ones to 1.
665 // If none of maxntid* is specified, don't output maxntid directive.
666 unsigned maxntidx, maxntidy, maxntidz;
668 if (llvm::getMaxNTIDx(F, maxntidx) == false)
672 if (llvm::getMaxNTIDy(F, maxntidy) == false)
676 if (llvm::getMaxNTIDz(F, maxntidz) == false)
682 O << ".maxntid " << maxntidx << ", " << maxntidy << ", " << maxntidz
// minctasm metadata maps directly onto the .minnctapersm directive.
686 if (llvm::getMinCTASm(F, mincta))
687 O << ".minnctapersm " << mincta << "\n";
// Returns the printable PTX name for virtual register Reg, composed of the
// register-class prefix and the per-class number assigned during
// setAndEmitFunctionVirtualRegisters. Asserts if Reg was never mapped.
691 NVPTXAsmPrinter::getVirtualRegisterName(unsigned Reg) const {
692 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
695 raw_string_ostream NameStr(Name);
697 VRegRCMap::const_iterator I = VRegMapping.find(RC);
698 assert(I != VRegMapping.end() && "Bad register class");
699 const DenseMap<unsigned, unsigned> &RegMap = I->second;
701 VRegMap::const_iterator VI = RegMap.find(Reg);
702 assert(VI != RegMap.end() && "Bad virtual register");
703 unsigned MappedVR = VI->second;
705 NameStr << getNVPTXRegClassStr(RC) << MappedVR;
// Prints the PTX name of virtual register \p vr to the output stream.
711 void NVPTXAsmPrinter::emitVirtualRegister(unsigned int vr,
713 O << getVirtualRegisterName(vr);
// Prints vector-element suffixes (e.g. "_0".."_3") for an immediate operand
// according to the given modifier string. The "comm" variants only print for
// immediates inside a specific range (their print statements are elided in
// this view); the "pos" variants wrap the index modulo the vector width.
716 void NVPTXAsmPrinter::printVecModifiedImmediate(
717 const MachineOperand &MO, const char *Modifier, raw_ostream &O) {
// Lookup table maps indices 0-7 onto element digits for 4-wide vectors.
718 static const char vecelem[] = { '0', '1', '2', '3', '0', '1', '2', '3' };
719 int Imm = (int) MO.getImm();
720 if (0 == strcmp(Modifier, "vecelem"))
721 O << "_" << vecelem[Imm];
722 else if (0 == strcmp(Modifier, "vecv4comm1")) {
723 if ((Imm < 0) || (Imm > 3))
725 } else if (0 == strcmp(Modifier, "vecv4comm2")) {
726 if ((Imm < 4) || (Imm > 7))
728 } else if (0 == strcmp(Modifier, "vecv4pos")) {
731 O << "_" << vecelem[Imm % 4];
732 } else if (0 == strcmp(Modifier, "vecv2comm1")) {
733 if ((Imm < 0) || (Imm > 1))
735 } else if (0 == strcmp(Modifier, "vecv2comm2")) {
736 if ((Imm < 2) || (Imm > 3))
738 } else if (0 == strcmp(Modifier, "vecv2pos")) {
741 O << "_" << vecelem[Imm % 2];
743 llvm_unreachable("Unknown Modifier on immediate operand");
// Emits a forward ".func"/".entry" declaration (no body) for F, used when a
// function is referenced before its definition appears in the module.
748 void NVPTXAsmPrinter::emitDeclaration(const Function *F, raw_ostream &O) {
750 emitLinkageDirective(F, O);
751 if (llvm::isKernelFunction(*F))
755 printReturnValStr(F, O);
756 O << *getSymbol(F) << "\n";
757 emitFunctionParamList(F, O);
// Returns true (via elided return lines) when constant C participates,
// directly or transitively through its users, in the initializer of a global
// variable. The special "llvm.used" array is excluded.
761 static bool usedInGlobalVarDef(const Constant *C) {
765 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
766 if (GV->getName().str() == "llvm.used")
// Walk up through constant users to find an enclosing global definition.
771 for (const User *U : C->users())
772 if (const Constant *C = dyn_cast<Constant>(U))
773 if (usedInGlobalVarDef(C))
// Checks whether all (transitive) uses of U occur inside at most one
// function, recording that function in \p oneFunc. Debug-info metadata uses
// ("llvm.dbg.gv"/"llvm.dbg.sp") and the "llvm.used" array disqualify or are
// handled specially on lines elided from this view.
779 static bool usedInOneFunc(const User *U, Function const *&oneFunc) {
780 if (const GlobalVariable *othergv = dyn_cast<GlobalVariable>(U)) {
781 if (othergv->getName().str() == "llvm.used")
785 if (const Instruction *instr = dyn_cast<Instruction>(U)) {
786 if (instr->getParent() && instr->getParent()->getParent()) {
787 const Function *curFunc = instr->getParent()->getParent();
// A use from a second, different function means "more than one".
788 if (oneFunc && (curFunc != oneFunc))
796 if (const MDNode *md = dyn_cast<MDNode>(U))
797 if (md->hasName() && ((md->getName().str() == "llvm.dbg.gv") ||
798 (md->getName().str() == "llvm.dbg.sp")))
// Recurse through all users; any failing subtree fails the whole check.
801 for (const User *UU : U->users())
802 if (usedInOneFunc(UU, oneFunc) == false)
808 /* Find out if a global variable can be demoted to local scope.
809  * Currently, this is valid for CUDA shared variables, which have local
810  * scope and global lifetime. So the conditions to check are :
811  * 1. Is the global variable in shared address space?
812  * 2. Does it have internal linkage?
813  * 3. Is the global variable referenced only in one function?
// On success, \p f receives the single function that uses \p gv.
815 static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) {
816 if (gv->hasInternalLinkage() == false)
818 const PointerType *Pty = gv->getType();
819 if (Pty->getAddressSpace() != llvm::ADDRESS_SPACE_SHARED)
822 const Function *oneFunc = nullptr;
// flag is true when all uses are confined to oneFunc (checked above).
824 bool flag = usedInOneFunc(gv, oneFunc);
// Returns true (via elided return lines) when constant C is used, directly
// or through nested constants, by an instruction inside a function already
// recorded in \p seenMap — i.e. a caller that was emitted before the callee.
833 static bool useFuncSeen(const Constant *C,
834 llvm::DenseMap<const Function *, bool> &seenMap) {
835 for (const User *U : C->users()) {
836 if (const Constant *cu = dyn_cast<Constant>(U)) {
837 if (useFuncSeen(cu, seenMap))
839 } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
840 const BasicBlock *bb = I->getParent();
843 const Function *caller = bb->getParent();
// A hit in seenMap means this caller appears earlier in the module.
846 if (seenMap.find(caller) != seenMap.end())
// Walks the module and emits forward declarations for functions that ptxas
// must see before their first use: external declarations, functions whose
// address is taken in a global initializer, and functions whose callers are
// emitted earlier in the module than the callee itself.
853 void NVPTXAsmPrinter::emitDeclarations(const Module &M, raw_ostream &O) {
854 llvm::DenseMap<const Function *, bool> seenMap;
855 for (Module::const_iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI) {
856 const Function *F = FI;
858 if (F->isDeclaration()) {
// Intrinsics never get a PTX declaration.
861 if (F->getIntrinsicID())
863 emitDeclaration(F, O);
866 for (const User *U : F->users()) {
867 if (const Constant *C = dyn_cast<Constant>(U)) {
868 if (usedInGlobalVarDef(C)) {
869 // The use is in the initialization of a global variable
870 // that is a function pointer, so print a declaration
871 // for the original function
872 emitDeclaration(F, O);
875 // Emit a declaration of this function if the function that
876 // uses this constant expr has already been seen.
877 if (useFuncSeen(C, seenMap)) {
878 emitDeclaration(F, O);
883 if (!isa<Instruction>(U))
885 const Instruction *instr = cast<Instruction>(U);
886 const BasicBlock *bb = instr->getParent();
889 const Function *caller = bb->getParent();
893 // If a caller has already been seen, then the caller is
894 // appearing in the module before the callee. so print out
895 // a declaration for the callee.
896 if (seenMap.find(caller) != seenMap.end()) {
897 emitDeclaration(F, O);
// Collects the source file names referenced by the module's debug info
// (compile units and subprograms), assigns each a numeric id in filenameMap,
// and emits .file directives for compile-unit files. These ids are later
// used by emitLineNumberAsDotLoc.
905 void NVPTXAsmPrinter::recordAndEmitFilenames(Module &M) {
906 DebugInfoFinder DbgFinder;
907 DbgFinder.processModule(M);
910 for (DICompileUnit DIUnit : DbgFinder.compile_units()) {
911 StringRef Filename(DIUnit.getFilename());
912 StringRef Dirname(DIUnit.getDirectory());
913 SmallString<128> FullPathName = Dirname;
// Relative paths are joined with the compile-unit directory.
914 if (!Dirname.empty() && !sys::path::is_absolute(Filename)) {
915 sys::path::append(FullPathName, Filename);
916 Filename = FullPathName.str();
// Skip files already registered; otherwise record the next id (the id
// counter's declaration/increment lines are elided in this view).
918 if (filenameMap.find(Filename.str()) != filenameMap.end())
920 filenameMap[Filename.str()] = i;
921 OutStreamer.EmitDwarfFileDirective(i, "", Filename.str());
// Subprogram files are recorded too, but without a .file directive here.
925 for (DISubprogram SP : DbgFinder.subprograms()) {
926 StringRef Filename(SP.getFilename());
927 StringRef Dirname(SP.getDirectory());
928 SmallString<128> FullPathName = Dirname;
929 if (!Dirname.empty() && !sys::path::is_absolute(Filename)) {
930 sys::path::append(FullPathName, Filename);
931 Filename = FullPathName.str();
933 if (filenameMap.find(Filename.str()) != filenameMap.end())
935 filenameMap[Filename.str()] = i;
// Module-level setup: deliberately does NOT call AsmPrinter's version
// (see the commented-out calls); instead it initializes the object-file
// lowering and mangler itself, emits the PTX header, module inline asm,
// and (for CUDA) the debug-info file table. Globals are NOT emitted here —
// they are deferred to emitGlobals (see EmitFunctionEntryLabel).
940 bool NVPTXAsmPrinter::doInitialization(Module &M) {
942 SmallString<128> Str1;
943 raw_svector_ostream OS1(Str1);
945 MMI = getAnalysisIfAvailable<MachineModuleInfo>();
946 MMI->AnalyzeModule(M);
948 // We need to call the parent's one explicitly.
949 //bool Result = AsmPrinter::doInitialization(M);
951 // Initialize TargetLoweringObjectFile.
952 const_cast<TargetLoweringObjectFile &>(getObjFileLowering())
953 .Initialize(OutContext, TM);
955 Mang = new Mangler(TM.getSubtargetImpl()->getDataLayout());
957 // Emit header before any dwarf directives are emitted below.
959 OutStreamer.EmitRawText(OS1.str());
961 // Already commented out
962 //bool Result = AsmPrinter::doInitialization(M);
964 // Emit module-level inline asm if it exists.
965 if (!M.getModuleInlineAsm().empty()) {
966 OutStreamer.AddComment("Start of file scope inline assembly");
967 OutStreamer.AddBlankLine();
968 OutStreamer.EmitRawText(StringRef(M.getModuleInlineAsm()));
969 OutStreamer.AddBlankLine();
970 OutStreamer.AddComment("End of file scope inline assembly");
971 OutStreamer.AddBlankLine();
// .loc/.file support is only emitted for the CUDA driver interface.
974 if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)
975 recordAndEmitFilenames(M);
977 GlobalsEmitted = false;
979 return false; // success
// Emits all module-level function declarations and global variables,
// topologically sorted so every global is printed after the globals its
// initializer references (ptxas has no forward references).
982 void NVPTXAsmPrinter::emitGlobals(const Module &M) {
983 SmallString<128> Str2;
984 raw_svector_ostream OS2(Str2);
986 emitDeclarations(M, OS2);
988 // As ptxas does not support forward references of globals, we need to first
989 // sort the list of module-level globals in def-use order. We visit each
990 // global variable in order, and ensure that we emit it *after* its dependent
991 // globals. We use a little extra memory maintaining both a set and a list to
992 // have fast searches while maintaining a strict ordering.
993 SmallVector<const GlobalVariable *, 8> Globals;
994 DenseSet<const GlobalVariable *> GVVisited;
995 DenseSet<const GlobalVariable *> GVVisiting;
997 // Visit each global variable, in order
998 for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
1000 VisitGlobalVariableForEmission(I, Globals, GVVisited, GVVisiting);
// Sanity checks: DFS must have covered every global and fully unwound.
1002 assert(GVVisited.size() == M.getGlobalList().size() &&
1003 "Missed a global variable");
1004 assert(GVVisiting.size() == 0 && "Did not fully process a global variable");
1006 // Print out module-level global variables in proper order
1007 for (unsigned i = 0, e = Globals.size(); i != e; ++i)
1008 printModuleLevelGV(Globals[i], OS2);
1012 OutStreamer.EmitRawText(OS2.str());
// Writes the PTX module preamble: .version, .target (with NVCL/CUDA-specific
// options), and .address_size, based on the subtarget configuration.
1015 void NVPTXAsmPrinter::emitHeader(Module &M, raw_ostream &O) {
1017 O << "// Generated by LLVM NVPTX Back-End\n";
// PTXVersion is stored as a scaled integer, e.g. 32 -> "3.2".
1021 unsigned PTXVersion = nvptxSubtarget.getPTXVersion();
1022 O << ".version " << (PTXVersion / 10) << "." << (PTXVersion % 10) << "\n";
1025 O << nvptxSubtarget.getTargetName();
1027 if (nvptxSubtarget.getDrvInterface() == NVPTX::NVCL)
1028 O << ", texmode_independent";
1029 if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA) {
// Subtargets without native f64 ask the assembler to map f64 onto f32.
1030 if (!nvptxSubtarget.hasDouble())
1031 O << ", map_f64_to_f32";
1034 if (MAI->doesSupportDebugInformation())
1039 O << ".address_size ";
1040 if (nvptxSubtarget.is64Bit())
// Module finalization. Ensures globals were emitted (for modules with no
// functions), then temporarily detaches all global variables from the module
// so AsmPrinter::doFinalization does not emit them a second time, restoring
// them afterwards.
1049 bool NVPTXAsmPrinter::doFinalization(Module &M) {
1051 // If we did not emit any functions, then the global declarations have not
1052 // yet been emitted.
1053 if (!GlobalsEmitted) {
1055 GlobalsEmitted = true;
1058 // XXX Temproarily remove global variables so that doFinalization() will not
1059 // emit them again (global variables are emitted at beginning).
1061 Module::GlobalListType &global_list = M.getGlobalList();
1062 int i, n = global_list.size();
// Raw backup array; freed on a line elided from this view — verify
// upstream that gv_array is deleted after restoration.
1063 GlobalVariable **gv_array = new GlobalVariable *[n];
1065 // first, back-up GlobalVariable in gv_array
1067 for (Module::global_iterator I = global_list.begin(), E = global_list.end();
1069 gv_array[i++] = &*I;
1071 // second, empty global_list
// remove() detaches without deleting the GlobalVariable objects.
1072 while (!global_list.empty())
1073 global_list.remove(global_list.begin());
1075 // call doFinalization
1076 bool ret = AsmPrinter::doFinalization(M);
1078 // now we restore global variables
1079 for (i = 0; i < n; i++)
1080 global_list.insert(global_list.end(), gv_array[i]);
1082 clearAnnotationCache(&M);
1087 //bool Result = AsmPrinter::doFinalization(M);
1088 // Instead of calling the parents doFinalization, we may
1089 // clone parents doFinalization and customize here.
1090 // Currently, we if NVISA out the EmitGlobals() in
1091 // parent's doFinalization, which is too intrusive.
1093 // Same for the doInitialization.
1097 // This function emits appropriate linkage directives for
1098 // functions and global variables.
1100 // extern function declaration -> .extern
1101 // extern function definition -> .visible
1102 // external global variable with init -> .visible
1103 // external without init -> .extern
1104 // appending -> not allowed, assert.
1105 // for any linkage other than
1106 // internal, private, linker_private,
1107 // linker_private_weak, linker_private_weak_def_auto,
1108 // we emit -> .weak.
// NOTE: these directives are only produced for the CUDA driver interface;
// the directive-printing statements themselves are elided in this view.
1110 void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue *V,
1112 if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA) {
1113 if (V->hasExternalLinkage()) {
1114 if (isa<GlobalVariable>(V)) {
1115 const GlobalVariable *GVar = cast<GlobalVariable>(V);
// Initialized external global -> definition (.visible); else .extern.
1117 if (GVar->hasInitializer())
1122 } else if (V->isDeclaration())
1126 } else if (V->hasAppendingLinkage()) {
// Appending linkage cannot be expressed in PTX; abort with a message.
1128 msg.append("Error: ");
1129 msg.append("Symbol ");
1131 msg.append(V->getName().str());
1132 msg.append("has unsupported appending linkage type");
1133 llvm_unreachable(msg.c_str());
1134 } else if (!V->hasInternalLinkage() &&
1135 !V->hasPrivateLinkage()) {
// Print one module-level global variable as a PTX variable declaration
// (address space, alignment, type, name, optional initializer) into O.
// When processDemoted is false, globals that can be demoted to a single
// function's local scope are recorded in localDecls instead of printed.
1141 void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
1143 bool processDemoted) {
// Globals placed in the "llvm.metadata" section carry no codegen payload.
1146 if (GVar->hasSection()) {
1147 if (GVar->getSection() == StringRef("llvm.metadata"))
1151 // Skip LLVM intrinsic global variables
1152 if (GVar->getName().startswith("llvm.") ||
1153 GVar->getName().startswith("nvvm."))
1156 const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
1158 // GlobalVariables are always constant pointers themselves.
1159 const PointerType *PTy = GVar->getType();
1160 Type *ETy = PTy->getElementType();
// Emit the linkage prefix first (see emitLinkageDirective's table).
1162 if (GVar->hasExternalLinkage()) {
1163 if (GVar->hasInitializer())
1167 } else if (GVar->hasLinkOnceLinkage() || GVar->hasWeakLinkage() ||
1168 GVar->hasAvailableExternallyLinkage() ||
1169 GVar->hasCommonLinkage()) {
// Textures, surfaces and samplers get dedicated PTX opaque-ref declarations
// and need none of the generic type/alignment handling below.
1173 if (llvm::isTexture(*GVar)) {
1174 O << ".global .texref " << llvm::getTextureName(*GVar) << ";\n";
1178 if (llvm::isSurface(*GVar)) {
1179 O << ".global .surfref " << llvm::getSurfaceName(*GVar) << ";\n";
1183 if (GVar->isDeclaration()) {
1184 // (extern) declarations, no definition or initializer
1185 // Currently the only known declaration is for an automatic __local
1186 // (.shared) promoted to global.
1187 emitPTXGlobalVariable(GVar, O);
1192 if (llvm::isSampler(*GVar)) {
1193 O << ".global .samplerref " << llvm::getSamplerName(*GVar);
1195 const Constant *Initializer = nullptr;
1196 if (GVar->hasInitializer())
1197 Initializer = GVar->getInitializer();
1198 const ConstantInt *CI = nullptr;
1200 CI = dyn_cast<ConstantInt>(Initializer);
// The sampler initializer packs OpenCL sampler state into bit fields;
// decode it with the __CLK_* masks from cl_common_defines.h.
1202 unsigned sample = CI->getZExtValue();
1207 addr = ((sample & __CLK_ADDRESS_MASK) >> __CLK_ADDRESS_BASE);
1209 O << "addr_mode_" << i << " = ";
1215 O << "clamp_to_border";
1218 O << "clamp_to_edge";
1229 O << "filter_mode = ";
1230 switch ((sample & __CLK_FILTER_MASK) >> __CLK_FILTER_BASE) {
1238 llvm_unreachable("Anisotropic filtering is not supported");
1243 if (!((sample & __CLK_NORMALIZED_MASK) >> __CLK_NORMALIZED_BASE)) {
1244 O << ", force_unnormalized_coords = 1";
1253 if (GVar->hasPrivateLinkage()) {
// Frontend-internal bookkeeping globals are recognized by name prefix.
1255 if (!strncmp(GVar->getName().data(), "unrollpragma", 12))
1258 // FIXME - need better way (e.g. Metadata) to avoid generating this global
1259 if (!strncmp(GVar->getName().data(), "filename", 8))
1261 if (GVar->use_empty())
// Demotion: a global used by exactly one function is deferred and later
// emitted inside that function by emitDemotedVars.
1265 const Function *demotedFunc = nullptr;
1266 if (!processDemoted && canDemoteGlobalVar(GVar, demotedFunc)) {
1267 O << "// " << GVar->getName().str() << " has been demoted\n";
1268 if (localDecls.find(demotedFunc) != localDecls.end())
1269 localDecls[demotedFunc].push_back(GVar);
1271 std::vector<const GlobalVariable *> temp;
1272 temp.push_back(GVar);
1273 localDecls[demotedFunc] = temp;
1279 emitPTXAddressSpace(PTy->getAddressSpace(), O);
1281 if (isManaged(*GVar)) {
1282 O << " .attribute(.managed)";
// Zero alignment in the IR means "use the target's preferred alignment".
1285 if (GVar->getAlignment() == 0)
1286 O << " .align " << (int) TD->getPrefTypeAlignment(ETy);
1288 O << " .align " << GVar->getAlignment();
1290 if (ETy->isSingleValueType()) {
1292 // Special case: ABI requires that we use .u8 for predicates
1293 if (ETy->isIntegerTy(1))
1296 O << getPTXFundamentalTypeStr(ETy, false);
1298 O << *getSymbol(GVar);
1300 // Ptx allows variable initialization only for constant and global state
1302 if (GVar->hasInitializer()) {
1303 if ((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) ||
1304 (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) {
1305 const Constant *Initializer = GVar->getInitializer();
1306 // 'undef' is treated as there is no value specified.
1307 if (!Initializer->isNullValue() && !isa<UndefValue>(Initializer)) {
1309 printScalarConstant(Initializer, O);
1312 // The frontend adds zero-initializer to variables that don't have an
1313 // initial value, so skip warning for this case.
1314 if (!GVar->getInitializer()->isNullValue()) {
1315 std::string warnMsg = "initial value of '" + GVar->getName().str() +
1316 "' is not allowed in addrspace(" +
1317 llvm::utostr_32(PTy->getAddressSpace()) + ")";
1318 report_fatal_error(warnMsg.c_str());
1323 unsigned int ElementSize = 0;
1325 // Although PTX has direct support for struct type and array type and
1326 // LLVM IR is very similar to PTX, the LLVM CodeGen does not support for
1327 // targets that support these high level field accesses. Structs, arrays
1328 // and vectors are lowered into arrays of bytes.
1329 switch (ETy->getTypeID()) {
1330 case Type::StructTyID:
1331 case Type::ArrayTyID:
1332 case Type::VectorTyID:
1333 ElementSize = TD->getTypeStoreSize(ETy);
1334 // Ptx allows variable initialization only for constant and
1335 // global state spaces.
1336 if (((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) ||
1337 (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) &&
1338 GVar->hasInitializer()) {
1339 const Constant *Initializer = GVar->getInitializer();
1340 if (!isa<UndefValue>(Initializer) && !Initializer->isNullValue()) {
1341 AggBuffer aggBuffer(ElementSize, O, *this);
1342 bufferAggregateConstant(Initializer, &aggBuffer);
// If the aggregate contains symbol addresses, the array must be emitted as
// pointer-sized words (.u64/.u32) so relocations line up; otherwise .b8.
1343 if (aggBuffer.numSymbols) {
1344 if (nvptxSubtarget.is64Bit()) {
1345 O << " .u64 " << *getSymbol(GVar) << "[";
1346 O << ElementSize / 8;
1348 O << " .u32 " << *getSymbol(GVar) << "[";
1349 O << ElementSize / 4;
1353 O << " .b8 " << *getSymbol(GVar) << "[";
1361 O << " .b8 " << *getSymbol(GVar);
1369 O << " .b8 " << *getSymbol(GVar);
1378 llvm_unreachable("type not supported yet");
// Emit the globals that printModuleLevelGV recorded in localDecls as demoted
// to function f, printing each with processDemoted=true so they are emitted
// here rather than deferred again.
1385 void NVPTXAsmPrinter::emitDemotedVars(const Function *f, raw_ostream &O) {
1386 if (localDecls.find(f) == localDecls.end())
1389 std::vector<const GlobalVariable *> &gvars = localDecls[f];
1391 for (unsigned i = 0, e = gvars.size(); i != e; ++i) {
1392 O << "\t// demoted variable\n\t";
1393 printModuleLevelGV(gvars[i], O, true);
// Map an NVPTX address-space number to its PTX state-space keyword and print
// it to O. Unknown address spaces are a fatal error.
1397 void NVPTXAsmPrinter::emitPTXAddressSpace(unsigned int AddressSpace,
1398 raw_ostream &O) const {
1399 switch (AddressSpace) {
1400 case llvm::ADDRESS_SPACE_LOCAL:
1403 case llvm::ADDRESS_SPACE_GLOBAL:
1406 case llvm::ADDRESS_SPACE_CONST:
1409 case llvm::ADDRESS_SPACE_SHARED:
1413 report_fatal_error("Bad address space found while emitting PTX");
// Return the PTX fundamental-type string (e.g. "u32", "f32") for a scalar
// LLVM type. Integer widths up to 64 bits are emitted as "u<N>"; pointers
// are sized by the subtarget's pointer width.
// NOTE(review): the useB4PTR parameter's effect is in an elided branch here --
// presumably it selects ".b<N>" vs ".u<N>" for pointers; confirm in the
// full source before relying on it.
1419 NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, bool useB4PTR) const {
1420 switch (Ty->getTypeID()) {
1422 llvm_unreachable("unexpected type");
1424 case Type::IntegerTyID: {
1425 unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth();
1428 else if (NumBits <= 64) {
1429 std::string name = "u";
1430 return name + utostr(NumBits);
1432 llvm_unreachable("Integer too large");
1437 case Type::FloatTyID:
1439 case Type::DoubleTyID:
1441 case Type::PointerTyID:
1442 if (nvptxSubtarget.is64Bit())
1452 llvm_unreachable("unexpected type");
// Emit a bare PTX variable declaration (address space, alignment, type and
// name) for GVar without linkage directives or initializers -- used for
// extern declarations from printModuleLevelGV.
1456 void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
1459 const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
1461 // GlobalVariables are always constant pointers themselves.
1462 const PointerType *PTy = GVar->getType();
1463 Type *ETy = PTy->getElementType();
1466 emitPTXAddressSpace(PTy->getAddressSpace(), O);
// Zero alignment in the IR means "use the target's preferred alignment".
1467 if (GVar->getAlignment() == 0)
1468 O << " .align " << (int) TD->getPrefTypeAlignment(ETy);
1470 O << " .align " << GVar->getAlignment();
1472 if (ETy->isSingleValueType()) {
1474 O << getPTXFundamentalTypeStr(ETy);
1476 O << *getSymbol(GVar);
1480 int64_t ElementSize = 0;
1482 // Although PTX has direct support for struct type and array type and LLVM IR
1483 // is very similar to PTX, the LLVM CodeGen does not support for targets that
1484 // support these high level field accesses. Structs and arrays are lowered
1485 // into arrays of bytes.
1486 switch (ETy->getTypeID()) {
1487 case Type::StructTyID:
1488 case Type::ArrayTyID:
1489 case Type::VectorTyID:
1490 ElementSize = TD->getTypeStoreSize(ETy);
1491 O << " .b8 " << *getSymbol(GVar) << "[";
1493 O << itostr(ElementSize);
1498 llvm_unreachable("type not supported yet");
// Compute the alignment the OpenCL (NVCL) driver interface expects for Ty:
// scalars use the preferred alignment, arrays recurse on the element type,
// vectors use numElements * elementAlignment (with an elided special case),
// structs take the largest member alignment, and function pointers use the
// preferred pointer alignment.
1503 static unsigned int getOpenCLAlignment(const DataLayout *TD, Type *Ty) {
1504 if (Ty->isSingleValueType())
1505 return TD->getPrefTypeAlignment(Ty);
1507 const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
1509 return getOpenCLAlignment(TD, ATy->getElementType());
1511 const VectorType *VTy = dyn_cast<VectorType>(Ty);
1513 Type *ETy = VTy->getElementType();
1514 unsigned int numE = VTy->getNumElements();
1515 unsigned int alignE = TD->getPrefTypeAlignment(ETy);
1519 return numE * alignE;
1522 const StructType *STy = dyn_cast<StructType>(Ty);
1524 unsigned int alignStruct = 1;
1525 // Go through each element of the struct and find the
1526 // largest alignment.
1527 for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
1528 Type *ETy = STy->getElementType(i);
1529 unsigned int align = getOpenCLAlignment(TD, ETy);
1530 if (align > alignStruct)
1531 alignStruct = align;
1536 const FunctionType *FTy = dyn_cast<FunctionType>(Ty);
1538 return TD->getPointerPrefAlignment();
1539 return TD->getPrefTypeAlignment(Ty);
// Print the PTX name of a function parameter. For the NVCL and CUDA driver
// interfaces this is the canonical "<fnsym>_param_<index>" form; the elided
// tail falls back to the IR argument's own name.
1542 void NVPTXAsmPrinter::printParamName(Function::const_arg_iterator I,
1543 int paramIndex, raw_ostream &O) {
1544 if ((nvptxSubtarget.getDrvInterface() == NVPTX::NVCL) ||
1545 (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA))
1546 O << *getSymbol(I->getParent()) << "_param_" << paramIndex;
1548 std::string argName = I->getName();
1549 const char *p = argName.c_str();
// Index-based overload: print the name of parameter paramIndex of the current
// function. For NVCL/CUDA the symbolic "_param_<N>" form is used directly;
// otherwise the matching argument iterator is found by a linear scan and
// delegated to the iterator overload. Out-of-range indices are a bug.
1560 void NVPTXAsmPrinter::printParamName(int paramIndex, raw_ostream &O) {
1561 Function::const_arg_iterator I, E;
1564 if ((nvptxSubtarget.getDrvInterface() == NVPTX::NVCL) ||
1565 (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)) {
1566 O << *CurrentFnSym << "_param_" << paramIndex;
1570 for (I = F->arg_begin(), E = F->arg_end(); I != E; ++I, i++) {
1571 if (i == paramIndex) {
1572 printParamName(I, paramIndex, O);
1576 llvm_unreachable("paramIndex out of bound");
// Emit the PTX parameter list for function F into O. Kernel parameters are
// emitted as .param declarations (with .ptr/.align qualifiers, and special
// opaque types for images/samplers); non-kernel (device) functions use
// .param .b<N> under the >=sm_20 ABI or .reg .b<N> otherwise. byval
// aggregates are either emitted as a sized .b8 array (ABI/kernel) or split
// into scalar .reg parts.
1579 void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
1580 const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
1581 const AttributeSet &PAL = F->getAttributes();
1582 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
1583 Function::const_arg_iterator I, E;
1584 unsigned paramIndex = 0;
1586 bool isKernelFunc = llvm::isKernelFunction(*F);
// sm_20 and newer follow the .param-passing ABI.
1587 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1588 MVT thePointerTy = TLI->getPointerTy();
1592 for (I = F->arg_begin(), E = F->arg_end(); I != E; ++I, paramIndex++) {
1593 Type *Ty = I->getType();
1600 // Handle image/sampler parameters
1601 if (isKernelFunction(*F)) {
1602 if (isSampler(*I) || isImage(*I)) {
1604 std::string sname = I->getName();
// Writable images become surfaces; image handles (when supported) are
// passed as 64-bit pointers to the opaque ref.
1605 if (isImageWriteOnly(*I) || isImageReadWrite(*I)) {
1606 if (nvptxSubtarget.hasImageHandles())
1607 O << "\t.param .u64 .ptr .surfref ";
1609 O << "\t.param .surfref ";
1610 O << *CurrentFnSym << "_param_" << paramIndex;
1612 else { // Default image is read_only
1613 if (nvptxSubtarget.hasImageHandles())
1614 O << "\t.param .u64 .ptr .texref ";
1616 O << "\t.param .texref ";
1617 O << *CurrentFnSym << "_param_" << paramIndex;
1620 if (nvptxSubtarget.hasImageHandles())
1621 O << "\t.param .u64 .ptr .samplerref ";
1623 O << "\t.param .samplerref ";
1624 O << *CurrentFnSym << "_param_" << paramIndex;
// Non-byval parameters. (Attribute indices are 1-based, hence +1.)
1630 if (PAL.hasAttribute(paramIndex + 1, Attribute::ByVal) == false) {
1631 if (Ty->isAggregateType() || Ty->isVectorTy()) {
1632 // Just print .param .align <a> .b8 .param[size];
1633 // <a> = PAL.getparamalignment
1634 // size = typeallocsize of element type
1635 unsigned align = PAL.getParamAlignment(paramIndex + 1);
1637 align = TD->getABITypeAlignment(Ty);
1639 unsigned sz = TD->getTypeAllocSize(Ty);
1640 O << "\t.param .align " << align << " .b8 ";
1641 printParamName(I, paramIndex, O);
1642 O << "[" << sz << "]";
1647 const PointerType *PTy = dyn_cast<PointerType>(Ty);
1650 // Special handling for pointer arguments to kernel
1651 O << "\t.param .u" << thePointerTy.getSizeInBits() << " ";
// Non-CUDA (OpenCL-style) interfaces annotate kernel pointers with the
// pointee state space and OpenCL alignment.
1653 if (nvptxSubtarget.getDrvInterface() != NVPTX::CUDA) {
1654 Type *ETy = PTy->getElementType();
1655 int addrSpace = PTy->getAddressSpace();
1656 switch (addrSpace) {
1660 case llvm::ADDRESS_SPACE_CONST:
1661 O << ".ptr .const ";
1663 case llvm::ADDRESS_SPACE_SHARED:
1664 O << ".ptr .shared ";
1666 case llvm::ADDRESS_SPACE_GLOBAL:
1667 O << ".ptr .global ";
1670 O << ".align " << (int) getOpenCLAlignment(TD, ETy) << " ";
1672 printParamName(I, paramIndex, O);
1676 // non-pointer scalar to kernel func
1678 // Special case: predicate operands become .u8 types
1679 if (Ty->isIntegerTy(1))
1682 O << getPTXFundamentalTypeStr(Ty);
1684 printParamName(I, paramIndex, O);
1687 // Non-kernel function, just print .param .b<size> for ABI
1688 // and .reg .b<size> for non-ABI
1690 if (isa<IntegerType>(Ty)) {
1691 sz = cast<IntegerType>(Ty)->getBitWidth();
1694 } else if (isa<PointerType>(Ty))
1695 sz = thePointerTy.getSizeInBits();
1697 sz = Ty->getPrimitiveSizeInBits();
1699 O << "\t.param .b" << sz << " ";
1701 O << "\t.reg .b" << sz << " ";
1702 printParamName(I, paramIndex, O);
1706 // param has byVal attribute. So should be a pointer
1707 const PointerType *PTy = dyn_cast<PointerType>(Ty);
1708 assert(PTy && "Param with byval attribute should be a pointer type");
1709 Type *ETy = PTy->getElementType();
1711 if (isABI || isKernelFunc) {
1712 // Just print .param .align <a> .b8 .param[size];
1713 // <a> = PAL.getparamalignment
1714 // size = typeallocsize of element type
1715 unsigned align = PAL.getParamAlignment(paramIndex + 1);
1717 align = TD->getABITypeAlignment(ETy);
1719 unsigned sz = TD->getTypeAllocSize(ETy);
1720 O << "\t.param .align " << align << " .b8 ";
1721 printParamName(I, paramIndex, O);
1722 O << "[" << sz << "]";
1725 // Split the ETy into constituent parts and
1726 // print .param .b<size> <name> for each part.
1727 // Further, if a part is vector, print the above for
1728 // each vector element.
1729 SmallVector<EVT, 16> vtparts;
1730 ComputeValueVTs(*TLI, ETy, vtparts);
1731 for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
1733 EVT elemtype = vtparts[i];
1734 if (vtparts[i].isVector()) {
1735 elems = vtparts[i].getVectorNumElements();
1736 elemtype = vtparts[i].getVectorElementType();
1739 for (unsigned j = 0, je = elems; j != je; ++j) {
1740 unsigned sz = elemtype.getSizeInBits();
// Sub-32-bit integer parts are widened (elided) before being printed.
1741 if (elemtype.isInteger() && (sz < 32))
1743 O << "\t.reg .b" << sz << " ";
1744 printParamName(I, paramIndex, O);
// MachineFunction convenience overload: forwards to the IR-Function variant.
1760 void NVPTXAsmPrinter::emitFunctionParamList(const MachineFunction &MF,
1762 const Function *F = MF.getFunction();
1763 emitFunctionParamList(F, O);
// Emit the per-function virtual-register declarations: the local stack depot
// (if any), the %SP/%SPL stack-pointer registers, and one ".reg" declaration
// per register class sized by how many virtual registers of that class the
// function uses. Also populates VRegMapping, which translates LLVM's global
// virtual-register numbers into per-class 1-based numbers used in the output.
1766 void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
1767 const MachineFunction &MF) {
1768 SmallString<128> Str;
1769 raw_svector_ostream O(Str);
1771 // Map the global virtual register number to a register class specific
1772 // virtual register number starting from 1 with that class.
1773 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1774 //unsigned numRegClasses = TRI->getNumRegClasses();
1776 // Emit the Fake Stack Object
1777 const MachineFrameInfo *MFI = MF.getFrameInfo();
1778 int NumBytes = (int) MFI->getStackSize();
1780 O << "\t.local .align " << MFI->getMaxAlignment() << " .b8 \t" << DEPOTNAME
1781 << getFunctionNumber() << "[" << NumBytes << "];\n";
// %SP/%SPL are the generic and local-space views of the stack pointer; their
// width follows the subtarget pointer size.
1782 if (nvptxSubtarget.is64Bit()) {
1783 O << "\t.reg .b64 \t%SP;\n";
1784 O << "\t.reg .b64 \t%SPL;\n";
1786 O << "\t.reg .b32 \t%SP;\n";
1787 O << "\t.reg .b32 \t%SPL;\n";
1791 // Go through all virtual registers to establish the mapping between the
1793 // register number and the per class virtual register number.
1794 // We use the per class virtual register number in the ptx output.
1795 unsigned int numVRs = MRI->getNumVirtRegs();
1796 for (unsigned i = 0; i < numVRs; i++) {
1797 unsigned int vr = TRI->index2VirtReg(i);
1798 const TargetRegisterClass *RC = MRI->getRegClass(vr);
1799 DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
// Per-class numbering starts at 1: the new id is current-size + 1.
1800 int n = regmap.size();
1801 regmap.insert(std::make_pair(vr, n + 1));
1804 // Emit register declarations
1805 // @TODO: Extract out the real register usage
1806 // O << "\t.reg .pred %p<" << NVPTXNumRegisters << ">;\n";
1807 // O << "\t.reg .s16 %rc<" << NVPTXNumRegisters << ">;\n";
1808 // O << "\t.reg .s16 %rs<" << NVPTXNumRegisters << ">;\n";
1809 // O << "\t.reg .s32 %r<" << NVPTXNumRegisters << ">;\n";
1810 // O << "\t.reg .s64 %rd<" << NVPTXNumRegisters << ">;\n";
1811 // O << "\t.reg .f32 %f<" << NVPTXNumRegisters << ">;\n";
1812 // O << "\t.reg .f64 %fd<" << NVPTXNumRegisters << ">;\n";
1814 // Emit declaration of the virtual registers or 'physical' registers for
1815 // each register class
1816 for (unsigned i=0; i< TRI->getNumRegClasses(); i++) {
1817 const TargetRegisterClass *RC = TRI->getRegClass(i);
1818 DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
1819 std::string rcname = getNVPTXRegClassName(RC);
1820 std::string rcStr = getNVPTXRegClassStr(RC);
1821 int n = regmap.size();
1823 // Only declare those registers that may be used.
1825 O << "\t.reg " << rcname << " \t" << rcStr << "<" << (n+1)
1830 OutStreamer.EmitRawText(O.str());
// Print a floating-point constant as a fixed-width hex bit pattern (8 hex
// digits for float, 16 for double), zero-padded on the left. The value is
// first converted to the matching IEEE format before its bits are extracted.
1833 void NVPTXAsmPrinter::printFPConstant(const ConstantFP *Fp, raw_ostream &O) {
1834 APFloat APF = APFloat(Fp->getValueAPF()); // make a copy
1836 unsigned int numHex;
1839 if (Fp->getType()->getTypeID() == Type::FloatTyID) {
1842 APF.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &ignored);
1843 } else if (Fp->getType()->getTypeID() == Type::DoubleTyID) {
1846 APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &ignored);
1848 llvm_unreachable("unsupported fp type");
1850 APInt API = APF.bitcastToAPInt();
1851 std::string hexstr(utohexstr(API.getZExtValue()));
// Left-pad with zeros so the emitted literal always has numHex digits.
1853 if (hexstr.length() < numHex)
1854 O << std::string(numHex - hexstr.length(), '0');
1855 O << utohexstr(API.getZExtValue());
// Print a scalar constant (integer, FP, null pointer, global address, or a
// constant expression over those) in PTX syntax. For global addresses in the
// generic address space (EmitGeneric, non-function, address space 0) an
// elided wrapper -- presumably the generic() conversion -- surrounds the
// symbol; otherwise the bare symbol is printed.
1858 void NVPTXAsmPrinter::printScalarConstant(const Constant *CPV, raw_ostream &O) {
1859 if (const ConstantInt *CI = dyn_cast<ConstantInt>(CPV)) {
1860 O << CI->getValue();
1863 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CPV)) {
1864 printFPConstant(CFP, O);
1867 if (isa<ConstantPointerNull>(CPV)) {
1871 if (const GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
1872 PointerType *PTy = dyn_cast<PointerType>(GVar->getType());
1873 bool IsNonGenericPointer = false;
1874 if (PTy && PTy->getAddressSpace() != 0) {
1875 IsNonGenericPointer = true;
1877 if (EmitGeneric && !isa<Function>(CPV) && !IsNonGenericPointer) {
1879 O << *getSymbol(GVar);
1882 O << *getSymbol(GVar);
// Constant expressions: strip pointer casts first, then treat the underlying
// global like the direct-global case above; anything else is lowered via
// LowerConstant into an MCExpr.
1886 if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
1887 const Value *v = Cexpr->stripPointerCasts();
1888 PointerType *PTy = dyn_cast<PointerType>(Cexpr->getType());
1889 bool IsNonGenericPointer = false;
1890 if (PTy && PTy->getAddressSpace() != 0) {
1891 IsNonGenericPointer = true;
1893 if (const GlobalValue *GVar = dyn_cast<GlobalValue>(v)) {
1894 if (EmitGeneric && !isa<Function>(v) && !IsNonGenericPointer) {
1896 O << *getSymbol(GVar);
1899 O << *getSymbol(GVar);
1903 O << *LowerConstant(CPV, *this);
1907 llvm_unreachable("Not scalar type found in printScalarConstant()");
// Serialize one constant into aggBuffer in little-endian byte order. Bytes,
// when non-zero, is the total space the constant must occupy (trailing bytes
// are zero-filled). Undef/zero constants just add zeros; ptrtoint expressions
// and pointers record a symbol plus placeholder zeros so the address can be
// patched in at emission time; nested aggregates recurse through
// bufferAggregateConstant.
1910 void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
1911 AggBuffer *aggBuffer) {
1913 const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
1915 if (isa<UndefValue>(CPV) || CPV->isNullValue()) {
1916 int s = TD->getTypeAllocSize(CPV->getType());
1919 aggBuffer->addZeros(s);
1924 switch (CPV->getType()->getTypeID()) {
1926 case Type::IntegerTyID: {
1927 const Type *ETy = CPV->getType();
1928 if (ETy == Type::getInt8Ty(CPV->getContext())) {
1930 (unsigned char)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
1932 aggBuffer->addBytes(ptr, 1, Bytes);
1933 } else if (ETy == Type::getInt16Ty(CPV->getContext())) {
1934 short int16 = (short)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
// NOTE(review): serializing via the host's in-memory representation assumes
// a little-endian host; true for all supported NVPTX build hosts.
1935 ptr = (unsigned char *)&int16;
1936 aggBuffer->addBytes(ptr, 2, Bytes);
1937 } else if (ETy == Type::getInt32Ty(CPV->getContext())) {
1938 if (const ConstantInt *constInt = dyn_cast<ConstantInt>(CPV)) {
1939 int int32 = (int)(constInt->getZExtValue());
1940 ptr = (unsigned char *)&int32;
1941 aggBuffer->addBytes(ptr, 4, Bytes);
// Try constant-folding the expression to a plain integer first; if that
// fails, a ptrtoint records the symbol with 4 placeholder bytes.
1943 } else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
1944 if (const ConstantInt *constInt = dyn_cast<ConstantInt>(
1945 ConstantFoldConstantExpression(Cexpr, TD))) {
1946 int int32 = (int)(constInt->getZExtValue());
1947 ptr = (unsigned char *)&int32;
1948 aggBuffer->addBytes(ptr, 4, Bytes);
1951 if (Cexpr->getOpcode() == Instruction::PtrToInt) {
1952 Value *v = Cexpr->getOperand(0)->stripPointerCasts();
1953 aggBuffer->addSymbol(v);
1954 aggBuffer->addZeros(4);
1958 llvm_unreachable("unsupported integer const type");
1959 } else if (ETy == Type::getInt64Ty(CPV->getContext())) {
1960 if (const ConstantInt *constInt = dyn_cast<ConstantInt>(CPV)) {
1961 long long int64 = (long long)(constInt->getZExtValue());
1962 ptr = (unsigned char *)&int64;
1963 aggBuffer->addBytes(ptr, 8, Bytes);
1965 } else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
1966 if (const ConstantInt *constInt = dyn_cast<ConstantInt>(
1967 ConstantFoldConstantExpression(Cexpr, TD))) {
1968 long long int64 = (long long)(constInt->getZExtValue());
1969 ptr = (unsigned char *)&int64;
1970 aggBuffer->addBytes(ptr, 8, Bytes);
1973 if (Cexpr->getOpcode() == Instruction::PtrToInt) {
1974 Value *v = Cexpr->getOperand(0)->stripPointerCasts();
1975 aggBuffer->addSymbol(v);
1976 aggBuffer->addZeros(8);
1980 llvm_unreachable("unsupported integer const type");
1982 llvm_unreachable("unsupported integer const type");
1985 case Type::FloatTyID:
1986 case Type::DoubleTyID: {
1987 const ConstantFP *CFP = dyn_cast<ConstantFP>(CPV);
1988 const Type *Ty = CFP->getType();
1989 if (Ty == Type::getFloatTy(CPV->getContext())) {
1990 float float32 = (float) CFP->getValueAPF().convertToFloat();
1991 ptr = (unsigned char *)&float32;
1992 aggBuffer->addBytes(ptr, 4, Bytes);
1993 } else if (Ty == Type::getDoubleTy(CPV->getContext())) {
1994 double float64 = CFP->getValueAPF().convertToDouble();
1995 ptr = (unsigned char *)&float64;
1996 aggBuffer->addBytes(ptr, 8, Bytes);
1998 llvm_unreachable("unsupported fp const type");
2002 case Type::PointerTyID: {
2003 if (const GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
2004 aggBuffer->addSymbol(GVar);
2005 } else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
2006 const Value *v = Cexpr->stripPointerCasts();
2007 aggBuffer->addSymbol(v);
2009 unsigned int s = TD->getTypeAllocSize(CPV->getType());
2010 aggBuffer->addZeros(s);
2014 case Type::ArrayTyID:
2015 case Type::VectorTyID:
2016 case Type::StructTyID: {
2017 if (isa<ConstantArray>(CPV) || isa<ConstantVector>(CPV) ||
2018 isa<ConstantStruct>(CPV) || isa<ConstantDataSequential>(CPV)) {
2019 int ElementSize = TD->getTypeAllocSize(CPV->getType());
2020 bufferAggregateConstant(CPV, aggBuffer);
// Pad the aggregate out to the requested total size.
2021 if (Bytes > ElementSize)
2022 aggBuffer->addZeros(Bytes - ElementSize);
2023 } else if (isa<ConstantAggregateZero>(CPV))
2024 aggBuffer->addZeros(Bytes);
2026 llvm_unreachable("Unexpected Constant type");
2031 llvm_unreachable("unsupported type");
// Serialize an aggregate constant (array, vector, data-sequential, or struct)
// into aggBuffer by serializing each element via bufferLEByte. For structs,
// each element's byte budget is derived from the struct layout so that
// inter-field padding is emitted; the last element's budget extends to the
// end of the struct's alloc size.
2035 void NVPTXAsmPrinter::bufferAggregateConstant(const Constant *CPV,
2036 AggBuffer *aggBuffer) {
2037 const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
2041 if (isa<ConstantArray>(CPV) || isa<ConstantVector>(CPV)) {
// Elements of arrays/vectors are uniform, so no per-element byte budget (0).
2042 if (CPV->getNumOperands())
2043 for (unsigned i = 0, e = CPV->getNumOperands(); i != e; ++i)
2044 bufferLEByte(cast<Constant>(CPV->getOperand(i)), 0, aggBuffer);
2048 if (const ConstantDataSequential *CDS =
2049 dyn_cast<ConstantDataSequential>(CPV)) {
2050 if (CDS->getNumElements())
2051 for (unsigned i = 0; i < CDS->getNumElements(); ++i)
2052 bufferLEByte(cast<Constant>(CDS->getElementAsConstant(i)), 0,
2057 if (isa<ConstantStruct>(CPV)) {
2058 if (CPV->getNumOperands()) {
2059 StructType *ST = cast<StructType>(CPV->getType());
2060 for (unsigned i = 0, e = CPV->getNumOperands(); i != e; ++i) {
// Last field: budget runs to the end of the struct (including tail padding).
2062 Bytes = TD->getStructLayout(ST)->getElementOffset(0) +
2063 TD->getTypeAllocSize(ST) -
2064 TD->getStructLayout(ST)->getElementOffset(i);
// Interior field: budget is the distance to the next field's offset.
2066 Bytes = TD->getStructLayout(ST)->getElementOffset(i + 1) -
2067 TD->getStructLayout(ST)->getElementOffset(i);
2068 bufferLEByte(cast<Constant>(CPV->getOperand(i)), Bytes, aggBuffer);
2073 llvm_unreachable("unsupported constant type in printAggregateConstant()");
2076 // buildTypeNameMap - Run through symbol table looking for type names.
// Return whether Ty was recorded in TypeNameMap under one of the OpenCL
// image struct names (struct._image1d_t/_image2d_t/_image3d_t).
2079 bool NVPTXAsmPrinter::isImageType(const Type *Ty) {
2081 std::map<const Type *, std::string>::iterator PI = TypeNameMap.find(Ty);
2083 if (PI != TypeNameMap.end() && (!PI->second.compare("struct._image1d_t") ||
2084 !PI->second.compare("struct._image2d_t") ||
2085 !PI->second.compare("struct._image3d_t")))
// Return whether MI is a pseudo-instruction for which source-location (.loc)
// output should be suppressed: call-lowering pseudos, param/retval
// load/store pseudos, declarations, prototypes, and DBG_VALUE.
2092 bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) {
2093 switch (MI.getOpcode()) {
2096 case NVPTX::CallArgBeginInst:
2097 case NVPTX::CallArgEndInst0:
2098 case NVPTX::CallArgEndInst1:
2099 case NVPTX::CallArgF32:
2100 case NVPTX::CallArgF64:
2101 case NVPTX::CallArgI16:
2102 case NVPTX::CallArgI32:
2103 case NVPTX::CallArgI32imm:
2104 case NVPTX::CallArgI64:
2105 case NVPTX::CallArgParam:
2106 case NVPTX::CallVoidInst:
2107 case NVPTX::CallVoidInstReg:
2108 case NVPTX::Callseq_End:
2109 case NVPTX::CallVoidInstReg64:
2110 case NVPTX::DeclareParamInst:
2111 case NVPTX::DeclareRetMemInst:
2112 case NVPTX::DeclareRetRegInst:
2113 case NVPTX::DeclareRetScalarInst:
2114 case NVPTX::DeclareScalarParamInst:
2115 case NVPTX::DeclareScalarRegInst:
2116 case NVPTX::StoreParamF32:
2117 case NVPTX::StoreParamF64:
2118 case NVPTX::StoreParamI16:
2119 case NVPTX::StoreParamI32:
2120 case NVPTX::StoreParamI64:
2121 case NVPTX::StoreParamI8:
2122 case NVPTX::StoreRetvalF32:
2123 case NVPTX::StoreRetvalF64:
2124 case NVPTX::StoreRetvalI16:
2125 case NVPTX::StoreRetvalI32:
2126 case NVPTX::StoreRetvalI64:
2127 case NVPTX::StoreRetvalI8:
2128 case NVPTX::LastCallArgF32:
2129 case NVPTX::LastCallArgF64:
2130 case NVPTX::LastCallArgI16:
2131 case NVPTX::LastCallArgI32:
2132 case NVPTX::LastCallArgI32imm:
2133 case NVPTX::LastCallArgI64:
2134 case NVPTX::LastCallArgParam:
2135 case NVPTX::LoadParamMemF32:
2136 case NVPTX::LoadParamMemF64:
2137 case NVPTX::LoadParamMemI16:
2138 case NVPTX::LoadParamMemI32:
2139 case NVPTX::LoadParamMemI64:
2140 case NVPTX::LoadParamMemI8:
2141 case NVPTX::PrototypeInst:
2142 case NVPTX::DBG_VALUE:
2148 /// PrintAsmOperand - Print out an operand for an inline asm expression.
2150 bool NVPTXAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
2151 unsigned AsmVariant,
2152 const char *ExtraCode, raw_ostream &O) {
// Per AsmPrinter convention: returning true means "unknown modifier"; only
// single-character extra codes are handled, with unrecognized ones delegated
// to the base class.
2153 if (ExtraCode && ExtraCode[0]) {
2154 if (ExtraCode[1] != 0)
2155 return true; // Unknown modifier.
2157 switch (ExtraCode[0]) {
2159 // See if this is a generic print operand
2160 return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
2166 printOperand(MI, OpNo, O);
// Print a memory operand for inline asm. No extra-code modifiers are
// supported (true = unknown modifier, per AsmPrinter convention).
2171 bool NVPTXAsmPrinter::PrintAsmMemoryOperand(
2172 const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant,
2173 const char *ExtraCode, raw_ostream &O) {
2174 if (ExtraCode && ExtraCode[0])
2175 return true; // Unknown modifier
2178 printMemOperand(MI, OpNo, O);
// Print machine operand opNum of MI: registers (the VRDepot physical register
// becomes the local depot name, other physregs print by name, virtual
// registers go through the per-class renumbering), immediates (with optional
// "vec" modifier handling), FP immediates, global addresses, and basic-block
// labels.
2184 void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
2185 raw_ostream &O, const char *Modifier) {
2186 const MachineOperand &MO = MI->getOperand(opNum);
2187 switch (MO.getType()) {
2188 case MachineOperand::MO_Register:
2189 if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
// VRDepot is the pseudo-register standing for the function's local depot.
2190 if (MO.getReg() == NVPTX::VRDepot)
2191 O << DEPOTNAME << getFunctionNumber();
2193 O << NVPTXInstPrinter::getRegisterName(MO.getReg());
2195 emitVirtualRegister(MO.getReg(), O);
2199 case MachineOperand::MO_Immediate:
2202 else if (strstr(Modifier, "vec") == Modifier)
2203 printVecModifiedImmediate(MO, Modifier, O);
2206 "Don't know how to handle modifier on immediate operand");
2209 case MachineOperand::MO_FPImmediate:
2210 printFPConstant(MO.getFPImm(), O);
2213 case MachineOperand::MO_GlobalAddress:
2214 O << *getSymbol(MO.getGlobal());
2217 case MachineOperand::MO_MachineBasicBlock:
2218 O << *MO.getMBB()->getSymbol();
2222 llvm_unreachable("Operand type not supported.");
// Print a two-operand memory reference (base at opNum, offset at opNum+1).
// With the "add" modifier the offset is always printed; otherwise a zero
// immediate offset is suppressed so the output avoids a redundant "+0".
2226 void NVPTXAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum,
2227 raw_ostream &O, const char *Modifier) {
2228 printOperand(MI, opNum, O);
2230 if (Modifier && !strcmp(Modifier, "add")) {
2232 printOperand(MI, opNum + 1, O);
2234 if (MI->getOperand(opNum + 1).isImm() &&
2235 MI->getOperand(opNum + 1).getImm() == 0)
2236 return; // don't print ',0' or '+0'
2238 printOperand(MI, opNum + 1, O);
2243 // Force static initialization.
// Registers this asm printer for both the 32-bit and 64-bit NVPTX targets.
// NOTE(review): a second, near-identical registration function
// (LLVMInitializeNVPTXAsmPrinter) appears later in this file -- confirm both
// entry points are intentional.
2244 extern "C" void LLVMInitializeNVPTXBackendAsmPrinter() {
2245 RegisterAsmPrinter<NVPTXAsmPrinter> X(TheNVPTXTarget32);
2246 RegisterAsmPrinter<NVPTXAsmPrinter> Y(TheNVPTXTarget64);
// Interleave one line of original source into the PTX output as raw text:
// the filename followed by the requested line read via a cached LineReader.
2249 void NVPTXAsmPrinter::emitSrcInText(StringRef filename, unsigned line) {
2250 std::stringstream temp;
2251 LineReader *reader = this->getReader(filename.str());
2253 temp << filename.str();
2257 temp << reader->readLine(line);
2259 this->OutStreamer.EmitRawText(Twine(temp.str()));
// Return a LineReader for filename, caching a single reader and replacing it
// when a different file is requested. The elided branches presumably delete
// the stale reader and store the new one in a member -- confirm in the full
// source.
2262 LineReader *NVPTXAsmPrinter::getReader(std::string filename) {
2264 reader = new LineReader(filename);
2267 if (reader->fileName() != filename) {
2269 reader = new LineReader(filename);
// Return line lineNum (1-based) of the underlying file. The stream only
// moves forward, so a request for an earlier line rewinds to the beginning;
// lines are then consumed one at a time (max 500 chars per line, per the
// getline buffer) until the target line is reached.
2275 std::string LineReader::readLine(unsigned lineNum) {
2276 if (lineNum < theCurLine) {
2278 fstr.seekg(0, std::ios::beg);
2280 while (theCurLine < lineNum) {
2281 fstr.getline(buff, 500);
2287 // Force static initialization.
2288 extern "C" void LLVMInitializeNVPTXAsmPrinter() {
2289 RegisterAsmPrinter<NVPTXAsmPrinter> X(TheNVPTXTarget32);
2290 RegisterAsmPrinter<NVPTXAsmPrinter> Y(TheNVPTXTarget64);