1 //===- PTXInstrInfo.td - PTX Instruction defs -----------------*- tblgen-*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the PTX instructions in TableGen format.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // Instruction format superclass
16 //===----------------------------------------------------------------------===//
18 include "PTXInstrFormats.td"
20 //===----------------------------------------------------------------------===//
21 // Code Generation Predicates
22 //===----------------------------------------------------------------------===//
// Address Size Support
// Select between 32-bit and 64-bit addressing instruction forms based on
// the subtarget's use64BitAddresses() setting.
25 def Use32BitAddresses : Predicate<"!getSubtarget().use64BitAddresses()">;
26 def Use64BitAddresses : Predicate<"getSubtarget().use64BitAddresses()">;
28 // Shader Model Support
// Gate instructions on the target shader model (sm_13 / sm_20) reported
// by the subtarget.
29 def SupportsSM13 : Predicate<"getSubtarget().supportsSM13()">;
30 def DoesNotSupportSM13 : Predicate<"!getSubtarget().supportsSM13()">;
31 def SupportsSM20 : Predicate<"getSubtarget().supportsSM20()">;
32 def DoesNotSupportSM20 : Predicate<"!getSubtarget().supportsSM20()">;
34 // PTX Version Support
// Gate instructions on the PTX ISA version (2.0 / 2.1) reported by the
// subtarget.
35 def SupportsPTX20 : Predicate<"getSubtarget().supportsPTX20()">;
36 def DoesNotSupportPTX20 : Predicate<"!getSubtarget().supportsPTX20()">;
37 def SupportsPTX21 : Predicate<"getSubtarget().supportsPTX21()">;
38 def DoesNotSupportPTX21 : Predicate<"!getSubtarget().supportsPTX21()">;
40 //===----------------------------------------------------------------------===//
41 // Instruction Pattern Stuff
42 //===----------------------------------------------------------------------===//
// Load fragments: each matches a plain load only when the source pointer's
// type places it in a specific PTX address space (GLOBAL, CONSTANT, LOCAL,
// PARAMETER, or SHARED).  The predicate casts the node to LoadSDNode and
// compares the address space of the source Value's PointerType against the
// corresponding PTX:: enumerator.
// NOTE(review): some interior lines of these fragments (the declaration of
// Src and each closing "}]>;") are not visible in this chunk — confirm
// against the full file before editing.
44 def load_global : PatFrag<(ops node:$ptr), (load node:$ptr), [{
46 const PointerType *PT;
47 if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
48 (PT = dyn_cast<PointerType>(Src->getType())))
49 return PT->getAddressSpace() == PTX::GLOBAL;
53 def load_constant : PatFrag<(ops node:$ptr), (load node:$ptr), [{
55 const PointerType *PT;
56 if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
57 (PT = dyn_cast<PointerType>(Src->getType())))
58 return PT->getAddressSpace() == PTX::CONSTANT;
62 def load_local : PatFrag<(ops node:$ptr), (load node:$ptr), [{
64 const PointerType *PT;
65 if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
66 (PT = dyn_cast<PointerType>(Src->getType())))
67 return PT->getAddressSpace() == PTX::LOCAL;
71 def load_parameter : PatFrag<(ops node:$ptr), (load node:$ptr), [{
73 const PointerType *PT;
74 if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
75 (PT = dyn_cast<PointerType>(Src->getType())))
76 return PT->getAddressSpace() == PTX::PARAMETER;
80 def load_shared : PatFrag<(ops node:$ptr), (load node:$ptr), [{
82 const PointerType *PT;
83 if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
84 (PT = dyn_cast<PointerType>(Src->getType())))
85 return PT->getAddressSpace() == PTX::SHARED;
// Store fragments: the same address-space discrimination as the load
// fragments, applied to StoreSDNode.  The "def store_<space>" header line
// of each fragment is not visible in this chunk; the target address space
// is identified below by the PTX:: enumerator each predicate compares
// against.
// store fragment for PTX::GLOBAL (presumably store_global — confirm).
90 : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
92 const PointerType *PT;
93 if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
94 (PT = dyn_cast<PointerType>(Src->getType())))
95 return PT->getAddressSpace() == PTX::GLOBAL;
// store fragment for PTX::LOCAL (presumably store_local — confirm).
100 : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
102 const PointerType *PT;
103 if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
104 (PT = dyn_cast<PointerType>(Src->getType())))
105 return PT->getAddressSpace() == PTX::LOCAL;
// store fragment for PTX::PARAMETER (presumably store_parameter — confirm).
110 : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
112 const PointerType *PT;
113 if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
114 (PT = dyn_cast<PointerType>(Src->getType())))
115 return PT->getAddressSpace() == PTX::PARAMETER;
// store fragment for PTX::SHARED (presumably store_shared — confirm).
120 : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
122 const PointerType *PT;
123 if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
124 (PT = dyn_cast<PointerType>(Src->getType())))
125 return PT->getAddressSpace() == PTX::SHARED;
// Address selection patterns: register+register (rr), register+immediate
// (ri), and immediate+immediate (ii) forms, each in 32- and 64-bit pointer
// widths.  Matching is delegated to the named SelectADDR* hooks.
130 def ADDRrr32 : ComplexPattern<i32, 2, "SelectADDRrr", [], []>;
131 def ADDRrr64 : ComplexPattern<i64, 2, "SelectADDRrr", [], []>;
132 def ADDRri32 : ComplexPattern<i32, 2, "SelectADDRri", [], []>;
133 def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri", [], []>;
134 def ADDRii32 : ComplexPattern<i32, 2, "SelectADDRii", [], []>;
135 def ADDRii64 : ComplexPattern<i64, 2, "SelectADDRii", [], []>;
// Memory operand classes: register+offset (MEMri*) and
// immediate+immediate (MEMii*) address operands in 32- and 64-bit widths,
// all printed via printMemOperand.  (The closing "}" of each def is not
// visible in this chunk.)
139 def MEMri32 : Operand<i32> {
140 let PrintMethod = "printMemOperand";
141 let MIOperandInfo = (ops RRegu32, i32imm);
143 def MEMri64 : Operand<i64> {
144 let PrintMethod = "printMemOperand";
145 let MIOperandInfo = (ops RRegu64, i64imm);
147 def MEMii32 : Operand<i32> {
148 let PrintMethod = "printMemOperand";
149 let MIOperandInfo = (ops i32imm, i32imm);
151 def MEMii64 : Operand<i64> {
152 let PrintMethod = "printMemOperand";
153 let MIOperandInfo = (ops i64imm, i64imm);
155 // The operand here does not correspond to an actual address, so we
156 // can use i32 in 64-bit address modes.
// Kernel-parameter operand: a single immediate index printed via
// printParamOperand.
157 def MEMpi : Operand<i32> {
158 let PrintMethod = "printParamOperand";
159 let MIOperandInfo = (ops i32imm);
163 //===----------------------------------------------------------------------===//
164 // PTX Specific Node Definitions
165 //===----------------------------------------------------------------------===//
167 // PTX allows generic 3-register shifts such as: shl r0, r1, r2
// Shift nodes mapped directly to the generic ISD shift opcodes.
168 def PTXshl : SDNode<"ISD::SHL", SDTIntBinOp>;
169 def PTXsrl : SDNode<"ISD::SRL", SDTIntBinOp>;
170 def PTXsra : SDNode<"ISD::SRA", SDTIntBinOp>;
// Chain-only nodes for the PTX "exit" and "ret" terminators.  (The
// "def PTXexit" / "def PTXret" header lines are not visible in this chunk.)
173 : SDNode<"PTXISD::EXIT", SDTNone, [SDNPHasChain]>;
175 : SDNode<"PTXISD::RET", SDTNone, [SDNPHasChain]>;
177 //===----------------------------------------------------------------------===//
178 // Instruction Class Templates
179 //===----------------------------------------------------------------------===//
181 //===- Floating-Point Instructions - 3 Operand Form -----------------------===//
// Emits four variants of a binary FP instruction <opcstr>:
//   rr32/ri32 -> <opcstr>.f32 and rr64/ri64 -> <opcstr>.f64,
// with a register or floating-point-immediate second operand.
182 multiclass PTX_FLOAT_3OP<string opcstr, SDNode opnode> {
183 def rr32 : InstPTX<(outs RRegf32:$d),
184 (ins RRegf32:$a, RRegf32:$b),
185 !strconcat(opcstr, ".f32\t$d, $a, $b"),
186 [(set RRegf32:$d, (opnode RRegf32:$a, RRegf32:$b))]>;
187 def ri32 : InstPTX<(outs RRegf32:$d),
188 (ins RRegf32:$a, f32imm:$b),
189 !strconcat(opcstr, ".f32\t$d, $a, $b"),
190 [(set RRegf32:$d, (opnode RRegf32:$a, fpimm:$b))]>;
191 def rr64 : InstPTX<(outs RRegf64:$d),
192 (ins RRegf64:$a, RRegf64:$b),
193 !strconcat(opcstr, ".f64\t$d, $a, $b"),
194 [(set RRegf64:$d, (opnode RRegf64:$a, RRegf64:$b))]>;
195 def ri64 : InstPTX<(outs RRegf64:$d),
196 (ins RRegf64:$a, f64imm:$b),
197 !strconcat(opcstr, ".f64\t$d, $a, $b"),
198 [(set RRegf64:$d, (opnode RRegf64:$a, fpimm:$b))]>;
201 //===- Floating-Point Instructions - 4 Operand Form -----------------------===//
// Emits fused three-operand FP instructions (e.g. mad): the selection
// pattern matches opnode2(opnode1($a, $b), $c) and emits a single
// <opcstr>.f32/.f64 instruction.
// NOTE(review): the continuation lines of each pattern (the second operand
// of opnode1 and the trailing $c operand / closer) are not visible in this
// chunk — confirm against the full file before editing.
202 multiclass PTX_FLOAT_4OP<string opcstr, SDNode opnode1, SDNode opnode2> {
203 def rrr32 : InstPTX<(outs RRegf32:$d),
204 (ins RRegf32:$a, RRegf32:$b, RRegf32:$c),
205 !strconcat(opcstr, ".f32\t$d, $a, $b, $c"),
206 [(set RRegf32:$d, (opnode2 (opnode1 RRegf32:$a,
209 def rri32 : InstPTX<(outs RRegf32:$d),
210 (ins RRegf32:$a, RRegf32:$b, f32imm:$c),
211 !strconcat(opcstr, ".f32\t$d, $a, $b, $c"),
212 [(set RRegf32:$d, (opnode2 (opnode1 RRegf32:$a,
215 def rrr64 : InstPTX<(outs RRegf64:$d),
216 (ins RRegf64:$a, RRegf64:$b, RRegf64:$c),
217 !strconcat(opcstr, ".f64\t$d, $a, $b, $c"),
218 [(set RRegf64:$d, (opnode2 (opnode1 RRegf64:$a,
221 def rri64 : InstPTX<(outs RRegf64:$d),
222 (ins RRegf64:$a, RRegf64:$b, f64imm:$c),
223 !strconcat(opcstr, ".f64\t$d, $a, $b, $c"),
224 [(set RRegf64:$d, (opnode2 (opnode1 RRegf64:$a,
// Emits six variants of a binary integer instruction <opcstr>:
//   rr16/ri16 -> .u16, rr32/ri32 -> .u32, rr64/ri64 -> .u64,
// with a register or immediate second operand.
229 multiclass INT3<string opcstr, SDNode opnode> {
230 def rr16 : InstPTX<(outs RRegu16:$d),
231 (ins RRegu16:$a, RRegu16:$b),
232 !strconcat(opcstr, ".u16\t$d, $a, $b"),
233 [(set RRegu16:$d, (opnode RRegu16:$a, RRegu16:$b))]>;
234 def ri16 : InstPTX<(outs RRegu16:$d),
235 (ins RRegu16:$a, i16imm:$b),
236 !strconcat(opcstr, ".u16\t$d, $a, $b"),
237 [(set RRegu16:$d, (opnode RRegu16:$a, imm:$b))]>;
238 def rr32 : InstPTX<(outs RRegu32:$d),
239 (ins RRegu32:$a, RRegu32:$b),
240 !strconcat(opcstr, ".u32\t$d, $a, $b"),
241 [(set RRegu32:$d, (opnode RRegu32:$a, RRegu32:$b))]>;
242 def ri32 : InstPTX<(outs RRegu32:$d),
243 (ins RRegu32:$a, i32imm:$b),
244 !strconcat(opcstr, ".u32\t$d, $a, $b"),
245 [(set RRegu32:$d, (opnode RRegu32:$a, imm:$b))]>;
246 def rr64 : InstPTX<(outs RRegu64:$d),
247 (ins RRegu64:$a, RRegu64:$b),
248 !strconcat(opcstr, ".u64\t$d, $a, $b"),
249 [(set RRegu64:$d, (opnode RRegu64:$a, RRegu64:$b))]>;
250 def ri64 : InstPTX<(outs RRegu64:$d),
251 (ins RRegu64:$a, i64imm:$b),
252 !strconcat(opcstr, ".u64\t$d, $a, $b"),
253 [(set RRegu64:$d, (opnode RRegu64:$a, imm:$b))]>;
256 // no %type directive, non-commutable
// 32-bit-only binary instruction with no ".type" suffix in the mnemonic.
// Because the operation is non-commutable, both ri (imm on the right) and
// ir (imm on the left) forms are emitted in addition to rr.
257 multiclass INT3ntnc<string opcstr, SDNode opnode> {
258 def rr : InstPTX<(outs RRegu32:$d),
259 (ins RRegu32:$a, RRegu32:$b),
260 !strconcat(opcstr, "\t$d, $a, $b"),
261 [(set RRegu32:$d, (opnode RRegu32:$a, RRegu32:$b))]>;
262 def ri : InstPTX<(outs RRegu32:$d),
263 (ins RRegu32:$a, i32imm:$b),
264 !strconcat(opcstr, "\t$d, $a, $b"),
265 [(set RRegu32:$d, (opnode RRegu32:$a, imm:$b))]>;
266 def ir : InstPTX<(outs RRegu32:$d),
267 (ins i32imm:$a, RRegu32:$b),
268 !strconcat(opcstr, "\t$d, $a, $b"),
269 [(set RRegu32:$d, (opnode imm:$a, RRegu32:$b))]>;
// Emits setp.<cmpstr>.<regclsname> compare instructions producing a
// predicate register, mapped from setcc with condition code <cmp>.
// Two forms: register-register and register-immediate.  (The "def"
// name lines of the two variants are not visible in this chunk.)
272 multiclass PTX_SETP<RegisterClass RC, string regclsname, Operand immcls,
273 CondCode cmp, string cmpstr> {
275 : InstPTX<(outs Preds:$d), (ins RC:$a, RC:$b),
276 !strconcat("setp.", cmpstr, ".", regclsname, "\t$d, $a, $b"),
277 [(set Preds:$d, (setcc RC:$a, RC:$b, cmp))]>;
279 : InstPTX<(outs Preds:$d), (ins RC:$a, immcls:$b),
280 !strconcat("setp.", cmpstr, ".", regclsname, "\t$d, $a, $b"),
281 [(set Preds:$d, (setcc RC:$a, imm:$b, cmp))]>;
// Emits load instructions <opstr><typestr> for one register class, in all
// six addressing forms (rr/ri/ii x 32/64-bit), each guarded by the
// matching Use32BitAddresses/Use64BitAddresses predicate.  (The "(ins ...)"
// line of each def is not visible in this chunk.)
284 multiclass PTX_LD<string opstr, string typestr, RegisterClass RC, PatFrag pat_load> {
285 def rr32 : InstPTX<(outs RC:$d),
287 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
288 [(set RC:$d, (pat_load ADDRrr32:$a))]>, Requires<[Use32BitAddresses]>;
289 def rr64 : InstPTX<(outs RC:$d),
291 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
292 [(set RC:$d, (pat_load ADDRrr64:$a))]>, Requires<[Use64BitAddresses]>;
293 def ri32 : InstPTX<(outs RC:$d),
295 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
296 [(set RC:$d, (pat_load ADDRri32:$a))]>, Requires<[Use32BitAddresses]>;
297 def ri64 : InstPTX<(outs RC:$d),
299 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
300 [(set RC:$d, (pat_load ADDRri64:$a))]>, Requires<[Use64BitAddresses]>;
301 def ii32 : InstPTX<(outs RC:$d),
303 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
304 [(set RC:$d, (pat_load ADDRii32:$a))]>, Requires<[Use32BitAddresses]>;
305 def ii64 : InstPTX<(outs RC:$d),
307 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
308 [(set RC:$d, (pat_load ADDRii64:$a))]>, Requires<[Use64BitAddresses]>;
// Instantiates PTX_LD for every supported value type (u16/u32/u64/f32/f64)
// using a single address-space-specific load fragment.
311 multiclass PTX_LD_ALL<string opstr, PatFrag pat_load> {
312 defm u16 : PTX_LD<opstr, ".u16", RRegu16, pat_load>;
313 defm u32 : PTX_LD<opstr, ".u32", RRegu32, pat_load>;
314 defm u64 : PTX_LD<opstr, ".u64", RRegu64, pat_load>;
315 defm f32 : PTX_LD<opstr, ".f32", RRegf32, pat_load>;
316 defm f64 : PTX_LD<opstr, ".f64", RRegf64, pat_load>;
// Emits store instructions <opstr><typestr> for one register class, in all
// six addressing forms (rr/ri/ii x 32/64-bit), each guarded by the
// matching Use32BitAddresses/Use64BitAddresses predicate.
// NOTE(review): the rr variants reuse the MEMri32/MEMri64 operand classes
// while selecting via ADDRrr* — confirm this pairing is intentional.
319 multiclass PTX_ST<string opstr, string typestr, RegisterClass RC, PatFrag pat_store> {
320 def rr32 : InstPTX<(outs),
321 (ins RC:$d, MEMri32:$a),
322 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
323 [(pat_store RC:$d, ADDRrr32:$a)]>, Requires<[Use32BitAddresses]>;
324 def rr64 : InstPTX<(outs),
325 (ins RC:$d, MEMri64:$a),
326 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
327 [(pat_store RC:$d, ADDRrr64:$a)]>, Requires<[Use64BitAddresses]>;
328 def ri32 : InstPTX<(outs),
329 (ins RC:$d, MEMri32:$a),
330 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
331 [(pat_store RC:$d, ADDRri32:$a)]>, Requires<[Use32BitAddresses]>;
332 def ri64 : InstPTX<(outs),
333 (ins RC:$d, MEMri64:$a),
334 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
335 [(pat_store RC:$d, ADDRri64:$a)]>, Requires<[Use64BitAddresses]>;
336 def ii32 : InstPTX<(outs),
337 (ins RC:$d, MEMii32:$a),
338 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
339 [(pat_store RC:$d, ADDRii32:$a)]>, Requires<[Use32BitAddresses]>;
340 def ii64 : InstPTX<(outs),
341 (ins RC:$d, MEMii64:$a),
342 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
343 [(pat_store RC:$d, ADDRii64:$a)]>, Requires<[Use64BitAddresses]>;
// Instantiates PTX_ST for every supported value type (u16/u32/u64/f32/f64)
// using a single address-space-specific store fragment.
346 multiclass PTX_ST_ALL<string opstr, PatFrag pat_store> {
347 defm u16 : PTX_ST<opstr, ".u16", RRegu16, pat_store>;
348 defm u32 : PTX_ST<opstr, ".u32", RRegu32, pat_store>;
349 defm u64 : PTX_ST<opstr, ".u64", RRegu64, pat_store>;
350 defm f32 : PTX_ST<opstr, ".f32", RRegf32, pat_store>;
351 defm f64 : PTX_ST<opstr, ".f64", RRegf64, pat_store>;
354 //===----------------------------------------------------------------------===//
356 //===----------------------------------------------------------------------===//
358 ///===- Integer Arithmetic Instructions -----------------------------------===//
// Integer add/sub in all INT3 width/operand variants.
360 defm ADD : INT3<"add", add>;
361 defm SUB : INT3<"sub", sub>;
363 ///===- Floating-Point Arithmetic Instructions ----------------------------===//
365 // Standard Binary Operations
// FP add/sub/mul in all PTX_FLOAT_3OP width/operand variants.
366 defm FADD : PTX_FLOAT_3OP<"add", fadd>;
367 defm FSUB : PTX_FLOAT_3OP<"sub", fsub>;
368 defm FMUL : PTX_FLOAT_3OP<"mul", fmul>;
370 // TODO: Allow user selection of rounding modes for fdiv.
371 // For division, we need to have f32 and f64 differently.
372 // For f32, we just always use .approx since it is supported on all hardware
373 // for PTX 1.4+, which is our minimum target.
// f32 division: always uses .approx (see rationale in the comment above).
374 def FDIVrr32 : InstPTX<(outs RRegf32:$d),
375 (ins RRegf32:$a, RRegf32:$b),
376 "div.approx.f32\t$d, $a, $b",
377 [(set RRegf32:$d, (fdiv RRegf32:$a, RRegf32:$b))]>;
378 def FDIVri32 : InstPTX<(outs RRegf32:$d),
379 (ins RRegf32:$a, f32imm:$b),
380 "div.approx.f32\t$d, $a, $b",
381 [(set RRegf32:$d, (fdiv RRegf32:$a, fpimm:$b))]>;
383 // For f64, we must specify a rounding for sm 1.3+ but *not* for sm 1.0.
// f64 division on sm_13 and newer: round-to-nearest-even (.rn).
384 def FDIVrr64SM13 : InstPTX<(outs RRegf64:$d),
385 (ins RRegf64:$a, RRegf64:$b),
386 "div.rn.f64\t$d, $a, $b",
387 [(set RRegf64:$d, (fdiv RRegf64:$a, RRegf64:$b))]>,
388 Requires<[SupportsSM13]>;
389 def FDIVri64SM13 : InstPTX<(outs RRegf64:$d),
390 (ins RRegf64:$a, f64imm:$b),
391 "div.rn.f64\t$d, $a, $b",
392 [(set RRegf64:$d, (fdiv RRegf64:$a, fpimm:$b))]>,
393 Requires<[SupportsSM13]>;
// f64 division on pre-sm_13 targets: no rounding modifier allowed.
394 def FDIVrr64SM10 : InstPTX<(outs RRegf64:$d),
395 (ins RRegf64:$a, RRegf64:$b),
396 "div.f64\t$d, $a, $b",
397 [(set RRegf64:$d, (fdiv RRegf64:$a, RRegf64:$b))]>,
398 Requires<[DoesNotSupportSM13]>;
399 def FDIVri64SM10 : InstPTX<(outs RRegf64:$d),
400 (ins RRegf64:$a, f64imm:$b),
401 "div.f64\t$d, $a, $b",
402 [(set RRegf64:$d, (fdiv RRegf64:$a, fpimm:$b))]>,
403 Requires<[DoesNotSupportSM13]>;
407 // Multi-operation hybrid instructions
409 // The selection of mad/fma is tricky. In some cases, they are the *same*
410 // instruction, but in other cases we may prefer one or the other. Also,
411 // different PTX versions differ on whether rounding mode flags are required.
412 // In the short term, mad is supported on all PTX versions and we use a
413 // default rounding mode no matter what shader model or PTX version.
414 // TODO: Allow the rounding mode to be selectable through llc.
// Fused multiply-add: matches fadd(fmul(a, b), c) and emits mad.rn
// (see the mad/fma selection discussion in the comment above).
415 defm FMAD : PTX_FLOAT_4OP<"mad.rn", fmul, fadd>;
417 ///===- Floating-Point Intrinsic Instructions -----------------------------===//
// Square root with round-to-nearest-even.  (The "(ins ...)" line of each
// def below is not visible in this chunk.)
419 def FSQRT32 : InstPTX<(outs RRegf32:$d),
421 "sqrt.rn.f32\t$d, $a",
422 [(set RRegf32:$d, (fsqrt RRegf32:$a))]>;
424 def FSQRT64 : InstPTX<(outs RRegf64:$d),
426 "sqrt.rn.f64\t$d, $a",
427 [(set RRegf64:$d, (fsqrt RRegf64:$a))]>;
// Approximate sine/cosine.
// NOTE(review): the PTX ISA defines sin.approx/cos.approx only for .f32;
// confirm the .f64 forms below are accepted by the assembler.
429 def FSIN32 : InstPTX<(outs RRegf32:$d),
431 "sin.approx.f32\t$d, $a",
432 [(set RRegf32:$d, (fsin RRegf32:$a))]>;
434 def FSIN64 : InstPTX<(outs RRegf64:$d),
436 "sin.approx.f64\t$d, $a",
437 [(set RRegf64:$d, (fsin RRegf64:$a))]>;
439 def FCOS32 : InstPTX<(outs RRegf32:$d),
441 "cos.approx.f32\t$d, $a",
442 [(set RRegf32:$d, (fcos RRegf32:$a))]>;
444 def FCOS64 : InstPTX<(outs RRegf64:$d),
446 "cos.approx.f64\t$d, $a",
447 [(set RRegf64:$d, (fcos RRegf64:$a))]>;
450 ///===- Comparison and Selection Instructions -----------------------------===//
// Unsigned 32-bit predicate compares: eq/ne/lt/le/gt/ge, each in the
// rr and ri forms emitted by PTX_SETP.
452 defm SETPEQu32 : PTX_SETP<RRegu32, "u32", i32imm, SETEQ, "eq">;
453 defm SETPNEu32 : PTX_SETP<RRegu32, "u32", i32imm, SETNE, "ne">;
454 defm SETPLTu32 : PTX_SETP<RRegu32, "u32", i32imm, SETULT, "lt">;
455 defm SETPLEu32 : PTX_SETP<RRegu32, "u32", i32imm, SETULE, "le">;
456 defm SETPGTu32 : PTX_SETP<RRegu32, "u32", i32imm, SETUGT, "gt">;
457 defm SETPGEu32 : PTX_SETP<RRegu32, "u32", i32imm, SETUGE, "ge">;
459 ///===- Logic and Shift Instructions --------------------------------------===//
// 32-bit shifts: shl is a bit-shift (.b32); right shifts distinguish
// logical (shr.u32) from arithmetic (shr.s32).
461 defm SHL : INT3ntnc<"shl.b32", PTXshl>;
462 defm SRL : INT3ntnc<"shr.u32", PTXsrl>;
463 defm SRA : INT3ntnc<"shr.s32", PTXsra>;
465 ///===- Data Movement and Conversion Instructions -------------------------===//
// Register-to-register moves for every register class.  No selection
// patterns: these are emitted by the target's copy lowering.  (The "def"
// name lines and the closing "}" of this let block are not visible in
// this chunk.)
467 let neverHasSideEffects = 1 in {
469 : InstPTX<(outs Preds:$d), (ins Preds:$a), "mov.pred\t$d, $a", []>;
471 : InstPTX<(outs RRegu16:$d), (ins RRegu16:$a), "mov.u16\t$d, $a", []>;
473 : InstPTX<(outs RRegu32:$d), (ins RRegu32:$a), "mov.u32\t$d, $a", []>;
475 : InstPTX<(outs RRegu64:$d), (ins RRegu64:$a), "mov.u64\t$d, $a", []>;
477 : InstPTX<(outs RRegf32:$d), (ins RRegf32:$a), "mov.f32\t$d, $a", []>;
479 : InstPTX<(outs RRegf64:$d), (ins RRegf64:$a), "mov.f64\t$d, $a", []>;
// Immediate-to-register moves, rematerializable and as cheap as a move.
// (The "def" name lines and closing "}" are not visible in this chunk.)
482 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
484 : InstPTX<(outs Preds:$d), (ins i1imm:$a), "mov.pred\t$d, $a",
485 [(set Preds:$d, imm:$a)]>;
487 : InstPTX<(outs RRegu16:$d), (ins i16imm:$a), "mov.u16\t$d, $a",
488 [(set RRegu16:$d, imm:$a)]>;
490 : InstPTX<(outs RRegu32:$d), (ins i32imm:$a), "mov.u32\t$d, $a",
491 [(set RRegu32:$d, imm:$a)]>;
493 : InstPTX<(outs RRegu64:$d), (ins i64imm:$a), "mov.u64\t$d, $a",
494 [(set RRegu64:$d, imm:$a)]>;
496 : InstPTX<(outs RRegf32:$d), (ins f32imm:$a), "mov.f32\t$d, $a",
497 [(set RRegf32:$d, fpimm:$a)]>;
499 : InstPTX<(outs RRegf64:$d), (ins f64imm:$a), "mov.f64\t$d, $a",
500 [(set RRegf64:$d, fpimm:$a)]>;
// Loads from each addressable space, in every type and addressing form.
504 defm LDg : PTX_LD_ALL<"ld.global", load_global>;
505 defm LDc : PTX_LD_ALL<"ld.const", load_constant>;
506 defm LDl : PTX_LD_ALL<"ld.local", load_local>;
507 defm LDs : PTX_LD_ALL<"ld.shared", load_shared>;
509 // This is a special instruction that is manually inserted for kernel parameters
// No selection patterns: these take a MEMpi parameter-index operand and are
// created directly during lowering.
510 def LDpiU16 : InstPTX<(outs RRegu16:$d), (ins MEMpi:$a),
511 "ld.param.u16\t$d, [$a]", []>;
512 def LDpiU32 : InstPTX<(outs RRegu32:$d), (ins MEMpi:$a),
513 "ld.param.u32\t$d, [$a]", []>;
514 def LDpiU64 : InstPTX<(outs RRegu64:$d), (ins MEMpi:$a),
515 "ld.param.u64\t$d, [$a]", []>;
516 def LDpiF32 : InstPTX<(outs RRegf32:$d), (ins MEMpi:$a),
517 "ld.param.f32\t$d, [$a]", []>;
518 def LDpiF64 : InstPTX<(outs RRegf64:$d), (ins MEMpi:$a),
519 "ld.param.f64\t$d, [$a]", []>;
// Stores to each writable space (no st.const — constant space is
// read-only), in every type and addressing form.
522 defm STg : PTX_ST_ALL<"st.global", store_global>;
523 defm STl : PTX_ST_ALL<"st.local", store_local>;
524 defm STs : PTX_ST_ALL<"st.shared", store_shared>;
526 // defm STp : PTX_ST_ALL<"st.param", store_parameter>;
527 // defm LDp : PTX_LD_ALL<"ld.param", load_parameter>;
528 // TODO: Do something with st.param if/when it is needed.
// Zero-extends a predicate register into a u32.  (The "def" name line of
// this instruction is not visible in this chunk.)
531 : InstPTX<(outs RRegu32:$d), (ins Preds:$a), "cvt.u32.pred\t$d, $a",
532 [(set RRegu32:$d, (zext Preds:$a))]>;
534 ///===- Control Flow Instructions -----------------------------------------===//
// Function terminators: "exit" ends a kernel thread, "ret" returns from a
// device function; both map to the chain-only PTXISD nodes defined above.
536 let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
537 def EXIT : InstPTX<(outs), (ins), "exit", [(PTXexit)]>;
538 def RET : InstPTX<(outs), (ins), "ret", [(PTXret)]>;
541 ///===- Intrinsic Instructions --------------------------------------------===//
543 include "PTXIntrinsicInstrInfo.td"