1 //===- PTXInstrInfo.td - PTX Instruction defs -----------------*- tblgen-*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the PTX instructions in TableGen format.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // Instruction format superclass
16 //===----------------------------------------------------------------------===//
18 include "PTXInstrFormats.td"
20 //===----------------------------------------------------------------------===//
21 // Code Generation Predicates
22 //===----------------------------------------------------------------------===//
// Addressing
def Use32BitAddresses : Predicate<"!getSubtarget().use64BitAddresses()">;
def Use64BitAddresses : Predicate<"getSubtarget().use64BitAddresses()">;

// Shader Model Support
def SupportsSM13 : Predicate<"getSubtarget().supportsSM13()">;
def DoesNotSupportSM13 : Predicate<"!getSubtarget().supportsSM13()">;
def SupportsSM20 : Predicate<"getSubtarget().supportsSM20()">;
def DoesNotSupportSM20 : Predicate<"!getSubtarget().supportsSM20()">;

// PTX Version Support
def SupportsPTX20 : Predicate<"getSubtarget().supportsPTX20()">;
def DoesNotSupportPTX20 : Predicate<"!getSubtarget().supportsPTX20()">;
def SupportsPTX21 : Predicate<"getSubtarget().supportsPTX21()">;
def DoesNotSupportPTX21 : Predicate<"!getSubtarget().supportsPTX21()">;
40 //===----------------------------------------------------------------------===//
41 // Instruction Pattern Stuff
42 //===----------------------------------------------------------------------===//
// Matches a load whose pointer operand lives in the PTX global address space.
def load_global : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  const Value *Src;
  const PointerType *PT;
  if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
      (PT = dyn_cast<PointerType>(Src->getType())))
    return PT->getAddressSpace() == PTX::GLOBAL;
  return false;
}]>;
// Matches a load whose pointer operand lives in the PTX constant address space.
def load_constant : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  const Value *Src;
  const PointerType *PT;
  if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
      (PT = dyn_cast<PointerType>(Src->getType())))
    return PT->getAddressSpace() == PTX::CONSTANT;
  return false;
}]>;
// Matches a load whose pointer operand lives in the PTX local address space.
def load_local : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  const Value *Src;
  const PointerType *PT;
  if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
      (PT = dyn_cast<PointerType>(Src->getType())))
    return PT->getAddressSpace() == PTX::LOCAL;
  return false;
}]>;
// Matches a load whose pointer operand lives in the PTX parameter address space.
def load_parameter : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  const Value *Src;
  const PointerType *PT;
  if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
      (PT = dyn_cast<PointerType>(Src->getType())))
    return PT->getAddressSpace() == PTX::PARAMETER;
  return false;
}]>;
// Matches a load whose pointer operand lives in the PTX shared address space.
def load_shared : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  const Value *Src;
  const PointerType *PT;
  if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
      (PT = dyn_cast<PointerType>(Src->getType())))
    return PT->getAddressSpace() == PTX::SHARED;
  return false;
}]>;
// Matches a store whose pointer operand lives in the PTX global address space.
def store_global
  : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
  const Value *Src;
  const PointerType *PT;
  if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
      (PT = dyn_cast<PointerType>(Src->getType())))
    return PT->getAddressSpace() == PTX::GLOBAL;
  return false;
}]>;
// Matches a store whose pointer operand lives in the PTX local address space.
def store_local
  : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
  const Value *Src;
  const PointerType *PT;
  if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
      (PT = dyn_cast<PointerType>(Src->getType())))
    return PT->getAddressSpace() == PTX::LOCAL;
  return false;
}]>;
// Matches a store whose pointer operand lives in the PTX parameter address space.
def store_parameter
  : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
  const Value *Src;
  const PointerType *PT;
  if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
      (PT = dyn_cast<PointerType>(Src->getType())))
    return PT->getAddressSpace() == PTX::PARAMETER;
  return false;
}]>;
// Matches a store whose pointer operand lives in the PTX shared address space.
def store_shared
  : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
  const Value *Src;
  const PointerType *PT;
  if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
      (PT = dyn_cast<PointerType>(Src->getType())))
    return PT->getAddressSpace() == PTX::SHARED;
  return false;
}]>;
// Addressing modes: reg+reg, reg+imm, and imm+imm address forms, each in a
// 32-bit and a 64-bit pointer-width variant, selected by the named C++
// routines in the PTX DAG-to-DAG instruction selector.
def ADDRrr32 : ComplexPattern<i32, 2, "SelectADDRrr", [], []>;
def ADDRrr64 : ComplexPattern<i64, 2, "SelectADDRrr", [], []>;
def ADDRri32 : ComplexPattern<i32, 2, "SelectADDRri", [], []>;
def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri", [], []>;
def ADDRii32 : ComplexPattern<i32, 2, "SelectADDRii", [], []>;
def ADDRii64 : ComplexPattern<i64, 2, "SelectADDRii", [], []>;
// Memory operands: a base (register or immediate) plus an immediate offset,
// printed by printMemOperand. Each Operand def requires its own closing brace.
def MEMri32 : Operand<i32> {
  let PrintMethod = "printMemOperand";
  let MIOperandInfo = (ops RRegu32, i32imm);
}
def MEMri64 : Operand<i64> {
  let PrintMethod = "printMemOperand";
  let MIOperandInfo = (ops RRegu64, i64imm);
}
def MEMii32 : Operand<i32> {
  let PrintMethod = "printMemOperand";
  let MIOperandInfo = (ops i32imm, i32imm);
}
def MEMii64 : Operand<i64> {
  let PrintMethod = "printMemOperand";
  let MIOperandInfo = (ops i64imm, i64imm);
}
// The operand here does not correspond to an actual address, so we
// can use i32 in 64-bit address modes.
def MEMpi : Operand<i32> {
  let PrintMethod = "printParamOperand";
  let MIOperandInfo = (ops i32imm);
}
163 //===----------------------------------------------------------------------===//
164 // PTX Specific Node Definitions
165 //===----------------------------------------------------------------------===//
167 // PTX allow generic 3-reg shifts like shl r0, r1, r2
// Shift nodes mapped directly onto the generic ISD shift opcodes; PTX allows
// fully general 3-register shifts, so no custom lowering is needed.
def PTXshl : SDNode<"ISD::SHL", SDTIntBinOp>;
def PTXsrl : SDNode<"ISD::SRL", SDTIntBinOp>;
def PTXsra : SDNode<"ISD::SRA", SDTIntBinOp>;

// Target-specific control-flow terminators; both are chained, valueless nodes
// (matched by the EXIT and RET instructions below).
def PTXexit
  : SDNode<"PTXISD::EXIT", SDTNone, [SDNPHasChain]>;
def PTXret
  : SDNode<"PTXISD::RET", SDTNone, [SDNPHasChain]>;
177 //===----------------------------------------------------------------------===//
178 // Instruction Class Templates
179 //===----------------------------------------------------------------------===//
181 //===- Floating-Point Instructions - 3 Operand Form -----------------------===//
// Expands to reg-reg and reg-imm variants of a 3-operand FP instruction, for
// both f32 and f64 register classes.
multiclass PTX_FLOAT_3OP<string opcstr, SDNode opnode> {
  def rr32 : InstPTX<(outs RRegf32:$d),
                     (ins RRegf32:$a, RRegf32:$b),
                     !strconcat(opcstr, ".f32\t$d, $a, $b"),
                     [(set RRegf32:$d, (opnode RRegf32:$a, RRegf32:$b))]>;
  def ri32 : InstPTX<(outs RRegf32:$d),
                     (ins RRegf32:$a, f32imm:$b),
                     !strconcat(opcstr, ".f32\t$d, $a, $b"),
                     [(set RRegf32:$d, (opnode RRegf32:$a, fpimm:$b))]>;
  def rr64 : InstPTX<(outs RRegf64:$d),
                     (ins RRegf64:$a, RRegf64:$b),
                     !strconcat(opcstr, ".f64\t$d, $a, $b"),
                     [(set RRegf64:$d, (opnode RRegf64:$a, RRegf64:$b))]>;
  def ri64 : InstPTX<(outs RRegf64:$d),
                     (ins RRegf64:$a, f64imm:$b),
                     !strconcat(opcstr, ".f64\t$d, $a, $b"),
                     [(set RRegf64:$d, (opnode RRegf64:$a, fpimm:$b))]>;
}
201 //===- Floating-Point Instructions - 4 Operand Form -----------------------===//
// Expands fused two-op patterns (opnode2 (opnode1 a, b), c) — e.g. mad as
// mul+add — into f32/f64 instructions, with a register or immediate $c.
multiclass PTX_FLOAT_4OP<string opcstr, SDNode opnode1, SDNode opnode2> {
  def rrr32 : InstPTX<(outs RRegf32:$d),
                      (ins RRegf32:$a, RRegf32:$b, RRegf32:$c),
                      !strconcat(opcstr, ".f32\t$d, $a, $b, $c"),
                      [(set RRegf32:$d, (opnode2 (opnode1 RRegf32:$a,
                                                          RRegf32:$b),
                                                 RRegf32:$c))]>;
  def rri32 : InstPTX<(outs RRegf32:$d),
                      (ins RRegf32:$a, RRegf32:$b, f32imm:$c),
                      !strconcat(opcstr, ".f32\t$d, $a, $b, $c"),
                      [(set RRegf32:$d, (opnode2 (opnode1 RRegf32:$a,
                                                          RRegf32:$b),
                                                 fpimm:$c))]>;
  def rrr64 : InstPTX<(outs RRegf64:$d),
                      (ins RRegf64:$a, RRegf64:$b, RRegf64:$c),
                      !strconcat(opcstr, ".f64\t$d, $a, $b, $c"),
                      [(set RRegf64:$d, (opnode2 (opnode1 RRegf64:$a,
                                                          RRegf64:$b),
                                                 RRegf64:$c))]>;
  def rri64 : InstPTX<(outs RRegf64:$d),
                      (ins RRegf64:$a, RRegf64:$b, f64imm:$c),
                      !strconcat(opcstr, ".f64\t$d, $a, $b, $c"),
                      [(set RRegf64:$d, (opnode2 (opnode1 RRegf64:$a,
                                                          RRegf64:$b),
                                                 fpimm:$c))]>;
}
// Expands to reg-reg and reg-imm variants of a 3-operand integer instruction
// for the u16, u32, and u64 register classes.
multiclass INT3<string opcstr, SDNode opnode> {
  def rr16 : InstPTX<(outs RRegu16:$d),
                     (ins RRegu16:$a, RRegu16:$b),
                     !strconcat(opcstr, ".u16\t$d, $a, $b"),
                     [(set RRegu16:$d, (opnode RRegu16:$a, RRegu16:$b))]>;
  def ri16 : InstPTX<(outs RRegu16:$d),
                     (ins RRegu16:$a, i16imm:$b),
                     !strconcat(opcstr, ".u16\t$d, $a, $b"),
                     [(set RRegu16:$d, (opnode RRegu16:$a, imm:$b))]>;
  def rr32 : InstPTX<(outs RRegu32:$d),
                     (ins RRegu32:$a, RRegu32:$b),
                     !strconcat(opcstr, ".u32\t$d, $a, $b"),
                     [(set RRegu32:$d, (opnode RRegu32:$a, RRegu32:$b))]>;
  def ri32 : InstPTX<(outs RRegu32:$d),
                     (ins RRegu32:$a, i32imm:$b),
                     !strconcat(opcstr, ".u32\t$d, $a, $b"),
                     [(set RRegu32:$d, (opnode RRegu32:$a, imm:$b))]>;
  def rr64 : InstPTX<(outs RRegu64:$d),
                     (ins RRegu64:$a, RRegu64:$b),
                     !strconcat(opcstr, ".u64\t$d, $a, $b"),
                     [(set RRegu64:$d, (opnode RRegu64:$a, RRegu64:$b))]>;
  def ri64 : InstPTX<(outs RRegu64:$d),
                     (ins RRegu64:$a, i64imm:$b),
                     !strconcat(opcstr, ".u64\t$d, $a, $b"),
                     [(set RRegu64:$d, (opnode RRegu64:$a, imm:$b))]>;
}
// no %type directive, non-commutable
// 3-operand integer instruction with no %type directive and a non-commutable
// opcode (e.g. shifts): emits rr, ri, and ir variants over u32 registers.
multiclass INT3ntnc<string opcstr, SDNode opnode> {
  def rr : InstPTX<(outs RRegu32:$d),
                   (ins RRegu32:$a, RRegu32:$b),
                   !strconcat(opcstr, "\t$d, $a, $b"),
                   [(set RRegu32:$d, (opnode RRegu32:$a, RRegu32:$b))]>;
  def ri : InstPTX<(outs RRegu32:$d),
                   (ins RRegu32:$a, i32imm:$b),
                   !strconcat(opcstr, "\t$d, $a, $b"),
                   [(set RRegu32:$d, (opnode RRegu32:$a, imm:$b))]>;
  def ir : InstPTX<(outs RRegu32:$d),
                   (ins i32imm:$a, RRegu32:$b),
                   !strconcat(opcstr, "\t$d, $a, $b"),
                   [(set RRegu32:$d, (opnode imm:$a, RRegu32:$b))]>;
}
// Load from memory, one instruction per addressing mode; each variant is
// guarded by the matching 32-/64-bit address-width predicate.
multiclass PTX_LD<string opstr, string typestr, RegisterClass RC, PatFrag pat_load> {
  def rr32 : InstPTX<(outs RC:$d),
                     (ins MEMri32:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
                     [(set RC:$d, (pat_load ADDRrr32:$a))]>, Requires<[Use32BitAddresses]>;
  def rr64 : InstPTX<(outs RC:$d),
                     (ins MEMri64:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
                     [(set RC:$d, (pat_load ADDRrr64:$a))]>, Requires<[Use64BitAddresses]>;
  def ri32 : InstPTX<(outs RC:$d),
                     (ins MEMri32:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
                     [(set RC:$d, (pat_load ADDRri32:$a))]>, Requires<[Use32BitAddresses]>;
  def ri64 : InstPTX<(outs RC:$d),
                     (ins MEMri64:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
                     [(set RC:$d, (pat_load ADDRri64:$a))]>, Requires<[Use64BitAddresses]>;
  def ii32 : InstPTX<(outs RC:$d),
                     (ins MEMii32:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
                     [(set RC:$d, (pat_load ADDRii32:$a))]>, Requires<[Use32BitAddresses]>;
  def ii64 : InstPTX<(outs RC:$d),
                     (ins MEMii64:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
                     [(set RC:$d, (pat_load ADDRii64:$a))]>, Requires<[Use64BitAddresses]>;
}
// Instantiates PTX_LD for every supported value type (u16/u32/u64/f32/f64).
multiclass PTX_LD_ALL<string opstr, PatFrag pat_load> {
  defm u16 : PTX_LD<opstr, ".u16", RRegu16, pat_load>;
  defm u32 : PTX_LD<opstr, ".u32", RRegu32, pat_load>;
  defm u64 : PTX_LD<opstr, ".u64", RRegu64, pat_load>;
  defm f32 : PTX_LD<opstr, ".f32", RRegf32, pat_load>;
  defm f64 : PTX_LD<opstr, ".f64", RRegf64, pat_load>;
}
// Store to memory, one instruction per addressing mode; each variant is
// guarded by the matching 32-/64-bit address-width predicate.
multiclass PTX_ST<string opstr, string typestr, RegisterClass RC, PatFrag pat_store> {
  def rr32 : InstPTX<(outs),
                     (ins RC:$d, MEMri32:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
                     [(pat_store RC:$d, ADDRrr32:$a)]>, Requires<[Use32BitAddresses]>;
  def rr64 : InstPTX<(outs),
                     (ins RC:$d, MEMri64:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
                     [(pat_store RC:$d, ADDRrr64:$a)]>, Requires<[Use64BitAddresses]>;
  def ri32 : InstPTX<(outs),
                     (ins RC:$d, MEMri32:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
                     [(pat_store RC:$d, ADDRri32:$a)]>, Requires<[Use32BitAddresses]>;
  def ri64 : InstPTX<(outs),
                     (ins RC:$d, MEMri64:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
                     [(pat_store RC:$d, ADDRri64:$a)]>, Requires<[Use64BitAddresses]>;
  def ii32 : InstPTX<(outs),
                     (ins RC:$d, MEMii32:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
                     [(pat_store RC:$d, ADDRii32:$a)]>, Requires<[Use32BitAddresses]>;
  def ii64 : InstPTX<(outs),
                     (ins RC:$d, MEMii64:$a),
                     !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
                     [(pat_store RC:$d, ADDRii64:$a)]>, Requires<[Use64BitAddresses]>;
}
// Instantiates PTX_ST for every supported value type (u16/u32/u64/f32/f64).
multiclass PTX_ST_ALL<string opstr, PatFrag pat_store> {
  defm u16 : PTX_ST<opstr, ".u16", RRegu16, pat_store>;
  defm u32 : PTX_ST<opstr, ".u32", RRegu32, pat_store>;
  defm u64 : PTX_ST<opstr, ".u64", RRegu64, pat_store>;
  defm f32 : PTX_ST<opstr, ".f32", RRegf32, pat_store>;
  defm f64 : PTX_ST<opstr, ".f64", RRegf64, pat_store>;
}
342 //===----------------------------------------------------------------------===//
344 //===----------------------------------------------------------------------===//
346 ///===- Floating-Point Arithmetic Instructions ----------------------------===//
348 // Standard Binary Operations
// Three-operand FP arithmetic; PTX_FLOAT_3OP expands each into rr/ri
// variants for both f32 and f64.
defm FADD : PTX_FLOAT_3OP<"add", fadd>;
defm FSUB : PTX_FLOAT_3OP<"sub", fsub>;
defm FMUL : PTX_FLOAT_3OP<"mul", fmul>;
353 // TODO: Allow user selection of rounding modes for fdiv.
354 // For division, we need to have f32 and f64 differently.
355 // For f32, we just always use .approx since it is supported on all hardware
356 // for PTX 1.4+, which is our minimum target.
def FDIVrr32 : InstPTX<(outs RRegf32:$d),
                       (ins RRegf32:$a, RRegf32:$b),
                       "div.approx.f32\t$d, $a, $b",
                       [(set RRegf32:$d, (fdiv RRegf32:$a, RRegf32:$b))]>;
def FDIVri32 : InstPTX<(outs RRegf32:$d),
                       (ins RRegf32:$a, f32imm:$b),
                       "div.approx.f32\t$d, $a, $b",
                       [(set RRegf32:$d, (fdiv RRegf32:$a, fpimm:$b))]>;

// For f64, we must specify a rounding for sm 1.3+ but *not* for sm 1.0.
// The SM13/SM10 pairs below are mutually exclusive via their predicates.
def FDIVrr64SM13 : InstPTX<(outs RRegf64:$d),
                           (ins RRegf64:$a, RRegf64:$b),
                           "div.rn.f64\t$d, $a, $b",
                           [(set RRegf64:$d, (fdiv RRegf64:$a, RRegf64:$b))]>,
                   Requires<[SupportsSM13]>;
def FDIVri64SM13 : InstPTX<(outs RRegf64:$d),
                           (ins RRegf64:$a, f64imm:$b),
                           "div.rn.f64\t$d, $a, $b",
                           [(set RRegf64:$d, (fdiv RRegf64:$a, fpimm:$b))]>,
                   Requires<[SupportsSM13]>;
def FDIVrr64SM10 : InstPTX<(outs RRegf64:$d),
                           (ins RRegf64:$a, RRegf64:$b),
                           "div.f64\t$d, $a, $b",
                           [(set RRegf64:$d, (fdiv RRegf64:$a, RRegf64:$b))]>,
                   Requires<[DoesNotSupportSM13]>;
def FDIVri64SM10 : InstPTX<(outs RRegf64:$d),
                           (ins RRegf64:$a, f64imm:$b),
                           "div.f64\t$d, $a, $b",
                           [(set RRegf64:$d, (fdiv RRegf64:$a, fpimm:$b))]>,
                   Requires<[DoesNotSupportSM13]>;
390 // Multi-operation hybrid instructions
392 // The selection of mad/fma is tricky. In some cases, they are the *same*
393 // instruction, but in other cases we may prefer one or the other. Also,
394 // different PTX versions differ on whether rounding mode flags are required.
395 // In the short term, mad is supported on all PTX versions and we use a
396 // default rounding mode no matter what shader model or PTX version.
397 // TODO: Allow the rounding mode to be selectable through llc.
398 defm FMAD : PTX_FLOAT_4OP<"mad.rn", fmul, fadd>;
402 ///===- Integer Arithmetic Instructions -----------------------------------===//
// Integer add/sub over u16/u32/u64, rr and ri forms via INT3.
defm ADD : INT3<"add", add>;
defm SUB : INT3<"sub", sub>;
407 ///===- Logic and Shift Instructions --------------------------------------===//
// Shifts carry the type in the opcode string (shl.b32 / shr.u32 / shr.s32),
// so they use INT3ntnc, which emits no separate type directive.
defm SHL : INT3ntnc<"shl.b32", PTXshl>;
defm SRL : INT3ntnc<"shr.u32", PTXsrl>;
defm SRA : INT3ntnc<"shr.s32", PTXsra>;
413 ///===- Data Movement and Conversion Instructions -------------------------===//
// Register-to-register moves, one per register class. No patterns: these are
// selected by copy lowering, not by DAG pattern matching.
let neverHasSideEffects = 1 in {
  def MOVPREDrr
    : InstPTX<(outs Preds:$d), (ins Preds:$a), "mov.pred\t$d, $a", []>;
  def MOVU16rr
    : InstPTX<(outs RRegu16:$d), (ins RRegu16:$a), "mov.u16\t$d, $a", []>;
  def MOVU32rr
    : InstPTX<(outs RRegu32:$d), (ins RRegu32:$a), "mov.u32\t$d, $a", []>;
  def MOVU64rr
    : InstPTX<(outs RRegu64:$d), (ins RRegu64:$a), "mov.u64\t$d, $a", []>;
  def MOVF32rr
    : InstPTX<(outs RRegf32:$d), (ins RRegf32:$a), "mov.f32\t$d, $a", []>;
  def MOVF64rr
    : InstPTX<(outs RRegf64:$d), (ins RRegf64:$a), "mov.f64\t$d, $a", []>;
}
// Immediate-to-register moves; rematerializable and as cheap as a copy so the
// register allocator may freely re-emit them instead of spilling.
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
  def MOVPREDri
    : InstPTX<(outs Preds:$d), (ins i1imm:$a), "mov.pred\t$d, $a",
              [(set Preds:$d, imm:$a)]>;
  def MOVU16ri
    : InstPTX<(outs RRegu16:$d), (ins i16imm:$a), "mov.u16\t$d, $a",
              [(set RRegu16:$d, imm:$a)]>;
  def MOVU32ri
    : InstPTX<(outs RRegu32:$d), (ins i32imm:$a), "mov.u32\t$d, $a",
              [(set RRegu32:$d, imm:$a)]>;
  def MOVU64ri
    : InstPTX<(outs RRegu64:$d), (ins i64imm:$a), "mov.u64\t$d, $a",
              [(set RRegu64:$d, imm:$a)]>;
  def MOVF32ri
    : InstPTX<(outs RRegf32:$d), (ins f32imm:$a), "mov.f32\t$d, $a",
              [(set RRegf32:$d, fpimm:$a)]>;
  def MOVF64ri
    : InstPTX<(outs RRegf64:$d), (ins f64imm:$a), "mov.f64\t$d, $a",
              [(set RRegf64:$d, fpimm:$a)]>;
}
// Loads from each addressable PTX state space; the defm suffix letter
// (g/c/l/s) denotes global/const/local/shared.
defm LDg : PTX_LD_ALL<"ld.global", load_global>;
defm LDc : PTX_LD_ALL<"ld.const", load_constant>;
defm LDl : PTX_LD_ALL<"ld.local", load_local>;
defm LDs : PTX_LD_ALL<"ld.shared", load_shared>;
457 // This is a special instruction that is manually inserted for kernel parameters
// One ld.param per register class. Patterns are empty because these are
// inserted manually during kernel-parameter lowering, not selected from DAGs.
def LDpiU16 : InstPTX<(outs RRegu16:$d), (ins MEMpi:$a),
                      "ld.param.u16\t$d, [$a]", []>;
def LDpiU32 : InstPTX<(outs RRegu32:$d), (ins MEMpi:$a),
                      "ld.param.u32\t$d, [$a]", []>;
def LDpiU64 : InstPTX<(outs RRegu64:$d), (ins MEMpi:$a),
                      "ld.param.u64\t$d, [$a]", []>;
def LDpiF32 : InstPTX<(outs RRegf32:$d), (ins MEMpi:$a),
                      "ld.param.f32\t$d, [$a]", []>;
def LDpiF64 : InstPTX<(outs RRegf64:$d), (ins MEMpi:$a),
                      "ld.param.f64\t$d, [$a]", []>;
// Stores to each writable PTX state space (no st.const — constant space is
// read-only; st.param is handled separately, see note below).
defm STg : PTX_ST_ALL<"st.global", store_global>;
defm STl : PTX_ST_ALL<"st.local", store_local>;
defm STs : PTX_ST_ALL<"st.shared", store_shared>;
474 // defm STp : PTX_ST_ALL<"st.param", store_parameter>;
475 // defm LDp : PTX_LD_ALL<"ld.param", load_parameter>;
476 // TODO: Do something with st.param if/when it is needed.
478 ///===- Control Flow Instructions -----------------------------------------===//
// Function terminators matching the PTXexit/PTXret SDNodes defined above.
let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
  def EXIT : InstPTX<(outs), (ins), "exit", [(PTXexit)]>;
  def RET  : InstPTX<(outs), (ins), "ret",  [(PTXret)]>;
}
485 ///===- Intrinsic Instructions --------------------------------------------===//
487 include "PTXIntrinsicInstrInfo.td"