1 //===- PTXInstrInfo.td - PTX Instruction defs -----------------*- tblgen-*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the PTX instructions in TableGen format.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // Instruction format superclass
16 //===----------------------------------------------------------------------===//
18 include "PTXInstrFormats.td"
20 //===----------------------------------------------------------------------===//
21 // Code Generation Predicates
22 //===----------------------------------------------------------------------===//
// Code-generation predicates, tested via Requires<[...]> on instructions
// below.  Each string is a C++ expression evaluated against the current
// PTXSubtarget.
// Address-size selection: exactly one of these two holds per subtarget.
25 def Use32BitAddresses : Predicate<"!getSubtarget().use64BitAddresses()">;
26 def Use64BitAddresses : Predicate<"getSubtarget().use64BitAddresses()">;
28 // Shader Model Support
29 def SupportsSM13 : Predicate<"getSubtarget().supportsSM13()">;
30 def DoesNotSupportSM13 : Predicate<"!getSubtarget().supportsSM13()">;
31 def SupportsSM20 : Predicate<"getSubtarget().supportsSM20()">;
32 def DoesNotSupportSM20 : Predicate<"!getSubtarget().supportsSM20()">;
34 // PTX Version Support
35 def SupportsPTX21 : Predicate<"getSubtarget().supportsPTX21()">;
36 def DoesNotSupportPTX21 : Predicate<"!getSubtarget().supportsPTX21()">;
37 def SupportsPTX22 : Predicate<"getSubtarget().supportsPTX22()">;
38 def DoesNotSupportPTX22 : Predicate<"!getSubtarget().supportsPTX22()">;
40 //===----------------------------------------------------------------------===//
41 // Instruction Pattern Stuff
42 //===----------------------------------------------------------------------===//
// Address-space-specific load fragments.  Each one matches a generic ISD
// load whose source pointer's Value has a pointer type in the named PTX
// address space (PTX::GLOBAL / CONSTANT / LOCAL / PARAMETER / SHARED).
// NOTE(review): this listing is missing lines inside these predicates --
// `Src` is used without a visible declaration (presumably
// `const Value *Src;`) and the `return false; }]>;` trailers are not
// shown.  Confirm against the full file.
44 def load_global : PatFrag<(ops node:$ptr), (load node:$ptr), [{
46 const PointerType *PT;
47 if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
48 (PT = dyn_cast<PointerType>(Src->getType())))
49 return PT->getAddressSpace() == PTX::GLOBAL;
// Loads from the PTX constant address space.
53 def load_constant : PatFrag<(ops node:$ptr), (load node:$ptr), [{
55 const PointerType *PT;
56 if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
57 (PT = dyn_cast<PointerType>(Src->getType())))
58 return PT->getAddressSpace() == PTX::CONSTANT;
// Loads from the PTX local address space.
62 def load_local : PatFrag<(ops node:$ptr), (load node:$ptr), [{
64 const PointerType *PT;
65 if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
66 (PT = dyn_cast<PointerType>(Src->getType())))
67 return PT->getAddressSpace() == PTX::LOCAL;
// Loads from the PTX parameter address space.
71 def load_parameter : PatFrag<(ops node:$ptr), (load node:$ptr), [{
73 const PointerType *PT;
74 if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
75 (PT = dyn_cast<PointerType>(Src->getType())))
76 return PT->getAddressSpace() == PTX::PARAMETER;
// Loads from the PTX shared address space.
80 def load_shared : PatFrag<(ops node:$ptr), (load node:$ptr), [{
82 const PointerType *PT;
83 if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
84 (PT = dyn_cast<PointerType>(Src->getType())))
85 return PT->getAddressSpace() == PTX::SHARED;
// Address-space-specific store fragments, analogous to the load_*
// fragments above: each matches a generic store whose destination pointer
// is in the named PTX address space.
// NOTE(review): the `def` lines naming these four fragments are not
// visible in this listing; from the uses below (PTX_ST_ALL instantiations
// and the commented-out st.param) they are presumed to be store_global,
// store_local, store_parameter and store_shared, in that order --
// confirm against the full file.
90 : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
92 const PointerType *PT;
93 if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
94 (PT = dyn_cast<PointerType>(Src->getType())))
95 return PT->getAddressSpace() == PTX::GLOBAL;
100 : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
102 const PointerType *PT;
103 if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
104 (PT = dyn_cast<PointerType>(Src->getType())))
105 return PT->getAddressSpace() == PTX::LOCAL;
110 : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
112 const PointerType *PT;
113 if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
114 (PT = dyn_cast<PointerType>(Src->getType())))
115 return PT->getAddressSpace() == PTX::PARAMETER;
120 : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
122 const PointerType *PT;
123 if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
124 (PT = dyn_cast<PointerType>(Src->getType())))
125 return PT->getAddressSpace() == PTX::SHARED;
// Addressing-mode selection, handled in C++ by SelectADDRrr/ri/ii in the
// PTX DAG-to-DAG instruction selector.  Each pattern yields two operands:
//   rr = register + register, ri = register + immediate,
//   ii = immediate + immediate,
// in both 32-bit (i32) and 64-bit (i64) pointer widths.
130 def ADDRrr32 : ComplexPattern<i32, 2, "SelectADDRrr", [], []>;
131 def ADDRrr64 : ComplexPattern<i64, 2, "SelectADDRrr", [], []>;
132 def ADDRri32 : ComplexPattern<i32, 2, "SelectADDRri", [], []>;
133 def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri", [], []>;
134 def ADDRii32 : ComplexPattern<i32, 2, "SelectADDRii", [], []>;
135 def ADDRii64 : ComplexPattern<i64, 2, "SelectADDRii", [], []>;
// Memory operand definitions, printed by printMemOperand as "base+offset".
// ri = register base + immediate offset; ii = two immediates.
// NOTE(review): the closing `}` lines of these Operand defs are not
// visible in this listing -- confirm against the full file.
138 def MEMri32 : Operand<i32> {
139 let PrintMethod = "printMemOperand";
140 let MIOperandInfo = (ops RRegu32, i32imm);
142 def MEMri64 : Operand<i64> {
143 let PrintMethod = "printMemOperand";
144 let MIOperandInfo = (ops RRegu64, i64imm);
146 def MEMii32 : Operand<i32> {
147 let PrintMethod = "printMemOperand";
148 let MIOperandInfo = (ops i32imm, i32imm);
150 def MEMii64 : Operand<i64> {
151 let PrintMethod = "printMemOperand";
152 let MIOperandInfo = (ops i64imm, i64imm);
154 // The operand here does not correspond to an actual address, so we
155 // can use i32 in 64-bit address modes.
156 def MEMpi : Operand<i32> {
157 let PrintMethod = "printParamOperand";
158 let MIOperandInfo = (ops i32imm);
161 // Branch & call targets have OtherVT type.
162 def brtarget : Operand<OtherVT>;
163 def calltarget : Operand<i32>;
165 //===----------------------------------------------------------------------===//
166 // PTX Specific Node Definitions
167 //===----------------------------------------------------------------------===//
169 // PTX allows generic 3-reg shifts like shl r0, r1, r2, so the standard
// ISD shift nodes can be used directly.
170 def PTXshl : SDNode<"ISD::SHL", SDTIntBinOp>;
171 def PTXsrl : SDNode<"ISD::SRL", SDTIntBinOp>;
172 def PTXsra : SDNode<"ISD::SRA", SDTIntBinOp>;
// Target-specific nodes.  NOTE(review): the `def` lines naming the next
// three nodes are not visible in this listing; from the uses below (EXIT,
// RET, MOVaddr patterns) they are presumed to be PTXexit, PTXret and
// PTXcopyaddress -- confirm against the full file.
175 : SDNode<"PTXISD::EXIT", SDTNone, [SDNPHasChain]>;
177 : SDNode<"PTXISD::RET", SDTNone, [SDNPHasChain]>;
179 : SDNode<"PTXISD::COPY_ADDRESS", SDTypeProfile<1, 1, []>, []>;
181 //===----------------------------------------------------------------------===//
182 // Instruction Class Templates
183 //===----------------------------------------------------------------------===//
185 //===- Floating-Point Instructions - 3 Operand Form -----------------------===//
// Two-source floating-point instructions (d = opnode(a, b)) in four
// variants: reg-reg and reg-imm forms for both .f32 and .f64.
186 multiclass PTX_FLOAT_3OP<string opcstr, SDNode opnode> {
// f32, both sources in registers.
187 def rr32 : InstPTX<(outs RRegf32:$d),
188 (ins RRegf32:$a, RRegf32:$b),
189 !strconcat(opcstr, ".f32\t$d, $a, $b"),
190 [(set RRegf32:$d, (opnode RRegf32:$a, RRegf32:$b))]>;
// f32, second source is an FP immediate.
191 def ri32 : InstPTX<(outs RRegf32:$d),
192 (ins RRegf32:$a, f32imm:$b),
193 !strconcat(opcstr, ".f32\t$d, $a, $b"),
194 [(set RRegf32:$d, (opnode RRegf32:$a, fpimm:$b))]>;
// f64, both sources in registers.
195 def rr64 : InstPTX<(outs RRegf64:$d),
196 (ins RRegf64:$a, RRegf64:$b),
197 !strconcat(opcstr, ".f64\t$d, $a, $b"),
198 [(set RRegf64:$d, (opnode RRegf64:$a, RRegf64:$b))]>;
// f64, second source is an FP immediate.
199 def ri64 : InstPTX<(outs RRegf64:$d),
200 (ins RRegf64:$a, f64imm:$b),
201 !strconcat(opcstr, ".f64\t$d, $a, $b"),
202 [(set RRegf64:$d, (opnode RRegf64:$a, fpimm:$b))]>;
205 //===- Floating-Point Instructions - 4 Operand Form -----------------------===//
// Three-source floating-point instructions, matching the fused pattern
// d = opnode2(opnode1(a, b), c); used below for multiply-add (mad/fma).
// NOTE(review): the continuation lines of each selection pattern (closing
// the inner (opnode1 ..., $b) dag and supplying the $c operand) are not
// visible in this listing -- confirm against the full file.
206 multiclass PTX_FLOAT_4OP<string opcstr, SDNode opnode1, SDNode opnode2> {
207 def rrr32 : InstPTX<(outs RRegf32:$d),
208 (ins RRegf32:$a, RRegf32:$b, RRegf32:$c),
209 !strconcat(opcstr, ".f32\t$d, $a, $b, $c"),
210 [(set RRegf32:$d, (opnode2 (opnode1 RRegf32:$a,
213 def rri32 : InstPTX<(outs RRegf32:$d),
214 (ins RRegf32:$a, RRegf32:$b, f32imm:$c),
215 !strconcat(opcstr, ".f32\t$d, $a, $b, $c"),
216 [(set RRegf32:$d, (opnode2 (opnode1 RRegf32:$a,
219 def rrr64 : InstPTX<(outs RRegf64:$d),
220 (ins RRegf64:$a, RRegf64:$b, RRegf64:$c),
221 !strconcat(opcstr, ".f64\t$d, $a, $b, $c"),
222 [(set RRegf64:$d, (opnode2 (opnode1 RRegf64:$a,
225 def rri64 : InstPTX<(outs RRegf64:$d),
226 (ins RRegf64:$a, RRegf64:$b, f64imm:$c),
227 !strconcat(opcstr, ".f64\t$d, $a, $b, $c"),
228 [(set RRegf64:$d, (opnode2 (opnode1 RRegf64:$a,
// Integer two-source instructions (d = opnode(a, b)): reg-reg and reg-imm
// forms for u16/u32/u64.  The .uNN suffix is emitted for all widths; for
// the ops instantiated below (add/sub/mul.lo) signedness does not change
// the result bits.
233 multiclass INT3<string opcstr, SDNode opnode> {
234 def rr16 : InstPTX<(outs RRegu16:$d),
235 (ins RRegu16:$a, RRegu16:$b),
236 !strconcat(opcstr, ".u16\t$d, $a, $b"),
237 [(set RRegu16:$d, (opnode RRegu16:$a, RRegu16:$b))]>;
238 def ri16 : InstPTX<(outs RRegu16:$d),
239 (ins RRegu16:$a, i16imm:$b),
240 !strconcat(opcstr, ".u16\t$d, $a, $b"),
241 [(set RRegu16:$d, (opnode RRegu16:$a, imm:$b))]>;
242 def rr32 : InstPTX<(outs RRegu32:$d),
243 (ins RRegu32:$a, RRegu32:$b),
244 !strconcat(opcstr, ".u32\t$d, $a, $b"),
245 [(set RRegu32:$d, (opnode RRegu32:$a, RRegu32:$b))]>;
246 def ri32 : InstPTX<(outs RRegu32:$d),
247 (ins RRegu32:$a, i32imm:$b),
248 !strconcat(opcstr, ".u32\t$d, $a, $b"),
249 [(set RRegu32:$d, (opnode RRegu32:$a, imm:$b))]>;
250 def rr64 : InstPTX<(outs RRegu64:$d),
251 (ins RRegu64:$a, RRegu64:$b),
252 !strconcat(opcstr, ".u64\t$d, $a, $b"),
253 [(set RRegu64:$d, (opnode RRegu64:$a, RRegu64:$b))]>;
254 def ri64 : InstPTX<(outs RRegu64:$d),
255 (ins RRegu64:$a, i64imm:$b),
256 !strconcat(opcstr, ".u64\t$d, $a, $b"),
257 [(set RRegu64:$d, (opnode RRegu64:$a, imm:$b))]>;
// Bitwise logic instructions: same shape as INT3 but emitting the untyped
// .bNN (bit-bucket) type suffixes, as PTX logic ops operate on raw bits.
260 multiclass PTX_LOGIC<string opcstr, SDNode opnode> {
261 def rr16 : InstPTX<(outs RRegu16:$d),
262 (ins RRegu16:$a, RRegu16:$b),
263 !strconcat(opcstr, ".b16\t$d, $a, $b"),
264 [(set RRegu16:$d, (opnode RRegu16:$a, RRegu16:$b))]>;
265 def ri16 : InstPTX<(outs RRegu16:$d),
266 (ins RRegu16:$a, i16imm:$b),
267 !strconcat(opcstr, ".b16\t$d, $a, $b"),
268 [(set RRegu16:$d, (opnode RRegu16:$a, imm:$b))]>;
269 def rr32 : InstPTX<(outs RRegu32:$d),
270 (ins RRegu32:$a, RRegu32:$b),
271 !strconcat(opcstr, ".b32\t$d, $a, $b"),
272 [(set RRegu32:$d, (opnode RRegu32:$a, RRegu32:$b))]>;
273 def ri32 : InstPTX<(outs RRegu32:$d),
274 (ins RRegu32:$a, i32imm:$b),
275 !strconcat(opcstr, ".b32\t$d, $a, $b"),
276 [(set RRegu32:$d, (opnode RRegu32:$a, imm:$b))]>;
277 def rr64 : InstPTX<(outs RRegu64:$d),
278 (ins RRegu64:$a, RRegu64:$b),
279 !strconcat(opcstr, ".b64\t$d, $a, $b"),
280 [(set RRegu64:$d, (opnode RRegu64:$a, RRegu64:$b))]>;
281 def ri64 : InstPTX<(outs RRegu64:$d),
282 (ins RRegu64:$a, i64imm:$b),
283 !strconcat(opcstr, ".b64\t$d, $a, $b"),
284 [(set RRegu64:$d, (opnode RRegu64:$a, imm:$b))]>;
// 32-bit-only two-source instructions with no type suffix appended (the
// full type is baked into opcstr by the caller).  Unlike INT3, this also
// provides an imm-reg form, needed because the ops instantiated below
// (the shifts) are not commutative.
287 multiclass INT3ntnc<string opcstr, SDNode opnode> {
288 def rr : InstPTX<(outs RRegu32:$d),
289 (ins RRegu32:$a, RRegu32:$b),
290 !strconcat(opcstr, "\t$d, $a, $b"),
291 [(set RRegu32:$d, (opnode RRegu32:$a, RRegu32:$b))]>;
292 def ri : InstPTX<(outs RRegu32:$d),
293 (ins RRegu32:$a, i32imm:$b),
294 !strconcat(opcstr, "\t$d, $a, $b"),
295 [(set RRegu32:$d, (opnode RRegu32:$a, imm:$b))]>;
296 def ir : InstPTX<(outs RRegu32:$d),
297 (ins i32imm:$a, RRegu32:$b),
298 !strconcat(opcstr, "\t$d, $a, $b"),
299 [(set RRegu32:$d, (opnode imm:$a, RRegu32:$b))]>;
// setp (set-predicate) comparison: compares two RC values under condition
// `cmp` and writes a predicate register.  regclsname/cmpstr are the PTX
// mnemonic fragments (e.g. "u32", "lt").
// NOTE(review): the `def` lines naming the two variants inside this
// multiclass (presumably a reg-reg and a reg-imm form) are not visible
// in this listing -- confirm against the full file.
302 multiclass PTX_SETP<RegisterClass RC, string regclsname, Operand immcls,
303 CondCode cmp, string cmpstr> {
305 : InstPTX<(outs Preds:$d), (ins RC:$a, RC:$b),
306 !strconcat("setp.", cmpstr, ".", regclsname, "\t$d, $a, $b"),
307 [(set Preds:$d, (setcc RC:$a, RC:$b, cmp))]>;
309 : InstPTX<(outs Preds:$d), (ins RC:$a, immcls:$b),
310 !strconcat("setp.", cmpstr, ".", regclsname, "\t$d, $a, $b"),
311 [(set Preds:$d, (setcc RC:$a, imm:$b, cmp))]>;
// Load instructions for one value type: six addressing variants --
// (reg+reg, reg+imm, imm+imm) x (32-/64-bit addresses) -- each guarded
// by the matching address-size predicate so only one width is selectable
// per subtarget.
// NOTE(review): the (ins ...) operand lines of these defs are not
// visible in this listing -- confirm against the full file.
314 multiclass PTX_LD<string opstr, string typestr, RegisterClass RC, PatFrag pat_load> {
315 def rr32 : InstPTX<(outs RC:$d),
317 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
318 [(set RC:$d, (pat_load ADDRrr32:$a))]>, Requires<[Use32BitAddresses]>;
319 def rr64 : InstPTX<(outs RC:$d),
321 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
322 [(set RC:$d, (pat_load ADDRrr64:$a))]>, Requires<[Use64BitAddresses]>;
323 def ri32 : InstPTX<(outs RC:$d),
325 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
326 [(set RC:$d, (pat_load ADDRri32:$a))]>, Requires<[Use32BitAddresses]>;
327 def ri64 : InstPTX<(outs RC:$d),
329 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
330 [(set RC:$d, (pat_load ADDRri64:$a))]>, Requires<[Use64BitAddresses]>;
331 def ii32 : InstPTX<(outs RC:$d),
333 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
334 [(set RC:$d, (pat_load ADDRii32:$a))]>, Requires<[Use32BitAddresses]>;
335 def ii64 : InstPTX<(outs RC:$d),
337 !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
338 [(set RC:$d, (pat_load ADDRii64:$a))]>, Requires<[Use64BitAddresses]>;
// Instantiate the load variants for every supported value type.
341 multiclass PTX_LD_ALL<string opstr, PatFrag pat_load> {
342 defm u16 : PTX_LD<opstr, ".u16", RRegu16, pat_load>;
343 defm u32 : PTX_LD<opstr, ".u32", RRegu32, pat_load>;
344 defm u64 : PTX_LD<opstr, ".u64", RRegu64, pat_load>;
345 defm f32 : PTX_LD<opstr, ".f32", RRegf32, pat_load>;
346 defm f64 : PTX_LD<opstr, ".f64", RRegf64, pat_load>;
// Store instructions, mirroring PTX_LD: six addressing variants guarded
// by address-size predicates.  The stored value $d comes first in (ins).
// NOTE(review): the rr variants reuse the MEMri operand classes while
// selecting via the ADDRrr complex patterns -- confirm this pairing is
// intended (the register+register address is printed as base+offset).
349 multiclass PTX_ST<string opstr, string typestr, RegisterClass RC, PatFrag pat_store> {
350 def rr32 : InstPTX<(outs),
351 (ins RC:$d, MEMri32:$a),
352 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
353 [(pat_store RC:$d, ADDRrr32:$a)]>, Requires<[Use32BitAddresses]>;
354 def rr64 : InstPTX<(outs),
355 (ins RC:$d, MEMri64:$a),
356 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
357 [(pat_store RC:$d, ADDRrr64:$a)]>, Requires<[Use64BitAddresses]>;
358 def ri32 : InstPTX<(outs),
359 (ins RC:$d, MEMri32:$a),
360 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
361 [(pat_store RC:$d, ADDRri32:$a)]>, Requires<[Use32BitAddresses]>;
362 def ri64 : InstPTX<(outs),
363 (ins RC:$d, MEMri64:$a),
364 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
365 [(pat_store RC:$d, ADDRri64:$a)]>, Requires<[Use64BitAddresses]>;
366 def ii32 : InstPTX<(outs),
367 (ins RC:$d, MEMii32:$a),
368 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
369 [(pat_store RC:$d, ADDRii32:$a)]>, Requires<[Use32BitAddresses]>;
370 def ii64 : InstPTX<(outs),
371 (ins RC:$d, MEMii64:$a),
372 !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
373 [(pat_store RC:$d, ADDRii64:$a)]>, Requires<[Use64BitAddresses]>;
// Instantiate the store variants for every supported value type.
376 multiclass PTX_ST_ALL<string opstr, PatFrag pat_store> {
377 defm u16 : PTX_ST<opstr, ".u16", RRegu16, pat_store>;
378 defm u32 : PTX_ST<opstr, ".u32", RRegu32, pat_store>;
379 defm u64 : PTX_ST<opstr, ".u64", RRegu64, pat_store>;
380 defm f32 : PTX_ST<opstr, ".f32", RRegf32, pat_store>;
381 defm f64 : PTX_ST<opstr, ".f64", RRegf64, pat_store>;
384 //===----------------------------------------------------------------------===//
386 //===----------------------------------------------------------------------===//
388 ///===- Integer Arithmetic Instructions -----------------------------------===//
// Integer arithmetic for u16/u32/u64, reg-reg and reg-imm forms.
// mul.lo keeps only the low half of the product.
390 defm ADD : INT3<"add", add>;
391 defm SUB : INT3<"sub", sub>;
392 defm MUL : INT3<"mul.lo", mul>; // FIXME: Allow 32x32 -> 64 multiplies
394 ///===- Floating-Point Arithmetic Instructions ----------------------------===//
396 // Standard Binary Operations
// FP add/sub/mul in f32/f64, reg-reg and reg-imm forms (PTX_FLOAT_3OP
// appends the .f32/.f64 type suffix).
397 defm FADD : PTX_FLOAT_3OP<"add", fadd>;
398 defm FSUB : PTX_FLOAT_3OP<"sub", fsub>;
399 defm FMUL : PTX_FLOAT_3OP<"mul", fmul>;
401 // TODO: Allow user selection of rounding modes for fdiv.
402 // For division, we need to have f32 and f64 differently.
403 // For f32, we just always use .approx since it is supported on all hardware
404 // for PTX 1.4+, which is our minimum target.
// f32 division: always div.approx (see rationale in the comment above).
405 def FDIVrr32 : InstPTX<(outs RRegf32:$d),
406 (ins RRegf32:$a, RRegf32:$b),
407 "div.approx.f32\t$d, $a, $b",
408 [(set RRegf32:$d, (fdiv RRegf32:$a, RRegf32:$b))]>;
409 def FDIVri32 : InstPTX<(outs RRegf32:$d),
410 (ins RRegf32:$a, f32imm:$b),
411 "div.approx.f32\t$d, $a, $b",
412 [(set RRegf32:$d, (fdiv RRegf32:$a, fpimm:$b))]>;
414 // For f64, we must specify a rounding for sm 1.3+ but *not* for sm 1.0.
// sm_13 and later: round-to-nearest-even (div.rn.f64).
415 def FDIVrr64SM13 : InstPTX<(outs RRegf64:$d),
416 (ins RRegf64:$a, RRegf64:$b),
417 "div.rn.f64\t$d, $a, $b",
418 [(set RRegf64:$d, (fdiv RRegf64:$a, RRegf64:$b))]>,
419 Requires<[SupportsSM13]>;
420 def FDIVri64SM13 : InstPTX<(outs RRegf64:$d),
421 (ins RRegf64:$a, f64imm:$b),
422 "div.rn.f64\t$d, $a, $b",
423 [(set RRegf64:$d, (fdiv RRegf64:$a, fpimm:$b))]>,
424 Requires<[SupportsSM13]>;
// Pre-sm_13: plain div.f64 with no rounding modifier.
425 def FDIVrr64SM10 : InstPTX<(outs RRegf64:$d),
426 (ins RRegf64:$a, RRegf64:$b),
427 "div.f64\t$d, $a, $b",
428 [(set RRegf64:$d, (fdiv RRegf64:$a, RRegf64:$b))]>,
429 Requires<[DoesNotSupportSM13]>;
430 def FDIVri64SM10 : InstPTX<(outs RRegf64:$d),
431 (ins RRegf64:$a, f64imm:$b),
432 "div.f64\t$d, $a, $b",
433 [(set RRegf64:$d, (fdiv RRegf64:$a, fpimm:$b))]>,
434 Requires<[DoesNotSupportSM13]>;
438 // Multi-operation hybrid instructions
440 // The selection of mad/fma is tricky. In some cases, they are the *same*
441 // instruction, but in other cases we may prefer one or the other. Also,
442 // different PTX versions differ on whether rounding mode flags are required.
443 // In the short term, mad is supported on all PTX versions and we use a
444 // default rounding mode no matter what shader model or PTX version.
445 // TODO: Allow the rounding mode to be selectable through llc.
// Fused multiply-add (d = a*b + c) via PTX_FLOAT_4OP.  sm_13+ requires a
// rounding modifier on mad; earlier targets take plain mad.  Exactly one
// of the two defm sets is selectable per subtarget.
446 defm FMADSM13 : PTX_FLOAT_4OP<"mad.rn", fmul, fadd>, Requires<[SupportsSM13]>;
447 defm FMAD : PTX_FLOAT_4OP<"mad", fmul, fadd>, Requires<[DoesNotSupportSM13]>;
449 ///===- Floating-Point Intrinsic Instructions -----------------------------===//
// Unary FP intrinsics.  sqrt uses round-to-nearest; sin/cos use the
// hardware approximation.
// NOTE(review): the (ins ...) lines of these defs are not visible in
// this listing.  Also, the PTX ISA documents sin.approx/cos.approx only
// for .f32 -- confirm the .f64 forms below are accepted by ptxas.
451 def FSQRT32 : InstPTX<(outs RRegf32:$d),
453 "sqrt.rn.f32\t$d, $a",
454 [(set RRegf32:$d, (fsqrt RRegf32:$a))]>;
456 def FSQRT64 : InstPTX<(outs RRegf64:$d),
458 "sqrt.rn.f64\t$d, $a",
459 [(set RRegf64:$d, (fsqrt RRegf64:$a))]>;
461 def FSIN32 : InstPTX<(outs RRegf32:$d),
463 "sin.approx.f32\t$d, $a",
464 [(set RRegf32:$d, (fsin RRegf32:$a))]>;
466 def FSIN64 : InstPTX<(outs RRegf64:$d),
468 "sin.approx.f64\t$d, $a",
469 [(set RRegf64:$d, (fsin RRegf64:$a))]>;
471 def FCOS32 : InstPTX<(outs RRegf32:$d),
473 "cos.approx.f32\t$d, $a",
474 [(set RRegf32:$d, (fcos RRegf32:$a))]>;
476 def FCOS64 : InstPTX<(outs RRegf64:$d),
478 "cos.approx.f64\t$d, $a",
479 [(set RRegf64:$d, (fcos RRegf64:$a))]>;
482 ///===- Comparison and Selection Instructions -----------------------------===//
// u32 predicate-setting comparisons.  The unsigned condition codes
// (SETULT etc.) pair with the plain lt/le/gt/ge mnemonics because the
// .u32 type suffix already selects unsigned comparison semantics.
484 defm SETPEQu32 : PTX_SETP<RRegu32, "u32", i32imm, SETEQ, "eq">;
485 defm SETPNEu32 : PTX_SETP<RRegu32, "u32", i32imm, SETNE, "ne">;
486 defm SETPLTu32 : PTX_SETP<RRegu32, "u32", i32imm, SETULT, "lt">;
487 defm SETPLEu32 : PTX_SETP<RRegu32, "u32", i32imm, SETULE, "le">;
488 defm SETPGTu32 : PTX_SETP<RRegu32, "u32", i32imm, SETUGT, "gt">;
489 defm SETPGEu32 : PTX_SETP<RRegu32, "u32", i32imm, SETUGE, "ge">;
491 ///===- Logic and Shift Instructions --------------------------------------===//
// 32-bit shifts: the opcode string carries the full type since INT3ntnc
// appends none.  shr.u32 is a logical (zero-filling) right shift for SRL;
// shr.s32 is arithmetic (sign-filling) for SRA.
493 defm SHL : INT3ntnc<"shl.b32", PTXshl>;
494 defm SRL : INT3ntnc<"shr.u32", PTXsrl>;
495 defm SRA : INT3ntnc<"shr.s32", PTXsra>;
// Bitwise logic in b16/b32/b64 forms.
497 defm AND : PTX_LOGIC<"and", and>;
498 defm OR : PTX_LOGIC<"or", or>;
499 defm XOR : PTX_LOGIC<"xor", xor>;
501 ///===- Data Movement and Conversion Instructions -------------------------===//
// Register-to-register moves for every register class.  Marked free of
// side effects so the optimizer may move or delete them.
// NOTE(review): the `def` lines naming these mov instructions are not
// visible in this listing -- confirm against the full file.
503 let neverHasSideEffects = 1 in {
505 : InstPTX<(outs Preds:$d), (ins Preds:$a), "mov.pred\t$d, $a", []>;
507 : InstPTX<(outs RRegu16:$d), (ins RRegu16:$a), "mov.u16\t$d, $a", []>;
509 : InstPTX<(outs RRegu32:$d), (ins RRegu32:$a), "mov.u32\t$d, $a", []>;
511 : InstPTX<(outs RRegu64:$d), (ins RRegu64:$a), "mov.u64\t$d, $a", []>;
513 : InstPTX<(outs RRegf32:$d), (ins RRegf32:$a), "mov.f32\t$d, $a", []>;
515 : InstPTX<(outs RRegf64:$d), (ins RRegf64:$a), "mov.f64\t$d, $a", []>;
// Immediate-to-register moves.  Rematerializable and as cheap as a move,
// so the register allocator may recompute instead of spilling.
// NOTE(review): the `def` lines naming these instructions are not
// visible in this listing -- confirm against the full file.
518 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
520 : InstPTX<(outs Preds:$d), (ins i1imm:$a), "mov.pred\t$d, $a",
521 [(set Preds:$d, imm:$a)]>;
523 : InstPTX<(outs RRegu16:$d), (ins i16imm:$a), "mov.u16\t$d, $a",
524 [(set RRegu16:$d, imm:$a)]>;
526 : InstPTX<(outs RRegu32:$d), (ins i32imm:$a), "mov.u32\t$d, $a",
527 [(set RRegu32:$d, imm:$a)]>;
529 : InstPTX<(outs RRegu64:$d), (ins i64imm:$a), "mov.u64\t$d, $a",
530 [(set RRegu64:$d, imm:$a)]>;
532 : InstPTX<(outs RRegf32:$d), (ins f32imm:$a), "mov.f32\t$d, $a",
533 [(set RRegf32:$d, fpimm:$a)]>;
535 : InstPTX<(outs RRegf64:$d), (ins f64imm:$a), "mov.f64\t$d, $a",
536 [(set RRegf64:$d, fpimm:$a)]>;
// Materializes a global symbol's address into a 32-bit register via the
// PTXcopyaddress node.  NOTE(review): the `def` line naming this
// instruction is not visible in this listing -- confirm.
539 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
541 : InstPTX<(outs RRegu32:$d), (ins i32imm:$a), "mov.u32\t$d, $a",
542 [(set RRegu32:$d, (PTXcopyaddress tglobaladdr:$a))]>;
// Loads for each address space, keyed by the load_* PatFrags above.
546 defm LDg : PTX_LD_ALL<"ld.global", load_global>;
547 defm LDc : PTX_LD_ALL<"ld.const", load_constant>;
548 defm LDl : PTX_LD_ALL<"ld.local", load_local>;
549 defm LDs : PTX_LD_ALL<"ld.shared", load_shared>;
551 // This is a special instruction that is manually inserted for kernel parameters
// (one per value type; no selection pattern -- they are created directly
// during lowering, using the MEMpi pseudo-address operand).
552 def LDpiU16 : InstPTX<(outs RRegu16:$d), (ins MEMpi:$a),
553 "ld.param.u16\t$d, [$a]", []>;
554 def LDpiU32 : InstPTX<(outs RRegu32:$d), (ins MEMpi:$a),
555 "ld.param.u32\t$d, [$a]", []>;
556 def LDpiU64 : InstPTX<(outs RRegu64:$d), (ins MEMpi:$a),
557 "ld.param.u64\t$d, [$a]", []>;
558 def LDpiF32 : InstPTX<(outs RRegf32:$d), (ins MEMpi:$a),
559 "ld.param.f32\t$d, [$a]", []>;
560 def LDpiF64 : InstPTX<(outs RRegf64:$d), (ins MEMpi:$a),
561 "ld.param.f64\t$d, [$a]", []>;
// Stores for each writable address space (no st.const -- constant memory
// is read-only).
564 defm STg : PTX_ST_ALL<"st.global", store_global>;
565 defm STl : PTX_ST_ALL<"st.local", store_local>;
566 defm STs : PTX_ST_ALL<"st.shared", store_shared>;
568 // defm STp : PTX_ST_ALL<"st.param", store_parameter>;
569 // defm LDp : PTX_LD_ALL<"ld.param", load_parameter>;
570 // TODO: Do something with st.param if/when it is needed.
// Zero-extends a predicate register into a u32 (selected for ISD zext).
// NOTE(review): the `def` line naming this instruction is not visible in
// this listing -- confirm against the full file.
573 : InstPTX<(outs RRegu32:$d), (ins Preds:$a), "cvt.u32.pred\t$d, $a",
574 [(set RRegu32:$d, (zext Preds:$a))]>;
576 ///===- Control Flow Instructions -----------------------------------------===//
// Unconditional branch (barrier: nothing after it executes).
// NOTE(review): the `def` lines naming the two branch instructions below
// are not visible in this listing -- confirm against the full file.
578 let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
580 : InstPTX<(outs), (ins brtarget:$d), "bra\t$d", [(br bb:$d)]>;
// Conditional branch (no barrier: fall-through is possible).  Selected
// manually rather than by pattern -- see the FIXME below.
583 let isBranch = 1, isTerminator = 1 in {
584 // FIXME: should be able to write a pattern for brcond, but can't use
585 // a two-value operand where a dag node expects two operands. :(
586 // NOTE: ARM & PowerPC backend also report the same problem
588 : InstPTX<(outs), (ins brtarget:$d), "bra\t$d",
589 [/*(brcond bb:$d, Preds:$p, i32imm:$c)*/]>;
// Function terminators: exit ends a kernel thread, ret returns from a
// device function (see the PTXexit/PTXret nodes above).
592 let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
593 def EXIT : InstPTX<(outs), (ins), "exit", [(PTXexit)]>;
594 def RET : InstPTX<(outs), (ins), "ret", [(PTXret)]>;
597 ///===- Intrinsic Instructions --------------------------------------------===//
599 include "PTXIntrinsicInstrInfo.td"