//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//
class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  // Expose the load/store flags through TSFlags so later passes can query them.
  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}
class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
    : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;
}
def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget.hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;
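// ADDRIndirect matches an indirect address as two operands (a base and an
// offset), produced by SelectADDRIndirect in the target's DAG selector.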
let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"
//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget : Operand<OtherVT>;
//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//
def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;

def COND_UNE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;
def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;
//===----------------------------------------------------------------------===//
// PatLeafs for unsigned comparisons
//===----------------------------------------------------------------------===//

def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;
//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;
//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//
class PrivateMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
}]>;

class PrivateLoad <SDPatternOperator op> : PrivateMemOp <
  (ops node:$ptr), (op node:$ptr)
>;

class PrivateStore <SDPatternOperator op> : PrivateMemOp <
  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;
def extloadi8_private : PrivateLoad <extloadi8>;
def sextloadi8_private : PrivateLoad <sextloadi8>;
def extloadi16_private : PrivateLoad <extloadi16>;
def sextloadi16_private : PrivateLoad <sextloadi16>;
def load_private : PrivateLoad <load>;

def truncstorei8_private : PrivateStore <truncstorei8>;
def truncstorei16_private : PrivateStore <truncstorei16>;
def store_private : PrivateStore <store>;
def global_store : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

// Global address space loads
def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

// Constant address space loads
def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;
// An any-extending or zero-extending (but not sign-extending) load.
def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;
def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;
def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;
def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def az_extloadi32_global : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_constant : PatFrag<(ops node:$ptr),
                                     (az_extloadi32 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;
def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
                                   (truncstorei16 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_store : PatFrag<(ops node:$val, node:$ptr),
                          (store node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei8 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;
class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
          (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;
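// Local (LDS) address space versions of the generic binary atomic operations.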
def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;
def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;
def atomic_cmp_swap_32_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i32 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_cmp_swap_64_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i64 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;
//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//

// A multiply-add formed from separate fmul and fadd nodes.
def fmad : PatFrag <
  (ops node:$src0, node:$src1, node:$src2),
  (fadd (fmul node:$src0, node:$src1), node:$src2)
>;
class Constants {
int TWO_PI = 0x40c90fdb;             // 2 * pi as an f32 bit pattern
int TWO_PI_INV = 0x3e22f983;         // 1 / (2 * pi) as an f32 bit pattern
int FP_UINT_MAX_PLUS_1 = 0x4f800000; // 1 << 32 in floating point encoding
int FP32_NEG_ONE = 0xbf800000;       // -1.0f
int FP32_ONE = 0x3f800000;           // 1.0f
}
def CONST : Constants;
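// FP_ZERO below matches both +0.0 and -0.0, since APFloat::isZero() ignores
// the sign bit.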
def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;
let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1 in {

class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "CLAMP $dst, $src0",
  [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;

class FABS <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FABS $dst, $src0",
  [(set f32:$dst, (fabs f32:$src0))]
>;

class FNEG <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FNEG $dst, $src0",
  [(set f32:$dst, (fneg f32:$src0))]
>;

} // usesCustomInserter = 1
multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
                              ComplexPattern addrPat> {
let UseNamedOperandTable = 1 in {

  def RegisterLoad : AMDGPUShaderInst <
    (outs dstClass:$dst),
    (ins addrClass:$addr, i32imm:$chan),
    "RegisterLoad $dst, $addr",
    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
  > {
    let isRegisterLoad = 1;
  }

  def RegisterStore : AMDGPUShaderInst <
    (outs),
    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
    "RegisterStore $val, $addr",
    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
  > {
    let isRegisterStore = 1;
  }
}
}

} // End isCodeGenOnly = 1, isPseudo = 1
/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : Pat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;
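// Expands pow via the identity x^y = exp(y * log(x)); when the log/exp
// instructions are base-2 the same identity reads x^y = 2^(y * log2(x))
// for x > 0.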
/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : Pat <
  (sub_type (vector_extract vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;
/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : Pat <
  (vector_insert vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;
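// Targets instantiate these once per vector component, e.g. a hypothetical
// v2i32 case:
//   def : Extract_Element <i32, v2i32, 0, sub0>;
//   def : Insert_Element <i32, v2i32, 1, sub1>;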
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;
multiclass BFIPatterns <Instruction BFI_INT, Instruction LoadImm32> {

  // Definition from ISA doc:
  // (y & x) | (z & ~x)
  def : Pat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // SHA-256 Ch function
  // z ^ (x & (y ^ z)), which simplifies to (x & y) | (~x & z) and therefore
  // also maps onto BFI_INT.
  def : Pat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

  def : Pat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
  >;

  // For f64, only the high dword holds the sign bit, so BFI_INT is applied to
  // the sub1 half and the low dword of $src0 is copied through unchanged.
  def : Pat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
    (i32 (EXTRACT_SUBREG $src0, sub0)), sub0),
    (BFI_INT (LoadImm32 0x7fffffff),
             (i32 (EXTRACT_SUBREG $src0, sub1)),
             (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;
}
// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
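// This holds because Ma(x, y, z) = (x & y) | (x & z) | (y & z): where x and y
// differ the majority is z, and where they agree it is x (== y), which is
// exactly BFI_INT(x ^ y, z, y) under the (y & x) | (z & ~x) definition above.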
class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
>;
// Bitfield extract patterns

/*
XXX: The BFE pattern is not working correctly because the XForm is not being
applied.
*/

def legalshift32 : ImmLeaf <i32, [{return Imm >= 0 && Imm < 32;}]>;
def bfemask : PatLeaf <(imm), [{return isMask_32(N->getZExtValue());}],
  SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(CountTrailingOnes_32(N->getZExtValue()), MVT::i32);}]>>;

class BFEPattern <Instruction BFE> : Pat <
  (and (srl i32:$x, legalshift32:$y), bfemask:$z),
  (BFE $x, $y, $z)
>;
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;
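// BIT_ALIGN shifts the 64-bit concatenation of its first two operands right
// by the third and keeps the low 32 bits, so passing $src0 twice turns the
// funnel shift into a 32-bit rotate right.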
// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

/*
class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;
*/
class IMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

class UMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;
multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_imad24 : Pat <
    (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_imul24 : Pat <
    (AMDGPUmul_i24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_umad24 : Pat <
    (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_umul24 : Pat <
    (AMDGPUmul_u24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}
class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

multiclass RsqPat<Instruction RsqInst, ValueType vt> {
  def : Pat <
    (fdiv FP_ONE, (fsqrt vt:$src)),
    (RsqInst $src)
  >;

  def : Pat <
    (AMDGPUrcp (fsqrt vt:$src)),
    (RsqInst $src)
  >;
}
include "R600Instructions.td"
include "R700Instructions.td"
include "EvergreenInstructions.td"
include "CaymanInstructions.td"

include "SIInstrInfo.td"