//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
    : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;

}

def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget.hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;

let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"

//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget : Operand<OtherVT>;

//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//

def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_ONE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETONE || N->get() == ISD::SETNE;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;

def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//

def COND_UEQ : PatLeaf <(cond), [{return N->get() == ISD::SETUEQ;}]>;
def COND_UNE : PatLeaf <(cond), [{return N->get() == ISD::SETUNE;}]>;
def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

// XXX - For some reason R600 version is preferring to use unordered
// for setne?
def COND_UNE_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//

class PrivateMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
}]>;

class PrivateLoad <SDPatternOperator op> : PrivateMemOp <
  (ops node:$ptr), (op node:$ptr)
>;

class PrivateStore <SDPatternOperator op> : PrivateMemOp <
  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;

def load_private : PrivateLoad <load>;

def truncstorei8_private : PrivateStore <truncstorei8>;
def truncstorei16_private : PrivateStore <truncstorei16>;
def store_private : PrivateStore <store>;

def global_store : PatFrag<(ops node:$val, node:$ptr),
    (store node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

// Global address space loads
def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

// Constant address space loads
def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;
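
// Note on the "az" (any-extend or zero-extend) fragments below (explanatory
// comment, not from the original source): they accept both ISD::EXTLOAD and
// ISD::ZEXTLOAD, since an instruction that zero-fills the high bits also
// satisfies an any-extending load, whose high bits are undefined.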

def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;

def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_flat : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_flat : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def extloadi8_private : PrivateLoad <az_extloadi8>;
def sextloadi8_private : PrivateLoad <sextloadi8>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_flat : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_flat : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def extloadi16_private : PrivateLoad <az_extloadi16>;
def sextloadi16_private : PrivateLoad <sextloadi16>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def az_extloadi32_global : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_flat : PatFrag<(ops node:$ptr),
                                 (az_extloadi32 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_constant : PatFrag<(ops node:$ptr),
                                     (az_extloadi32 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
                                   (truncstorei16 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_flat : PatFrag<(ops node:$val, node:$ptr),
                                (truncstorei8 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_flat : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei16 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_store : PatFrag<(ops node:$val, node:$ptr),
                          (store node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei8 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
}]>;

def local_load_aligned8bytes : Aligned8Bytes <
  (ops node:$ptr), (local_load node:$ptr)
>;

def local_store_aligned8bytes : Aligned8Bytes <
  (ops node:$val, node:$ptr), (local_store node:$val, node:$ptr)
>;

class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;

def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

def atomic_cmp_swap_32_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i32 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_cmp_swap_64_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i64 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def flat_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def flat_store : PatFrag<(ops node:$val, node:$ptr),
                         (store node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
                         (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
}]>;

class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
  (ops node:$ptr, node:$value),
  (atomic_op node:$ptr, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]
>;

def atomic_swap_global : global_binary_atomic_op<atomic_swap>;
def atomic_add_global : global_binary_atomic_op<atomic_load_add>;
def atomic_and_global : global_binary_atomic_op<atomic_load_and>;
def atomic_max_global : global_binary_atomic_op<atomic_load_max>;
def atomic_min_global : global_binary_atomic_op<atomic_load_min>;
def atomic_or_global : global_binary_atomic_op<atomic_load_or>;
def atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
def atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
def atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
def atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;

//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//

class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
int FP32_NEG_ONE = 0xbf800000;
int FP32_ONE = 0x3f800000;
}
def CONST : Constants;
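
// Sanity check on the encodings above (a worked example, not from the
// original source): an IEEE-754 binary32 value is sign(1) | exponent(8) |
// mantissa(23). For TWO_PI = 0x40c90fdb:
//   sign = 0, exponent = 0x81 (129, unbiased 2), mantissa = 0x490fdb
//   value = 1.5707963... x 2^2 = 6.2831853... ~= 2*pi
// Likewise FP32_ONE = 0x3f800000 has exponent 127 (unbiased 0) and zero
// mantissa, i.e. 1.0f, and FP_UINT_MAX_PLUS_1 = 0x4f800000 is 1.0 x 2^32.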

def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

def FP_HALF : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(0.5);}]
>;

let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1 in {

class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "CLAMP $dst, $src0",
  [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;

class FABS <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FABS $dst, $src0",
  [(set f32:$dst, (fabs f32:$src0))]
>;

class FNEG <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FNEG $dst, $src0",
  [(set f32:$dst, (fneg f32:$src0))]
>;

} // usesCustomInserter = 1

multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
                              ComplexPattern addrPat> {
let UseNamedOperandTable = 1 in {

  def RegisterLoad : AMDGPUShaderInst <
    (outs dstClass:$dst),
    (ins addrClass:$addr, i32imm:$chan),
    "RegisterLoad $dst, $addr",
    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
  > {
    let isRegisterLoad = 1;
  }

  def RegisterStore : AMDGPUShaderInst <
    (outs),
    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
    "RegisterStore $val, $addr",
    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
  > {
    let isRegisterStore = 1;
  }
}
}

} // End isCodeGenOnly = 1, isPseudo = 1

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : Pat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;
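
// Why this rewrite works (a short derivation, not an original comment):
//   x^y = 2^(y * log2(x))   for x > 0
// so fpow lowers to one log, one multiply, and one exp. This assumes the
// log_ieee/exp_ieee instructions supplied here are the base-2 forms.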

/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : Pat<
  (sub_type (vector_extract vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : Pat <
  (vector_insert vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;
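
// Illustrative instantiations (hypothetical, not defs from this file): for
// v4f32 vectors held in 128-bit registers, lane 1 would be wired up as
//   def : Extract_Element <f32, v4f32, 1, sub1>;
//   def : Insert_Element  <f32, v4f32, 1, sub1>;
// turning lane extracts and inserts into plain sub-register operations.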

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

// BFI_INT patterns

multiclass BFIPatterns <Instruction BFI_INT,
                        Instruction LoadImm32,
                        RegisterClass RC64> {
  // Definition from ISA doc:
  // (y & x) | (z & ~x)
  def : Pat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // SHA-256 Ch function
  // z ^ (x & (y ^ z))
  def : Pat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

  def : Pat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
  >;

  def : Pat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 0x7fffffff),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;
}
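
// Checking the two identities above (worked out here, not from the ISA doc):
// BFI_INT computes (x & y) | (~x & z), a bitwise select with x as the mask.
// The first pattern is that definition with commuted AND operands. For the
// Ch pattern, evaluate z ^ (x & (y ^ z)) per bit of x:
//   x = 1:  z ^ (y ^ z) = y
//   x = 0:  z ^ 0       = z
// which is exactly the select (x & y) | (~x & z).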

// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
>;
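
// Worked check of the Ma rewrite above (a derivation, not an original
// comment): Ma is the bitwise majority of x, y, z. Taking s = x ^ y as the
// BFI select mask gives (s & z) | (~s & y); where a bit of x equals the
// corresponding bit of y, the majority is that shared bit (= y's bit), and
// where they differ, the majority is decided by z. That is exactly
// BFI_INT (XOR x, y), z, y.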

// Bitfield extract patterns

/*

XXX: The BFE pattern is not working correctly because the XForm is not being
applied.

def legalshift32 : ImmLeaf <i32, [{return Imm >= 0 && Imm < 32;}]>;
def bfemask : PatLeaf <(imm), [{return isMask_32(N->getZExtValue());}],
  SDNodeXForm<imm, [{
    return CurDAG->getTargetConstant(countTrailingOnes(N->getZExtValue()),
                                     MVT::i32);
  }]>>;

class BFEPattern <Instruction BFE> : Pat <
  (and (srl i32:$x, legalshift32:$y), bfemask:$z),
  (BFE $x, $y, $z)
>;

*/
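
// What the XForm is meant to do (an illustrative example, not original):
// for (x >> 3) & 0xff, bfemask matches 0xff (a mask of 8 trailing ones),
// the XForm rewrites it to the constant 8, and the pattern would select
// BFE x, 3, 8 -- extract 8 bits starting at bit 3.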

// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;
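
// Why bit-align implements rotr (a worked example, not an original comment):
// a bit-align instruction selects a 32-bit window from the 64-bit
// concatenation (srcA:srcB) >> shift, so with both halves equal to src0 the
// bits shifted out on the right reappear on the left. E.g.
// rotr(0x000000AB, 8) takes 0x000000AB000000AB >> 8 and keeps the low
// 32 bits: 0xAB000000.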

// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

// Special conversion patterns

def cvt_rpi_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;

def cvt_flr_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor $src)),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;
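
// Reading these fragments (explanatory note, not original): cvt_flr is a
// plain floor-then-convert, i.e. a round-toward-negative-infinity
// conversion; cvt_rpi computes floor(x + 0.5), round-to-nearest with ties
// going up (e.g. 2.5 -> 3, -2.5 -> -2). Both folds are only safe when NaNs
// are excluded, hence the NoNaNsFPMath guard.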

class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;

class IMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

class UMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_imad24 : Pat <
    (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_imul24 : Pat <
    (AMDGPUmul_i24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_umad24 : Pat <
    (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_umul24 : Pat <
    (AMDGPUmul_u24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}
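
// Illustrative instantiation (hypothetical instruction names, not defs from
// this file): a target with a 24-bit multiply but no 24-bit mad could expand
// both nodes via
//   defm : Expand24UBitOps<SomeMul24Inst, SomeAddInst>;
// so that mad24(a, b, c) becomes add(mul24(a, b), c).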

class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

class RsqPat<Instruction RsqInst, ValueType vt> : Pat <
  (AMDGPUrcp (fsqrt vt:$src)),
  (RsqInst $src)
>;

include "R600Instructions.td"
include "R700Instructions.td"
include "EvergreenInstructions.td"
include "CaymanInstructions.td"

include "SIInstrInfo.td"