//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  let isCodeGenOnly = 1;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

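// The two high TSFlags bits are read back on the C++ side (e.g. by
// AMDGPUInstrInfo::isRegisterLoad()/isRegisterStore()) to recognize the
// pseudo register load/store instructions defined by the RegisterLoadStore
// multiclass further down in this file.
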
class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
    : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;

}

def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget.hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;

let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"

//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget : Operand<OtherVT>;

//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//

def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_ONE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETONE || N->get() == ISD::SETNE;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;

def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;

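// The COND_* leaves match the CondCodeSDNode operand of setcc/selectcc, so a
// target pattern can key on the condition code. A minimal sketch, assuming a
// hypothetical CMP_EQ instruction:
//
//   def : Pat <
//     (i32 (setcc f32:$lhs, f32:$rhs, COND_OEQ)),
//     (CMP_EQ $lhs, $rhs)
//   >;
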
//===----------------------------------------------------------------------===//
// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//

def COND_UEQ : PatLeaf <(cond), [{return N->get() == ISD::SETUEQ;}]>;
def COND_UNE : PatLeaf <(cond), [{return N->get() == ISD::SETUNE;}]>;
def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

// XXX - For some reason the R600 version prefers to use the unordered
// comparison for setne?
def COND_UNE_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//

class PrivateMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
}]>;

class PrivateLoad <SDPatternOperator op> : PrivateMemOp <
  (ops node:$ptr), (op node:$ptr)
>;

class PrivateStore <SDPatternOperator op> : PrivateMemOp <
  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;

def extloadi8_private : PrivateLoad <extloadi8>;
def sextloadi8_private : PrivateLoad <sextloadi8>;
def extloadi16_private : PrivateLoad <extloadi16>;
def sextloadi16_private : PrivateLoad <sextloadi16>;
def load_private : PrivateLoad <load>;

def truncstorei8_private : PrivateStore <truncstorei8>;
def truncstorei16_private : PrivateStore <truncstorei16>;
def store_private : PrivateStore <store>;

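// PrivateMemOp wraps an ordinary load/store fragment with an address space
// check, so each *_private fragment only matches scratch accesses. A target
// would select them with patterns like this (SCRATCH_LOAD is hypothetical):
//
//   def : Pat <(load_private i32:$addr), (SCRATCH_LOAD $addr)>;
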
def global_store : PatFrag<(ops node:$val, node:$ptr),
    (store node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

// Global address space loads
def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

// Constant address space loads
def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;

def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_flat : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_flat : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_flat : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_flat : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def az_extloadi32_global : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_flat : PatFrag<(ops node:$ptr),
                                 (az_extloadi32 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_constant : PatFrag<(ops node:$ptr),
                                     (az_extloadi32 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
                                   (truncstorei16 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_flat : PatFrag<(ops node:$val, node:$ptr),
                                (truncstorei8 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_flat : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei16 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_store : PatFrag<(ops node:$val, node:$ptr),
                          (store node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei8 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
}]>;

def local_load_aligned8bytes : Aligned8Bytes <
  (ops node:$ptr), (local_load node:$ptr)
>;

def local_store_aligned8bytes : Aligned8Bytes <
  (ops node:$val, node:$ptr), (local_store node:$val, node:$ptr)
>;

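// The aligned variants let a target form a single 64-bit DS access only when
// 8-byte alignment is provable. A sketch, assuming a hypothetical DS_READ_B64:
//
//   def : Pat <(v2i32 (local_load_aligned8bytes i32:$ptr)), (DS_READ_B64 $ptr)>;
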
class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;

def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                            (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

def atomic_cmp_swap_32_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i32 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_cmp_swap_64_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i64 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def flat_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def flat_store : PatFrag<(ops node:$val, node:$ptr),
                         (store node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
                          (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
}]>;

class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
  (ops node:$ptr, node:$value),
  (atomic_op node:$ptr, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]
>;

def atomic_swap_global : global_binary_atomic_op<atomic_swap>;
def atomic_add_global : global_binary_atomic_op<atomic_load_add>;
def atomic_and_global : global_binary_atomic_op<atomic_load_and>;
def atomic_max_global : global_binary_atomic_op<atomic_load_max>;
def atomic_min_global : global_binary_atomic_op<atomic_load_min>;
def atomic_or_global : global_binary_atomic_op<atomic_load_or>;
def atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
def atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
def atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
def atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;

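// Each fragment above matches an ISD::ATOMIC_* node only in its address
// space; a target maps them onto its atomic machine instructions, e.g.
// (GLOBAL_ATOMIC_ADD is hypothetical):
//
//   def : Pat <(atomic_add_global i32:$ptr, i32:$val), (GLOBAL_ATOMIC_ADD $ptr, $val)>;
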
//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//

def fmad : PatFrag <
  (ops node:$src0, node:$src1, node:$src2),
  (fadd (fmul node:$src0, node:$src1), node:$src2)
>;

class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
int FP32_NEG_ONE = 0xbf800000;
int FP32_ONE = 0x3f800000;
}
def CONST : Constants;

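// These are the raw IEEE-754 single-precision bit patterns: 0x40c90fdb is
// 2*pi, 0x40490fdb is pi, and 0x3e22f983 is 1/(2*pi). Target pattern files
// reference them through CONST, e.g. CONST.TWO_PI_INV as a literal operand.
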
def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

def FP_HALF : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(0.5);}]
>;

let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1 in {

class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "CLAMP $dst, $src0",
  [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;

class FABS <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FABS $dst, $src0",
  [(set f32:$dst, (fabs f32:$src0))]
>;

class FNEG <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FNEG $dst, $src0",
  [(set f32:$dst, (fneg f32:$src0))]
>;

} // usesCustomInserter = 1

multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
                    ComplexPattern addrPat> {
let UseNamedOperandTable = 1 in {

  def RegisterLoad : AMDGPUShaderInst <
    (outs dstClass:$dst),
    (ins addrClass:$addr, i32imm:$chan),
    "RegisterLoad $dst, $addr",
    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
  > {
    let isRegisterLoad = 1;
  }

  def RegisterStore : AMDGPUShaderInst <
    (outs),
    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
    "RegisterStore $val, $addr",
    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
  > {
    let isRegisterStore = 1;
  }
}
}

} // End isCodeGenOnly = 1, isPseudo = 1

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : Pat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;

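// POW_Common lowers fpow with the identity x^y = exp2(y * log2(x)), built
// from the target's IEEE-compliant log, multiply, and exp instructions.
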
/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : Pat<
  (sub_type (vector_extract vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : Pat <
  (vector_insert vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;

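// Both classes reduce element access at a constant index to a subregister
// copy. Targets stamp out one def per (vector type, index) pair, for example
// (hypothetical): def : Extract_Element <f32, v4f32, 1, sub1>;
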
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

// BFI_INT patterns

multiclass BFIPatterns <Instruction BFI_INT,
                        Instruction LoadImm32,
                        RegisterClass RC64> {
  // Definition from ISA doc:
  //     (y & x) | (z & ~x)
  def : Pat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // SHA-256 Ch function
  // z ^ (x & (y ^ z))
  def : Pat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

  def : Pat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
  >;

  def : Pat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 0x7fffffff),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;
}

// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
>;

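// Checking the rewrite above: the ISA-doc pattern gives
// BFI_INT(m, a, b) = (a & m) | (b & ~m). With m = x ^ y, BFI_INT(m, z, y)
// returns y wherever x == y (and there the majority is that shared bit) and
// z wherever x != y (where z breaks the tie), which is exactly the majority
// function Ma(x, y, z) = (x & y) | (x & z) | (y & z).
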
// Bitfield extract patterns

/*

XXX: The BFE pattern is not working correctly because the XForm is not being
applied.

def legalshift32 : ImmLeaf <i32, [{return Imm >= 0 && Imm < 32;}]>;
def bfemask : PatLeaf <(imm), [{return isMask_32(N->getZExtValue());}],
                            SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(countTrailingOnes(N->getZExtValue()), MVT::i32);}]>>;

class BFEPattern <Instruction BFE> : Pat <
  (and (srl i32:$x, legalshift32:$y), bfemask:$z),
  (BFE $x, $y, $z)
>;

*/

// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;

// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

// Special conversion patterns

def cvt_rpi_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
  [{ (void) N; return TM.Options.NoNaNsFPMath; }]
>;

def cvt_flr_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor $src)),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;

/*
class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;
*/

class IMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

class UMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_imad24 : Pat <
    (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_imul24 : Pat <
    (AMDGPUmul_i24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_umad24 : Pat <
    (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_umul24 : Pat <
    (AMDGPUmul_u24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

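// Targets without native 24-bit multiply-add instantiate these to expand the
// AMDGPU 24-bit nodes into a plain multiply/add pair, e.g. (instruction names
// are hypothetical):
//
//   defm : Expand24UBitOps<MULLO_UINT, ADD_INT>;
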
class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

class RsqPat<Instruction RsqInst, ValueType vt> : Pat <
  (AMDGPUrcp (fsqrt vt:$src)),
  (RsqInst $src)
>;

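// RcpPat folds 1.0 / x into a single hardware reciprocal, and RsqPat folds
// rcp(sqrt(x)) into a reciprocal square root, e.g. (instruction names are
// hypothetical):
//
//   def : RcpPat<RECIP_IEEE, f32>;
//   def : RsqPat<RECIPSQRT_IEEE, f32>;
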
include "R600Instructions.td"
include "R700Instructions.td"
include "EvergreenInstructions.td"
include "CaymanInstructions.td"

include "SIInstrInfo.td"