//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  // Expose the register load/store markers in the top two TSFlags bits so
  // they can be queried on a MachineInstr.
  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
    : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;

}

def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget.hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;

let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"

//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget : Operand<OtherVT>;

//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//
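
// Each leaf below matches the condition-code operand of a setcc/select_cc
// node. The ordered leaves also accept the plain form (e.g. SETEQ as well
// as SETOEQ), so a single instruction pattern can cover both.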

def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_ONE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETONE || N->get() == ISD::SETNE;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;

def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//

def COND_UEQ : PatLeaf <(cond), [{return N->get() == ISD::SETUEQ;}]>;
def COND_UNE : PatLeaf <(cond), [{return N->get() == ISD::SETUNE;}]>;
def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

// XXX - For some reason the R600 version prefers to use unordered
// not-equal for this comparison.
def COND_UNE_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//
def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

// Never matches a real condition code; used where a condition operand is
// required but ignored.
def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//
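
// The fragments in this section wrap generic load/store/atomic nodes with
// an address space check, so instruction patterns can select a different
// opcode for each memory region (global, constant, local, private, flat).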

class PrivateMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
}]>;

class PrivateLoad <SDPatternOperator op> : PrivateMemOp <
  (ops node:$ptr), (op node:$ptr)
>;

class PrivateStore <SDPatternOperator op> : PrivateMemOp <
  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;

def load_private : PrivateLoad <load>;

def truncstorei8_private : PrivateStore <truncstorei8>;
def truncstorei16_private : PrivateStore <truncstorei16>;
def store_private : PrivateStore <store>;

def global_store : PatFrag<(ops node:$val, node:$ptr),
    (store node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

// Global address space loads
def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

// Constant address space loads
def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

class AZExtLoadBase <SDPatternOperator ld_node> : PatFrag<(ops node:$ptr),
                                                          (ld_node node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;

def az_extload : AZExtLoadBase <unindexedload>;
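
// "az" = anyext or zext. An EXTLOAD leaves the high bits undefined, so it
// is always safe to select it as a zero-extending load.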

def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_flat : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_flat : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def extloadi8_private : PrivateLoad <az_extloadi8>;
def sextloadi8_private : PrivateLoad <sextloadi8>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_flat : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_flat : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def extloadi16_private : PrivateLoad <az_extloadi16>;
def sextloadi16_private : PrivateLoad <sextloadi16>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def az_extloadi32_global : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_flat : PatFrag<(ops node:$ptr),
                                 (az_extloadi32 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_constant : PatFrag<(ops node:$ptr),
                                     (az_extloadi32 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
    (truncstorei8 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
    (truncstorei16 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_flat : PatFrag<(ops node:$val, node:$ptr),
    (truncstorei8 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_flat : PatFrag<(ops node:$val, node:$ptr),
    (truncstorei16 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_store : PatFrag<(ops node:$val, node:$ptr),
    (store node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
    (truncstorei8 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
    (truncstorei16 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
}]>;

def local_load_aligned8bytes : Aligned8Bytes <
  (ops node:$ptr), (local_load node:$ptr)
>;

def local_store_aligned8bytes : Aligned8Bytes <
  (ops node:$val, node:$ptr), (local_store node:$val, node:$ptr)
>;
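
// An 8-byte-aligned local access can be selected as a single 64-bit DS
// operation instead of two 32-bit ones.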

class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;

def mskor_global : PatFrag<(ops node:$val, node:$ptr),
    (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

multiclass AtomicCmpSwapLocal <SDNode cmp_swap_node> {

  def _32_local : PatFrag <
    (ops node:$ptr, node:$cmp, node:$swap),
    (cmp_swap_node node:$ptr, node:$cmp, node:$swap), [{
      AtomicSDNode *AN = cast<AtomicSDNode>(N);
      return AN->getMemoryVT() == MVT::i32 &&
             AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
  }]>;

  def _64_local : PatFrag<
    (ops node:$ptr, node:$cmp, node:$swap),
    (cmp_swap_node node:$ptr, node:$cmp, node:$swap), [{
      AtomicSDNode *AN = cast<AtomicSDNode>(N);
      return AN->getMemoryVT() == MVT::i64 &&
             AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
  }]>;

}

defm atomic_cmp_swap : AtomicCmpSwapLocal <atomic_cmp_swap>;
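
// The defm above expands to the atomic_cmp_swap_32_local and
// atomic_cmp_swap_64_local fragments.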

def flat_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def flat_store : PatFrag<(ops node:$val, node:$ptr),
    (store node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
    (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
}]>;

class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
  (ops node:$ptr, node:$value),
  (atomic_op node:$ptr, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]
>;

def atomic_swap_global : global_binary_atomic_op<atomic_swap>;
def atomic_add_global : global_binary_atomic_op<atomic_load_add>;
def atomic_and_global : global_binary_atomic_op<atomic_load_and>;
def atomic_max_global : global_binary_atomic_op<atomic_load_max>;
def atomic_min_global : global_binary_atomic_op<atomic_load_min>;
def atomic_or_global : global_binary_atomic_op<atomic_load_or>;
def atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
def atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
def atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
def atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;

//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//

// IEEE-754 single-precision bit patterns for commonly used constants.
class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
int FP32_NEG_ONE = 0xbf800000;
int FP32_ONE = 0x3f800000;
}
def CONST : Constants;

def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

def FP_HALF : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(0.5);}]
>;

let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1 in {

// Pseudo instructions expanded by a custom inserter. CLAMP clamps its
// input to [0.0, 1.0].
class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "CLAMP $dst, $src0",
  [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;

class FABS <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FABS $dst, $src0",
  [(set f32:$dst, (fabs f32:$src0))]
>;

class FNEG <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FNEG $dst, $src0",
  [(set f32:$dst, (fneg f32:$src0))]
>;

} // usesCustomInserter = 1

multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
                              ComplexPattern addrPat> {
let UseNamedOperandTable = 1 in {

  def RegisterLoad : AMDGPUShaderInst <
    (outs dstClass:$dst),
    (ins addrClass:$addr, i32imm:$chan),
    "RegisterLoad $dst, $addr",
    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
  > {
    let isRegisterLoad = 1;
  }

  def RegisterStore : AMDGPUShaderInst <
    (outs),
    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
    "RegisterStore $val, $addr",
    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
  > {
    let isRegisterStore = 1;
  }
}
}

} // End isCodeGenOnly = 1, isPseudo = 1

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

// Expands fpow via the identity pow(x, y) = exp(y * log(x)), using the
// target's IEEE log/exp instructions.
class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : Pat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;

/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : Pat<
  (sub_type (vector_extract vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : Pat <
  (vector_insert vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;
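
// These are instantiated per vector type elsewhere; an illustrative
// (hypothetical) use:
//   def : Extract_Element <f32, v4f32, 0, sub0>;
//   def : Insert_Element <f32, v4f32, 0, sub0>;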

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

// BFI_INT patterns

multiclass BFIPatterns <Instruction BFI_INT,
                        Instruction LoadImm32,
                        RegisterClass RC64> {
  // Definition from ISA doc:
  // (y & x) | (z & ~x)
  def : Pat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // SHA-256 Ch function
  // z ^ (x & (y ^ z))
  def : Pat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

  // fcopysign: the 0x7fffffff mask keeps the magnitude of $src0 and takes
  // the sign bit from $src1.
  def : Pat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
  >;

  // f64 copysign only needs to rewrite the high word; the low word of
  // $src0 passes through unchanged.
  def : Pat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 0x7fffffff),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;
}

// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
>;
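
// Bitwise check of the rewrite: where x == y the (x ^ y) select mask is 0
// and BFI_INT picks y, the majority value; where x != y it picks z, which
// is again the majority of {x, y, z}.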

// Bitfield extract patterns

def IMMZeroBasedBitfieldMask : PatLeaf <(imm), [{
  return isMask_32(N->getZExtValue());
}]>;

def IMMPopCount : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(countPopulation(N->getZExtValue()), SDLoc(N),
                                   MVT::i32);
}]>;

class BFEPattern <Instruction BFE, Instruction MOV> : Pat <
  (i32 (and (i32 (srl i32:$src, i32:$rshift)), IMMZeroBasedBitfieldMask:$mask)),
  (BFE $src, $rshift, (MOV (i32 (IMMPopCount $mask))))
>;
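
// i.e. (src >> rshift) & mask, with mask a contiguous run of low bits,
// becomes a single bitfield extract; the popcount of the mask recovers the
// field width operand.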

// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;

// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

// Special conversion patterns

def cvt_rpi_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
  [{ (void) N; return TM.Options.NoNaNsFPMath; }]
>;

def cvt_flr_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor $src)),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;
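
// These allow a floor (optionally preceded by a +0.5 round) to be folded
// into the conversion instruction itself. The fold is gated on
// NoNaNsFPMath, presumably because the folded and separate forms can
// differ on NaN inputs.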

class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;

class IMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

class UMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_imad24 : Pat <
    (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_imul24 : Pat <
    (AMDGPUmul_i24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_umad24 : Pat <
    (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_umul24 : Pat <
    (AMDGPUmul_u24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}
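
// For targets without native 24-bit multiply/mad instructions, these
// expand the 24-bit nodes back into ordinary 32-bit multiply/add
// sequences.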

class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

class RsqPat<Instruction RsqInst, ValueType vt> : Pat <
  (AMDGPUrcp (fsqrt vt:$src)),
  (RsqInst $src)
>;
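
// Folding 1.0/x or rcp(sqrt(x)) into a single instruction relaxes
// precision, so targets typically apply these patterns only under a
// fast-math check (e.g. the UnsafeFPMath predicate defined above).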

include "R600Instructions.td"
include "R700Instructions.td"
include "EvergreenInstructions.td"
include "CaymanInstructions.td"

include "SIInstrInfo.td"