//===- IntrinsicsAArch64.td - Defines AArch64 intrinsics ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the AArch64-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".

// Vector Absolute Compare (Floating Point)
def int_aarch64_neon_vacgeq :
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
def int_aarch64_neon_vacgtq :
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

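// For reference: with the "aarch64" TargetPrefix above, the two defs map to
// the IR declarations below (illustrative rendering of the TableGen types):
//
//   declare <2 x i64> @llvm.aarch64.neon.vacgeq(<2 x double>, <2 x double>)
//   declare <2 x i64> @llvm.aarch64.neon.vacgtq(<2 x double>, <2 x double>)
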
// Vector maxNum (Floating Point)
def int_aarch64_neon_vmaxnm : Neon_2Arg_Intrinsic;

// Vector minNum (Floating Point)
def int_aarch64_neon_vminnm : Neon_2Arg_Intrinsic;

// Vector Pairwise maxNum (Floating Point)
def int_aarch64_neon_vpmaxnm : Neon_2Arg_Intrinsic;

// Vector Pairwise minNum (Floating Point)
def int_aarch64_neon_vpminnm : Neon_2Arg_Intrinsic;

// Vector Multiply Extended (Floating Point)
def int_aarch64_neon_vmulx : Neon_2Arg_Intrinsic;

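// Note: Neon_2Arg_Intrinsic is not defined in this file. A minimal sketch of
// the expected shape, assuming the usual overloaded two-operand NEON helper
// (kept as a comment so the class is not redefined here):
//
//   class Neon_2Arg_Intrinsic
//     : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
//                 [IntrNoMem]>;
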
class Neon_N2V_Intrinsic
  : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_i32_ty],
              [IntrNoMem]>;
class Neon_N3V_Intrinsic
  : Intrinsic<[llvm_anyvector_ty],
              [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
              [IntrNoMem]>;
class Neon_N2V_Narrow_Intrinsic
  : Intrinsic<[llvm_anyvector_ty],
              [LLVMExtendedElementVectorType<0>, llvm_i32_ty],
              [IntrNoMem]>;

// Vector rounding shift right by immediate (Signed, Unsigned)
def int_aarch64_neon_vsrshr : Neon_N2V_Intrinsic;
def int_aarch64_neon_vurshr : Neon_N2V_Intrinsic;

// Vector saturating shift left unsigned by immediate (Signed input)
def int_aarch64_neon_vsqshlu : Neon_N2V_Intrinsic;

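// Illustrative instantiation: the classes above use llvm_anyvector_ty, so each
// def is overloaded and concrete uses carry a vector-type suffix. Assuming a
// <4 x i16> use of vsrshr for illustration, the resulting declaration would be:
//
//   declare <4 x i16> @llvm.aarch64.neon.vsrshr.v4i16(<4 x i16>, i32)
//
// where the i32 operand is the immediate shift amount.
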
// Vector shift right/left by immediate and insert (SRI, SLI)
def int_aarch64_neon_vsri : Neon_N3V_Intrinsic;
def int_aarch64_neon_vsli : Neon_N3V_Intrinsic;

// Vector narrowing shift right by immediate (rounding and saturating variants)
def int_aarch64_neon_vsqshrun : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vrshrn : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vsqrshrun : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vsqshrn : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vuqshrn : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vsqrshrn : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vuqrshrn : Neon_N2V_Narrow_Intrinsic;

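// Illustrative instantiation of Neon_N2V_Narrow_Intrinsic: the source vector
// has elements twice as wide as the result (LLVMExtendedElementVectorType<0>),
// so, assuming an <8 x i8> result for illustration, vsqshrn would be:
//
//   declare <8 x i8> @llvm.aarch64.neon.vsqshrn.v8i8(<8 x i16>, i32)
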
// Scalar Add
def int_aarch64_neon_vaddds :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
def int_aarch64_neon_vadddu :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;

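// For reference: the scalar forms operate on single-element vectors, e.g.
//
//   declare <1 x i64> @llvm.aarch64.neon.vaddds(<1 x i64>, <1 x i64>)
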
// Scalar Saturating Add (Signed, Unsigned)
def int_aarch64_neon_vqadds : Neon_2Arg_Intrinsic;
def int_aarch64_neon_vqaddu : Neon_2Arg_Intrinsic;

// Scalar Sub
def int_aarch64_neon_vsubds :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
def int_aarch64_neon_vsubdu :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;

// Scalar Saturating Sub (Signed, Unsigned)
def int_aarch64_neon_vqsubs : Neon_2Arg_Intrinsic;
def int_aarch64_neon_vqsubu : Neon_2Arg_Intrinsic;

// Scalar Shift
// Scalar Shift Left
def int_aarch64_neon_vshlds :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
def int_aarch64_neon_vshldu :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;

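// For reference: both scalar shift-left forms take the value and the shift
// amount as <1 x i64> operands, e.g. (illustrative declaration):
//
//   declare <1 x i64> @llvm.aarch64.neon.vshlds(<1 x i64>, <1 x i64>)
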
// Scalar Saturating Shift Left
def int_aarch64_neon_vqshls : Neon_2Arg_Intrinsic;
def int_aarch64_neon_vqshlu : Neon_2Arg_Intrinsic;

// Scalar Rounding Shift Left
def int_aarch64_neon_vrshlds :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
def int_aarch64_neon_vrshldu :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;

// Scalar Saturating Rounding Shift Left
def int_aarch64_neon_vqrshls : Neon_2Arg_Intrinsic;
def int_aarch64_neon_vqrshlu : Neon_2Arg_Intrinsic;

// Scalar Reduce Pairwise Add.
def int_aarch64_neon_vpadd :
  Intrinsic<[llvm_v1i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfadd :
  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfaddq :
  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

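// For reference: the pairwise reductions take a two-element vector and return
// a one-element vector holding the reduced value, e.g. (illustrative):
//
//   declare <1 x float> @llvm.aarch64.neon.vpfadd(<2 x float>)
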
// Scalar Reduce Pairwise Floating Point Max/Min.
def int_aarch64_neon_vpmax :
  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
def int_aarch64_neon_vpmaxq :
  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
def int_aarch64_neon_vpmin :
  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
def int_aarch64_neon_vpminq :
  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

// Scalar Reduce Pairwise Floating Point Maxnm/Minnm.
def int_aarch64_neon_vpfmaxnm :
  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfmaxnmq :
  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfminnm :
  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfminnmq :
  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
}