//===- IntrinsicsAARCH64.td - Defines AARCH64 intrinsics ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the AARCH64-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "aarch64" in {

def int_aarch64_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
            Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;

def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
def int_aarch64_stlxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;

def int_aarch64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
def int_aarch64_ldaxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
def int_aarch64_stxp : Intrinsic<[llvm_i32_ty],
                                 [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;
def int_aarch64_stlxp : Intrinsic<[llvm_i32_ty],
                                  [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;

def int_aarch64_clrex : Intrinsic<[]>;
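
// Illustrative use (a sketch, not part of this file): a typical exclusive
// load/store retry loop, with the overloaded pointer type mangled into the
// intrinsic name as usual:
//   %old    = call i64 @llvm.aarch64.ldxr.p0i8(i8* %p)
//   ...
//   %failed = call i32 @llvm.aarch64.stxr.p0i8(i64 %new, i8* %p)
// stxr/stlxr return 0 on success and 1 if the exclusive monitor was lost, in
// which case the sequence is retried; clrex drops the monitor without storing.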

def int_aarch64_sdiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                 LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_udiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                 LLVMMatchType<0>], [IntrNoMem]>;
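
// A minimal sketch of a call, with the overloaded integer type appended to
// the name per the usual mangling:
//   %q = call i32 @llvm.aarch64.sdiv.i32(i32 %a, i32 %b)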

//===----------------------------------------------------------------------===//
// HINT

def int_aarch64_hint : Intrinsic<[], [llvm_i32_ty]>;
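
// The i32 operand is the immediate hint number, e.g. 0 for NOP, 1 for YIELD,
// 2 for WFE. An illustrative call (value chosen for the example):
//   call void @llvm.aarch64.hint(i32 1)   ; yield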

//===----------------------------------------------------------------------===//
// RBIT

def int_aarch64_rbit : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
                                 [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// Data Barrier Instructions

def int_aarch64_dmb : GCCBuiltin<"__builtin_arm_dmb">, Intrinsic<[], [llvm_i32_ty]>;
def int_aarch64_dsb : GCCBuiltin<"__builtin_arm_dsb">, Intrinsic<[], [llvm_i32_ty]>;
def int_aarch64_isb : GCCBuiltin<"__builtin_arm_isb">, Intrinsic<[], [llvm_i32_ty]>;
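
// The i32 operand encodes the barrier option (the instruction's CRm field),
// e.g. 15 is SY (full system). An illustrative call:
//   call void @llvm.aarch64.dmb(i32 15)   ; dmb sy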

}

//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_2Scalar_Float_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_FPToIntRounding_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;

  class AdvSIMD_1IntArg_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1FloatArg_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Expand_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Long_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
  class AdvSIMD_1IntArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Int_Across_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Float_Across_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;

  class AdvSIMD_2IntArg_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2FloatArg_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Compare_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_FloatCompare_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Long_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Wide_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMExtendedType<0>, LLVMExtendedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty],
                [LLVMExtendedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_3VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
                 LLVMMatchType<1>], [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_CvtFxToFP_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_CvtFPToFx_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
                [IntrNoMem]>;
}

// Arithmetic ops

let TargetPrefix = "aarch64", Properties = [IntrNoMem] in {
  // Vector Add Across Lanes
  def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Long Add Across Lanes
  def int_aarch64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;

  // Vector Halving Add
  def int_aarch64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Rounding Halving Add
  def int_aarch64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Add
  def int_aarch64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;

  // Vector Add High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Add High-Half
  def int_aarch64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Saturating Doubling Multiply High
  def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Doubling Multiply High
  def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;

  // Vector Polynomial Multiply
  def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Long Multiply
  def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;

  // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
  // it with a v16i8.
  def int_aarch64_neon_pmull64 :
          Intrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
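
  // For example, callers recover the full 128-bit product by bitcasting the
  // fake v16i8 result (an illustrative sketch, not from this file):
  //   %prod.v = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %a, i64 %b)
  //   %prod   = bitcast <16 x i8> %prod.v to i128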

  // Vector Extending Multiply
  def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
    let Properties = [IntrNoMem, Commutative];
  }

  // Vector Saturating Doubling Long Multiply
  def int_aarch64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_sqdmulls_scalar
    : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

  // Vector Halving Subtract
  def int_aarch64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Subtract
  def int_aarch64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;

  // Vector Subtract High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Subtract High-Half
  def int_aarch64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Compare Absolute Greater-than-or-equal
  def int_aarch64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Compare Absolute Greater-than
  def int_aarch64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Absolute Difference
  def int_aarch64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;

  // Scalar Absolute Difference
  def int_aarch64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;

  // Vector Max
  def int_aarch64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Max Across Lanes
  def int_aarch64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Min
  def int_aarch64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Min/Max Number
  def int_aarch64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;

  // Vector Min Across Lanes
  def int_aarch64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Pairwise Add
  def int_aarch64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;

  // Long Pairwise Add
  // FIXME: In theory, we shouldn't need intrinsics for saddlp or
  // uaddlp, but tblgen's type inference currently can't handle the
  // pattern fragments this ends up generating.
  def int_aarch64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
  def int_aarch64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
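
  // An illustrative overload: pairwise widening from <8 x i8> to <4 x i16>,
  // with both the result and source types in the mangled name:
  //   %r = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %a)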

  // Folding Maximum
  def int_aarch64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;

  // Folding Minimum
  def int_aarch64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;

  // Reciprocal Estimate/Step
  def int_aarch64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;

  // Reciprocal Exponent
  def int_aarch64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Saturating Shift Left
  def int_aarch64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Rounding Shift Left
  def int_aarch64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Shift Left
  def int_aarch64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Shift Left by Constant
  def int_aarch64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
  def int_aarch64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Narrowing Shift Right by Constant
  def int_aarch64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Shift Right by Constant
  def int_aarch64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
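
  // In these shift-right-by-constant intrinsics the trailing i32 is the
  // immediate shift amount; e.g. saturating-rounding-narrow <8 x i16> to
  // <8 x i8> by 3 (an illustrative overload):
  //   %r = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %a, i32 3)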

  // Vector Shift Left
  def int_aarch64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Widening Shift Left by Constant
  def int_aarch64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
  def int_aarch64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
  def int_aarch64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;

  // Vector Shift Right by Constant and Insert
  def int_aarch64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Shift Left by Constant and Insert
  def int_aarch64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Saturating Narrow
  def int_aarch64_neon_scalar_sqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
  def int_aarch64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Saturating Extract and Unsigned Narrow
  def int_aarch64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Absolute Value
  def int_aarch64_neon_abs : AdvSIMD_1IntArg_Intrinsic;

  // Vector Saturating Absolute Value
  def int_aarch64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;

  // Vector Saturating Negation
  def int_aarch64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;

  // Vector Count Leading Sign Bits
  def int_aarch64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Reciprocal Estimate
  def int_aarch64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Square Root Estimate
  def int_aarch64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Bitwise Reverse
  def int_aarch64_neon_rbit : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Conversions Between Half-Precision and Single-Precision.
  def int_aarch64_neon_vcvtfp2hf
    : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_aarch64_neon_vcvthf2fp
    : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;

  // Vector Conversions Between Floating-point and Fixed-point.
  def int_aarch64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
  def int_aarch64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;
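
  // The trailing i32 selects the number of fraction bits of the fixed-point
  // value; e.g. converting Q31 <2 x i32> to <2 x float> (a sketch, assuming
  // the usual two-type overload mangling):
  //   %f = call <2 x float>
  //            @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %a, i32 31)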

  // Vector FP->Int Conversions
  def int_aarch64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;

  // Vector FP Rounding: only ties to even is unrepresented by a normal
  // intrinsic.
  def int_aarch64_neon_frintn : AdvSIMD_1FloatArg_Intrinsic;

  // Scalar FP->Int conversions

  // Vector FP Inexact Narrowing
  def int_aarch64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Scalar FP Inexact Narrowing
  def int_aarch64_sisd_fcvtxn : Intrinsic<[llvm_float_ty], [llvm_double_ty],
                                          [IntrNoMem]>;
}

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_2Vector2Index_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
                [IntrNoMem]>;
}

// Vector element to element moves
def int_aarch64_neon_vcopy_lane : AdvSIMD_2Vector2Index_Intrinsic;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_1Vec_Load_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadArgMem]>;
  class AdvSIMD_1Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadWriteArgMem, NoCapture<2>]>;

  class AdvSIMD_2Vec_Load_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadArgMem]>;
  class AdvSIMD_2Vec_Load_Lane_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadArgMem]>;
  class AdvSIMD_2Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadWriteArgMem, NoCapture<2>]>;
  class AdvSIMD_2Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadWriteArgMem, NoCapture<3>]>;

  class AdvSIMD_3Vec_Load_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadArgMem]>;
  class AdvSIMD_3Vec_Load_Lane_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadArgMem]>;
  class AdvSIMD_3Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadWriteArgMem, NoCapture<3>]>;
  class AdvSIMD_3Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty,
                     LLVMMatchType<0>, LLVMMatchType<0>,
                     llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadWriteArgMem, NoCapture<4>]>;

  class AdvSIMD_4Vec_Load_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadArgMem]>;
  class AdvSIMD_4Vec_Load_Lane_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadArgMem]>;
  class AdvSIMD_4Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadWriteArgMem, NoCapture<4>]>;
  class AdvSIMD_4Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMMatchType<0>, LLVMMatchType<0>,
                     llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadWriteArgMem, NoCapture<5>]>;
}

// Memory ops

def int_aarch64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;

def int_aarch64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st2 : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st3 : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st4 : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_st2lane : AdvSIMD_2Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st3lane : AdvSIMD_3Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st4lane : AdvSIMD_4Vec_Store_Lane_Intrinsic;
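
// An ld2 sketch (not from this file): the de-interleaved pair comes back as a
// two-element struct, and both the vector and pointer types are mangled into
// the name:
//   %vv = call { <4 x i32>, <4 x i32> }
//             @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>* %p)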

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_Tbl1_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl2_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_Tbl3_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl4_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_Tbx1_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx2_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx3_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx4_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
}
def int_aarch64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
def int_aarch64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
def int_aarch64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
def int_aarch64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;

def int_aarch64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
def int_aarch64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
def int_aarch64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;
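
// The tbl/tbx table operands are always full 128-bit v16i8 registers, while
// the index/result vector may be 64- or 128-bit; e.g. (illustrative overload):
//   %r = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %tbl, <8 x i8> %idx)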

let TargetPrefix = "aarch64" in {
  class Crypto_AES_DataKey_Intrinsic
    : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

  class Crypto_AES_Data_Intrinsic
    : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;

  // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
  // (v4i32).
  class Crypto_SHA_5Hash4Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking 1 word of the hash (i32).
  class Crypto_SHA_1Hash_Intrinsic
    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the schedule
  class Crypto_SHA_8Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 12 words of the schedule
  class Crypto_SHA_12Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
  class Crypto_SHA_8Hash4Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;
}

// AES
def int_aarch64_crypto_aese   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesd   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesmc  : Crypto_AES_Data_Intrinsic;
def int_aarch64_crypto_aesimc : Crypto_AES_Data_Intrinsic;

// SHA1
def int_aarch64_crypto_sha1c  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1p  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1m  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1h  : Crypto_SHA_1Hash_Intrinsic;

def int_aarch64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
def int_aarch64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;

// SHA256
def int_aarch64_crypto_sha256h   : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256h2  : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
def int_aarch64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;
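
// A sketch of one AES round's encrypt + mix-columns pair, as produced from
// the corresponding ACLE builtins (illustrative, not from this file):
//   %x = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data, <16 x i8> %key)
//   %y = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %x)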

//===----------------------------------------------------------------------===//
// CRC32

let TargetPrefix = "aarch64" in {

def int_aarch64_crc32b  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32h  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32w  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32x  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cx : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
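
// These are not overloaded: the first operand is the running CRC and the
// second the new data (byte/halfword/word/doubleword variants). For example:
//   %crc1 = call i32 @llvm.aarch64.crc32b(i32 %crc0, i32 %byte)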