Add 'musttail' marker to call instructions
diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td
index a2f49050d60ee6d3e6ef95b9c7e95fbeb9c77a3c..61c0e5d419f4b13810b4dbecc370bb9a591c37d4 100644
--- a/include/llvm/IR/IntrinsicsAArch64.td
+++ b/include/llvm/IR/IntrinsicsAArch64.td
@@ -22,6 +22,27 @@ def int_aarch64_neon_vacgeq :
 def int_aarch64_neon_vacgtq :
   Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
 
+// Vector saturating accumulate
+def int_aarch64_neon_suqadd : Neon_2Arg_Intrinsic;
+def int_aarch64_neon_usqadd : Neon_2Arg_Intrinsic;
+
+// Vector Bitwise reverse
+def int_aarch64_neon_rbit : Neon_1Arg_Intrinsic;
+
+// Vector extract and narrow
+def int_aarch64_neon_xtn : 
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+// Vector floating-point convert
+def int_aarch64_neon_frintn : Neon_1Arg_Intrinsic;
+def int_aarch64_neon_fsqrt : Neon_1Arg_Intrinsic;
+def int_aarch64_neon_vcvtxn :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtzs :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtzu :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
 // Vector maxNum (Floating Point)
 def int_aarch64_neon_vmaxnm : Neon_2Arg_Intrinsic;
 
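Illustration only, not part of the patch: the conversion and narrowing intrinsics added above are overloaded (llvm_anyvector_ty), so a front end names a concrete instance from IR. The sketch below assumes the default TableGen naming (the int_ prefix becomes llvm. and the overloaded result/operand types are appended as suffixes); the exact suffix spelling is an assumption.

; --- illustrative IR sketch (names/suffixes assumed, not from the patch) ---
declare <2 x i32> @llvm.aarch64.neon.vcvtzs.v2i32.v2f32(<2 x float>)
declare <4 x i16> @llvm.aarch64.neon.xtn.v4i16.v4i32(<4 x i32>)

define <2 x i32> @cvt_to_fixed(<2 x float> %a) {
  ; convert each float lane to a signed integer, rounding toward zero
  %r = call <2 x i32> @llvm.aarch64.neon.vcvtzs.v2i32.v2f32(<2 x float> %a)
  ret <2 x i32> %r
}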
@@ -34,8 +55,9 @@ def int_aarch64_neon_vpmaxnm : Neon_2Arg_Intrinsic;
 // Vector Pairwise minNum (Floating Point)
 def int_aarch64_neon_vpminnm : Neon_2Arg_Intrinsic;
 
-// Vector Multiply Extended (Floating Point)
-def int_aarch64_neon_vmulx : Neon_2Arg_Intrinsic;
+// Vector Multiply Extended and Scalar Multiply Extended (Floating Point)
+def int_aarch64_neon_vmulx  :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>]>;
 
 class Neon_N2V_Intrinsic
   : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_i32_ty],
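Not part of the patch, a sketch of what the widened vmulx signature permits: with the result and both operands declared llvm_anyfloat_ty and tied together by LLVMMatchType<0>, both scalar and vector instances can be requested from IR (type suffixes assumed per the default mangling).

; --- illustrative IR sketch (suffixes assumed) ---
declare double       @llvm.aarch64.neon.vmulx.f64(double, double)
declare <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double>, <2 x double>)

define double @fmulx_scalar(double %a, double %b) {
  ; scalar multiply-extended on a single double
  %r = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %b)
  ret double %r
}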
@@ -46,7 +68,7 @@ class Neon_N3V_Intrinsic
               [IntrNoMem]>;
 class Neon_N2V_Narrow_Intrinsic
   : Intrinsic<[llvm_anyvector_ty],
-              [LLVMExtendedElementVectorType<0>, llvm_i32_ty],
+              [LLVMExtendedType<0>, llvm_i32_ty],
               [IntrNoMem]>;
 
 // Vector rounding shift right by immediate (Signed)
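As an aside (not in the patch): Neon_N2V_Narrow_Intrinsic now ties the source operand to the extended (double element width) form of the overloaded result, so a narrowing shift instance could look like the following. The vuqrshrn name is taken from the surrounding context; the .v8i8 suffix is an assumption.

; --- illustrative IR sketch (suffix assumed) ---
declare <8 x i8> @llvm.aarch64.neon.vuqrshrn.v8i8(<8 x i16>, i32)

define <8 x i8> @narrow_shift(<8 x i16> %a) {
  ; shift each 16-bit lane right by 3, saturate, and narrow to 8 bits
  %r = call <8 x i8> @llvm.aarch64.neon.vuqrshrn.v8i8(<8 x i16> %a, i32 3)
  ret <8 x i8> %r
}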
@@ -69,9 +91,6 @@ def int_aarch64_neon_vuqrshrn : Neon_N2V_Narrow_Intrinsic;
 class Neon_Across_Intrinsic
   : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 
-class Neon_2Arg_Across_Float_Intrinsic
-  : Intrinsic<[llvm_anyvector_ty], [llvm_v4f32_ty], [IntrNoMem]>;
-
 def int_aarch64_neon_saddlv : Neon_Across_Intrinsic;
 def int_aarch64_neon_uaddlv : Neon_Across_Intrinsic;
 def int_aarch64_neon_smaxv  : Neon_Across_Intrinsic;
@@ -79,10 +98,86 @@ def int_aarch64_neon_umaxv  : Neon_Across_Intrinsic;
 def int_aarch64_neon_sminv  : Neon_Across_Intrinsic;
 def int_aarch64_neon_uminv  : Neon_Across_Intrinsic;
 def int_aarch64_neon_vaddv  : Neon_Across_Intrinsic;
-def int_aarch64_neon_vmaxv  : Neon_Across_Intrinsic;
-def int_aarch64_neon_vminv  : Neon_Across_Intrinsic;
-def int_aarch64_neon_vmaxnmv : Neon_Across_Intrinsic;
-def int_aarch64_neon_vminnmv : Neon_Across_Intrinsic;
+def int_aarch64_neon_vmaxv :
+  Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vminv :
+  Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vmaxnmv :
+  Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vminnmv :
+  Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+// Vector Table Lookup.
+def int_aarch64_neon_vtbl1 :
+  Intrinsic<[llvm_anyvector_ty],
+            [llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
+
+def int_aarch64_neon_vtbl2 :
+  Intrinsic<[llvm_anyvector_ty],
+            [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
+            [IntrNoMem]>;
+
+def int_aarch64_neon_vtbl3 :
+  Intrinsic<[llvm_anyvector_ty],
+            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+            LLVMMatchType<0>], [IntrNoMem]>;
+
+def int_aarch64_neon_vtbl4 :
+  Intrinsic<[llvm_anyvector_ty],
+            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+            llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
+
+// Vector Table Extension.
+// Some elements of the destination vector may not be updated, so the original
+// value of that vector is passed as the first argument.  The next 1-4
+// arguments after that are the table.
+def int_aarch64_neon_vtbx1 :
+  Intrinsic<[llvm_anyvector_ty],
+            [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
+            [IntrNoMem]>;
+
+def int_aarch64_neon_vtbx2 :
+  Intrinsic<[llvm_anyvector_ty],
+            [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+             LLVMMatchType<0>], [IntrNoMem]>;
+
+def int_aarch64_neon_vtbx3 :
+  Intrinsic<[llvm_anyvector_ty],
+            [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+             llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
+
+def int_aarch64_neon_vtbx4 :
+  Intrinsic<[llvm_anyvector_ty],
+            [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+             llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
+            [IntrNoMem]>;
+
+// Vector Load/store
+def int_aarch64_neon_vld1x2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+                                        [llvm_ptr_ty, llvm_i32_ty],
+                                        [IntrReadArgMem]>;
+def int_aarch64_neon_vld1x3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+                                         LLVMMatchType<0>],
+                                        [llvm_ptr_ty, llvm_i32_ty],
+                                        [IntrReadArgMem]>;
+def int_aarch64_neon_vld1x4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+                                         LLVMMatchType<0>, LLVMMatchType<0>],
+                                        [llvm_ptr_ty, llvm_i32_ty],
+                                        [IntrReadArgMem]>;
+
+def int_aarch64_neon_vst1x2 : Intrinsic<[],
+                                        [llvm_ptr_ty, llvm_anyvector_ty,
+                                         LLVMMatchType<0>, llvm_i32_ty],
+                                        [IntrReadWriteArgMem]>;
+def int_aarch64_neon_vst1x3 : Intrinsic<[],
+                                        [llvm_ptr_ty, llvm_anyvector_ty,
+                                         LLVMMatchType<0>, LLVMMatchType<0>,
+                                         llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_aarch64_neon_vst1x4 : Intrinsic<[],
+                                        [llvm_ptr_ty, llvm_anyvector_ty,
+                                         LLVMMatchType<0>, LLVMMatchType<0>,
+                                         LLVMMatchType<0>, llvm_i32_ty],
+                                        [IntrReadWriteArgMem]>;
 
 // Scalar Add
 def int_aarch64_neon_vaddds :
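Illustration only, not part of the patch. The table-lookup intrinsics above take the table as one to four v16i8 values followed by the index vector, and the vld1x2/vst1x2 family returns or consumes several vectors of one overloaded type plus a pointer and an alignment operand. A possible IR instance follows; the overload suffixes and the i8* pointer type are assumptions based on the typed-pointer IR of the time.

; --- illustrative IR sketch (suffixes and pointer type assumed) ---
declare <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8(<16 x i8>, <8 x i8>)
declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)

define <8 x i8> @tbl1(<16 x i8> %table, <8 x i8> %idx) {
  ; look up each index in the 16-byte table; out-of-range indices yield 0
  %r = call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8(<16 x i8> %table, <8 x i8> %idx)
  ret <8 x i8> %r
}

define void @copy_pair(i8* %src, i8* %dst) {
  ; load two consecutive q-registers, then store them back out (alignment 1)
  %p  = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %src, i32 1)
  %lo = extractvalue { <16 x i8>, <16 x i8> } %p, 0
  %hi = extractvalue { <16 x i8>, <16 x i8> } %p, 1
  call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %dst, <16 x i8> %lo, <16 x i8> %hi, i32 1)
  ret void
}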
@@ -90,9 +185,6 @@ def int_aarch64_neon_vaddds :
 def int_aarch64_neon_vadddu :
   Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
 
-// Scalar Saturating Add (Signed, Unsigned)
-def int_aarch64_neon_vqadds : Neon_2Arg_Intrinsic;
-def int_aarch64_neon_vqaddu : Neon_2Arg_Intrinsic;
 
 // Scalar Sub
 def int_aarch64_neon_vsubds :
@@ -100,9 +192,6 @@ def int_aarch64_neon_vsubds :
 def int_aarch64_neon_vsubdu :
   Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
 
-// Scalar Saturating Sub (Signed, Unsigned)
-def int_aarch64_neon_vqsubs : Neon_2Arg_Intrinsic;
-def int_aarch64_neon_vqsubu : Neon_2Arg_Intrinsic;
 
 // Scalar Shift
 // Scalar Shift Left
@@ -129,66 +218,190 @@ def int_aarch64_neon_vqrshlu : Neon_2Arg_Intrinsic;
 def int_aarch64_neon_vpadd :
   Intrinsic<[llvm_v1i64_ty], [llvm_v2i64_ty],[IntrNoMem]>;
 def int_aarch64_neon_vpfadd :
-  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpfaddq :
-  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 
 // Scalar Reduce Pairwise Floating Point Max/Min.
 def int_aarch64_neon_vpmax :
-  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpmaxq :
-  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 def int_aarch64_neon_vpmin :
-  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpminq :
-  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 
 // Scalar Reduce Pairwise Floating Point Maxnm/Minnm.
 def int_aarch64_neon_vpfmaxnm :
-  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpfmaxnmq :
-  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 def int_aarch64_neon_vpfminnm :
-  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpfminnmq :
-  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 
 // Scalar Signed Integer Convert To Floating-point
-def int_aarch64_neon_vcvtf32_s32 :
-  Intrinsic<[llvm_v1f32_ty], [llvm_v1i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtf64_s64 :
-  Intrinsic<[llvm_v1f64_ty], [llvm_v1i64_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtint2fps :
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 
 // Scalar Unsigned Integer Convert To Floating-point
-def int_aarch64_neon_vcvtf32_u32 :
-  Intrinsic<[llvm_v1f32_ty], [llvm_v1i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtf64_u64 :
-  Intrinsic<[llvm_v1f64_ty], [llvm_v1i64_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtint2fpu :
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+// Scalar Floating-point Convert
+def int_aarch64_neon_fcvtxn :
+  Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtns : 
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtnu :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtps :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtpu :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtms :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtmu :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtas :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtau :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtzs :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtzu :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+
+// Scalar Floating-point Reciprocal Estimate.
+def int_aarch64_neon_vrecpe :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
 
 // Scalar Floating-point Reciprocal Exponent
-def int_aarch64_neon_vrecpx : Neon_1Arg_Intrinsic;
+def int_aarch64_neon_vrecpx :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
+// Scalar Floating-point Reciprocal Square Root Estimate
+def int_aarch64_neon_vrsqrte :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
+// Scalar Floating-point Reciprocal Step
+def int_aarch64_neon_vrecps :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+            [IntrNoMem]>;
+
+// Scalar Floating-point Reciprocal Square Root Step
+def int_aarch64_neon_vrsqrts :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+            [IntrNoMem]>;
+
+// Compare with vector operands.
+class Neon_Cmp_Intrinsic :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyvector_ty],
+            [IntrNoMem]>;
+
+// Floating-point compare with scalar operands.
+class Neon_Float_Cmp_Intrinsic :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty, llvm_anyfloat_ty],
+            [IntrNoMem]>;
+
+// Scalar Compare Equal
+def int_aarch64_neon_vceq : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fceq : Neon_Float_Cmp_Intrinsic;
+
+// Scalar Compare Greater-Than or Equal
+def int_aarch64_neon_vcge : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_vchs : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcge : Neon_Float_Cmp_Intrinsic;
+def int_aarch64_neon_fchs : Neon_Float_Cmp_Intrinsic;
+
+// Scalar Compare Less-Than or Equal
+def int_aarch64_neon_vclez : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fclez : Neon_Float_Cmp_Intrinsic;
 
-class Neon_ICmp_Intrinsic
-  : Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
+// Scalar Compare Less-Than
+def int_aarch64_neon_vcltz : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcltz : Neon_Float_Cmp_Intrinsic;
 
-// Scalar Integer Compare Equal
-def int_aarch64_neon_vceq : Neon_ICmp_Intrinsic;
+// Scalar Compare Greater-Than
+def int_aarch64_neon_vcgt : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_vchi : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcgt : Neon_Float_Cmp_Intrinsic;
+def int_aarch64_neon_fchi : Neon_Float_Cmp_Intrinsic;
 
-// Scalar Integer Compare Greater-Than or Equal
-def int_aarch64_neon_vcge : Neon_ICmp_Intrinsic;
-def int_aarch64_neon_vchs : Neon_ICmp_Intrinsic;
+// Scalar Compare Bitwise Test Bits
+def int_aarch64_neon_vtstd : Neon_Cmp_Intrinsic;
 
-// Scalar Integer Compare Less-Than or Equal
-def int_aarch64_neon_vclez : Neon_ICmp_Intrinsic;
+// Scalar Floating-point Absolute Compare Greater Than Or Equal
+def int_aarch64_neon_vcage : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcage : Neon_Float_Cmp_Intrinsic;
 
-// Scalar Compare Less-Than
-def int_aarch64_neon_vcltz : Neon_ICmp_Intrinsic;
+// Scalar Floating-point Absolute Compare Greater Than
+def int_aarch64_neon_vcagt : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcagt : Neon_Float_Cmp_Intrinsic;
 
-// Scalar Compare Greater-Than
-def int_aarch64_neon_vcgt : Neon_ICmp_Intrinsic;
-def int_aarch64_neon_vchi : Neon_ICmp_Intrinsic;
+// Scalar Signed Saturating Accumulated of Unsigned Value
+def int_aarch64_neon_vuqadd : Neon_2Arg_Intrinsic;
 
-// Scalar Compare Bitwise Test Bits
-def int_aarch64_neon_vtstd : Neon_ICmp_Intrinsic;
+// Scalar Unsigned Saturating Accumulated of Signed Value
+def int_aarch64_neon_vsqadd : Neon_2Arg_Intrinsic;
+
+// Scalar Absolute Value
+def int_aarch64_neon_vabs :
+  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>;
+
+// Scalar Absolute Difference
+def int_aarch64_neon_vabd :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+            [IntrNoMem]>;
+
+// Scalar Negate Value
+def int_aarch64_neon_vneg :
+  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>;
+
+// Signed Saturating Doubling Multiply-Add Long
+def int_aarch64_neon_vqdmlal : Neon_3Arg_Long_Intrinsic;
+
+// Signed Saturating Doubling Multiply-Subtract Long
+def int_aarch64_neon_vqdmlsl : Neon_3Arg_Long_Intrinsic;
+
+def int_aarch64_neon_vmull_p64 :
+  Intrinsic<[llvm_v16i8_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
+
+class Neon_2Arg_ShiftImm_Intrinsic
+  : Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+class Neon_3Arg_ShiftImm_Intrinsic
+  : Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_i32_ty],
+              [IntrNoMem]>;
+
+// Scalar Shift Right (Immediate)
+def int_aarch64_neon_vshrds_n : Neon_2Arg_ShiftImm_Intrinsic;
+def int_aarch64_neon_vshrdu_n : Neon_2Arg_ShiftImm_Intrinsic;
+
+// Scalar Shift Right and Accumulate (Immediate)
+def int_aarch64_neon_vsrads_n : Neon_3Arg_ShiftImm_Intrinsic;
+def int_aarch64_neon_vsradu_n : Neon_3Arg_ShiftImm_Intrinsic;
+
+// Scalar Rounding Shift Right and Accumulate (Immediate)
+def int_aarch64_neon_vrsrads_n : Neon_3Arg_ShiftImm_Intrinsic;
+def int_aarch64_neon_vrsradu_n : Neon_3Arg_ShiftImm_Intrinsic;
+
+// Scalar Shift Left (Immediate)
+def int_aarch64_neon_vshld_n : Neon_2Arg_ShiftImm_Intrinsic;
+
+// Scalar Saturating Shift Left (Immediate)
+def int_aarch64_neon_vqshls_n : Neon_N2V_Intrinsic;
+def int_aarch64_neon_vqshlu_n : Neon_N2V_Intrinsic;
+
+// Scalar Signed Saturating Shift Left Unsigned (Immediate)
+def int_aarch64_neon_vqshlus_n : Neon_N2V_Intrinsic;
+
+// Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
+def int_aarch64_neon_vcvtfxs2fp_n :
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
+def int_aarch64_neon_vcvtfxu2fp_n :
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
+def int_aarch64_neon_vcvtfp2fxs_n :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
+def int_aarch64_neon_vcvtfp2fxu_n :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
 
 }
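A final illustration, not part of the patch: the scalar shift-immediate classes fix the types to <1 x i64> plus an i32 shift amount, so no overload suffix is needed. The sketch assumes the default TableGen naming in which the trailing _n becomes .n, and assumes the accumulator is the first of the two vector operands.

; --- illustrative IR sketch (the .n spelling and operand order are assumptions) ---
declare <1 x i64> @llvm.aarch64.neon.vshrds.n(<1 x i64>, i32)
declare <1 x i64> @llvm.aarch64.neon.vsrads.n(<1 x i64>, <1 x i64>, i32)

define <1 x i64> @shr_and_acc(<1 x i64> %acc, <1 x i64> %v) {
  ; plain signed shift right by 63
  %s = call <1 x i64> @llvm.aarch64.neon.vshrds.n(<1 x i64> %v, i32 63)
  ; shift %s right by 63 and add the result into %acc
  %r = call <1 x i64> @llvm.aarch64.neon.vsrads.n(<1 x i64> %acc, <1 x i64> %s, i32 63)
  ret <1 x i64> %r
}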