From c3e5d72ba83f607d7e1409027f7593c689fc70d0 Mon Sep 17 00:00:00 2001
From: Chad Rosier
Date: Wed, 11 Dec 2013 21:03:40 +0000
Subject: [PATCH] [AArch64] Refactor the NEON scalar floating-point reciprocal
 estimate, floating-point reciprocal exponent, and floating-point reciprocal
 square root estimate LLVM AArch64 intrinsics to use f32/f64 types, rather
 than their vector equivalents.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@197066 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/IR/IntrinsicsAArch64.td      | 11 ++++++-
 lib/Target/AArch64/AArch64InstrNEON.td    | 29 +++++++++++-------
 test/CodeGen/AArch64/neon-scalar-recip.ll | 36 ++++++++---------------
 3 files changed, 41 insertions(+), 35 deletions(-)

diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td
index 37abbe7db8a..fb31452fb23 100644
--- a/include/llvm/IR/IntrinsicsAArch64.td
+++ b/include/llvm/IR/IntrinsicsAArch64.td
@@ -260,8 +260,17 @@ def int_aarch64_neon_fcvtzs :
 def int_aarch64_neon_fcvtzu :
   Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
 
+// Scalar Floating-point Reciprocal Estimate.
+def int_aarch64_neon_vrecpe :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
 // Scalar Floating-point Reciprocal Exponent
-def int_aarch64_neon_vrecpx : Neon_1Arg_Intrinsic;
+def int_aarch64_neon_vrecpx :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
+// Scalar Floating-point Reciprocal Square Root Estimate
+def int_aarch64_neon_vrsqrte :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
 
 class Neon_Cmp_Intrinsic :
   Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyvector_ty],
             [IntrNoMem]>;
diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index 8f25818226c..9e02dc4fff1 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -4346,12 +4346,17 @@ multiclass Neon_Scalar2SameMisc_cvt_SD_size_patterns {
-  def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn))),
+  def : Pat<(f32 (opnode (f32 FPR32:$Rn))),
             (INSTS FPR32:$Rn)>;
-  def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
+  def : Pat<(f64 (opnode (f64 FPR64:$Rn))),
             (INSTD FPR64:$Rn)>;
 }
 
+class Neon_Scalar2SameMisc_V1_D_size_patterns
+  : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
+        (INSTD FPR64:$Rn)>;
+
 class NeonI_Scalar2SameMisc_cmpz_D_size opcode, string asmop>
   : NeonI_Scalar2SameMisc;
 }
 
-class Neon_ScalarShiftImm_arm_D_size_patterns
+class Neon_ScalarShiftImm_V1_D_size_patterns
   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
                 (v1i64 (Neon_vdup (i32 shr_imm64:$Imm))))),
         (INSTD FPR64:$Rn, imm:$Imm)>;
@@ -4659,13 +4664,13 @@ multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns;
 defm : Neon_ScalarShiftRImm_D_size_patterns;
 // Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftImm_arm_D_size_patterns;
+def : Neon_ScalarShiftImm_V1_D_size_patterns;
 
 // Scalar Unsigned Shift Right (Immediate)
 defm USHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00000, "ushr">;
 defm : Neon_ScalarShiftRImm_D_size_patterns;
 // Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftImm_arm_D_size_patterns;
+def : Neon_ScalarShiftImm_V1_D_size_patterns;
 
 // Scalar Signed Rounding Shift Right (Immediate)
 defm SRSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00100, "srshr">;
@@ -4699,7 +4704,7 @@ def : Neon_ScalarShiftRImm_accum_D_size_patterns
 defm SHL : NeonI_ScalarShiftLeftImm_D_size<0b0, 0b01010, "shl">;
 defm : Neon_ScalarShiftLImm_D_size_patterns;
 // Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftImm_arm_D_size_patterns;
+def : Neon_ScalarShiftImm_V1_D_size_patterns;
 
 // Signed Saturating Shift Left (Immediate)
 defm SQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b0, 0b01110, "sqshl">;
@@ -5056,8 +5061,10 @@ def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern;
 
 // Scalar Floating-point Reciprocal Estimate
 defm FRECPE : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11101, "frecpe">;
-defm : Neon_Scalar2SameMisc_SD_size_patterns;
+defm : Neon_Scalar2SameMisc_SD_size_patterns;
+def : Neon_Scalar2SameMisc_V1_D_size_patterns;
 
 // Scalar Floating-point Reciprocal Exponent
 defm FRECPX : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11111, "frecpx">;
@@ -5066,8 +5073,10 @@ defm : Neon_Scalar2SameMisc_SD_size_patterns;
-defm : Neon_Scalar2SameMisc_SD_size_patterns;
+defm : Neon_Scalar2SameMisc_SD_size_patterns;
+def : Neon_Scalar2SameMisc_V1_D_size_patterns;
 
 // Scalar Floating-point Round
 class Neon_ScalarFloatRound_pattern
diff --git a/test/CodeGen/AArch64/neon-scalar-recip.ll b/test/CodeGen/AArch64/neon-scalar-recip.ll
index f21c27bee43..bd549a86a40 100644
--- a/test/CodeGen/AArch64/neon-scalar-recip.ll
+++ b/test/CodeGen/AArch64/neon-scalar-recip.ll
@@ -50,9 +50,7 @@ define float @test_vrecpes_f32(float %a) {
 ; CHECK: test_vrecpes_f32
 ; CHECK: frecpe {{s[0-9]+}}, {{s[0-9]+}}
 entry:
-  %vrecpe.i = insertelement <1 x float> undef, float %a, i32 0
-  %vrecpe1.i = tail call <1 x float> @llvm.arm.neon.vrecpe.v1f32(<1 x float> %vrecpe.i)
-  %0 = extractelement <1 x float> %vrecpe1.i, i32 0
+  %0 = call float @llvm.aarch64.neon.vrecpe.f32(float %a)
   ret float %0
 }
 
@@ -60,22 +58,18 @@ define double @test_vrecped_f64(double %a) {
 ; CHECK: test_vrecped_f64
 ; CHECK: frecpe {{d[0-9]+}}, {{d[0-9]+}}
 entry:
-  %vrecpe.i = insertelement <1 x double> undef, double %a, i32 0
-  %vrecpe1.i = tail call <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double> %vrecpe.i)
-  %0 = extractelement <1 x double> %vrecpe1.i, i32 0
+  %0 = call double @llvm.aarch64.neon.vrecpe.f64(double %a)
   ret double %0
 }
 
-declare <1 x float> @llvm.arm.neon.vrecpe.v1f32(<1 x float>)
-declare <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double>)
+declare float @llvm.aarch64.neon.vrecpe.f32(float)
+declare double @llvm.aarch64.neon.vrecpe.f64(double)
 
 define float @test_vrecpxs_f32(float %a) {
 ; CHECK: test_vrecpxs_f32
 ; CHECK: frecpx {{s[0-9]+}}, {{s[0-9]+}}
 entry:
-  %vrecpx.i = insertelement <1 x float> undef, float %a, i32 0
-  %vrecpx1.i = tail call <1 x float> @llvm.aarch64.neon.vrecpx.v1f32(<1 x float> %vrecpx.i)
-  %0 = extractelement <1 x float> %vrecpx1.i, i32 0
+  %0 = call float @llvm.aarch64.neon.vrecpx.f32(float %a)
   ret float %0
 }
 
@@ -83,22 +77,18 @@ define double @test_vrecpxd_f64(double %a) {
 ; CHECK: test_vrecpxd_f64
 ; CHECK: frecpx {{d[0-9]+}}, {{d[0-9]+}}
 entry:
-  %vrecpx.i = insertelement <1 x double> undef, double %a, i32 0
-  %vrecpx1.i = tail call <1 x double> @llvm.aarch64.neon.vrecpx.v1f64(<1 x double> %vrecpx.i)
-  %0 = extractelement <1 x double> %vrecpx1.i, i32 0
+  %0 = call double @llvm.aarch64.neon.vrecpx.f64(double %a)
   ret double %0
 }
 
-declare <1 x float> @llvm.aarch64.neon.vrecpx.v1f32(<1 x float>)
-declare <1 x double> @llvm.aarch64.neon.vrecpx.v1f64(<1 x double>)
+declare float @llvm.aarch64.neon.vrecpx.f32(float)
+declare double @llvm.aarch64.neon.vrecpx.f64(double)
 
 define float @test_vrsqrtes_f32(float %a) {
 ; CHECK: test_vrsqrtes_f32
 ; CHECK: frsqrte {{s[0-9]+}}, {{s[0-9]+}}
 entry:
-  %vrsqrte.i = insertelement <1 x float> undef, float %a, i32 0
-  %vrsqrte1.i = tail call <1 x float> @llvm.arm.neon.vrsqrte.v1f32(<1 x float> %vrsqrte.i)
-  %0 = extractelement <1 x float> %vrsqrte1.i, i32 0
+  %0 = call float @llvm.aarch64.neon.vrsqrte.f32(float %a)
   ret float %0
 }
 
@@ -106,11 +96,9 @@ define double @test_vrsqrted_f64(double %a) {
 ; CHECK: test_vrsqrted_f64
 ; CHECK: frsqrte {{d[0-9]+}}, {{d[0-9]+}}
 entry:
-  %vrsqrte.i = insertelement <1 x double> undef, double %a, i32 0
-  %vrsqrte1.i = tail call <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double> %vrsqrte.i)
-  %0 = extractelement <1 x double> %vrsqrte1.i, i32 0
+  %0 = call double @llvm.aarch64.neon.vrsqrte.f64(double %a)
   ret double %0
 }
 
-declare <1 x float> @llvm.arm.neon.vrsqrte.v1f32(<1 x float>)
-declare <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double>)
+declare float @llvm.aarch64.neon.vrsqrte.f32(float)
+declare double @llvm.aarch64.neon.vrsqrte.f64(double)
-- 
2.34.1
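
For reference, the refactored intrinsics operate directly on scalar f32/f64 values, so callers no longer need the insertelement/extractelement wrapping around a <1 x float>/<1 x double> that the old tests used. A minimal LLVM IR sketch follows; the caller @example_rsqrtes_f32 is hypothetical, while the declaration is taken verbatim from the updated test above.

; Sketch only: calling the scalar-typed AArch64 intrinsic introduced by this patch.
declare float @llvm.aarch64.neon.vrsqrte.f32(float)

define float @example_rsqrtes_f32(float %a) {
entry:
  ; Old form: insertelement into <1 x float>, call @llvm.arm.neon.vrsqrte.v1f32,
  ; then extractelement lane 0. New form: a direct call on the scalar value.
  %est = call float @llvm.aarch64.neon.vrsqrte.f32(float %a)
  ret float %est
}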