From: Sanjay Patel Date: Wed, 22 Apr 2015 16:11:19 +0000 (+0000) Subject: [x86] Add store-folded memop patterns for vcvtps2ph X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=3f1f6571cc0ee38bc75b9b98e85e499fa2c1d536;p=oota-llvm.git [x86] Add store-folded memop patterns for vcvtps2ph Differential Revision: http://reviews.llvm.org/D7296 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@235517 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 6366b1ae46a..dfaf9c27d1e 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -8243,6 +8243,18 @@ let Predicates = [HasF16C] in { (VCVTPH2PSrm addr:$src)>; def : Pat<(int_x86_vcvtph2ps_128 (vzload_v2i64 addr:$src)), (VCVTPH2PSrm addr:$src)>; + + def : Pat<(store (f64 (vector_extract (bc_v2f64 (v8i16 + (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2))), (iPTR 0))), + addr:$dst), + (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>; + def : Pat<(store (i64 (vector_extract (bc_v2i64 (v8i16 + (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2))), (iPTR 0))), + addr:$dst), + (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>; + def : Pat<(store (v8i16 (int_x86_vcvtps2ph_256 VR256:$src1, i32:$src2)), + addr:$dst), + (VCVTPS2PHYmr addr:$dst, VR256:$src1, imm:$src2)>; } // Patterns for matching conversions from float to half-float and vice versa. 
diff --git a/test/CodeGen/X86/f16c-intrinsics.ll b/test/CodeGen/X86/f16c-intrinsics.ll index 02967d52048..485592aeac3 100644 --- a/test/CodeGen/X86/f16c-intrinsics.ll +++ b/test/CodeGen/X86/f16c-intrinsics.ll @@ -1,8 +1,8 @@ -; RUN: llc < %s -march=x86 -mattr=+avx,+f16c | FileCheck %s -; RUN: llc < %s -march=x86-64 -mattr=+avx,+f16c | FileCheck %s +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+f16c | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c | FileCheck %s define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) { - ; CHECK-LABEL: test_x86_vcvtph2ps_128 + ; CHECK-LABEL: test_x86_vcvtph2ps_128: ; CHECK-NOT: vmov ; CHECK: vcvtph2ps %res = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0) ; <<4 x float>> [#uses=1] @@ -12,7 +12,7 @@ declare <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16>) nounwind readonly define <8 x float> @test_x86_vcvtph2ps_256(<8 x i16> %a0) { - ; CHECK-LABEL: test_x86_vcvtph2ps_256 + ; CHECK-LABEL: test_x86_vcvtph2ps_256: ; CHECK-NOT: vmov ; CHECK: vcvtph2ps %res = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0) ; <<8 x float>> [#uses=1] @@ -31,7 +31,7 @@ entry: } define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0) { - ; CHECK-LABEL: test_x86_vcvtps2ph_128 + ; CHECK-LABEL: test_x86_vcvtps2ph_128: ; CHECK-NOT: vmov ; CHECK: vcvtps2ph %res = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1] @@ -39,9 +39,8 @@ define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0) { } declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32) nounwind readonly - define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) { - ; CHECK-LABEL: test_x86_vcvtps2ph_256 + ; CHECK-LABEL: test_x86_vcvtps2ph_256: ; CHECK-NOT: vmov ; CHECK: vcvtps2ph %res = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1] @@ -50,7 +49,7 @@ define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) { declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x 
float>, i32) nounwind readonly define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) { -; CHECK-LABEL: test_x86_vcvtps2ph_128_scalar +; CHECK-LABEL: test_x86_vcvtps2ph_128_scalar: ; CHECK-NOT: vmov ; CHECK: vcvtph2ps (% @@ -61,3 +60,48 @@ define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) { %res = tail call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %bc) #2 ret <4 x float> %res } + +define void @test_x86_vcvtps2ph_256_m(<8 x i16>* nocapture %d, <8 x float> %a) nounwind { +entry: + ; CHECK-LABEL: test_x86_vcvtps2ph_256_m: + ; CHECK-NOT: vmov + ; CHECK: vcvtps2ph $3, %ymm0, (% + %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a, i32 3) + store <8 x i16> %0, <8 x i16>* %d, align 16 + ret void +} + +define void @test_x86_vcvtps2ph_128_m(<4 x i16>* nocapture %d, <4 x float> %a) nounwind { +entry: + ; CHECK-LABEL: test_x86_vcvtps2ph_128_m: + ; CHECK-NOT: vmov + ; CHECK: vcvtps2ph $3, %xmm0, (% + %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a, i32 3) + %1 = shufflevector <8 x i16> %0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i16> %1, <4 x i16>* %d, align 8 + ret void +} + +define void @test_x86_vcvtps2ph_128_m2(double* nocapture %hf4x16, <4 x float> %f4x32) #0 { +entry: + ; CHECK-LABEL: test_x86_vcvtps2ph_128_m2: + ; CHECK-NOT: vmov + ; CHECK: vcvtps2ph $3, %xmm0, (% + %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4x32, i32 3) + %1 = bitcast <8 x i16> %0 to <2 x double> + %vecext = extractelement <2 x double> %1, i32 0 + store double %vecext, double* %hf4x16, align 8 + ret void +} + +define void @test_x86_vcvtps2ph_128_m3(i64* nocapture %hf4x16, <4 x float> %f4x32) #0 { +entry: + ; CHECK-LABEL: test_x86_vcvtps2ph_128_m3: + ; CHECK-NOT: vmov + ; CHECK: vcvtps2ph $3, %xmm0, (% + %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4x32, i32 3) + %1 = bitcast <8 x i16> %0 to <2 x i64> + %vecext = extractelement <2 x i64> %1, i32 0 + store i64 %vecext, i64* %hf4x16, align 8 + ret void +}