From ae464b2ba144ebf3993dd41bd4bc950df97e59cd Mon Sep 17 00:00:00 2001 From: Chandler Carruth Date: Sat, 20 Sep 2014 22:09:27 +0000 Subject: [PATCH] [x86] Teach the new vector shuffle lowering to use VPERMILPD for single-input shuffles with doubles. This allows them to fold memory operands into the shuffle, etc. This is just the analog to the v4f32 case in my prior commit. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218193 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 8 +++++++ test/CodeGen/X86/vector-shuffle-128-v2.ll | 28 +++++++++++++++++++---- test/CodeGen/X86/vector-shuffle-256-v4.ll | 6 ++--- 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 29b9effad73..654a1abf710 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -7657,6 +7657,14 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2, // Straight shuffle of a single input vector. Simulate this by using the // single input as both of the "inputs" to this instruction.. unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1); + + if (Subtarget->hasAVX()) { + // If we have AVX, we can use VPERMILPD which will allow folding a load + // into the shuffle. 
+ return DAG.getNode(X86ISD::VPERMILP, DL, MVT::v2f64, V1, + DAG.getConstant(SHUFPDMask, MVT::i8)); + } + return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1, DAG.getConstant(SHUFPDMask, MVT::i8)); } diff --git a/test/CodeGen/X86/vector-shuffle-128-v2.ll b/test/CodeGen/X86/vector-shuffle-128-v2.ll index 27c746fb322..430a0f1eb2c 100644 --- a/test/CodeGen/X86/vector-shuffle-128-v2.ll +++ b/test/CodeGen/X86/vector-shuffle-128-v2.ll @@ -70,9 +70,13 @@ define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) { ret <2 x double> %shuffle } define <2 x double> @shuffle_v2f64_10(<2 x double> %a, <2 x double> %b) { -; ALL-LABEL: @shuffle_v2f64_10 -; ALL: shufpd {{.*}} # xmm0 = xmm0[1,0] -; ALL-NEXT: retq +; SSE-LABEL: @shuffle_v2f64_10 +; SSE: shufpd {{.*}} # xmm0 = xmm0[1,0] +; SSE-NEXT: retq +; +; AVX-LABEL: @shuffle_v2f64_10 +; AVX: vpermilpd {{.*}} # xmm0 = xmm0[1,0] +; AVX-NEXT: retq %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> ret <2 x double> %shuffle } @@ -112,7 +116,7 @@ define <2 x double> @shuffle_v2f64_32(<2 x double> %a, <2 x double> %b) { ; SSE-NEXT: retq ; ; AVX-LABEL: @shuffle_v2f64_32 -; AVX: vshufpd {{.*}} # xmm0 = xmm1[1,0] +; AVX: vpermilpd {{.*}} # xmm0 = xmm1[1,0] ; AVX-NEXT: retq %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> ret <2 x double> %shuffle @@ -520,3 +524,19 @@ define <2 x double> @insert_dup_mem_v2f64(double* %ptr) { %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> ret <2 x double> %shuffle } + +define <2 x double> @shuffle_mem_v2f64_10(<2 x double>* %ptr) { +; SSE-LABEL: @shuffle_mem_v2f64_10 +; SSE: # BB#0: +; SSE-NEXT: movapd (%rdi), %xmm0 +; SSE-NEXT: shufpd {{.*}} # xmm0 = xmm0[1,0] +; SSE-NEXT: retq +; +; AVX-LABEL: @shuffle_mem_v2f64_10 +; AVX: # BB#0: +; AVX-NEXT: vpermilpd {{.*}} # xmm0 = mem[1,0] +; AVX-NEXT: retq + %a = load <2 x double>* %ptr + %shuffle = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> + ret <2 x double> 
%shuffle +} diff --git a/test/CodeGen/X86/vector-shuffle-256-v4.ll b/test/CodeGen/X86/vector-shuffle-256-v4.ll index 982542b59b2..3f3170babda 100644 --- a/test/CodeGen/X86/vector-shuffle-256-v4.ll +++ b/test/CodeGen/X86/vector-shuffle-256-v4.ll @@ -129,7 +129,7 @@ define <4 x double> @shuffle_v4f64_0300(<4 x double> %a, <4 x double> %b) { define <4 x double> @shuffle_v4f64_1000(<4 x double> %a, <4 x double> %b) { ; AVX1-LABEL: @shuffle_v4f64_1000 ; AVX1: # BB#0: -; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm0[1,0] +; AVX1-NEXT: vpermilpd {{.*}} # xmm1 = xmm0[1,0] ; AVX1-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq @@ -162,8 +162,8 @@ define <4 x double> @shuffle_v4f64_3210(<4 x double> %a, <4 x double> %b) { ; AVX1-LABEL: @shuffle_v4f64_3210 ; AVX1: # BB#0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm1[1,0] -; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm0[1,0] +; AVX1-NEXT: vpermilpd {{.*}} # xmm1 = xmm1[1,0] +; AVX1-NEXT: vpermilpd {{.*}} # xmm0 = xmm0[1,0] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> -- 2.34.1