From: Chandler Carruth
Date: Thu, 25 Sep 2014 04:10:27 +0000 (+0000)
Subject: [x86] Fix an oversight in the v8i32 path of the new vector shuffle
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=2e8d2c727c11426babad8cc88858a5ce3725ea6f;p=oota-llvm.git

[x86] Fix an oversight in the v8i32 path of the new vector shuffle
lowering where it only used the mask of the low 128-bit lane rather
than the entire mask.

This allows the new lowering to correctly match the unpack patterns for
v8i32 vectors.

For reference, the reason that we check for the entire mask rather than
checking the repeated mask is that the repeated masks don't abide by all
of the invariants of normal masks. As a consequence, it is safer to use
the full mask with functions like the generic equivalence test.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218442 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index a02dc84af68..cda3861ce35 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -9579,9 +9579,9 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                          getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
 
     // Use dedicated unpack instructions for masks that match their pattern.
-    if (isShuffleEquivalent(Mask, 0, 8, 1, 9))
+    if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
       return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
-    if (isShuffleEquivalent(Mask, 2, 10, 3, 11))
+    if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
       return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
   }
 
diff --git a/test/CodeGen/X86/vector-shuffle-256-v8.ll b/test/CodeGen/X86/vector-shuffle-256-v8.ll
index daac1224584..bdcbe94ce98 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -850,9 +850,7 @@ define <8 x i32> @shuffle_v8i32_08194c5d(<8 x i32> %a, <8 x i32> %b) {
 ;
 ; AVX2-LABEL: @shuffle_v8i32_08194c5d
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpshufd {{.*}} # ymm1 = ymm1[0,0,2,1,4,4,6,5]
-; AVX2-NEXT:    vpshufd {{.*}} # ymm0 = ymm0[0,1,1,3,4,5,5,7]
-; AVX2-NEXT:    vpblendd {{.*}} # ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT:    vpunpckldq {{.*}} # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; AVX2-NEXT:    retq
   %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
   ret <8 x i32> %shuffle
@@ -866,9 +864,7 @@ define <8 x i32> @shuffle_v8i32_2a3b6e7f(<8 x i32> %a, <8 x i32> %b) {
 ;
 ; AVX2-LABEL: @shuffle_v8i32_2a3b6e7f
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpshufd {{.*}} # ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX2-NEXT:    vpshufd {{.*}} # ymm0 = ymm0[2,1,3,3,6,5,7,7]
-; AVX2-NEXT:    vpblendd {{.*}} # ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT:    vpunpckhdq {{.*}} # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
 ; AVX2-NEXT:    retq
   %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
   ret <8 x i32> %shuffle
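---

A note on the repeated-mask representation mentioned in the commit message.
A 128-bit-lane repeated mask for v8i32 has only four elements, but elements
drawn from the second input keep a full-width offset of 8 rather than the
lane-width offset of 4, so it is not a well-formed four-element mask. The
standalone C++ sketch below is illustrative only: computeRepeatedMask is a
hypothetical stand-in for the lane-repetition analysis, not LLVM's actual
implementation. It derives the repeated form of the unpacklo pattern that
the fix now matches:

#include <cstdio>
#include <vector>

// Hypothetical stand-in for the lane-repetition check: fold an 8-element
// v8i32 mask into a 4-element per-128-bit-lane mask. Elements drawn from
// the second input keep the full-width offset (+8), so the result is not
// a well-formed 4-element mask.
static bool computeRepeatedMask(const std::vector<int> &Mask, int LaneSize,
                                std::vector<int> &Repeated) {
  int Size = static_cast<int>(Mask.size());
  Repeated.assign(LaneSize, -1);
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue; // undef element matches any slot
    // Bail out if the element crosses a 128-bit lane boundary.
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      return false;
    // Second-input elements stay offset by Size (8), not LaneSize (4).
    int R = Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
    if (Repeated[i % LaneSize] < 0)
      Repeated[i % LaneSize] = R;
    else if (Repeated[i % LaneSize] != R)
      return false; // the two lanes do not repeat the same shuffle
  }
  return true;
}

int main() {
  // The full v8i32 unpacklo pattern matched by the fixed code.
  std::vector<int> Mask = {0, 8, 1, 9, 4, 12, 5, 13};
  std::vector<int> Repeated;
  if (computeRepeatedMask(Mask, /*LaneSize=*/4, Repeated)) {
    for (int M : Repeated)
      std::printf("%d ", M);
    std::printf("\n"); // prints: 0 8 1 9
  }
  return 0;
}

The repeated form {0, 8, 1, 9} breaks the usual invariant that a two-input
mask of width N uses indices in [0, 2N), so generic helpers such as the
equivalence test cannot safely interpret it; comparing the full eight-element
mask, as the fix does, sidesteps the problem.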