From: Chandler Carruth
Date: Mon, 22 Sep 2014 03:05:23 +0000 (+0000)
Subject: [x86] Move the AVX v4i64 test cases down to group them together.
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=ec35919c9a220b32365c5191843c8728c61bb6b4;p=oota-llvm.git

[x86] Move the AVX v4i64 test cases down to group them together.

Increasingly I don't want to mix the integer and floating point tests,
especially with AVX where they are handled quite differently.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218233 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/vector-shuffle-256-v4.ll b/test/CodeGen/X86/vector-shuffle-256-v4.ll
index c8673204746..175e732b9ab 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -3,98 +3,6 @@
 
 target triple = "x86_64-unknown-unknown"
 
-define <4 x i64> @shuffle_v4i64_0001(<4 x i64> %a, <4 x i64> %b) {
-; ALL-LABEL: @shuffle_v4i64_0001
-; ALL:       # BB#0:
-; ALL-NEXT:    vunpcklpd {{.*}} # xmm1 = xmm0[0,0]
-; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; ALL-NEXT:    retq
-  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
-  ret <4 x i64> %shuffle
-}
-
-define <4 x i64> @shuffle_v4i64_0020(<4 x i64> %a, <4 x i64> %b) {
-; ALL-LABEL: @shuffle_v4i64_0020
-; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; ALL-NEXT:    vunpcklpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
-; ALL-NEXT:    vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
-; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; ALL-NEXT:    retq
-  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
-  ret <4 x i64> %shuffle
-}
-
-define <4 x i64> @shuffle_v4i64_0112(<4 x i64> %a, <4 x i64> %b) {
-; ALL-LABEL: @shuffle_v4i64_0112
-; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; ALL-NEXT:    vshufpd {{.*}} # xmm1 = xmm0[1],xmm1[0]
-; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; ALL-NEXT:    retq
-  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 1, i32 2>
-  ret <4 x i64> %shuffle
-}
-
-define <4 x i64> @shuffle_v4i64_0300(<4 x i64> %a, <4 x i64> %b) {
-; ALL-LABEL: @shuffle_v4i64_0300
-; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; ALL-NEXT:    vblendpd {{.*}} # xmm1 = xmm0[0],xmm1[1]
-; ALL-NEXT:    vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
-; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; ALL-NEXT:    retq
-  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
-  ret <4 x i64> %shuffle
-}
-
-define <4 x i64> @shuffle_v4i64_1000(<4 x i64> %a, <4 x i64> %b) {
-; ALL-LABEL: @shuffle_v4i64_1000
-; ALL:       # BB#0:
-; ALL-NEXT:    vpermilpd {{.*}} # xmm1 = xmm0[1,0]
-; ALL-NEXT:    vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
-; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; ALL-NEXT:    retq
-  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
-  ret <4 x i64> %shuffle
-}
-
-define <4 x i64> @shuffle_v4i64_2200(<4 x i64> %a, <4 x i64> %b) {
-; ALL-LABEL: @shuffle_v4i64_2200
-; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; ALL-NEXT:    vunpcklpd {{.*}} # xmm1 = xmm1[0,0]
-; ALL-NEXT:    vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
-; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; ALL-NEXT:    retq
-  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
-  ret <4 x i64> %shuffle
-}
-
-define <4 x i64> @shuffle_v4i64_3330(<4 x i64> %a, <4 x i64> %b) {
-; ALL-LABEL: @shuffle_v4i64_3330
-; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; ALL-NEXT:    vshufpd {{.*}} # xmm0 = xmm1[1],xmm0[0]
-; ALL-NEXT:    vmovhlps {{.*}} # xmm1 = xmm1[1,1]
-; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; ALL-NEXT:    retq
-  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
-  ret <4 x i64> %shuffle
-}
-
-define <4 x i64> @shuffle_v4i64_3210(<4 x i64> %a, <4 x i64> %b) {
-; ALL-LABEL: @shuffle_v4i64_3210
-; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; ALL-NEXT:    vpermilpd {{.*}} # xmm1 = xmm1[1,0]
-; ALL-NEXT:    vpermilpd {{.*}} # xmm0 = xmm0[1,0]
-; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; ALL-NEXT:    retq
-  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-  ret <4 x i64> %shuffle
-}
-
 define <4 x double> @shuffle_v4f64_0001(<4 x double> %a, <4 x double> %b) {
 ; ALL-LABEL: @shuffle_v4f64_0001
 ; ALL:       # BB#0:
@@ -341,6 +249,98 @@ define <4 x double> @shuffle_v4f64_0167(<4 x double> %a, <4 x double> %b) {
   ret <4 x double> %shuffle
 }
 
+define <4 x i64> @shuffle_v4i64_0001(<4 x i64> %a, <4 x i64> %b) {
+; ALL-LABEL: @shuffle_v4i64_0001
+; ALL:       # BB#0:
+; ALL-NEXT:    vunpcklpd {{.*}} # xmm1 = xmm0[0,0]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_0020(<4 x i64> %a, <4 x i64> %b) {
+; ALL-LABEL: @shuffle_v4i64_0020
+; ALL:       # BB#0:
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT:    vunpcklpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; ALL-NEXT:    vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_0112(<4 x i64> %a, <4 x i64> %b) {
+; ALL-LABEL: @shuffle_v4i64_0112
+; ALL:       # BB#0:
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT:    vshufpd {{.*}} # xmm1 = xmm0[1],xmm1[0]
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 1, i32 2>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_0300(<4 x i64> %a, <4 x i64> %b) {
+; ALL-LABEL: @shuffle_v4i64_0300
+; ALL:       # BB#0:
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT:    vblendpd {{.*}} # xmm1 = xmm0[0],xmm1[1]
+; ALL-NEXT:    vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_1000(<4 x i64> %a, <4 x i64> %b) {
+; ALL-LABEL: @shuffle_v4i64_1000
+; ALL:       # BB#0:
+; ALL-NEXT:    vpermilpd {{.*}} # xmm1 = xmm0[1,0]
+; ALL-NEXT:    vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_2200(<4 x i64> %a, <4 x i64> %b) {
+; ALL-LABEL: @shuffle_v4i64_2200
+; ALL:       # BB#0:
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT:    vunpcklpd {{.*}} # xmm1 = xmm1[0,0]
+; ALL-NEXT:    vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_3330(<4 x i64> %a, <4 x i64> %b) {
+; ALL-LABEL: @shuffle_v4i64_3330
+; ALL:       # BB#0:
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT:    vshufpd {{.*}} # xmm0 = xmm1[1],xmm0[0]
+; ALL-NEXT:    vmovhlps {{.*}} # xmm1 = xmm1[1,1]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_3210(<4 x i64> %a, <4 x i64> %b) {
+; ALL-LABEL: @shuffle_v4i64_3210
+; ALL:       # BB#0:
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT:    vpermilpd {{.*}} # xmm1 = xmm1[1,0]
+; ALL-NEXT:    vpermilpd {{.*}} # xmm0 = xmm0[1,0]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  ret <4 x i64> %shuffle
+}
+
 define <4 x i64> @shuffle_v4i64_0124(<4 x i64> %a, <4 x i64> %b) {
 ; ALL-LABEL: @shuffle_v4i64_0124
 ; ALL:       # BB#0: