; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s -check-prefix=SSE2
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s -check-prefix=AVX
; Uniform (splatted) i16 shift amount: SSE2 must zero-extend the low lane
; before psllw; AVX can blend the low element against zero instead.
define <8 x i16> @test1(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test1:
; SSE2:       # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psllw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test1:
; AVX:       # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; AVX-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
  %shl = shl <8 x i16> %A, %vecinit14
  ret <8 x i16> %shl
}
; Uniform i32 shift amount: only element 0 of the amount vector is used,
; so the upper lanes are zeroed (xorps+movss on SSE2, vpblendw on AVX).
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test2:
; SSE2:       # %bb.0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss %xmm1, %xmm2
; SSE2-NEXT: pslld %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test2:
; AVX:       # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX-NEXT: vpslld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
  %shl = shl <4 x i32> %A, %vecinit6
  ret <4 x i32> %shl
}
; Uniform i64 shift amount: psllq/vpsllq already use only the low quadword
; of the count register, so no masking of the splat is needed.
define <2 x i64> @test3(<2 x i64> %A, <2 x i64> %B) {
; SSE2-LABEL: test3:
; SSE2:       # %bb.0:
; SSE2-NEXT: psllq %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test3:
; AVX:       # %bb.0:
; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %vecinit2 = shufflevector <2 x i64> %B, <2 x i64> undef, <2 x i32> zeroinitializer
  %shl = shl <2 x i64> %A, %vecinit2
  ret <2 x i64> %shl
}
; Logical-right-shift variant of test1 (lshr -> psrlw/vpsrlw).
define <8 x i16> @test4(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test4:
; SSE2:       # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psrlw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test4:
; AVX:       # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; AVX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
  %shr = lshr <8 x i16> %A, %vecinit14
  ret <8 x i16> %shr
}
; Logical-right-shift variant of test2 (lshr -> psrld/vpsrld).
define <4 x i32> @test5(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test5:
; SSE2:       # %bb.0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss %xmm1, %xmm2
; SSE2-NEXT: psrld %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test5:
; AVX:       # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
  %shr = lshr <4 x i32> %A, %vecinit6
  ret <4 x i32> %shr
}
; Logical-right-shift variant of test3 (lshr -> psrlq/vpsrlq).
define <2 x i64> @test6(<2 x i64> %A, <2 x i64> %B) {
; SSE2-LABEL: test6:
; SSE2:       # %bb.0:
; SSE2-NEXT: psrlq %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test6:
; AVX:       # %bb.0:
; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %vecinit2 = shufflevector <2 x i64> %B, <2 x i64> undef, <2 x i32> zeroinitializer
  %shr = lshr <2 x i64> %A, %vecinit2
  ret <2 x i64> %shr
}
; Arithmetic-right-shift variant of test1 (ashr -> psraw/vpsraw).
define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test7:
; SSE2:       # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psraw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test7:
; AVX:       # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
  %shr = ashr <8 x i16> %A, %vecinit14
  ret <8 x i16> %shr
}
132 define <4 x i32> @test8(<4 x i32> %A, <4 x i32> %B) {
135 ; SSE2-NEXT: xorps %xmm2, %xmm2
136 ; SSE2-NEXT: movss %xmm1, %xmm2
137 ; SSE2-NEXT: psrad %xmm2, %xmm0
141 ; AVX-NEXT: vpxor %xmm2, %xmm2
142 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
143 ; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0
146 %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
147 %shr = ashr <4 x i32> %A, %vecinit6