-; RUN: llvm-as < %s | llc -march=x86 -mattr=sse41 -stack-alignment=16 > %t
-; RUN: grep pmul %t | count 12
-; RUN: grep mov %t | count 12
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+
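+; x86 has no vector i8 multiply, so v16i8 lanes are sign-extended to i16,
+; multiplied with pmullw, and the low bytes packed back with packuswb.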
+define <16 x i8> @mul8c(<16 x i8> %i) nounwind {
+; SSE2-LABEL: mul8c:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: pmullw %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: pmullw %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: mul8c:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
+; SSE41-NEXT: pmovsxbw {{.*}}(%rip), %xmm2
+; SSE41-NEXT: pmullw %xmm2, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
+; SSE41-NEXT: pmullw %xmm2, %xmm0
+; SSE41-NEXT: pand %xmm3, %xmm0
+; SSE41-NEXT: packuswb %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX2-LABEL: mul8c:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+entry:
+ %A = mul <16 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
+ ret <16 x i8> %A
+}
+
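+; v8i16 multiply maps directly to pmullw, so a splat-constant multiply is a
+; single instruction with a memory operand on every subtarget.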
+define <8 x i16> @mul16c(<8 x i16> %i) nounwind {
+; SSE-LABEL: mul16c:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: mul16c:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
+entry:
+ %A = mul <8 x i16> %i, < i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117 >
+ ret <8 x i16> %A
+}
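+; v4i32 multiply by a splat constant: SSE2 lacks pmulld and emulates it with
+; two pmuludq plus shuffles; SSE4.1 and AVX2 use pmulld directly.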
define <4 x i32> @a(<4 x i32> %i) nounwind {
- %A = mul <4 x i32> %i, < i32 117, i32 117, i32 117, i32 117 >
- ret <4 x i32> %A
+; SSE2-LABEL: a:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [117,117,117,117]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: a:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
+; SSE41-NEXT: retq
+;
+; AVX2-LABEL: a:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+entry:
+ %A = mul <4 x i32> %i, < i32 117, i32 117, i32 117, i32 117 >
+ ret <4 x i32> %A
}
+
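+; v2i64 multiply has no dedicated instruction and is expanded into pmuludq,
+; 32-bit shifts, and paddq. The constant's high halves are zero, so only two
+; pmuludq are needed here.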
define <2 x i64> @b(<2 x i64> %i) nounwind {
- %A = mul <2 x i64> %i, < i64 117, i64 117 >
- ret <2 x i64> %A
+; SSE-LABEL: b:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [117,117]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm1, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: b:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [117,117]
+; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: retq
+entry:
+ %A = mul <2 x i64> %i, < i64 117, i64 117 >
+ ret <2 x i64> %A
}
+
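+; Variable v16i8 multiply: the same widen-to-i16/pmullw/pack expansion,
+; applied to both operands.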
+define <16 x i8> @mul8(<16 x i8> %i, <16 x i8> %j) nounwind {
+; SSE2-LABEL: mul8:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: pmullw %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: pmullw %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: mul8:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmovsxbw %xmm1, %xmm3
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm2
+; SSE41-NEXT: pmullw %xmm3, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT: pand %xmm3, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovsxbw %xmm1, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
+; SSE41-NEXT: pmullw %xmm1, %xmm0
+; SSE41-NEXT: pand %xmm3, %xmm0
+; SSE41-NEXT: packuswb %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX2-LABEL: mul8:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+entry:
+ %A = mul <16 x i8> %i, %j
+ ret <16 x i8> %A
+}
+
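+; Variable v8i16 multiply is a single pmullw on every subtarget.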
+define <8 x i16> @mul16(<8 x i16> %i, <8 x i16> %j) nounwind {
+; SSE-LABEL: mul16:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: pmullw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: mul16:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+entry:
+ %A = mul <8 x i16> %i, %j
+ ret <8 x i16> %A
+}
+
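+; Variable v4i32 multiply: pmuludq/shuffle emulation on SSE2, a single pmulld
+; on SSE4.1 and AVX2.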
define <4 x i32> @c(<4 x i32> %i, <4 x i32> %j) nounwind {
- %A = mul <4 x i32> %i, %j
- ret <4 x i32> %A
+; SSE2-LABEL: c:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: c:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmulld %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX2-LABEL: c:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+entry:
+ %A = mul <4 x i32> %i, %j
+ ret <4 x i32> %A
}
+
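+; Variable v2i64 multiply needs the full expansion: three pmuludq partial
+; products combined with shifts and adds.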
define <2 x i64> @d(<2 x i64> %i, <2 x i64> %j) nounwind {
- %A = mul <2 x i64> %i, %j
- ret <2 x i64> %A
+; SSE-LABEL: d:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: psrlq $32, %xmm3
+; SSE-NEXT: pmuludq %xmm0, %xmm3
+; SSE-NEXT: psllq $32, %xmm3
+; SSE-NEXT: paddq %xmm3, %xmm2
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm1, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: d:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3
+; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
+; AVX2-NEXT: vpsllq $32, %xmm3, %xmm3
+; AVX2-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: retq
+entry:
+ %A = mul <2 x i64> %i, %j
+ ret <2 x i64> %A
}
-; Use a call to force spills.
+
declare void @foo()
+
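+; Same as @c, but the call forces the operands to be spilled; SSE4.1 and AVX2
+; should fold the reload directly into pmulld.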
define <4 x i32> @e(<4 x i32> %i, <4 x i32> %j) nounwind {
- call void @foo()
- %A = mul <4 x i32> %i, %j
- ret <4 x i32> %A
+; SSE2-LABEL: e:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: subq $40, %rsp
+; SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: callq foo
+; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: pmuludq %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: addq $40, %rsp
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: e:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: subq $40, %rsp
+; SSE41-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE41-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE41-NEXT: callq foo
+; SSE41-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE41-NEXT: pmulld {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
+; SSE41-NEXT: addq $40, %rsp
+; SSE41-NEXT: retq
+;
+; AVX2-LABEL: e:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: subq $40, %rsp
+; AVX2-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: callq foo
+; AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX2-NEXT: vpmulld {{[0-9]+}}(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX2-NEXT: addq $40, %rsp
+; AVX2-NEXT: retq
+entry:
+ ; Use a call to force spills.
+ call void @foo()
+ %A = mul <4 x i32> %i, %j
+ ret <4 x i32> %A
}
+
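+; Same as @d, but the call forces spills and reloads around the expansion.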
define <2 x i64> @f(<2 x i64> %i, <2 x i64> %j) nounwind {
- call void @foo()
- %A = mul <2 x i64> %i, %j
- ret <2 x i64> %A
+; SSE-LABEL: f:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: subq $40, %rsp
+; SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE-NEXT: callq foo
+; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE-NEXT: pmuludq %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pmuludq %xmm0, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm1, %xmm2
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm3, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: addq $40, %rsp
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: f:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: subq $40, %rsp
+; AVX2-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: callq foo
+; AVX2-NEXT: vmovdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; AVX2-NEXT: vmovdqa (%rsp), %xmm3 # 16-byte Reload
+; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm0
+; AVX2-NEXT: vpsrlq $32, %xmm2, %xmm1
+; AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlq $32, %xmm3, %xmm1
+; AVX2-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: addq $40, %rsp
+; AVX2-NEXT: retq
+entry:
+ ; Use a call to force spills.
+ call void @foo()
+ %A = mul <2 x i64> %i, %j
+ ret <2 x i64> %A
}
+
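+; 256-bit v4i64 splat-constant multiply: two 128-bit pmuludq expansions on
+; SSE, a single ymm expansion after vpbroadcastq with AVX2.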
+define <4 x i64> @b1(<4 x i64> %i) nounwind {
+; SSE-LABEL: b1:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [117,117]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm2, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pmuludq %xmm2, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: b1:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+entry:
+ %A = mul <4 x i64> %i, < i64 117, i64 117, i64 117, i64 117 >
+ ret <4 x i64> %A
+}
+
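+; Variable v4i64 multiply: the three-pmuludq expansion per 128-bit half on
+; SSE, done once on ymm registers with AVX2.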
+define <4 x i64> @b2(<4 x i64> %i, <4 x i64> %j) nounwind {
+; SSE-LABEL: b2:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pmuludq %xmm2, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: psrlq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm0, %xmm5
+; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: paddq %xmm5, %xmm4
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm2, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pmuludq %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm1, %xmm4
+; SSE-NEXT: psllq $32, %xmm4
+; SSE-NEXT: paddq %xmm4, %xmm2
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pmuludq %xmm3, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: b2:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
+; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
+; AVX2-NEXT: vpsllq $32, %ymm3, %ymm3
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+entry:
+ %A = mul <4 x i64> %i, %j
+ ret <4 x i64> %A
+}