; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX

; Verify that we're folding the load into the math instruction.
; This pattern is generated from the simplest intrinsics usage:
;  _mm_add_ss(a, _mm_load_ss(b));
; Scalar float add: the load of %pb must fold into (v)addss as a memory operand.
define <4 x float> @addss(<4 x float> %va, float* %pb) {
; SSE-LABEL: addss:
; SSE:       # %bb.0:
; SSE-NEXT:    addss (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: addss:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddss (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = extractelement <4 x float> %va, i32 0
  %b = load float, float* %pb
  %r = fadd float %a, %b
  %vr = insertelement <4 x float> %va, float %r, i32 0
  ret <4 x float> %vr
}
; Scalar double add: the load of %pb must fold into (v)addsd as a memory operand.
define <2 x double> @addsd(<2 x double> %va, double* %pb) {
; SSE-LABEL: addsd:
; SSE:       # %bb.0:
; SSE-NEXT:    addsd (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: addsd:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsd (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = extractelement <2 x double> %va, i32 0
  %b = load double, double* %pb
  %r = fadd double %a, %b
  %vr = insertelement <2 x double> %va, double %r, i32 0
  ret <2 x double> %vr
}
; Scalar float subtract: the load of %pb must fold into (v)subss as a memory operand.
define <4 x float> @subss(<4 x float> %va, float* %pb) {
; SSE-LABEL: subss:
; SSE:       # %bb.0:
; SSE-NEXT:    subss (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: subss:
; AVX:       # %bb.0:
; AVX-NEXT:    vsubss (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = extractelement <4 x float> %va, i32 0
  %b = load float, float* %pb
  %r = fsub float %a, %b
  %vr = insertelement <4 x float> %va, float %r, i32 0
  ret <4 x float> %vr
}
; Scalar double subtract: the load of %pb must fold into (v)subsd as a memory operand.
define <2 x double> @subsd(<2 x double> %va, double* %pb) {
; SSE-LABEL: subsd:
; SSE:       # %bb.0:
; SSE-NEXT:    subsd (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: subsd:
; AVX:       # %bb.0:
; AVX-NEXT:    vsubsd (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = extractelement <2 x double> %va, i32 0
  %b = load double, double* %pb
  %r = fsub double %a, %b
  %vr = insertelement <2 x double> %va, double %r, i32 0
  ret <2 x double> %vr
}
; Scalar float multiply: the load of %pb must fold into (v)mulss as a memory operand.
define <4 x float> @mulss(<4 x float> %va, float* %pb) {
; SSE-LABEL: mulss:
; SSE:       # %bb.0:
; SSE-NEXT:    mulss (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: mulss:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulss (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = extractelement <4 x float> %va, i32 0
  %b = load float, float* %pb
  %r = fmul float %a, %b
  %vr = insertelement <4 x float> %va, float %r, i32 0
  ret <4 x float> %vr
}
; Scalar double multiply: the load of %pb must fold into (v)mulsd as a memory operand.
define <2 x double> @mulsd(<2 x double> %va, double* %pb) {
; SSE-LABEL: mulsd:
; SSE:       # %bb.0:
; SSE-NEXT:    mulsd (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: mulsd:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulsd (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = extractelement <2 x double> %va, i32 0
  %b = load double, double* %pb
  %r = fmul double %a, %b
  %vr = insertelement <2 x double> %va, double %r, i32 0
  ret <2 x double> %vr
}
; Scalar float divide: the load of %pb must fold into (v)divss as a memory operand.
define <4 x float> @divss(<4 x float> %va, float* %pb) {
; SSE-LABEL: divss:
; SSE:       # %bb.0:
; SSE-NEXT:    divss (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: divss:
; AVX:       # %bb.0:
; AVX-NEXT:    vdivss (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = extractelement <4 x float> %va, i32 0
  %b = load float, float* %pb
  %r = fdiv float %a, %b
  %vr = insertelement <4 x float> %va, float %r, i32 0
  ret <4 x float> %vr
}
; Scalar double divide: the load of %pb must fold into (v)divsd as a memory operand.
define <2 x double> @divsd(<2 x double> %va, double* %pb) {
; SSE-LABEL: divsd:
; SSE:       # %bb.0:
; SSE-NEXT:    divsd (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: divsd:
; AVX:       # %bb.0:
; AVX-NEXT:    vdivsd (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = extractelement <2 x double> %va, i32 0
  %b = load double, double* %pb
  %r = fdiv double %a, %b
  %vr = insertelement <2 x double> %va, double %r, i32 0
  ret <2 x double> %vr
}