1 ; RUN: llc -march=x86-64 < %s | FileCheck %s
4 ; When doing vector gather-scatter index calculation with 32-bit indices,
5 ; bounce the vector off of cache rather than shuffling each individual
6 ; element out of the index vector.
8 ; CHECK: andps (%rdx), %xmm0
9 ; CHECK: movaps %xmm0, -24(%rsp)
10 ; CHECK: movslq -24(%rsp), %rax
11 ; CHECK: movsd (%rdi,%rax,8), %xmm0
12 ; CHECK: movslq -20(%rsp), %rax
13 ; CHECK: movhpd (%rdi,%rax,8), %xmm0
14 ; CHECK: movslq -16(%rsp), %rax
15 ; CHECK: movsd (%rdi,%rax,8), %xmm1
16 ; CHECK: movslq -12(%rsp), %rax
17 ; CHECK: movhpd (%rdi,%rax,8), %xmm1
19 define <4 x double> @foo(double* %p, <4 x i32>* %i, <4 x i32>* %h) nounwind {
; Manually-expanded gather: load two <4 x i32> index vectors, AND them
; together, then use each 32-bit lane as an index into the double array
; at %p, rebuilding a <4 x double> result one lane at a time.
; The CHECK lines above pin the intended lowering: the combined index
; vector is stored to the stack ("bounced off cache") and each lane is
; reloaded with movslq, rather than being shuffled out of the register.
; NOTE(review): the untyped-pointer load/GEP syntax is pre-LLVM-3.7 and
; intentional for this vintage test — do not modernize.
20 %a = load <4 x i32>* %i
21 %b = load <4 x i32>* %h
; Combined gather indices; this is the "andps (%rdx), %xmm0" in CHECK.
22 %j = and <4 x i32> %a, %b
; Extract each 32-bit index lane individually.
23 %d0 = extractelement <4 x i32> %j, i32 0
24 %d1 = extractelement <4 x i32> %j, i32 1
25 %d2 = extractelement <4 x i32> %j, i32 2
26 %d3 = extractelement <4 x i32> %j, i32 3
; Per-lane address computation: &p[dN]. The i32 index is sign-extended
; to pointer width by GEP semantics (hence the movslq in CHECK).
27 %q0 = getelementptr double* %p, i32 %d0
28 %q1 = getelementptr double* %p, i32 %d1
29 %q2 = getelementptr double* %p, i32 %d2
30 %q3 = getelementptr double* %p, i32 %d3
; Scalar gather loads, one double per lane.
31 %r0 = load double* %q0
32 %r1 = load double* %q1
33 %r2 = load double* %q2
34 %r3 = load double* %q3
; Reassemble the four scalars into the <4 x double> result.
; (The function tail — the ret of %v3 and the closing brace — lies past
; the visible range of this chunk.)
35 %v0 = insertelement <4 x double> undef, double %r0, i32 0
36 %v1 = insertelement <4 x double> %v0, double %r1, i32 1
37 %v2 = insertelement <4 x double> %v1, double %r2, i32 2
38 %v3 = insertelement <4 x double> %v2, double %r3, i32 3