; RUN: llc -mtriple=x86_64-linux -mcpu=nehalem < %s | FileCheck %s --check-prefix=LIN
; RUN: llc -mtriple=x86_64-win32 -mcpu=nehalem < %s | FileCheck %s --check-prefix=WIN
; RUN: llc -mtriple=i686-win32 -mcpu=nehalem < %s | FileCheck %s --check-prefix=LIN32

; When doing vector gather-scatter index calculation with 32-bit indices,
; use an efficient mov/shift sequence rather than shuffling each individual
; element out of the index vector.
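;
; The checked sequence below moves the masked index vector to GPRs 64 bits at a
; time (movd/pextrq); each 64-bit half then supplies two indices via movslq
; (sign extension of the low 32 bits) and sarq $32 (which recovers the
; sign-extended high 32 bits), and the gathered doubles are assembled with
; movsd/movhpd pairs.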

; LIN: movdqa (%rsi), %xmm0
; LIN: pand (%rdx), %xmm0
; LIN: pextrq $1, %xmm0, %r[[REG4:.+]]
; LIN: movd %xmm0, %r[[REG2:.+]]
; LIN: movslq %e[[REG2]], %r[[REG1:.+]]
; LIN: sarq $32, %r[[REG2]]
; LIN: movslq %e[[REG4]], %r[[REG3:.+]]
; LIN: sarq $32, %r[[REG4]]
; LIN: movsd (%rdi,%r[[REG1]],8), %xmm0
; LIN: movhpd (%rdi,%r[[REG2]],8), %xmm0
; LIN: movsd (%rdi,%r[[REG3]],8), %xmm1
; LIN: movhpd (%rdi,%r[[REG4]],8), %xmm1

; WIN: movdqa (%rdx), %xmm0
; WIN: pand (%r8), %xmm0
; WIN: pextrq $1, %xmm0, %r[[REG4:.+]]
; WIN: movd %xmm0, %r[[REG2:.+]]
; WIN: movslq %e[[REG2]], %r[[REG1:.+]]
; WIN: sarq $32, %r[[REG2]]
; WIN: movslq %e[[REG4]], %r[[REG3:.+]]
; WIN: sarq $32, %r[[REG4]]
; WIN: movsd (%rcx,%r[[REG1]],8), %xmm0
; WIN: movhpd (%rcx,%r[[REG2]],8), %xmm0
; WIN: movsd (%rcx,%r[[REG3]],8), %xmm1
; WIN: movhpd (%rcx,%r[[REG4]],8), %xmm1
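;
; The Win64 checks expect the same sequence; only the argument registers differ
; (%rcx, %rdx and %r8 instead of %rdi, %rsi and %rdx).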

define <4 x double> @foo(double* %p, <4 x i32>* %i, <4 x i32>* %h) nounwind {
  %a = load <4 x i32>, <4 x i32>* %i
  %b = load <4 x i32>, <4 x i32>* %h
  %j = and <4 x i32> %a, %b
  %d0 = extractelement <4 x i32> %j, i32 0
  %d1 = extractelement <4 x i32> %j, i32 1
  %d2 = extractelement <4 x i32> %j, i32 2
  %d3 = extractelement <4 x i32> %j, i32 3
  %q0 = getelementptr double, double* %p, i32 %d0
  %q1 = getelementptr double, double* %p, i32 %d1
  %q2 = getelementptr double, double* %p, i32 %d2
  %q3 = getelementptr double, double* %p, i32 %d3
  %r0 = load double, double* %q0
  %r1 = load double, double* %q1
  %r2 = load double, double* %q2
  %r3 = load double, double* %q3
  %v0 = insertelement <4 x double> undef, double %r0, i32 0
  %v1 = insertelement <4 x double> %v0, double %r1, i32 1
  %v2 = insertelement <4 x double> %v1, double %r2, i32 2
  %v3 = insertelement <4 x double> %v2, double %r3, i32 3
  ret <4 x double> %v3
}

; Check that the sequence previously used above, which bounces the vector off the
; cache, works for x86-32. Note that in this case it will not be used for index
; calculation, since indexes are 32-bit, not 64.
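;
; The checks below only verify that the index vector is spilled to the stack
; slot at (%esp) and that the four indices are then read back (or folded into
; an and) as 32-bit scalars.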

; LIN32: movaps %xmm0, (%esp)
; LIN32-DAG: {{(mov|and)}}l (%esp),
; LIN32-DAG: {{(mov|and)}}l 4(%esp),
; LIN32-DAG: {{(mov|and)}}l 8(%esp),
; LIN32-DAG: {{(mov|and)}}l 12(%esp),
define <4 x i64> @old(double* %p, <4 x i32>* %i, <4 x i32>* %h, i64 %f) nounwind {
  %a = load <4 x i32>, <4 x i32>* %i
  %b = load <4 x i32>, <4 x i32>* %h
  %j = and <4 x i32> %a, %b
  %d0 = extractelement <4 x i32> %j, i32 0
  %d1 = extractelement <4 x i32> %j, i32 1
  %d2 = extractelement <4 x i32> %j, i32 2
  %d3 = extractelement <4 x i32> %j, i32 3
  %q0 = zext i32 %d0 to i64
  %q1 = zext i32 %d1 to i64
  %q2 = zext i32 %d2 to i64
  %q3 = zext i32 %d3 to i64
  %r0 = and i64 %q0, %f
  %r1 = and i64 %q1, %f
  %r2 = and i64 %q2, %f
  %r3 = and i64 %q3, %f
  %v0 = insertelement <4 x i64> undef, i64 %r0, i32 0
  %v1 = insertelement <4 x i64> %v0, i64 %r1, i32 1
  %v2 = insertelement <4 x i64> %v1, i64 %r2, i32 2
  %v3 = insertelement <4 x i64> %v2, i64 %r3, i32 3
  ret <4 x i64> %v3
}