; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
; RUN: llc -O0 < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s -check-prefix=CHECK_O0
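
; Check 32-byte-aligned 256-bit loads and stores of each AVX vector type:
; the values are loaded, kept live across an opaque call, and stored back.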
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp {
entry:
  %0 = bitcast double* %d to <4 x double>*
  %tmp1.i = load <4 x double>, <4 x double>* %0, align 32
  %1 = bitcast float* %f to <8 x float>*
  %tmp1.i17 = load <8 x float>, <8 x float>* %1, align 32
  %tmp1.i16 = load <4 x i64>, <4 x i64>* %i, align 32
  tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
  store <4 x double> %tmp1.i, <4 x double>* %0, align 32
  store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
  store <4 x i64> %tmp1.i16, <4 x i64>* %i, align 32
  ret void
}

declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)

;; The two tests below check that a load + scalar_to_vector + insert_subvector
;; + zext sequence must be folded into a single vmovss, vmovsd, or vinsertps
;; from memory.
define <8 x float> @mov00(<8 x float> %v, float* %ptr) nounwind {
  %val = load float, float* %ptr
; CHECK: vmovss (%
  %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
  ret <8 x float> %i0
}

define <4 x double> @mov01(<4 x double> %v, double* %ptr) nounwind {
  %val = load double, double* %ptr
; CHECK: vmovsd (%
  %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
  ret <4 x double> %i0
}
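
; The four store tests below exercise 256-bit stores at different alignments:
; a 32-byte-aligned store may use a single aligned ymm move, while a
; 4-byte-aligned store must not, since aligned 256-bit moves fault on
; under-aligned addresses.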
define void @storev16i16(<16 x i16> %a) nounwind {
  store <16 x i16> %a, <16 x i16>* undef, align 32
  unreachable
}

; CHECK: storev16i16_01
define void @storev16i16_01(<16 x i16> %a) nounwind {
  store <16 x i16> %a, <16 x i16>* undef, align 4
  unreachable
}

define void @storev32i8(<32 x i8> %a) nounwind {
  store <32 x i8> %a, <32 x i8>* undef, align 32
  unreachable
}

; CHECK: storev32i8_01
define void @storev32i8_01(<32 x i8> %a) nounwind {
  store <32 x i8> %a, <32 x i8>* undef, align 4
  unreachable
}

; It is faster to make two 128-bit stores if the data is already in XMM
; registers, for example after an integer operation.
; CHECK-NOT: vinsertf128 $1
; CHECK-NOT: vinsertf128 $0
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
entry:
  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x i32> %Z, <8 x i32>* %P, align 16
  ret void
}

declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwind
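
; At -O0, check that the constant <8 x i32> mask for the masked store below
; is materialized with scalar moves: a vmovss load from the constant pool,
; a vxorps to zero a register, and a vmovss to merge the scalar into it.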
; CHECK_O0: vmovss LCPI
; CHECK_O0: vxorps %xmm
; CHECK_O0: vmovss %xmm
define void @f_f() nounwind {
allocas:
  br i1 undef, label %cif_mask_all, label %cif_mask_mixed

cif_mask_all:                                     ; preds = %allocas
  unreachable

cif_mask_mixed:                                   ; preds = %allocas
  br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check

cif_mixed_test_all:                               ; preds = %cif_mask_mixed
  call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x i32> <i32 -1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <8 x float> undef) nounwind
  unreachable

cif_mixed_test_any_check:                         ; preds = %cif_mask_mixed
  unreachable
}
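
; A 256-bit load/store with alignment 1 should use unaligned moves rather
; than being split apart and reassembled with vinsertf128/vextractf128.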
; CHECK-NOT: vinsertf128
; CHECK-NOT: vextractf128
define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
  %b = load <8 x i32>, <8 x i32>* %bp, align 1
  %x = add <8 x i32> zeroinitializer, %b
  store <8 x i32> %x, <8 x i32>* %ret, align 1
  ret void
}
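
; With 64-byte alignment, the same pattern can use a single aligned 256-bit
; (ymm) load and store.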
; CHECK: vmovaps ({{.*}}), %ymm{{.*}}
; CHECK: vmovaps %ymm{{.*}}, ({{.*}})
define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
  %b = load <4 x i64>, <4 x i64>* %bp, align 64
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 64
  ret void
}
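
; With only 16-byte alignment, the 256-bit access is split into two aligned
; 128-bit (xmm) moves.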
; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
  %b = load <4 x i64>, <4 x i64>* %bp, align 16
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 16
  ret void
}