1 ; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=knl < %s | FileCheck %s -check-prefix=KNL
2 ; RUN: opt -mtriple=x86_64-apple-darwin -codegenprepare -mcpu=corei7-avx -S < %s | FileCheck %s -check-prefix=SCALAR
5 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
6 target triple = "x86_64-unknown-linux-gnu"
9 ; KNL: kxnorw %k1, %k1, %k1
10 ; KNL: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
13 ; SCALAR: extractelement <16 x float*>
14 ; SCALAR-NEXT: load float
15 ; SCALAR-NEXT: insertelement <16 x float>
16 ; SCALAR-NEXT: extractelement <16 x float*>
17 ; SCALAR-NEXT: load float
; test1: gather of 16 floats through a splat (uniform) base pointer plus
; sign-extended i32 indices, with an all-true mask and undef passthru.
19 define <16 x float> @test1(float* %base, <16 x i32> %ind) {
; Broadcast %base into every lane of a <16 x float*> vector.
21 %broadcast.splatinsert = insertelement <16 x float*> undef, float* %base, i32 0
22 %broadcast.splat = shufflevector <16 x float*> %broadcast.splatinsert, <16 x float*> undef, <16 x i32> zeroinitializer
; Indices are sign-extended to i64 before forming the vector GEP.
24 %sext_ind = sext <16 x i32> %ind to <16 x i64>
25 %gep.random = getelementptr float, <16 x float*> %broadcast.splat, <16 x i64> %sext_ind
; Unconditional gather: every mask lane is true (alignment operand is 4).
27 %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
31 declare <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*>, i32, <16 x i1>, <16 x i32>)
32 declare <16 x float> @llvm.masked.gather.v16f32(<16 x float*>, i32, <16 x i1>, <16 x float>)
33 declare <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> , i32, <8 x i1> , <8 x i32> )
36 ; KNL: kmovw %esi, %k1
37 ; KNL: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
40 ; SCALAR: extractelement <16 x float*>
41 ; SCALAR-NEXT: load float
42 ; SCALAR-NEXT: insertelement <16 x float>
43 ; SCALAR-NEXT: br label %else
45 ; SCALAR-NEXT: %res.phi.else = phi
46 ; SCALAR-NEXT: %Mask1 = extractelement <16 x i1> %imask, i32 1
47 ; SCALAR-NEXT: %ToLoad1 = icmp eq i1 %Mask1, true
48 ; SCALAR-NEXT: br i1 %ToLoad1, label %cond.load1, label %else2
; test2: same gather shape as test1 but with a variable mask built by
; bitcasting an i16 scalar to <16 x i1>.
50 define <16 x float> @test2(float* %base, <16 x i32> %ind, i16 %mask) {
; Splat the scalar base pointer across all 16 lanes.
52 %broadcast.splatinsert = insertelement <16 x float*> undef, float* %base, i32 0
53 %broadcast.splat = shufflevector <16 x float*> %broadcast.splatinsert, <16 x float*> undef, <16 x i32> zeroinitializer
55 %sext_ind = sext <16 x i32> %ind to <16 x i64>
56 %gep.random = getelementptr float, <16 x float*> %broadcast.splat, <16 x i64> %sext_ind
; Each bit of %mask becomes one i1 mask lane.
57 %imask = bitcast i16 %mask to <16 x i1>
58 %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> %imask, <16 x float>undef)
63 ; KNL: kmovw %esi, %k1
64 ; KNL: vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k1}
; test3: i32-element variant of test2 — masked gather of 16 x i32 through a
; splat base with sign-extended indices and an i16-derived mask.
65 define <16 x i32> @test3(i32* %base, <16 x i32> %ind, i16 %mask) {
67 %broadcast.splatinsert = insertelement <16 x i32*> undef, i32* %base, i32 0
68 %broadcast.splat = shufflevector <16 x i32*> %broadcast.splatinsert, <16 x i32*> undef, <16 x i32> zeroinitializer
70 %sext_ind = sext <16 x i32> %ind to <16 x i64>
71 %gep.random = getelementptr i32, <16 x i32*> %broadcast.splat, <16 x i64> %sext_ind
72 %imask = bitcast i16 %mask to <16 x i1>
73 %res = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %gep.random, i32 4, <16 x i1> %imask, <16 x i32>undef)
78 ; KNL: kmovw %esi, %k1
; test4: two masked gathers from the same addresses; the second uses the
; first gather's result as its passthru, and the results are summed.
; Note: the GEP index here stays <16 x i32> (no explicit sext).
83 define <16 x i32> @test4(i32* %base, <16 x i32> %ind, i16 %mask) {
85 %broadcast.splatinsert = insertelement <16 x i32*> undef, i32* %base, i32 0
86 %broadcast.splat = shufflevector <16 x i32*> %broadcast.splatinsert, <16 x i32*> undef, <16 x i32> zeroinitializer
88 %gep.random = getelementptr i32, <16 x i32*> %broadcast.splat, <16 x i32> %ind
89 %imask = bitcast i16 %mask to <16 x i1>
90 %gt1 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %gep.random, i32 4, <16 x i1> %imask, <16 x i32>undef)
; Passthru of the second gather is %gt1 (masked-off lanes keep %gt1 values).
91 %gt2 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %gep.random, i32 4, <16 x i1> %imask, <16 x i32>%gt1)
92 %res = add <16 x i32> %gt1, %gt2
98 ; KNL: vpscatterdd {{.*}}%k2
99 ; KNL: vpscatterdd {{.*}}%k1
101 ; SCALAR-LABEL: test5
102 ; SCALAR: %Mask0 = extractelement <16 x i1> %imask, i32 0
103 ; SCALAR-NEXT: %ToStore0 = icmp eq i1 %Mask0, true
104 ; SCALAR-NEXT: br i1 %ToStore0, label %cond.store, label %else
105 ; SCALAR: cond.store:
106 ; SCALAR-NEXT: %Elt0 = extractelement <16 x i32> %val, i32 0
107 ; SCALAR-NEXT: %Ptr0 = extractelement <16 x i32*> %gep.random, i32 0
108 ; SCALAR-NEXT: store i32 %Elt0, i32* %Ptr0, align 4
109 ; SCALAR-NEXT: br label %else
111 ; SCALAR-NEXT: %Mask1 = extractelement <16 x i1> %imask, i32 1
112 ; SCALAR-NEXT: %ToStore1 = icmp eq i1 %Mask1, true
113 ; SCALAR-NEXT: br i1 %ToStore1, label %cond.store1, label %else2
; test5: two identical masked scatters of %val to the same addresses with the
; same mask (the KNL checks expect two vpscatterdd instructions).
115 define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
117 %broadcast.splatinsert = insertelement <16 x i32*> undef, i32* %base, i32 0
118 %broadcast.splat = shufflevector <16 x i32*> %broadcast.splatinsert, <16 x i32*> undef, <16 x i32> zeroinitializer
120 %gep.random = getelementptr i32, <16 x i32*> %broadcast.splat, <16 x i32> %ind
121 %imask = bitcast i16 %mask to <16 x i1>
; Same store repeated twice on purpose.
122 call void @llvm.masked.scatter.v16i32(<16 x i32>%val, <16 x i32*> %gep.random, i32 4, <16 x i1> %imask)
123 call void @llvm.masked.scatter.v16i32(<16 x i32>%val, <16 x i32*> %gep.random, i32 4, <16 x i1> %imask)
127 declare void @llvm.masked.scatter.v8i32(<8 x i32> , <8 x i32*> , i32 , <8 x i1> )
128 declare void @llvm.masked.scatter.v16i32(<16 x i32> , <16 x i32*> , i32 , <16 x i1> )
131 ; KNL: kxnorw %k1, %k1, %k1
132 ; KNL: kxnorw %k2, %k2, %k2
133 ; KNL: vpgatherqd (,%zmm{{.*}}), %ymm{{.*}} {%k2}
134 ; KNL: vpscatterqd %ymm{{.*}}, (,%zmm{{.*}}) {%k1}
136 ; SCALAR-LABEL: test6
137 ; SCALAR: store i32 %Elt0, i32* %Ptr01, align 4
138 ; SCALAR-NEXT: %Elt1 = extractelement <8 x i32> %a1, i32 1
139 ; SCALAR-NEXT: %Ptr12 = extractelement <8 x i32*> %ptr, i32 1
140 ; SCALAR-NEXT: store i32 %Elt1, i32* %Ptr12, align 4
141 ; SCALAR-NEXT: %Elt2 = extractelement <8 x i32> %a1, i32 2
142 ; SCALAR-NEXT: %Ptr23 = extractelement <8 x i32*> %ptr, i32 2
143 ; SCALAR-NEXT: store i32 %Elt2, i32* %Ptr23, align 4
; test6: all-true-mask gather from an arbitrary pointer vector, followed by an
; all-true-mask scatter of %a1 to the same pointers (no uniform base).
145 define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {
147 %a = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %ptr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
149 call void @llvm.masked.scatter.v8i32(<8 x i32> %a1, <8 x i32*> %ptr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
153 ; In this case the index should be promoted to <8 x i64> for KNL
155 ; KNL: vpmovsxdq %ymm0, %zmm0
156 ; KNL: kmovw %k1, %k2
157 ; KNL: vpgatherqd {{.*}} {%k2}
158 ; KNL: vpgatherqd {{.*}} {%k1}
; test7: 8-lane version of test4 (mask comes from an i8). The CHECK lines
; above expect the <8 x i32> index to be promoted via vpmovsxdq on KNL.
159 define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
161 %broadcast.splatinsert = insertelement <8 x i32*> undef, i32* %base, i32 0
162 %broadcast.splat = shufflevector <8 x i32*> %broadcast.splatinsert, <8 x i32*> undef, <8 x i32> zeroinitializer
164 %gep.random = getelementptr i32, <8 x i32*> %broadcast.splat, <8 x i32> %ind
165 %imask = bitcast i8 %mask to <8 x i1>
166 %gt1 = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.random, i32 4, <8 x i1> %imask, <8 x i32>undef)
; Second gather reuses %gt1 as passthru, as in test4.
167 %gt2 = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.random, i32 4, <8 x i1> %imask, <8 x i32>%gt1)
168 %res = add <8 x i32> %gt1, %gt2
172 ; No uniform base in this case, index <8 x i64> contains addresses,
173 ; each gather call will be split into two
175 ; KNL: kshiftrw $8, %k1, %k2
; test8: gathers directly from a <16 x i32*> pointer vector (no GEP, no
; uniform base); the comment above notes each gather is split in two on KNL.
182 define <16 x i32> @test8(<16 x i32*> %ptr.random, <16 x i32> %ind, i16 %mask) {
183 %imask = bitcast i16 %mask to <16 x i1>
184 %gt1 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %ptr.random, i32 4, <16 x i1> %imask, <16 x i32>undef)
185 %gt2 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %ptr.random, i32 4, <16 x i1> %imask, <16 x i32>%gt1)
186 %res = add <16 x i32> %gt1, %gt2
190 %struct.RT = type { i8, [10 x [20 x i32]], i8 }
191 %struct.ST = type { i32, double, %struct.RT }
193 ; Masked gather for aggregate types
194 ; Test9 and Test10 should give the same result (scalar and vector indices in GEP)
197 ; KNL: vpbroadcastq %rdi, %zmm
205 ; KNL: vpgatherqd (,%zmm
; test9: GEP into %struct.ST with every index supplied as a vector (the
; scalar struct-field indices 2/1/13 are written as splat vectors here);
; per the comment above, it should match test10's scalar-index form.
207 define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
209 %broadcast.splatinsert = insertelement <8 x %struct.ST*> undef, %struct.ST* %base, i32 0
210 %broadcast.splat = shufflevector <8 x %struct.ST*> %broadcast.splatinsert, <8 x %struct.ST*> undef, <8 x i32> zeroinitializer
; Vector GEP: all struct/array steps given as splat vectors, %ind1 and %ind5 vary per lane.
212 %arrayidx = getelementptr %struct.ST, <8 x %struct.ST*> %broadcast.splat, <8 x i64> %ind1, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>, <8 x i32><i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, <8 x i32> %ind5, <8 x i64> <i64 13, i64 13, i64 13, i64 13, i64 13, i64 13, i64 13, i64 13>
213 %res = call <8 x i32 > @llvm.masked.gather.v8i32(<8 x i32*>%arrayidx, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
218 ; KNL: vpbroadcastq %rdi, %zmm
226 ; KNL: vpgatherqd (,%zmm
; test10: same addressing as test9, but the constant struct-field indices
; are scalars (i32 2, i32 1, i64 13) mixed with vector indices %i1/%ind5.
227 define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
229 %broadcast.splatinsert = insertelement <8 x %struct.ST*> undef, %struct.ST* %base, i32 0
230 %broadcast.splat = shufflevector <8 x %struct.ST*> %broadcast.splatinsert, <8 x %struct.ST*> undef, <8 x i32> zeroinitializer
232 %arrayidx = getelementptr %struct.ST, <8 x %struct.ST*> %broadcast.splat, <8 x i64> %i1, i32 2, i32 1, <8 x i32> %ind5, i64 13
233 %res = call <8 x i32 > @llvm.masked.gather.v8i32(<8 x i32*>%arrayidx, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
237 ; Splat index in GEP, requires broadcast
239 ; KNL: vpbroadcastd %esi, %zmm
240 ; KNL: vgatherdps (%rdi,%zmm
; test11: the GEP index is a single scalar i32 (splat index), so the index
; must be broadcast (vpbroadcastd per the CHECK above); mask is all-true.
241 define <16 x float> @test11(float* %base, i32 %ind) {
243 %broadcast.splatinsert = insertelement <16 x float*> undef, float* %base, i32 0
244 %broadcast.splat = shufflevector <16 x float*> %broadcast.splatinsert, <16 x float*> undef, <16 x i32> zeroinitializer
; Scalar %ind applied to a vector-of-pointers base.
246 %gep.random = getelementptr float, <16 x float*> %broadcast.splat, i32 %ind
248 %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
252 ; We are checking the uniform base here. It is taken directly from input to vgatherdps
254 ; KNL: kxnorw %k1, %k1, %k1
255 ; KNL: vgatherdps (%rdi,%zmm
; test12: uniform base expressed directly as a scalar pointer in the GEP
; (no insertelement/shufflevector splat); base should feed vgatherdps as-is.
256 define <16 x float> @test12(float* %base, <16 x i32> %ind) {
258 %sext_ind = sext <16 x i32> %ind to <16 x i64>
; Scalar pointer + vector index yields a vector of pointers.
259 %gep.random = getelementptr float, float *%base, <16 x i64> %sext_ind
261 %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
265 ; The same as the previous, but the mask is undefined
268 ; KNL: vgatherdps (%rdi,%zmm
; test13: identical addressing to test12, but the gather mask is undef.
269 define <16 x float> @test13(float* %base, <16 x i32> %ind) {
271 %sext_ind = sext <16 x i32> %ind to <16 x i64>
272 %gep.random = getelementptr float, float *%base, <16 x i64> %sext_ind
274 %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> undef, <16 x float> undef)
278 ; The base pointer is not splat, can't find uniform base
280 ; KNL: vgatherqps (,%zmm0)
281 ; KNL: vgatherqps (,%zmm0)
; test14: %base is inserted into lane 1 of %vec (not lane 0), but the
; shuffle splats lane 0 — so the base-pointer vector is not a splat of
; %base and no uniform base can be recovered.
282 define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
; Insert at index 1; the zeroinitializer shuffle below still broadcasts lane 0.
284 %broadcast.splatinsert = insertelement <16 x float*> %vec, float* %base, i32 1
285 %broadcast.splat = shufflevector <16 x float*> %broadcast.splatinsert, <16 x float*> undef, <16 x i32> zeroinitializer
287 %gep.random = getelementptr float, <16 x float*> %broadcast.splat, i32 %ind
289 %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> undef, <16 x float> undef)
295 ; KNL: kmovw %eax, %k1
296 ; KNL: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
298 ; SCALAR-LABEL: test15
299 ; SCALAR: extractelement <16 x float*>
300 ; SCALAR-NEXT: load float
301 ; SCALAR-NEXT: insertelement <16 x float>
302 ; SCALAR-NEXT: extractelement <16 x float*>
303 ; SCALAR-NEXT: load float
; test15: gather with a constant, partially-true mask (only lanes 2, 3 and 5
; are enabled); otherwise the same splat-base addressing as test1.
305 define <16 x float> @test15(float* %base, <16 x i32> %ind) {
307 %broadcast.splatinsert = insertelement <16 x float*> undef, float* %base, i32 0
308 %broadcast.splat = shufflevector <16 x float*> %broadcast.splatinsert, <16 x float*> undef, <16 x i32> zeroinitializer
310 %sext_ind = sext <16 x i32> %ind to <16 x i64>
311 %gep.random = getelementptr float, <16 x float*> %broadcast.splat, <16 x i64> %sext_ind
313 %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i1 false, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <16 x float> undef)
317 ; Check non-power-of-2 case. It should be scalarized.
318 declare <3 x i32> @llvm.masked.gather.v3i32(<3 x i32*>, i32, <3 x i1>, <3 x i32>)
; test16: non-power-of-2 element count (<3 x i32>); per the comment above,
; this gather should be scalarized.
326 define <3 x i32> @test16(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x i32> %src0) {
327 %sext_ind = sext <3 x i32> %ind to <3 x i64>
328 %gep.random = getelementptr i32, <3 x i32*> %base, <3 x i64> %sext_ind
329 %res = call <3 x i32> @llvm.masked.gather.v3i32(<3 x i32*> %gep.random, i32 4, <3 x i1> %mask, <3 x i32> %src0)
333 declare <16 x float*> @llvm.masked.gather.v16p0f32(<16 x float**>, i32, <16 x i1>, <16 x float*>)
; test17: gather whose element type is itself a pointer (<16 x float*>
; loaded through <16 x float**>), with an all-true mask.
338 define <16 x float*> @test17(<16 x float**> %ptrs) {
340 %res = call <16 x float*> @llvm.masked.gather.v16p0f32(<16 x float**> %ptrs, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x float*> undef)
341 ret <16 x float*>%res