; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=knl < %s | FileCheck %s -check-prefix=KNL

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
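
; Gather of 16 floats from a single splatted base pointer with a constant
; all-true mask; the mask register is materialized with kxnorw.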
; KNL-LABEL: test1
; KNL: kxnorw %k1, %k1, %k1
; KNL: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
define <16 x float> @test1(float* %base, <16 x i32> %ind) {
  %broadcast.splatinsert = insertelement <16 x float*> undef, float* %base, i32 0
  %broadcast.splat = shufflevector <16 x float*> %broadcast.splatinsert, <16 x float*> undef, <16 x i32> zeroinitializer

  %sext_ind = sext <16 x i32> %ind to <16 x i64>
  %gep.random = getelementptr float, <16 x float*> %broadcast.splat, <16 x i64> %sext_ind

  %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
  ret <16 x float> %res
}

declare <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*>, i32, <16 x i1>, <16 x i32>)
declare <16 x float> @llvm.masked.gather.v16f32(<16 x float*>, i32, <16 x i1>, <16 x float>)
declare <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*>, i32, <8 x i1>, <8 x i32>)
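
; Same gather as test1, but the mask arrives as an i16 argument and is moved
; into a mask register with kmovw.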
; KNL-LABEL: test2
; KNL: kmovw %esi, %k1
; KNL: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
define <16 x float> @test2(float* %base, <16 x i32> %ind, i16 %mask) {
  %broadcast.splatinsert = insertelement <16 x float*> undef, float* %base, i32 0
  %broadcast.splat = shufflevector <16 x float*> %broadcast.splatinsert, <16 x float*> undef, <16 x i32> zeroinitializer

  %sext_ind = sext <16 x i32> %ind to <16 x i64>
  %gep.random = getelementptr float, <16 x float*> %broadcast.splat, <16 x i64> %sext_ind
  %imask = bitcast i16 %mask to <16 x i1>
  %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> %imask, <16 x float> undef)
  ret <16 x float> %res
}
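
; Same pattern as test2, for a gather of 16 x i32 (vpgatherdd).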
; KNL-LABEL: test3
; KNL: kmovw %esi, %k1
; KNL: vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k1}
define <16 x i32> @test3(i32* %base, <16 x i32> %ind, i16 %mask) {
  %broadcast.splatinsert = insertelement <16 x i32*> undef, i32* %base, i32 0
  %broadcast.splat = shufflevector <16 x i32*> %broadcast.splatinsert, <16 x i32*> undef, <16 x i32> zeroinitializer

  %sext_ind = sext <16 x i32> %ind to <16 x i64>
  %gep.random = getelementptr i32, <16 x i32*> %broadcast.splat, <16 x i64> %sext_ind
  %imask = bitcast i16 %mask to <16 x i1>
  %res = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %gep.random, i32 4, <16 x i1> %imask, <16 x i32> undef)
  ret <16 x i32> %res
}
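
; Two gathers through the same pointers; the second uses the result of the
; first as its pass-through value, and the two results are added.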
; KNL-LABEL: test4
; KNL: kmovw %esi, %k1
define <16 x i32> @test4(i32* %base, <16 x i32> %ind, i16 %mask) {
  %broadcast.splatinsert = insertelement <16 x i32*> undef, i32* %base, i32 0
  %broadcast.splat = shufflevector <16 x i32*> %broadcast.splatinsert, <16 x i32*> undef, <16 x i32> zeroinitializer

  %gep.random = getelementptr i32, <16 x i32*> %broadcast.splat, <16 x i32> %ind
  %imask = bitcast i16 %mask to <16 x i1>
  %gt1 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %gep.random, i32 4, <16 x i1> %imask, <16 x i32> undef)
  %gt2 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %gep.random, i32 4, <16 x i1> %imask, <16 x i32> %gt1)
  %res = add <16 x i32> %gt1, %gt2
  ret <16 x i32> %res
}
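
; Two identical masked scatters of the same value through the same pointers;
; both vpscatterdd instructions are expected in the output.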
; KNL-LABEL: test5
; KNL: vpscatterdd {{.*}}%k2
; KNL: vpscatterdd {{.*}}%k1
define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32> %val) {
  %broadcast.splatinsert = insertelement <16 x i32*> undef, i32* %base, i32 0
  %broadcast.splat = shufflevector <16 x i32*> %broadcast.splatinsert, <16 x i32*> undef, <16 x i32> zeroinitializer

  %gep.random = getelementptr i32, <16 x i32*> %broadcast.splat, <16 x i32> %ind
  %imask = bitcast i16 %mask to <16 x i1>
  call void @llvm.masked.scatter.v16i32(<16 x i32> %val, <16 x i32*> %gep.random, i32 4, <16 x i1> %imask)
  call void @llvm.masked.scatter.v16i32(<16 x i32> %val, <16 x i32*> %gep.random, i32 4, <16 x i1> %imask)
  ret void
}

declare void @llvm.masked.scatter.v8i32(<8 x i32>, <8 x i32*>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v16i32(<16 x i32>, <16 x i32*>, i32, <16 x i1>)
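
; Gather and scatter through an explicit vector of pointers with an all-true
; mask; the gathered value is returned and %a1 is scattered through the same
; pointers.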
; KNL-LABEL: test6
; KNL: kxnorw %k1, %k1, %k1
; KNL: kxnorw %k2, %k2, %k2
; KNL: vpgatherqd (,%zmm{{.*}}), %ymm{{.*}} {%k2}
; KNL: vpscatterqd %ymm{{.*}}, (,%zmm{{.*}}) {%k1}
define <8 x i32> @test6(<8 x i32> %a1, <8 x i32*> %ptr) {
  %a = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %ptr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)

  call void @llvm.masked.scatter.v8i32(<8 x i32> %a1, <8 x i32*> %ptr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
  ret <8 x i32> %a
}

; In this case the <8 x i32> index should be promoted to <8 x i64> for KNL.
; KNL-LABEL: test7
; KNL: vpmovsxdq %ymm0, %zmm0
; KNL: kmovw %k1, %k2
; KNL: vpgatherqd {{.*}} {%k2}
; KNL: vpgatherqd {{.*}} {%k1}
define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
  %broadcast.splatinsert = insertelement <8 x i32*> undef, i32* %base, i32 0
  %broadcast.splat = shufflevector <8 x i32*> %broadcast.splatinsert, <8 x i32*> undef, <8 x i32> zeroinitializer

  %gep.random = getelementptr i32, <8 x i32*> %broadcast.splat, <8 x i32> %ind
  %imask = bitcast i8 %mask to <8 x i1>
  %gt1 = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.random, i32 4, <8 x i1> %imask, <8 x i32> undef)
  %gt2 = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.random, i32 4, <8 x i1> %imask, <8 x i32> %gt1)
  %res = add <8 x i32> %gt1, %gt2
  ret <8 x i32> %res
}

; No uniform base in this case: the <16 x i32*> operand already holds the
; addresses, so on KNL each gather call is split into two gathers with
; <8 x i64> indices.
; KNL-LABEL: test8
; KNL: kshiftrw $8, %k1, %k2
define <16 x i32> @test8(<16 x i32*> %ptr.random, <16 x i32> %ind, i16 %mask) {
  %imask = bitcast i16 %mask to <16 x i1>
  %gt1 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %ptr.random, i32 4, <16 x i1> %imask, <16 x i32> undef)
  %gt2 = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %ptr.random, i32 4, <16 x i1> %imask, <16 x i32> %gt1)
  %res = add <16 x i32> %gt1, %gt2
  ret <16 x i32> %res
}