; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=knl < %s | FileCheck %s -check-prefix=AVX512
; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=core-avx2 < %s | FileCheck %s -check-prefix=AVX2
; RUN: opt -mtriple=x86_64-apple-darwin -codegenprepare -mcpu=corei7-avx -S < %s | FileCheck %s -check-prefix=AVX_SCALAR
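
; Lowering of the llvm.masked.load / llvm.masked.store intrinsics:
; AVX-512 (knl) selects native masked moves, AVX2 falls back to
; vmaskmov/vpmaskmov, and on plain AVX (corei7-avx) CodeGenPrepare is
; expected to scalarize the intrinsics (checked via the AVX_SCALAR prefix).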
; AVX512: vmovdqu32 (%rdi), %zmm0 {%k1} {z}

; AVX2: vpmaskmovd 32(%rdi)
; AVX2: vpmaskmovd (%rdi)

; AVX_SCALAR-LABEL: test1
; AVX_SCALAR-NOT: masked
; AVX_SCALAR: extractelement
; AVX_SCALAR: insertelement
; AVX_SCALAR: extractelement
; AVX_SCALAR: insertelement
define <16 x i32> @test1(<16 x i32> %trigger, <16 x i32>* %addr) {
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  %res = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %addr, i32 4, <16 x i1> %mask, <16 x i32> undef)
  ret <16 x i32> %res
}
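
; test2 is the same load with a zeroinitializer passthru, so the AVX-512
; zero-masked load form needs no extra blend.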
; AVX512: vmovdqu32 (%rdi), %zmm0 {%k1} {z}

; AVX2: vpmaskmovd {{.*}}(%rdi)
; AVX2: vpmaskmovd {{.*}}(%rdi)
define <16 x i32> @test2(<16 x i32> %trigger, <16 x i32>* %addr) {
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  %res = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %addr, i32 4, <16 x i1> %mask, <16 x i32> zeroinitializer)
  ret <16 x i32> %res
}

; AVX512: vmovdqu32 %zmm1, (%rdi) {%k1}

; AVX_SCALAR-LABEL: test3
; AVX_SCALAR-NOT: masked
; AVX_SCALAR: extractelement
; AVX_SCALAR: extractelement
; AVX_SCALAR: extractelement
define void @test3(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v16i32(<16 x i32> %val, <16 x i32>* %addr, i32 4, <16 x i1> %mask)
  ret void
}
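
; test4 loads <16 x float> with a real passthru value (%dst), so the AVX-512
; load merge-masks into the destination register rather than zeroing the
; inactive lanes.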
; AVX512: vmovups (%rdi), %zmm{{.*{%k[1-7]}}}

; AVX2: vmaskmovps {{.*}}(%rdi)
; AVX2: vmaskmovps {{.*}}(%rdi)
define <16 x float> @test4(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %dst) {
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  %res = call <16 x float> @llvm.masked.load.v16f32(<16 x float>* %addr, i32 4, <16 x i1> %mask, <16 x float> %dst)
  ret <16 x float> %res
}

; AVX512: vmovupd (%rdi), %zmm1 {%k1}
define <8 x double> @test5(<8 x i32> %trigger, <8 x double>* %addr, <8 x double> %dst) {
  %mask = icmp eq <8 x i32> %trigger, zeroinitializer
  %res = call <8 x double> @llvm.masked.load.v8f64(<8 x double>* %addr, i32 4, <8 x i1> %mask, <8 x double> %dst)
  ret <8 x double> %res
}
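
; The following tests repeat the pattern for 256-bit and 128-bit vector
; types; on AVX2 these map to vmaskmovps/vmaskmovpd/vpmaskmovd on ymm and
; xmm registers.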
define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double> %dst) {
  %mask = icmp eq <2 x i64> %trigger, zeroinitializer
  %res = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %addr, i32 4, <2 x i1> %mask, <2 x double> %dst)
  ret <2 x double> %res
}

; AVX2: vmaskmovps {{.*}}(%rdi)
define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %dst) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %addr, i32 4, <4 x i1> %mask, <4 x float> %dst)
  ret <4 x float> %res
}

; AVX2: vpmaskmovd {{.*}}(%rdi)
define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %addr, i32 4, <4 x i1> %mask, <4 x i32> %dst)
  ret <4 x i32> %res
}

; AVX2: vpmaskmovd %xmm
define void @test9(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v4i32(<4 x i32> %val, <4 x i32>* %addr, i32 4, <4 x i1> %mask)
  ret void
}

; AVX2: vmaskmovpd (%rdi), %ymm
define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %addr, i32 4, <4 x i1> %mask, <4 x double> %dst)
  ret <4 x double> %res
}

define <8 x float> @test11(<8 x i32> %trigger, <8 x float>* %addr, <8 x float> %dst) {
  %mask = icmp eq <8 x i32> %trigger, zeroinitializer
  %res = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %addr, i32 4, <8 x i1> %mask, <8 x float> %dst)
  ret <8 x float> %res
}

; AVX2: vpmaskmovd %ymm
define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
  %mask = icmp eq <8 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v8i32(<8 x i32> %val, <8 x i32>* %addr, i32 4, <8 x i1> %mask)
  ret void
}

; AVX512-LABEL: test13
; AVX512: vmovups %zmm1, (%rdi) {%k1}
define void @test13(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %val) {
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v16f32(<16 x float> %val, <16 x float>* %addr, i32 4, <16 x i1> %mask)
  ret void
}
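
; tests 14-18 operate on <2 x float> and <2 x i32>, which are not legal
; vector types on x86, so the value and mask have to be legalized
; (widened or promoted) before the masked operation is formed.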
; AVX2: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v2f32(<2 x float> %val, <2 x float>* %addr, i32 4, <2 x i1> %mask)
  ret void
}

define void @test15(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v2i32(<2 x i32> %val, <2 x i32>* %addr, i32 4, <2 x i1> %mask)
  ret void
}

define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %dst) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  %res = call <2 x float> @llvm.masked.load.v2f32(<2 x float>* %addr, i32 4, <2 x i1> %mask, <2 x float> %dst)
  ret <2 x float> %res
}

define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  %res = call <2 x i32> @llvm.masked.load.v2i32(<2 x i32>* %addr, i32 4, <2 x i1> %mask, <2 x i32> %dst)
  ret <2 x i32> %res
}

define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  %res = call <2 x float> @llvm.masked.load.v2f32(<2 x float>* %addr, i32 4, <2 x i1> %mask, <2 x float> undef)
  ret <2 x float> %res
}
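
; Intrinsic declarations.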
declare <16 x i32> @llvm.masked.load.v16i32(<16 x i32>*, i32, <16 x i1>, <16 x i32>)
declare <4 x i32> @llvm.masked.load.v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
declare <2 x i32> @llvm.masked.load.v2i32(<2 x i32>*, i32, <2 x i1>, <2 x i32>)
declare void @llvm.masked.store.v16i32(<16 x i32>, <16 x i32>*, i32, <16 x i1>)
declare void @llvm.masked.store.v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>)
declare void @llvm.masked.store.v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
declare void @llvm.masked.store.v2f32(<2 x float>, <2 x float>*, i32, <2 x i1>)
declare void @llvm.masked.store.v2i32(<2 x i32>, <2 x i32>*, i32, <2 x i1>)
declare void @llvm.masked.store.v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)
declare void @llvm.masked.store.v16f32p(<16 x float>*, <16 x float>**, i32, <16 x i1>)
declare <16 x float> @llvm.masked.load.v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)
declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>)
declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
declare <2 x float> @llvm.masked.load.v2f32(<2 x float>*, i32, <2 x i1>, <2 x float>)
declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
declare <4 x double> @llvm.masked.load.v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
declare void @llvm.masked.store.v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
declare void @llvm.masked.store.v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)
declare void @llvm.masked.store.v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)