; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl --show-mc-encoding| FileCheck %s
declare i32 @llvm.x86.avx512.kortestz.w(i16, i16) nounwind readnone
; CHECK-LABEL: test_kortestz
define i32 @test_kortestz(i16 %a0, i16 %a1) {
  %res = call i32 @llvm.x86.avx512.kortestz.w(i16 %a0, i16 %a1)
  ret i32 %res
}

declare i32 @llvm.x86.avx512.kortestc.w(i16, i16) nounwind readnone
; CHECK-LABEL: test_kortestc
define i32 @test_kortestc(i16 %a0, i16 %a1) {
  %res = call i32 @llvm.x86.avx512.kortestc.w(i16 %a0, i16 %a1)
  ret i32 %res
}

declare i16 @llvm.x86.avx512.kand.w(i16, i16) nounwind readnone
; CHECK-LABEL: test_kand
define i16 @test_kand(i16 %a0, i16 %a1) {
  %t1 = call i16 @llvm.x86.avx512.kand.w(i16 %a0, i16 8)
  %t2 = call i16 @llvm.x86.avx512.kand.w(i16 %t1, i16 %a1)
  ret i16 %t2
}

declare i16 @llvm.x86.avx512.knot.w(i16) nounwind readnone
; CHECK-LABEL: test_knot
define i16 @test_knot(i16 %a0) {
  %res = call i16 @llvm.x86.avx512.knot.w(i16 %a0)
  ret i16 %res
}

declare i16 @llvm.x86.avx512.kunpck.bw(i16, i16) nounwind readnone
; CHECK-LABEL: unpckbw_test
define i16 @unpckbw_test(i16 %a0, i16 %a1) {
  %res = call i16 @llvm.x86.avx512.kunpck.bw(i16 %a0, i16 %a1)
  ret i16 %res
}
define <16 x float> @test_rcp_ps_512(<16 x float> %a0) {
  ; CHECK: vrcp14ps {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x4c,0xc0]
  %res = call <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float>, <16 x float>, i16) nounwind readnone

define <8 x double> @test_rcp_pd_512(<8 x double> %a0) {
  ; CHECK: vrcp14pd {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x4c,0xc0]
  %res = call <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1) ; <<8 x double>> [#uses=1]
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double>, <8 x double>, i8) nounwind readnone

declare <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double>, i32, <8 x double>, i8, i32)

define <8 x double> @test7(<8 x double> %a) {
  ; CHECK: vrndscalepd {{.*}}encoding: [0x62,0xf3,0xfd,0x48,0x09,0xc0,0x0b]
  %res = call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double> %a, i32 11, <8 x double> %a, i8 -1, i32 4)
  ret <8 x double> %res
}

declare <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float>, i32, <16 x float>, i16, i32)

define <16 x float> @test8(<16 x float> %a) {
  ; CHECK: vrndscaleps {{.*}}encoding: [0x62,0xf3,0x7d,0x48,0x08,0xc0,0x0b]
  %res = call <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float> %a, i32 11, <16 x float> %a, i16 -1, i32 4)
  ret <16 x float> %res
}

define <16 x float> @test_rsqrt_ps_512(<16 x float> %a0) {
  ; CHECK: vrsqrt14ps {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x4e,0xc0]
  %res = call <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float>, <16 x float>, i16) nounwind readnone

define <4 x float> @test_rsqrt14_ss(<4 x float> %a0) {
  ; CHECK: vrsqrt14ss {{.*}}encoding: [0x62,0xf2,0x7d,0x08,0x4f,0xc0]
  %res = call <4 x float> @llvm.x86.avx512.rsqrt14.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1) ; <<4 x float>> [#uses=1]
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx512.rsqrt14.ss(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone

define <4 x float> @test_rcp14_ss(<4 x float> %a0) {
  ; CHECK: vrcp14ss {{.*}}encoding: [0x62,0xf2,0x7d,0x08,0x4d,0xc0]
  %res = call <4 x float> @llvm.x86.avx512.rcp14.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1) ; <<4 x float>> [#uses=1]
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx512.rcp14.ss(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
define <8 x double> @test_sqrt_pd_512(<8 x double> %a0) {
  ; CHECK-LABEL: test_sqrt_pd_512
  %res = call <8 x double> @llvm.x86.avx512.mask.sqrt.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 4)
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.mask.sqrt.pd.512(<8 x double>, <8 x double>, i8, i32) nounwind readnone

define <16 x float> @test_sqrt_ps_512(<16 x float> %a0) {
  ; CHECK-LABEL: test_sqrt_ps_512
  %res = call <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
  ret <16 x float> %res
}

define <16 x float> @test_sqrt_round_ps_512(<16 x float> %a0) {
  ; CHECK-LABEL: test_sqrt_round_ps_512
  ; CHECK: vsqrtps {rz-sae}
  %res = call <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 3)
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float>, <16 x float>, i16, i32) nounwind readnone
define <8 x double> @test_getexp_pd_512(<8 x double> %a0) {
  ; CHECK-LABEL: test_getexp_pd_512
  %res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 4)
  ret <8 x double> %res
}

define <8 x double> @test_getexp_round_pd_512(<8 x double> %a0) {
  ; CHECK-LABEL: test_getexp_round_pd_512
  ; CHECK: vgetexppd {sae}
  %res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8)
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double>, <8 x double>, i8, i32) nounwind readnone

define <16 x float> @test_getexp_ps_512(<16 x float> %a0) {
  ; CHECK-LABEL: test_getexp_ps_512
  %res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
  ret <16 x float> %res
}

define <16 x float> @test_getexp_round_ps_512(<16 x float> %a0) {
  ; CHECK-LABEL: test_getexp_round_ps_512
  ; CHECK: vgetexpps {sae}
  %res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float>, <16 x float>, i16, i32) nounwind readnone
define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1) {
  ; CHECK: vsqrtss {{.*}}encoding: [0x62
  %res = call <4 x float> @llvm.x86.avx512.sqrt.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx512.sqrt.ss(<4 x float>, <4 x float>) nounwind readnone

define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1) {
  ; CHECK: vsqrtsd {{.*}}encoding: [0x62
  %res = call <2 x double> @llvm.x86.avx512.sqrt.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.avx512.sqrt.sd(<2 x double>, <2 x double>) nounwind readnone
define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
  ; CHECK: vcvtsd2si {{.*}}encoding: [0x62
  %res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
  ret i64 %res
}
declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone

define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
  ; CHECK: vcvtsi2sdq {{.*}}encoding: [0x62
  %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readnone

define <2 x double> @test_x86_avx512_cvtusi642sd(<2 x double> %a0, i64 %a1) {
  ; CHECK: vcvtusi2sdq {{.*}}encoding: [0x62
  %res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double>, i64) nounwind readnone

define i64 @test_x86_sse2_cvttsd2si64(<2 x double> %a0) {
  ; CHECK: vcvttsd2si {{.*}}encoding: [0x62
  %res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
  ret i64 %res
}
declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone

define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
  ; CHECK: vcvtss2si {{.*}}encoding: [0x62
  %res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
  ret i64 %res
}
declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone

define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
  ; CHECK: vcvtsi2ssq {{.*}}encoding: [0x62
  %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone

define i64 @test_x86_sse_cvttss2si64(<4 x float> %a0) {
  ; CHECK: vcvttss2si {{.*}}encoding: [0x62
  %res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0) ; <i64> [#uses=1]
  ret i64 %res
}
declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone

define i64 @test_x86_avx512_cvtsd2usi64(<2 x double> %a0) {
  ; CHECK: vcvtsd2usi {{.*}}encoding: [0x62
  %res = call i64 @llvm.x86.avx512.cvtsd2usi64(<2 x double> %a0) ; <i64> [#uses=1]
  ret i64 %res
}
declare i64 @llvm.x86.avx512.cvtsd2usi64(<2 x double>) nounwind readnone
define <16 x float> @test_x86_vcvtph2ps_512(<16 x i16> %a0) {
  ; CHECK: vcvtph2ps %ymm0, %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x13,0xc0]
  %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16>, <16 x float>, i16, i32) nounwind readonly

define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0) {
  ; CHECK: vcvtps2ph $2, %zmm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x48,0x1d,0xc0,0x02]
  %res = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 -1)
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float>, i32, <16 x i16>, i16) nounwind readonly
define <16 x float> @test_x86_vbroadcast_ss_512(i8* %a0) {
  ; CHECK: vbroadcastss
  %res = call <16 x float> @llvm.x86.avx512.vbroadcast.ss.512(i8* %a0) ; <<16 x float>> [#uses=1]
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.vbroadcast.ss.512(i8*) nounwind readonly

define <8 x double> @test_x86_vbroadcast_sd_512(i8* %a0) {
  ; CHECK: vbroadcastsd
  %res = call <8 x double> @llvm.x86.avx512.vbroadcast.sd.512(i8* %a0) ; <<8 x double>> [#uses=1]
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.vbroadcast.sd.512(i8*) nounwind readonly

define <16 x float> @test_x86_vbroadcast_ss_ps_512(<4 x float> %a0) {
  ; CHECK: vbroadcastss
  %res = call <16 x float> @llvm.x86.avx512.vbroadcast.ss.ps.512(<4 x float> %a0) ; <<16 x float>> [#uses=1]
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.vbroadcast.ss.ps.512(<4 x float>) nounwind readonly

define <8 x double> @test_x86_vbroadcast_sd_pd_512(<2 x double> %a0) {
  ; CHECK: vbroadcastsd
  %res = call <8 x double> @llvm.x86.avx512.vbroadcast.sd.pd.512(<2 x double> %a0) ; <<8 x double>> [#uses=1]
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.vbroadcast.sd.pd.512(<2 x double>) nounwind readonly
define <16 x i32> @test_x86_pbroadcastd_512(<4 x i32> %a0) {
  ; CHECK: vpbroadcastd
  %res = call <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32> %a0) ; <<16 x i32>> [#uses=1]
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32>) nounwind readonly

define <16 x i32> @test_x86_pbroadcastd_i32_512(i32 %a0) {
  ; CHECK: vpbroadcastd
  %res = call <16 x i32> @llvm.x86.avx512.pbroadcastd.i32.512(i32 %a0) ; <<16 x i32>> [#uses=1]
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.pbroadcastd.i32.512(i32) nounwind readonly

define <8 x i64> @test_x86_pbroadcastq_512(<2 x i64> %a0) {
  ; CHECK: vpbroadcastq
  %res = call <8 x i64> @llvm.x86.avx512.pbroadcastq.512(<2 x i64> %a0) ; <<8 x i64>> [#uses=1]
  ret <8 x i64> %res
}
declare <8 x i64> @llvm.x86.avx512.pbroadcastq.512(<2 x i64>) nounwind readonly

define <8 x i64> @test_x86_pbroadcastq_i64_512(i64 %a0) {
  ; CHECK: vpbroadcastq
  %res = call <8 x i64> @llvm.x86.avx512.pbroadcastq.i64.512(i64 %a0) ; <<8 x i64>> [#uses=1]
  ret <8 x i64> %res
}
declare <8 x i64> @llvm.x86.avx512.pbroadcastq.i64.512(i64) nounwind readonly
define <16 x i32> @test_conflict_d(<16 x i32> %a) {
  ; CHECK: movw $-1, %ax
  %res = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly

define <8 x i64> @test_conflict_q(<8 x i64> %a) {
  ; CHECK: movb $-1, %al
  %res = call <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}
declare <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly

define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
  %res = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

define <8 x i64> @test_mask_conflict_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
  %res = call <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
  ret <8 x i64> %res
}
define <16 x i32> @test_lzcnt_d(<16 x i32> %a) {
  ; CHECK: movw $-1, %ax
  %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly

define <8 x i64> @test_lzcnt_q(<8 x i64> %a) {
  ; CHECK: movb $-1, %al
  %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}
declare <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly

define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
  %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
  ret <16 x i32> %res
}

define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
  %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
  ret <8 x i64> %res
}

define <16 x i32> @test_ctlz_d(<16 x i32> %a) {
  ; CHECK-LABEL: test_ctlz_d
  %res = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 false)
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1) nounwind readonly

define <8 x i64> @test_ctlz_q(<8 x i64> %a) {
  ; CHECK-LABEL: test_ctlz_q
  %res = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 false)
  ret <8 x i64> %res
}
declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1) nounwind readonly
define <16 x float> @test_x86_mask_blend_ps_512(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
  ; CHECK: vblendmps %zmm1, %zmm0
  %res = call <16 x float> @llvm.x86.avx512.mask.blend.ps.512(<16 x float> %a1, <16 x float> %a2, i16 %a0) ; <<16 x float>> [#uses=1]
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.mask.blend.ps.512(<16 x float>, <16 x float>, i16) nounwind readonly

define <8 x double> @test_x86_mask_blend_pd_512(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
  ; CHECK: vblendmpd %zmm1, %zmm0
  %res = call <8 x double> @llvm.x86.avx512.mask.blend.pd.512(<8 x double> %a1, <8 x double> %a2, i8 %a0) ; <<8 x double>> [#uses=1]
  ret <8 x double> %res
}

define <8 x double> @test_x86_mask_blend_pd_512_memop(<8 x double> %a, <8 x double>* %ptr, i8 %mask) {
  ; CHECK-LABEL: test_x86_mask_blend_pd_512_memop
  ; CHECK: vblendmpd (%
  %b = load <8 x double>, <8 x double>* %ptr
  %res = call <8 x double> @llvm.x86.avx512.mask.blend.pd.512(<8 x double> %a, <8 x double> %b, i8 %mask) ; <<8 x double>> [#uses=1]
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.mask.blend.pd.512(<8 x double>, <8 x double>, i8) nounwind readonly

define <16 x i32> @test_x86_mask_blend_d_512(i16 %a0, <16 x i32> %a1, <16 x i32> %a2) {
  %res = call <16 x i32> @llvm.x86.avx512.mask.blend.d.512(<16 x i32> %a1, <16 x i32> %a2, i16 %a0) ; <<16 x i32>> [#uses=1]
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.mask.blend.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly

define <8 x i64> @test_x86_mask_blend_q_512(i8 %a0, <8 x i64> %a1, <8 x i64> %a2) {
  %res = call <8 x i64> @llvm.x86.avx512.mask.blend.q.512(<8 x i64> %a1, <8 x i64> %a2, i8 %a0) ; <<8 x i64>> [#uses=1]
  ret <8 x i64> %res
}
declare <8 x i64> @llvm.x86.avx512.mask.blend.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
define <8 x i32> @test_cvtpd2udq(<8 x double> %a) {
  ;CHECK: vcvtpd2udq {ru-sae}{{.*}}encoding: [0x62,0xf1,0xfc,0x58,0x79,0xc0]
  %res = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double> %a, <8 x i32>zeroinitializer, i8 -1, i32 2)
  ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double>, <8 x i32>, i8, i32)

define <16 x i32> @test_cvtps2udq(<16 x float> %a) {
  ;CHECK: vcvtps2udq {rd-sae}{{.*}}encoding: [0x62,0xf1,0x7c,0x38,0x79,0xc0]
  %res = call <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float> %a, <16 x i32>zeroinitializer, i16 -1, i32 1)
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float>, <16 x i32>, i16, i32)

define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) {
  ;CHECK: vcmpleps {sae}{{.*}}encoding: [0x62,0xf1,0x7c,0x18,0xc2,0xc1,0x02]
  %res = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i16 -1, i32 8)
  ret i16 %res
}
declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> , <16 x float> , i32, i16, i32)

define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) {
  ;CHECK: vcmpneqpd %zmm{{.*}}encoding: [0x62,0xf1,0xfd,0x48,0xc2,0xc1,0x04]
  %res = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i8 -1, i32 4)
  ret i8 %res
}
declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> , <8 x double> , i32, i8, i32)

define <16 x float> @test_cvtdq2ps(<16 x i32> %a) {
  ;CHECK: vcvtdq2ps {rd-sae}{{.*}}encoding: [0x62,0xf1,0x7c,0x38,0x5b,0xc0]
  %res = call <16 x float> @llvm.x86.avx512.mask.cvtdq2ps.512(<16 x i32> %a, <16 x float>zeroinitializer, i16 -1, i32 1)
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.mask.cvtdq2ps.512(<16 x i32>, <16 x float>, i16, i32)

define <16 x float> @test_cvtudq2ps(<16 x i32> %a) {
  ;CHECK: vcvtudq2ps {rd-sae}{{.*}}encoding: [0x62,0xf1,0x7f,0x38,0x7a,0xc0]
  %res = call <16 x float> @llvm.x86.avx512.mask.cvtudq2ps.512(<16 x i32> %a, <16 x float>zeroinitializer, i16 -1, i32 1)
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.mask.cvtudq2ps.512(<16 x i32>, <16 x float>, i16, i32)

define <8 x double> @test_cvtdq2pd(<8 x i32> %a) {
  ;CHECK: vcvtdq2pd {{.*}}encoding: [0x62,0xf1,0x7e,0x48,0xe6,0xc0]
  %res = call <8 x double> @llvm.x86.avx512.mask.cvtdq2pd.512(<8 x i32> %a, <8 x double>zeroinitializer, i8 -1)
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.mask.cvtdq2pd.512(<8 x i32>, <8 x double>, i8)

define <8 x double> @test_cvtudq2pd(<8 x i32> %a) {
  ;CHECK: vcvtudq2pd {{.*}}encoding: [0x62,0xf1,0x7e,0x48,0x7a,0xc0]
  %res = call <8 x double> @llvm.x86.avx512.mask.cvtudq2pd.512(<8 x i32> %a, <8 x double>zeroinitializer, i8 -1)
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.mask.cvtudq2pd.512(<8 x i32>, <8 x double>, i8)
define <8 x double> @test_vmaxpd(<8 x double> %a0, <8 x double> %a1) {
  %res = call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %a0, <8 x double> %a1,
                    <8 x double>zeroinitializer, i8 -1, i32 4)
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double>, <8 x double>,
                    <8 x double>, i8, i32)

define <8 x double> @test_vminpd(<8 x double> %a0, <8 x double> %a1) {
  %res = call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %a0, <8 x double> %a1,
                    <8 x double>zeroinitializer, i8 -1, i32 4)
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double>, <8 x double>,
                    <8 x double>, i8, i32)

define <8 x float> @test_cvtpd2ps(<8 x double> %a) {
  ;CHECK: vcvtpd2ps {rd-sae}{{.*}}encoding: [0x62,0xf1,0xfd,0x38,0x5a,0xc0]
  %res = call <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double> %a, <8 x float>zeroinitializer, i8 -1, i32 1)
  ret <8 x float> %res
}
declare <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double>, <8 x float>, i8, i32)
define <16 x i32> @test_pabsd(<16 x i32> %a) {
  ;CHECK: vpabsd {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x1e,0xc0]
  %res = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a, <16 x i32>zeroinitializer, i16 -1)
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32>, <16 x i32>, i16)

define <8 x i64> @test_pabsq(<8 x i64> %a) {
  ;CHECK: vpabsq {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x1f,0xc0]
  %res = call <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64> %a, <8 x i64>zeroinitializer, i8 -1)
  ret <8 x i64> %res
}
declare <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64>, <8 x i64>, i8)

define <8 x i64> @test_vpmaxq(<8 x i64> %a0, <8 x i64> %a1) {
  ; CHECK: vpmaxsq {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x3d,0xc1]
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64> %a0, <8 x i64> %a1,
                    <8 x i64>zeroinitializer, i8 -1)
  ret <8 x i64> %res
}
declare <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)

define <16 x i32> @test_vpminud(<16 x i32> %a0, <16 x i32> %a1) {
  ; CHECK: vpminud {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x3b,0xc1]
  %res = call <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32> %a0, <16 x i32> %a1,
                    <16 x i32>zeroinitializer, i16 -1)
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <16 x i32> @test_vpmaxsd(<16 x i32> %a0, <16 x i32> %a1) {
  ; CHECK: vpmaxsd {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x3d,0xc1]
  %res = call <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32> %a0, <16 x i32> %a1,
                    <16 x i32>zeroinitializer, i16 -1)
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
define i8 @test_vptestmq(<8 x i64> %a0, <8 x i64> %a1) {
  ; CHECK: vptestmq {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x27,0xc1]
  %res = call i8 @llvm.x86.avx512.mask.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 -1)
  ret i8 %res
}
declare i8 @llvm.x86.avx512.mask.ptestm.q.512(<8 x i64>, <8 x i64>, i8)

define i16 @test_vptestmd(<16 x i32> %a0, <16 x i32> %a1) {
  ; CHECK: vptestmd {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x27,0xc1]
  %res = call i16 @llvm.x86.avx512.mask.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 -1)
  ret i16 %res
}
declare i16 @llvm.x86.avx512.mask.ptestm.d.512(<16 x i32>, <16 x i32>, i16)
define void @test_store1(<16 x float> %data, i8* %ptr, i16 %mask) {
  ; CHECK: vmovups {{.*}}encoding: [0x62,0xf1,0x7c,0x49,0x11,0x07]
  call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
  ret void
}
declare void @llvm.x86.avx512.mask.storeu.ps.512(i8*, <16 x float>, i16 )

define void @test_store2(<8 x double> %data, i8* %ptr, i8 %mask) {
  ; CHECK: vmovupd {{.*}}encoding: [0x62,0xf1,0xfd,0x49,0x11,0x07]
  call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
  ret void
}
declare void @llvm.x86.avx512.mask.storeu.pd.512(i8*, <8 x double>, i8)

define void @test_mask_store_aligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
  ; CHECK-LABEL: test_mask_store_aligned_ps:
  ; CHECK-NEXT: kmovw %esi, %k1
  ; CHECK-NEXT: vmovaps %zmm0, (%rdi) {%k1}
  call void @llvm.x86.avx512.mask.store.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
  ret void
}
declare void @llvm.x86.avx512.mask.store.ps.512(i8*, <16 x float>, i16 )

define void @test_mask_store_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
  ; CHECK-LABEL: test_mask_store_aligned_pd:
  ; CHECK-NEXT: kmovw %esi, %k1
  ; CHECK-NEXT: vmovapd %zmm0, (%rdi) {%k1}
  call void @llvm.x86.avx512.mask.store.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
  ret void
}
declare void @llvm.x86.avx512.mask.store.pd.512(i8*, <8 x double>, i8)
define <16 x float> @test_maskz_load_aligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
  ; CHECK-LABEL: test_maskz_load_aligned_ps:
  ; CHECK-NEXT: kmovw %esi, %k1
  ; CHECK-NEXT: vmovaps (%rdi), %zmm0 {%k1} {z}
  %res = call <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8* %ptr, <16 x float> zeroinitializer, i16 %mask)
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8*, <16 x float>, i16)

define <8 x double> @test_maskz_load_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
  ; CHECK-LABEL: test_maskz_load_aligned_pd:
  ; CHECK-NEXT: kmovw %esi, %k1
  ; CHECK-NEXT: vmovapd (%rdi), %zmm0 {%k1} {z}
  %res = call <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8* %ptr, <8 x double> zeroinitializer, i8 %mask)
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8*, <8 x double>, i8)

define <16 x float> @test_load_aligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
  ; CHECK-LABEL: test_load_aligned_ps:
  ; CHECK-NEXT: vmovaps (%rdi), %zmm0
  %res = call <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8* %ptr, <16 x float> zeroinitializer, i16 -1)
  ret <16 x float> %res
}

define <8 x double> @test_load_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
  ; CHECK-LABEL: test_load_aligned_pd:
  ; CHECK-NEXT: vmovapd (%rdi), %zmm0
  %res = call <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8* %ptr, <8 x double> zeroinitializer, i8 -1)
  ret <8 x double> %res
}
define <16 x float> @test_vpermt2ps(<16 x float>%x, <16 x float>%y, <16 x i32>%perm) {
  ; CHECK: vpermt2ps {{.*}}encoding: [0x62,0xf2,0x6d,0x48,0x7f,0xc1]
  %res = call <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>%perm, <16 x float>%x, <16 x float>%y, i16 -1)
  ret <16 x float> %res
}

define <16 x float> @test_vpermt2ps_mask(<16 x float>%x, <16 x float>%y, <16 x i32>%perm, i16 %mask) {
  ; CHECK-LABEL: test_vpermt2ps_mask:
  ; CHECK: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x7f,0xc1]
  %res = call <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>%perm, <16 x float>%x, <16 x float>%y, i16 %mask)
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>, <16 x float>, <16 x float>, i16)
define <8 x i64> @test_vmovntdqa(i8 *%x) {
  ; CHECK-LABEL: test_vmovntdqa:
  ; CHECK: vmovntdqa (%rdi), %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x2a,0x07]
  %res = call <8 x i64> @llvm.x86.avx512.movntdqa(i8* %x)
  ret <8 x i64> %res
}
declare <8 x i64> @llvm.x86.avx512.movntdqa(i8*)
define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) {
  ; CHECK-LABEL: test_valign_q:
  ; CHECK: valignq $2, %zmm1, %zmm0, %zmm0
  %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i8 2, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_mask_valign_q(<8 x i64> %a, <8 x i64> %b, <8 x i64> %src, i8 %mask) {
  ; CHECK-LABEL: test_mask_valign_q:
  ; CHECK: valignq $2, %zmm1, %zmm0, %zmm2 {%k1}
  %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i8 2, <8 x i64> %src, i8 %mask)
  ret <8 x i64> %res
}
declare <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64>, <8 x i64>, i8, <8 x i64>, i8)

define <16 x i32> @test_maskz_valign_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
  ; CHECK-LABEL: test_maskz_valign_d:
  ; CHECK: valignd $5, %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xc9,0x03,0xc1,0x05]
  %res = call <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32> %a, <16 x i32> %b, i8 5, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32>, <16 x i32>, i8, <16 x i32>, i16)
define void @test_mask_store_ss(i8* %ptr, <4 x float> %data, i8 %mask) {
  ; CHECK-LABEL: test_mask_store_ss
  ; CHECK: vmovss %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x11,0x07]
  call void @llvm.x86.avx512.mask.store.ss(i8* %ptr, <4 x float> %data, i8 %mask)
  ret void
}
declare void @llvm.x86.avx512.mask.store.ss(i8*, <4 x float>, i8 )
define i16 @test_pcmpeq_d(<16 x i32> %a, <16 x i32> %b) {
  ; CHECK-LABEL: test_pcmpeq_d
  ; CHECK: vpcmpeqd %zmm1, %zmm0, %k0 ##
  %res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
  ret i16 %res
}

define i16 @test_mask_pcmpeq_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
  ; CHECK-LABEL: test_mask_pcmpeq_d
  ; CHECK: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ##
  %res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
  ret i16 %res
}
declare i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32>, <16 x i32>, i16)

define i8 @test_pcmpeq_q(<8 x i64> %a, <8 x i64> %b) {
  ; CHECK-LABEL: test_pcmpeq_q
  ; CHECK: vpcmpeqq %zmm1, %zmm0, %k0 ##
  %res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
  ret i8 %res
}

define i8 @test_mask_pcmpeq_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
  ; CHECK-LABEL: test_mask_pcmpeq_q
  ; CHECK: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ##
  %res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
  ret i8 %res
}
declare i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64>, <8 x i64>, i8)

define i16 @test_pcmpgt_d(<16 x i32> %a, <16 x i32> %b) {
  ; CHECK-LABEL: test_pcmpgt_d
  ; CHECK: vpcmpgtd %zmm1, %zmm0, %k0 ##
  %res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
  ret i16 %res
}

define i16 @test_mask_pcmpgt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
  ; CHECK-LABEL: test_mask_pcmpgt_d
  ; CHECK: vpcmpgtd %zmm1, %zmm0, %k0 {%k1} ##
  %res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
  ret i16 %res
}
declare i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32>, <16 x i32>, i16)

define i8 @test_pcmpgt_q(<8 x i64> %a, <8 x i64> %b) {
  ; CHECK-LABEL: test_pcmpgt_q
  ; CHECK: vpcmpgtq %zmm1, %zmm0, %k0 ##
  %res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
  ret i8 %res
}

define i8 @test_mask_pcmpgt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
  ; CHECK-LABEL: test_mask_pcmpgt_q
  ; CHECK: vpcmpgtq %zmm1, %zmm0, %k0 {%k1} ##
  %res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
  ret i8 %res
}
declare i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64>, <8 x i64>, i8)
define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
  ; CHECK-LABEL: test_cmp_d_512
  ; CHECK: vpcmpeqd %zmm1, %zmm0, %k0 ##
  %res0 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 -1)
  %vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
  ; CHECK: vpcmpltd %zmm1, %zmm0, %k0 ##
  %res1 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 -1)
  %vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
  ; CHECK: vpcmpled %zmm1, %zmm0, %k0 ##
  %res2 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 -1)
  %vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
  ; CHECK: vpcmpunordd %zmm1, %zmm0, %k0 ##
  %res3 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 -1)
  %vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
  ; CHECK: vpcmpneqd %zmm1, %zmm0, %k0 ##
  %res4 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 -1)
  %vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
  ; CHECK: vpcmpnltd %zmm1, %zmm0, %k0 ##
  %res5 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 -1)
  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
  ; CHECK: vpcmpnled %zmm1, %zmm0, %k0 ##
  %res6 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 -1)
  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
  ; CHECK: vpcmpordd %zmm1, %zmm0, %k0 ##
  %res7 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 -1)
  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
  ret <8 x i16> %vec7
}
; Same signed-dword predicate sweep as test_cmp_d_512, but under a variable
; write mask — every compare is expected to carry {%k1}.
; NOTE: was "CHECK_LABEL" (underscore) — an inert directive FileCheck ignores.
define <8 x i16> @test_mask_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_d_512
; CHECK: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ##
  %res0 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 %mask)
  %vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltd %zmm1, %zmm0, %k0 {%k1} ##
  %res1 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 %mask)
  %vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpled %zmm1, %zmm0, %k0 {%k1} ##
  %res2 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 %mask)
  %vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordd %zmm1, %zmm0, %k0 {%k1} ##
  %res3 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 %mask)
  %vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpneqd %zmm1, %zmm0, %k0 {%k1} ##
  %res4 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 %mask)
  %vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltd %zmm1, %zmm0, %k0 {%k1} ##
  %res5 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 %mask)
  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnled %zmm1, %zmm0, %k0 {%k1} ##
  %res6 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 %mask)
  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordd %zmm1, %zmm0, %k0 {%k1} ##
  %res7 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 %mask)
  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
813 declare i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32>, <16 x i32>, i32, i16) nounwind readnone
; Unsigned dword compare: same eight-predicate sweep, expecting the
; unsigned mnemonics (vpcmp...ud) with an all-ones mask.
; NOTE: was "CHECK_LABEL" (underscore) — an inert directive FileCheck ignores.
define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_512
; CHECK: vpcmpequd %zmm1, %zmm0, %k0 ##
  %res0 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 -1)
  %vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltud %zmm1, %zmm0, %k0 ##
  %res1 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 -1)
  %vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpleud %zmm1, %zmm0, %k0 ##
  %res2 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 -1)
  %vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordud %zmm1, %zmm0, %k0 ##
  %res3 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 -1)
  %vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpnequd %zmm1, %zmm0, %k0 ##
  %res4 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 -1)
  %vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltud %zmm1, %zmm0, %k0 ##
  %res5 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 -1)
  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnleud %zmm1, %zmm0, %k0 ##
  %res6 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 -1)
  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordud %zmm1, %zmm0, %k0 ##
  %res7 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 -1)
  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
; Unsigned dword compare under a variable write mask: every unsigned
; predicate is expected to carry {%k1}.
; NOTE: was "CHECK_LABEL" (underscore) — an inert directive FileCheck ignores.
define <8 x i16> @test_mask_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_512
; CHECK: vpcmpequd %zmm1, %zmm0, %k0 {%k1} ##
  %res0 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 %mask)
  %vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ##
  %res1 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 %mask)
  %vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpleud %zmm1, %zmm0, %k0 {%k1} ##
  %res2 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 %mask)
  %vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordud %zmm1, %zmm0, %k0 {%k1} ##
  %res3 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 %mask)
  %vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpnequd %zmm1, %zmm0, %k0 {%k1} ##
  %res4 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 %mask)
  %vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltud %zmm1, %zmm0, %k0 {%k1} ##
  %res5 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 %mask)
  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnleud %zmm1, %zmm0, %k0 {%k1} ##
  %res6 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 %mask)
  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordud %zmm1, %zmm0, %k0 {%k1} ##
  %res7 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 %mask)
  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
873 declare i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32>, <16 x i32>, i32, i16) nounwind readnone
; Signed qword compare: eight-predicate sweep with an all-ones i8 mask,
; packing each i8 result into a lane of the returned vector.
; NOTE: was "CHECK_LABEL" (underscore) — an inert directive FileCheck ignores.
define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_512
; CHECK: vpcmpeqq %zmm1, %zmm0, %k0 ##
  %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 -1)
  %vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltq %zmm1, %zmm0, %k0 ##
  %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 -1)
  %vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleq %zmm1, %zmm0, %k0 ##
  %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 -1)
  %vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordq %zmm1, %zmm0, %k0 ##
  %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 -1)
  %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqq %zmm1, %zmm0, %k0 ##
  %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 -1)
  %vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltq %zmm1, %zmm0, %k0 ##
  %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 -1)
  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleq %zmm1, %zmm0, %k0 ##
  %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 -1)
  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordq %zmm1, %zmm0, %k0 ##
  %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 -1)
  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
; Signed qword compare under a variable write mask: every compare is
; expected to carry {%k1}.
; NOTE: was "CHECK_LABEL" (underscore) — an inert directive FileCheck ignores.
define <8 x i8> @test_mask_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_512
; CHECK: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ##
  %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 %mask)
  %vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltq %zmm1, %zmm0, %k0 {%k1} ##
  %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 %mask)
  %vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleq %zmm1, %zmm0, %k0 {%k1} ##
  %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 %mask)
  %vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordq %zmm1, %zmm0, %k0 {%k1} ##
  %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 %mask)
  %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqq %zmm1, %zmm0, %k0 {%k1} ##
  %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 %mask)
  %vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltq %zmm1, %zmm0, %k0 {%k1} ##
  %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 %mask)
  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleq %zmm1, %zmm0, %k0 {%k1} ##
  %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 %mask)
  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordq %zmm1, %zmm0, %k0 {%k1} ##
  %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 %mask)
  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
933 declare i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64>, <8 x i64>, i32, i8) nounwind readnone
; Unsigned qword compare: eight-predicate sweep expecting the unsigned
; mnemonics (vpcmp...uq) with an all-ones i8 mask.
; NOTE: was "CHECK_LABEL" (underscore) — an inert directive FileCheck ignores.
define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_512
; CHECK: vpcmpequq %zmm1, %zmm0, %k0 ##
  %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 -1)
  %vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltuq %zmm1, %zmm0, %k0 ##
  %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 -1)
  %vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleuq %zmm1, %zmm0, %k0 ##
  %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 -1)
  %vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunorduq %zmm1, %zmm0, %k0 ##
  %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 -1)
  %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequq %zmm1, %zmm0, %k0 ##
  %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 -1)
  %vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltuq %zmm1, %zmm0, %k0 ##
  %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 -1)
  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleuq %zmm1, %zmm0, %k0 ##
  %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 -1)
  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmporduq %zmm1, %zmm0, %k0 ##
  %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 -1)
  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
; Unsigned qword compare under a variable write mask: every unsigned
; predicate is expected to carry {%k1}.
; NOTE: was "CHECK_LABEL" (underscore) — an inert directive FileCheck ignores.
define <8 x i8> @test_mask_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_512
; CHECK: vpcmpequq %zmm1, %zmm0, %k0 {%k1} ##
  %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 %mask)
  %vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ##
  %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 %mask)
  %vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleuq %zmm1, %zmm0, %k0 {%k1} ##
  %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 %mask)
  %vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunorduq %zmm1, %zmm0, %k0 {%k1} ##
  %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 %mask)
  %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequq %zmm1, %zmm0, %k0 {%k1} ##
  %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 %mask)
  %vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltuq %zmm1, %zmm0, %k0 {%k1} ##
  %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 %mask)
  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleuq %zmm1, %zmm0, %k0 {%k1} ##
  %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 %mask)
  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmporduq %zmm1, %zmm0, %k0 {%k1} ##
  %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 %mask)
  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
993 declare i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64>, <8 x i64>, i32, i8) nounwind readnone
; Merge-masked 128-bit float extract (immediate 2); %b is the passthru vector.
define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8 %mask) {
; CHECK-LABEL: test_mask_vextractf32x4:
; CHECK: vextractf32x4 $2, %zmm1, %xmm0 {%k1}
  %res = call <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float> %a, i8 2, <4 x float> %b, i8 %mask)
1002 declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float>, i8, <4 x float>, i8)
; Merge-masked 256-bit integer extract (immediate 2); %b is the passthru vector.
define <4 x i64> @test_mask_vextracti64x4(<4 x i64> %b, <8 x i64> %a, i8 %mask) {
; CHECK-LABEL: test_mask_vextracti64x4:
; CHECK: vextracti64x4 $2, %zmm1, %ymm0 {%k1}
  %res = call <4 x i64> @llvm.x86.avx512.mask.vextracti64x4.512(<8 x i64> %a, i8 2, <4 x i64> %b, i8 %mask)
1011 declare <4 x i64> @llvm.x86.avx512.mask.vextracti64x4.512(<8 x i64>, i8, <4 x i64>, i8)
; Zero-masked 128-bit integer extract: zero passthru, expects {%k1} {z}.
define <4 x i32> @test_maskz_vextracti32x4(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: test_maskz_vextracti32x4:
; CHECK: vextracti32x4 $2, %zmm0, %xmm0 {%k1} {z}
  %res = call <4 x i32> @llvm.x86.avx512.mask.vextracti32x4.512(<16 x i32> %a, i8 2, <4 x i32> zeroinitializer, i8 %mask)
1020 declare <4 x i32> @llvm.x86.avx512.mask.vextracti32x4.512(<16 x i32>, i8, <4 x i32>, i8)
; Unmasked 256-bit double extract: immediate 2, all-ones mask, zero passthru.
define <4 x double> @test_vextractf64x4(<8 x double> %a) {
; CHECK-LABEL: test_vextractf64x4:
; CHECK: vextractf64x4 $2, %zmm0, %ymm0 ##
  %res = call <4 x double> @llvm.x86.avx512.mask.vextractf64x4.512(<8 x double> %a, i8 2, <4 x double> zeroinitializer, i8 -1)
  ret <4 x double> %res
1029 declare <4 x double> @llvm.x86.avx512.mask.vextractf64x4.512(<8 x double>, i8, <4 x double>, i8)
; Unmasked immediate shift-left (dwords): all-ones mask, zero passthru.
define <16 x i32> @test_x86_avx512_pslli_d(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_pslli_d
  %res = call <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
; Merge-masked immediate shift-left (dwords): %a1 is the passthru vector.
define <16 x i32> @test_x86_avx512_mask_pslli_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_pslli_d
; CHECK: vpslld $7, %zmm0, %zmm1 {%k1}
  %res = call <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32> %a0, i32 7, <16 x i32> %a1, i16 %mask)
; Zero-masked immediate shift-left (dwords): zero passthru, expects {%k1} {z}.
define <16 x i32> @test_x86_avx512_maskz_pslli_d(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_pslli_d
; CHECK: vpslld $7, %zmm0, %zmm0 {%k1} {z}
  %res = call <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 %mask)
1052 declare <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32>, i32, <16 x i32>, i16) nounwind readnone
; Unmasked immediate shift-left (qwords): all-ones mask, zero passthru.
define <8 x i64> @test_x86_avx512_pslli_q(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_pslli_q
  %res = call <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
; Merge-masked immediate shift-left (qwords): %a1 is the passthru vector.
define <8 x i64> @test_x86_avx512_mask_pslli_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_pslli_q
; CHECK: vpsllq $7, %zmm0, %zmm1 {%k1}
  %res = call <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64> %a0, i32 7, <8 x i64> %a1, i8 %mask)
; Zero-masked immediate shift-left (qwords): zero passthru, expects {%k1} {z}.
define <8 x i64> @test_x86_avx512_maskz_pslli_q(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_pslli_q
; CHECK: vpsllq $7, %zmm0, %zmm0 {%k1} {z}
  %res = call <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 %mask)
1075 declare <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64>, i32, <8 x i64>, i8) nounwind readnone
; Unmasked immediate logical shift-right (dwords): all-ones mask, zero passthru.
define <16 x i32> @test_x86_avx512_psrli_d(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_psrli_d
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
; Merge-masked immediate logical shift-right (dwords): %a1 is the passthru vector.
define <16 x i32> @test_x86_avx512_mask_psrli_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrli_d
; CHECK: vpsrld $7, %zmm0, %zmm1 {%k1}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32> %a0, i32 7, <16 x i32> %a1, i16 %mask)
; Zero-masked immediate logical shift-right (dwords): expects {%k1} {z}.
define <16 x i32> @test_x86_avx512_maskz_psrli_d(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrli_d
; CHECK: vpsrld $7, %zmm0, %zmm0 {%k1} {z}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 %mask)
1098 declare <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32>, i32, <16 x i32>, i16) nounwind readnone
; Unmasked immediate logical shift-right (qwords): all-ones mask, zero passthru.
define <8 x i64> @test_x86_avx512_psrli_q(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrli_q
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
; Merge-masked immediate logical shift-right (qwords): %a1 is the passthru vector.
define <8 x i64> @test_x86_avx512_mask_psrli_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrli_q
; CHECK: vpsrlq $7, %zmm0, %zmm1 {%k1}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64> %a0, i32 7, <8 x i64> %a1, i8 %mask)
; Zero-masked immediate logical shift-right (qwords): expects {%k1} {z}.
define <8 x i64> @test_x86_avx512_maskz_psrli_q(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrli_q
; CHECK: vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 %mask)
1121 declare <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64>, i32, <8 x i64>, i8) nounwind readnone
; Unmasked immediate arithmetic shift-right (dwords): all-ones mask, zero passthru.
define <16 x i32> @test_x86_avx512_psrai_d(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_d
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
; Merge-masked immediate arithmetic shift-right (dwords): %a1 is the passthru vector.
define <16 x i32> @test_x86_avx512_mask_psrai_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_d
; CHECK: vpsrad $7, %zmm0, %zmm1 {%k1}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32> %a0, i32 7, <16 x i32> %a1, i16 %mask)
; Zero-masked immediate arithmetic shift-right (dwords): expects {%k1} {z}.
define <16 x i32> @test_x86_avx512_maskz_psrai_d(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrai_d
; CHECK: vpsrad $7, %zmm0, %zmm0 {%k1} {z}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 %mask)
1144 declare <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32>, i32, <16 x i32>, i16) nounwind readnone
; Unmasked immediate arithmetic shift-right (qwords): all-ones mask, zero passthru.
define <8 x i64> @test_x86_avx512_psrai_q(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_q
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
; Merge-masked immediate arithmetic shift-right (qwords): %a1 is the passthru vector.
define <8 x i64> @test_x86_avx512_mask_psrai_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_q
; CHECK: vpsraq $7, %zmm0, %zmm1 {%k1}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64> %a0, i32 7, <8 x i64> %a1, i8 %mask)
; Zero-masked immediate arithmetic shift-right (qwords): expects {%k1} {z}.
define <8 x i64> @test_x86_avx512_maskz_psrai_q(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrai_q
; CHECK: vpsraq $7, %zmm0, %zmm0 {%k1} {z}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 %mask)
1167 declare <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64>, i32, <8 x i64>, i8) nounwind readnone
; Unmasked uniform shift-left (dwords), count taken from the xmm operand %a1.
define <16 x i32> @test_x86_avx512_psll_d(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psll_d
  %res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
; Merge-masked uniform shift-left (dwords): %a2 is the passthru vector.
define <16 x i32> @test_x86_avx512_mask_psll_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psll_d
; CHECK: vpslld %xmm1, %zmm0, %zmm2 {%k1}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask)
; Zero-masked uniform shift-left (dwords): expects {%k1} {z}.
define <16 x i32> @test_x86_avx512_maskz_psll_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psll_d
; CHECK: vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
1190 declare <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32>, <4 x i32>, <16 x i32>, i16) nounwind readnone
; Unmasked uniform shift-left (qwords), count taken from the xmm operand %a1.
define <8 x i64> @test_x86_avx512_psll_q(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psll_q
  %res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
; Merge-masked uniform shift-left (qwords): %a2 is the passthru vector.
define <8 x i64> @test_x86_avx512_mask_psll_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psll_q
; CHECK: vpsllq %xmm1, %zmm0, %zmm2 {%k1}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask)
; Zero-masked uniform shift-left (qwords): expects {%k1} {z}.
define <8 x i64> @test_x86_avx512_maskz_psll_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psll_q
; CHECK: vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
1213 declare <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64>, <2 x i64>, <8 x i64>, i8) nounwind readnone
; Unmasked uniform logical shift-right (dwords), count from the xmm operand %a1.
define <16 x i32> @test_x86_avx512_psrl_d(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrl_d
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
; Merge-masked uniform logical shift-right (dwords): %a2 is the passthru vector.
define <16 x i32> @test_x86_avx512_mask_psrl_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrl_d
; CHECK: vpsrld %xmm1, %zmm0, %zmm2 {%k1}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask)
; Zero-masked uniform logical shift-right (dwords): expects {%k1} {z}.
define <16 x i32> @test_x86_avx512_maskz_psrl_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrl_d
; CHECK: vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
1236 declare <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32>, <4 x i32>, <16 x i32>, i16) nounwind readnone
; Unmasked uniform logical shift-right (qwords), count from the xmm operand %a1.
define <8 x i64> @test_x86_avx512_psrl_q(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrl_q
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
; Merge-masked uniform logical shift-right (qwords): %a2 is the passthru vector.
define <8 x i64> @test_x86_avx512_mask_psrl_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrl_q
; CHECK: vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask)
; Zero-masked uniform logical shift-right (qwords): expects {%k1} {z}.
define <8 x i64> @test_x86_avx512_maskz_psrl_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrl_q
; CHECK: vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
1259 declare <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64>, <2 x i64>, <8 x i64>, i8) nounwind readnone
; Unmasked uniform arithmetic shift-right (dwords), count from the xmm operand %a1.
define <16 x i32> @test_x86_avx512_psra_d(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_d
  %res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
; Merge-masked uniform arithmetic shift-right (dwords): %a2 is the passthru vector.
define <16 x i32> @test_x86_avx512_mask_psra_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psra_d
; CHECK: vpsrad %xmm1, %zmm0, %zmm2 {%k1}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask)
; Zero-masked uniform arithmetic shift-right (dwords): expects {%k1} {z}.
define <16 x i32> @test_x86_avx512_maskz_psra_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psra_d
; CHECK: vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
1282 declare <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32>, <4 x i32>, <16 x i32>, i16) nounwind readnone
; Unmasked uniform arithmetic shift-right (qwords), count from the xmm operand %a1.
define <8 x i64> @test_x86_avx512_psra_q(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_q
  %res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
; Merge-masked uniform arithmetic shift-right (qwords): %a2 is the passthru vector.
define <8 x i64> @test_x86_avx512_mask_psra_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psra_q
; CHECK: vpsraq %xmm1, %zmm0, %zmm2 {%k1}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask)
; Zero-masked uniform arithmetic shift-right (qwords): expects {%k1} {z}.
define <8 x i64> @test_x86_avx512_maskz_psra_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psra_q
; CHECK: vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
1305 declare <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64>, <2 x i64>, <8 x i64>, i8) nounwind readnone
; Unmasked per-element variable shift-left (dwords): all-ones mask, zero passthru.
define <16 x i32> @test_x86_avx512_psllv_d(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psllv_d
  %res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
; Merge-masked per-element variable shift-left (dwords): %a2 is the passthru vector.
define <16 x i32> @test_x86_avx512_mask_psllv_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psllv_d
; CHECK: vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask)
; Zero-masked per-element variable shift-left (dwords): expects {%k1} {z}.
define <16 x i32> @test_x86_avx512_maskz_psllv_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psllv_d
; CHECK: vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
1328 declare <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readnone
; Unmasked per-element variable shift-left (qwords): all-ones mask, zero passthru.
define <8 x i64> @test_x86_avx512_psllv_q(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psllv_q
  %res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
; Merge-masked per-element variable shift-left (qwords): %a2 is the passthru vector.
define <8 x i64> @test_x86_avx512_mask_psllv_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psllv_q
; CHECK: vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask)
; Zero-masked per-element variable shift-left (qwords): expects {%k1} {z}.
define <8 x i64> @test_x86_avx512_maskz_psllv_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psllv_q
; CHECK: vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
1351 declare <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readnone
; Unmasked per-element arithmetic shift-right (dwords): all-ones mask, zero passthru.
define <16 x i32> @test_x86_avx512_psrav_d(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_d
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
; Merge-masked per-element arithmetic shift-right (dwords): %a2 is the passthru vector.
define <16 x i32> @test_x86_avx512_mask_psrav_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrav_d
; CHECK: vpsravd %zmm1, %zmm0, %zmm2 {%k1}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask)
; Zero-masked per-element arithmetic shift-right (dwords): expects {%k1} {z}.
define <16 x i32> @test_x86_avx512_maskz_psrav_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrav_d
; CHECK: vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
1375 declare <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readnone
; Unmasked per-element arithmetic shift-right (qwords): all-ones mask, zero passthru.
define <8 x i64> @test_x86_avx512_psrav_q(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_q
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
; Merge-masked per-element arithmetic shift-right (qwords): %a2 is the passthru vector.
define <8 x i64> @test_x86_avx512_mask_psrav_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrav_q
; CHECK: vpsravq %zmm1, %zmm0, %zmm2 {%k1}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask)
; Zero-masked per-element arithmetic shift-right (qwords): expects {%k1} {z}.
define <8 x i64> @test_x86_avx512_maskz_psrav_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrav_q
; CHECK: vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
1398 declare <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readnone
; Unmasked per-element logical shift-right (dwords): all-ones mask, zero passthru.
define <16 x i32> @test_x86_avx512_psrlv_d(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrlv_d
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
; Merge-masked per-element logical shift-right (dwords): %a2 is the passthru vector.
define <16 x i32> @test_x86_avx512_mask_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrlv_d
; CHECK: vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask)
; Zero-masked per-element logical shift-right (dwords): expects {%k1} {z}.
define <16 x i32> @test_x86_avx512_maskz_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrlv_d
; CHECK: vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
1421 declare <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readnone
; Unmasked per-element logical shift-right (qwords): all-ones mask, zero passthru.
define <8 x i64> @test_x86_avx512_psrlv_q(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrlv_q
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
; Merge-masked per-element logical shift-right (qwords): %a2 is the passthru vector.
define <8 x i64> @test_x86_avx512_mask_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrlv_q
; CHECK: vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask)
; Zero-masked per-element logical shift-right (qwords): expects {%k1} {z}.
define <8 x i64> @test_x86_avx512_maskz_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrlv_q
; CHECK: vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
1444 declare <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readnone
; Per-element logical shift-right with the count vector loaded from memory.
define <8 x i64> @test_x86_avx512_psrlv_q_memop(<8 x i64> %a0, <8 x i64>* %ptr) {
; CHECK-LABEL: test_x86_avx512_psrlv_q_memop
  %b = load <8 x i64>, <8 x i64>* %ptr
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
; Masked 512-bit FP sub/mul with static rounding (VSUBPS/VMULPS/VMULPD):
; (src1, src2, passthrough, write-mask, i32 rounding mode).  The call sites
; below pass rounding 0..3, matched by {rn-sae}/{rd-sae}/{ru-sae}/{rz-sae}
; in the CHECK lines (0=round-to-nearest, 1=down, 2=up, 3=toward-zero).
1454 declare <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
1455 declare <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
1456 declare <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
1458 define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) {
1459 ; CHECK-LABEL: test_vsubps_rn
1460 ; CHECK: vsubps {rn-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x18,0x5c,0xc1]
1461 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
1462 <16 x float> zeroinitializer, i16 -1, i32 0)
1463 ret <16 x float> %res
1466 define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) {
1467 ; CHECK-LABEL: test_vsubps_rd
1468 ; CHECK: vsubps {rd-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x38,0x5c,0xc1]
1469 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
1470 <16 x float> zeroinitializer, i16 -1, i32 1)
1471 ret <16 x float> %res
1474 define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) {
1475 ; CHECK-LABEL: test_vsubps_ru
1476 ; CHECK: vsubps {ru-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x58,0x5c,0xc1]
1477 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
1478 <16 x float> zeroinitializer, i16 -1, i32 2)
1479 ret <16 x float> %res
1482 define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) {
1483 ; CHECK-LABEL: test_vsubps_rz
1484 ; CHECK: vsubps {rz-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x78,0x5c,0xc1]
1485 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
1486 <16 x float> zeroinitializer, i16 -1, i32 3)
1487 ret <16 x float> %res
1490 define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) {
1491 ; CHECK-LABEL: test_vmulps_rn
1492 ; CHECK: vmulps {rn-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x18,0x59,0xc1]
1493 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1494 <16 x float> zeroinitializer, i16 -1, i32 0)
1495 ret <16 x float> %res
1498 define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) {
1499 ; CHECK-LABEL: test_vmulps_rd
1500 ; CHECK: vmulps {rd-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x38,0x59,0xc1]
1501 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1502 <16 x float> zeroinitializer, i16 -1, i32 1)
1503 ret <16 x float> %res
1506 define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) {
1507 ; CHECK-LABEL: test_vmulps_ru
1508 ; CHECK: vmulps {ru-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x58,0x59,0xc1]
1509 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1510 <16 x float> zeroinitializer, i16 -1, i32 2)
1511 ret <16 x float> %res
1514 define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) {
1515 ; CHECK-LABEL: test_vmulps_rz
1516 ; CHECK: vmulps {rz-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x78,0x59,0xc1]
1517 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1518 <16 x float> zeroinitializer, i16 -1, i32 3)
1519 ret <16 x float> %res
1523 define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
1524 ; CHECK-LABEL: test_vmulps_mask_rn
1525 ; CHECK: vmulps {rn-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x59,0xc1]
1526 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1527 <16 x float> zeroinitializer, i16 %mask, i32 0)
1528 ret <16 x float> %res
1531 define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
1532 ; CHECK-LABEL: test_vmulps_mask_rd
1533 ; CHECK: vmulps {rd-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x59,0xc1]
1534 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1535 <16 x float> zeroinitializer, i16 %mask, i32 1)
1536 ret <16 x float> %res
1539 define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
1540 ; CHECK-LABEL: test_vmulps_mask_ru
1541 ; CHECK: vmulps {ru-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xd9,0x59,0xc1]
1542 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1543 <16 x float> zeroinitializer, i16 %mask, i32 2)
1544 ret <16 x float> %res
1547 define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
1548 ; CHECK-LABEL: test_vmulps_mask_rz
1549 ; CHECK: vmulps {rz-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xf9,0x59,0xc1]
1550 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1551 <16 x float> zeroinitializer, i16 %mask, i32 3)
1552 ret <16 x float> %res
1555 ;; With Passthru value
1556 define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
1557 ; CHECK-LABEL: test_vmulps_mask_passthru_rn
1558 ; CHECK: vmulps {rn-sae}{{.*}}{%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x59,0xd1]
1559 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1560 <16 x float> %passthru, i16 %mask, i32 0)
1561 ret <16 x float> %res
1564 define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
1565 ; CHECK-LABEL: test_vmulps_mask_passthru_rd
1566 ; CHECK: vmulps {rd-sae}{{.*}}{%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x59,0xd1]
1567 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1568 <16 x float> %passthru, i16 %mask, i32 1)
1569 ret <16 x float> %res
1572 define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
1573 ; CHECK-LABEL: test_vmulps_mask_passthru_ru
1574 ; CHECK: vmulps {ru-sae}{{.*}}{%k1} ## encoding: [0x62,0xf1,0x7c,0x59,0x59,0xd1]
1575 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1576 <16 x float> %passthru, i16 %mask, i32 2)
1577 ret <16 x float> %res
1580 define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
1581 ; CHECK-LABEL: test_vmulps_mask_passthru_rz
1582 ; CHECK: vmulps {rz-sae}{{.*}}{%k1} ## encoding: [0x62,0xf1,0x7c,0x79,0x59,0xd1]
1583 %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
1584 <16 x float> %passthru, i16 %mask, i32 3)
1585 ret <16 x float> %res
1589 define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
1590 ; CHECK-LABEL: test_vmulpd_mask_rn
1591 ; CHECK: vmulpd {rn-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x99,0x59,0xc1]
1592 %res = call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a0, <8 x double> %a1,
1593 <8 x double> zeroinitializer, i8 %mask, i32 0)
1594 ret <8 x double> %res
1597 define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
1598 ; CHECK-LABEL: test_vmulpd_mask_rd
1599 ; CHECK: vmulpd {rd-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0x59,0xc1]
1600 %res = call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a0, <8 x double> %a1,
1601 <8 x double> zeroinitializer, i8 %mask, i32 1)
1602 ret <8 x double> %res
1605 define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
1606 ; CHECK-LABEL: test_vmulpd_mask_ru
1607 ; CHECK: vmulpd {ru-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xd9,0x59,0xc1]
1608 %res = call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a0, <8 x double> %a1,
1609 <8 x double> zeroinitializer, i8 %mask, i32 2)
1610 ret <8 x double> %res
1613 define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
1614 ; CHECK-LABEL: test_vmulpd_mask_rz
1615 ; CHECK: vmulpd {rz-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xf9,0x59,0xc1]
1616 %res = call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a0, <8 x double> %a1,
1617 <8 x double> zeroinitializer, i8 %mask, i32 3)
1618 ret <8 x double> %res
1621 define <16 x i32> @test_xor_epi32(<16 x i32> %a, <16 x i32> %b) {
1622 ;CHECK-LABEL: test_xor_epi32
1623 ;CHECK: vpxord {{.*}}encoding: [0x62,0xf1,0x7d,0x48,0xef,0xc1]
1624 %res = call <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
1625 ret < 16 x i32> %res
1628 define <16 x i32> @test_mask_xor_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
1629 ;CHECK-LABEL: test_mask_xor_epi32
1630 ;CHECK: vpxord %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xef,0xd1]
1631 %res = call <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
1632 ret < 16 x i32> %res
; Masked 512-bit bitwise XOR, dword elements (VPXORD):
; (src1, src2, passthrough, i16 write-mask) -> result.
1635 declare <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
1637 define <16 x i32> @test_or_epi32(<16 x i32> %a, <16 x i32> %b) {
1638 ;CHECK-LABEL: test_or_epi32
1639 ;CHECK: vpord {{.*}}encoding: [0x62,0xf1,0x7d,0x48,0xeb,0xc1]
1640 %res = call <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
1641 ret < 16 x i32> %res
1644 define <16 x i32> @test_mask_or_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
1645 ;CHECK-LABEL: test_mask_or_epi32
1646 ;CHECK: vpord %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xeb,0xd1]
1647 %res = call <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
1648 ret < 16 x i32> %res
; Masked 512-bit bitwise OR, dword elements (VPORD):
; (src1, src2, passthrough, i16 write-mask) -> result.
1651 declare <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
1653 define <16 x i32> @test_and_epi32(<16 x i32> %a, <16 x i32> %b) {
1654 ;CHECK-LABEL: test_and_epi32
1655 ;CHECK: vpandd {{.*}}encoding: [0x62,0xf1,0x7d,0x48,0xdb,0xc1]
1656 %res = call <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
1657 ret < 16 x i32> %res
1660 define <16 x i32> @test_mask_and_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
1661 ;CHECK-LABEL: test_mask_and_epi32
1662 ;CHECK: vpandd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xdb,0xd1]
1663 %res = call <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
1664 ret < 16 x i32> %res
; Masked 512-bit bitwise AND, dword elements (VPANDD):
; (src1, src2, passthrough, i16 write-mask) -> result.
1667 declare <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
1669 define <8 x i64> @test_xor_epi64(<8 x i64> %a, <8 x i64> %b) {
1670 ;CHECK-LABEL: test_xor_epi64
1671 ;CHECK: vpxorq {{.*}}encoding: [0x62,0xf1,0xfd,0x48,0xef,0xc1]
1672 %res = call <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
1676 define <8 x i64> @test_mask_xor_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
1677 ;CHECK-LABEL: test_mask_xor_epi64
1678 ;CHECK: vpxorq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xef,0xd1]
1679 %res = call <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
; Masked 512-bit bitwise XOR, qword elements (VPXORQ):
; (src1, src2, passthrough, i8 write-mask) -> result.
1683 declare <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
1685 define <8 x i64> @test_or_epi64(<8 x i64> %a, <8 x i64> %b) {
1686 ;CHECK-LABEL: test_or_epi64
1687 ;CHECK: vporq {{.*}}encoding: [0x62,0xf1,0xfd,0x48,0xeb,0xc1]
1688 %res = call <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
1692 define <8 x i64> @test_mask_or_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
1693 ;CHECK-LABEL: test_mask_or_epi64
1694 ;CHECK: vporq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xeb,0xd1]
1695 %res = call <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
; Masked 512-bit bitwise OR, qword elements (VPORQ):
; (src1, src2, passthrough, i8 write-mask) -> result.
1699 declare <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
1701 define <8 x i64> @test_and_epi64(<8 x i64> %a, <8 x i64> %b) {
1702 ;CHECK-LABEL: test_and_epi64
1703 ;CHECK: vpandq {{.*}}encoding: [0x62,0xf1,0xfd,0x48,0xdb,0xc1]
1704 %res = call <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
1708 define <8 x i64> @test_mask_and_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
1709 ;CHECK-LABEL: test_mask_and_epi64
1710 ;CHECK: vpandq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xdb,0xd1]
1711 %res = call <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
; Masked 512-bit bitwise AND, qword elements (VPANDQ):
; (src1, src2, passthrough, i8 write-mask) -> result.
1715 declare <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
1718 define <16 x i32> @test_mask_add_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
1719 ;CHECK-LABEL: test_mask_add_epi32_rr
1720 ;CHECK: vpaddd %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc1]
1721 %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
1722 ret < 16 x i32> %res
1725 define <16 x i32> @test_mask_add_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
1726 ;CHECK-LABEL: test_mask_add_epi32_rrk
1727 ;CHECK: vpaddd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xfe,0xd1]
1728 %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
1729 ret < 16 x i32> %res
1732 define <16 x i32> @test_mask_add_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
1733 ;CHECK-LABEL: test_mask_add_epi32_rrkz
1734 ;CHECK: vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xfe,0xc1]
1735 %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
1736 ret < 16 x i32> %res
1739 define <16 x i32> @test_mask_add_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
1740 ;CHECK-LABEL: test_mask_add_epi32_rm
1741 ;CHECK: vpaddd (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0x07]
1742 %b = load <16 x i32>, <16 x i32>* %ptr_b
1743 %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
1744 ret < 16 x i32> %res
1747 define <16 x i32> @test_mask_add_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
1748 ;CHECK-LABEL: test_mask_add_epi32_rmk
1749 ;CHECK: vpaddd (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xfe,0x0f]
1750 %b = load <16 x i32>, <16 x i32>* %ptr_b
1751 %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
1752 ret < 16 x i32> %res
1755 define <16 x i32> @test_mask_add_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
1756 ;CHECK-LABEL: test_mask_add_epi32_rmkz
1757 ;CHECK: vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xfe,0x07]
1758 %b = load <16 x i32>, <16 x i32>* %ptr_b
1759 %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
1760 ret < 16 x i32> %res
1763 define <16 x i32> @test_mask_add_epi32_rmb(<16 x i32> %a, i32* %ptr_b) {
1764 ;CHECK-LABEL: test_mask_add_epi32_rmb
1765 ;CHECK: vpaddd (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x58,0xfe,0x07]
1766 %q = load i32, i32* %ptr_b
1767 %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
1768 %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
1769 %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
1770 ret < 16 x i32> %res
1773 define <16 x i32> @test_mask_add_epi32_rmbk(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
1774 ;CHECK-LABEL: test_mask_add_epi32_rmbk
1775 ;CHECK: vpaddd (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x59,0xfe,0x0f]
1776 %q = load i32, i32* %ptr_b
1777 %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
1778 %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
1779 %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
1780 ret < 16 x i32> %res
1783 define <16 x i32> @test_mask_add_epi32_rmbkz(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
1784 ;CHECK-LABEL: test_mask_add_epi32_rmbkz
1785 ;CHECK: vpaddd (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xd9,0xfe,0x07]
1786 %q = load i32, i32* %ptr_b
1787 %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
1788 %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
1789 %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
1790 ret < 16 x i32> %res
; Masked 512-bit integer add, dword elements (VPADDD); exercised above in
; reg/reg, reg/mem, and {1to16} embedded-broadcast forms, each unmasked,
; merge-masked ({%k1}) and zero-masked ({%k1} {z}).
; (src1, src2, passthrough, i16 write-mask) -> result.
1793 declare <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
1795 define <16 x i32> @test_mask_sub_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
1796 ;CHECK-LABEL: test_mask_sub_epi32_rr
1797 ;CHECK: vpsubd %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfa,0xc1]
1798 %res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
1799 ret < 16 x i32> %res
1802 define <16 x i32> @test_mask_sub_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
1803 ;CHECK-LABEL: test_mask_sub_epi32_rrk
1804 ;CHECK: vpsubd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xfa,0xd1]
1805 %res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
1806 ret < 16 x i32> %res
1809 define <16 x i32> @test_mask_sub_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
1810 ;CHECK-LABEL: test_mask_sub_epi32_rrkz
1811 ;CHECK: vpsubd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xfa,0xc1]
1812 %res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
1813 ret < 16 x i32> %res
1816 define <16 x i32> @test_mask_sub_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
1817 ;CHECK-LABEL: test_mask_sub_epi32_rm
1818 ;CHECK: vpsubd (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfa,0x07]
1819 %b = load <16 x i32>, <16 x i32>* %ptr_b
1820 %res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
1821 ret < 16 x i32> %res
1824 define <16 x i32> @test_mask_sub_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
1825 ;CHECK-LABEL: test_mask_sub_epi32_rmk
1826 ;CHECK: vpsubd (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xfa,0x0f]
1827 %b = load <16 x i32>, <16 x i32>* %ptr_b
1828 %res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
1829 ret < 16 x i32> %res
1832 define <16 x i32> @test_mask_sub_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
1833 ;CHECK-LABEL: test_mask_sub_epi32_rmkz
1834 ;CHECK: vpsubd (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xfa,0x07]
1835 %b = load <16 x i32>, <16 x i32>* %ptr_b
1836 %res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
1837 ret < 16 x i32> %res
1840 define <16 x i32> @test_mask_sub_epi32_rmb(<16 x i32> %a, i32* %ptr_b) {
1841 ;CHECK-LABEL: test_mask_sub_epi32_rmb
1842 ;CHECK: vpsubd (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x58,0xfa,0x07]
1843 %q = load i32, i32* %ptr_b
1844 %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
1845 %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
1846 %res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
1847 ret < 16 x i32> %res
1850 define <16 x i32> @test_mask_sub_epi32_rmbk(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
1851 ;CHECK-LABEL: test_mask_sub_epi32_rmbk
1852 ;CHECK: vpsubd (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x59,0xfa,0x0f]
1853 %q = load i32, i32* %ptr_b
1854 %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
1855 %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
1856 %res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
1857 ret < 16 x i32> %res
1860 define <16 x i32> @test_mask_sub_epi32_rmbkz(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
1861 ;CHECK-LABEL: test_mask_sub_epi32_rmbkz
1862 ;CHECK: vpsubd (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xd9,0xfa,0x07]
1863 %q = load i32, i32* %ptr_b
1864 %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
1865 %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
1866 %res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
1867 ret < 16 x i32> %res
; Masked 512-bit integer subtract, dword elements (VPSUBD); exercised above
; in reg/reg, reg/mem, and {1to16} embedded-broadcast forms with all three
; masking modes.  (src1, src2, passthrough, i16 write-mask) -> result.
1870 declare <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
1872 define <8 x i64> @test_mask_add_epi64_rr(<8 x i64> %a, <8 x i64> %b) {
1873 ;CHECK-LABEL: test_mask_add_epi64_rr
1874 ;CHECK: vpaddq %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc1]
1875 %res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
1879 define <8 x i64> @test_mask_add_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
1880 ;CHECK-LABEL: test_mask_add_epi64_rrk
1881 ;CHECK: vpaddq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xd4,0xd1]
1882 %res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
1886 define <8 x i64> @test_mask_add_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
1887 ;CHECK-LABEL: test_mask_add_epi64_rrkz
1888 ;CHECK: vpaddq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0xd4,0xc1]
1889 %res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
1893 define <8 x i64> @test_mask_add_epi64_rm(<8 x i64> %a, <8 x i64>* %ptr_b) {
1894 ;CHECK-LABEL: test_mask_add_epi64_rm
1895 ;CHECK: vpaddq (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0x07]
1896 %b = load <8 x i64>, <8 x i64>* %ptr_b
1897 %res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
1901 define <8 x i64> @test_mask_add_epi64_rmk(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
1902 ;CHECK-LABEL: test_mask_add_epi64_rmk
1903 ;CHECK: vpaddq (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xd4,0x0f]
1904 %b = load <8 x i64>, <8 x i64>* %ptr_b
1905 %res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
1909 define <8 x i64> @test_mask_add_epi64_rmkz(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
1910 ;CHECK-LABEL: test_mask_add_epi64_rmkz
1911 ;CHECK: vpaddq (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0xd4,0x07]
1912 %b = load <8 x i64>, <8 x i64>* %ptr_b
1913 %res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
1917 define <8 x i64> @test_mask_add_epi64_rmb(<8 x i64> %a, i64* %ptr_b) {
1918 ;CHECK-LABEL: test_mask_add_epi64_rmb
1919 ;CHECK: vpaddq (%rdi){1to8}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x58,0xd4,0x07]
1920 %q = load i64, i64* %ptr_b
1921 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
1922 %b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
1923 %res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
1927 define <8 x i64> @test_mask_add_epi64_rmbk(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
1928 ;CHECK-LABEL: test_mask_add_epi64_rmbk
1929 ;CHECK: vpaddq (%rdi){1to8}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x59,0xd4,0x0f]
1930 %q = load i64, i64* %ptr_b
1931 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
1932 %b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
1933 %res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
1937 define <8 x i64> @test_mask_add_epi64_rmbkz(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
1938 ;CHECK-LABEL: test_mask_add_epi64_rmbkz
1939 ;CHECK: vpaddq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xd9,0xd4,0x07]
1940 %q = load i64, i64* %ptr_b
1941 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
1942 %b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
1943 %res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
; Masked 512-bit integer add, qword elements (VPADDQ); exercised above in
; reg/reg, reg/mem, and {1to8} embedded-broadcast forms with all three
; masking modes.  (src1, src2, passthrough, i8 write-mask) -> result.
1947 declare <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
1949 define <8 x i64> @test_mask_sub_epi64_rr(<8 x i64> %a, <8 x i64> %b) {
1950 ;CHECK-LABEL: test_mask_sub_epi64_rr
1951 ;CHECK: vpsubq %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xfb,0xc1]
1952 %res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
1956 define <8 x i64> @test_mask_sub_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
1957 ;CHECK-LABEL: test_mask_sub_epi64_rrk
1958 ;CHECK: vpsubq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xfb,0xd1]
1959 %res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
1963 define <8 x i64> @test_mask_sub_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
1964 ;CHECK-LABEL: test_mask_sub_epi64_rrkz
1965 ;CHECK: vpsubq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0xfb,0xc1]
1966 %res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
1970 define <8 x i64> @test_mask_sub_epi64_rm(<8 x i64> %a, <8 x i64>* %ptr_b) {
1971 ;CHECK-LABEL: test_mask_sub_epi64_rm
1972 ;CHECK: vpsubq (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xfb,0x07]
1973 %b = load <8 x i64>, <8 x i64>* %ptr_b
1974 %res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
1978 define <8 x i64> @test_mask_sub_epi64_rmk(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
1979 ;CHECK-LABEL: test_mask_sub_epi64_rmk
1980 ;CHECK: vpsubq (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xfb,0x0f]
1981 %b = load <8 x i64>, <8 x i64>* %ptr_b
1982 %res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
1986 define <8 x i64> @test_mask_sub_epi64_rmkz(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
1987 ;CHECK-LABEL: test_mask_sub_epi64_rmkz
1988 ;CHECK: vpsubq (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0xfb,0x07]
1989 %b = load <8 x i64>, <8 x i64>* %ptr_b
1990 %res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
1994 define <8 x i64> @test_mask_sub_epi64_rmb(<8 x i64> %a, i64* %ptr_b) {
1995 ;CHECK-LABEL: test_mask_sub_epi64_rmb
1996 ;CHECK: vpsubq (%rdi){1to8}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x58,0xfb,0x07]
1997 %q = load i64, i64* %ptr_b
1998 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
1999 %b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
2000 %res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
2004 define <8 x i64> @test_mask_sub_epi64_rmbk(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
2005 ;CHECK-LABEL: test_mask_sub_epi64_rmbk
2006 ;CHECK: vpsubq (%rdi){1to8}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x59,0xfb,0x0f]
2007 %q = load i64, i64* %ptr_b
2008 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
2009 %b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
2010 %res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
2014 define <8 x i64> @test_mask_sub_epi64_rmbkz(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
2015 ;CHECK-LABEL: test_mask_sub_epi64_rmbkz
2016 ;CHECK: vpsubq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xd9,0xfb,0x07]
2017 %q = load i64, i64* %ptr_b
2018 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
2019 %b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
2020 %res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
; Masked 512-bit integer subtract, qword elements (VPSUBQ); exercised above
; in reg/reg, reg/mem, and {1to8} embedded-broadcast forms with all three
; masking modes.  (src1, src2, passthrough, i8 write-mask) -> result.
2024 declare <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
2026 define <8 x i64> @test_mask_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
2027 ;CHECK-LABEL: test_mask_mul_epi32_rr
2028 ;CHECK: vpmuldq %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf2,0xfd,0x48,0x28,0xc1]
2029 %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
2033 define <8 x i64> @test_mask_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
2034 ;CHECK-LABEL: test_mask_mul_epi32_rrk
2035 ;CHECK: vpmuldq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x28,0xd1]
2036 %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
2040 define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
2041 ;CHECK-LABEL: test_mask_mul_epi32_rrkz
2042 ;CHECK: vpmuldq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x28,0xc1]
2043 %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
2047 define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
2048 ;CHECK-LABEL: test_mask_mul_epi32_rm
2049 ;CHECK: vpmuldq (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf2,0xfd,0x48,0x28,0x07]
2050 %b = load <16 x i32>, <16 x i32>* %ptr_b
2051 %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
2055 define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
2056 ;CHECK-LABEL: test_mask_mul_epi32_rmk
2057 ;CHECK: vpmuldq (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x28,0x0f]
2058 %b = load <16 x i32>, <16 x i32>* %ptr_b
2059 %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
2063 define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
2064 ;CHECK-LABEL: test_mask_mul_epi32_rmkz
2065 ;CHECK: vpmuldq (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x28,0x07]
2066 %b = load <16 x i32>, <16 x i32>* %ptr_b
2067 %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
2071 define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, i64* %ptr_b) {
2072 ;CHECK-LABEL: test_mask_mul_epi32_rmb
2073 ;CHECK: vpmuldq (%rdi){1to8}, %zmm0, %zmm0 ## encoding: [0x62,0xf2,0xfd,0x58,0x28,0x07]
2074 %q = load i64, i64* %ptr_b
2075 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
2076 %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
2077 %b = bitcast <8 x i64> %b64 to <16 x i32>
2078 %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
2082 define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
2083 ;CHECK-LABEL: test_mask_mul_epi32_rmbk
2084 ;CHECK: vpmuldq (%rdi){1to8}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x59,0x28,0x0f]
2085 %q = load i64, i64* %ptr_b
2086 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
2087 %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
2088 %b = bitcast <8 x i64> %b64 to <16 x i32>
2089 %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
2093 define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
2094 ;CHECK-LABEL: test_mask_mul_epi32_rmbkz
2095 ;CHECK: vpmuldq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xd9,0x28,0x07]
2096 %q = load i64, i64* %ptr_b
2097 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
2098 %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
2099 %b = bitcast <8 x i64> %b64 to <16 x i32>
2100 %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
2104 declare <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32>, <16 x i32>, <8 x i64>, i8)
2106 define <8 x i64> @test_mask_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
2107 ;CHECK-LABEL: test_mask_mul_epu32_rr
2108 ;CHECK: vpmuludq %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xf4,0xc1]
2109 %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
2113 define <8 x i64> @test_mask_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
2114 ;CHECK-LABEL: test_mask_mul_epu32_rrk
2115 ;CHECK: vpmuludq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xf4,0xd1]
2116 %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
2120 define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
2121 ;CHECK-LABEL: test_mask_mul_epu32_rrkz
2122 ;CHECK: vpmuludq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0xf4,0xc1]
2123 %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
2127 define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
2128 ;CHECK-LABEL: test_mask_mul_epu32_rm
2129 ;CHECK: vpmuludq (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xf4,0x07]
2130 %b = load <16 x i32>, <16 x i32>* %ptr_b
2131 %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
2135 define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
2136 ;CHECK-LABEL: test_mask_mul_epu32_rmk
2137 ;CHECK: vpmuludq (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0xf4,0x0f]
2138 %b = load <16 x i32>, <16 x i32>* %ptr_b
2139 %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
2143 define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
2144 ;CHECK-LABEL: test_mask_mul_epu32_rmkz
2145 ;CHECK: vpmuludq (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0xf4,0x07]
2146 %b = load <16 x i32>, <16 x i32>* %ptr_b
2147 %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
2151 define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, i64* %ptr_b) {
2152 ;CHECK-LABEL: test_mask_mul_epu32_rmb
2153 ;CHECK: vpmuludq (%rdi){1to8}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x58,0xf4,0x07]
2154 %q = load i64, i64* %ptr_b
2155 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
2156 %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
2157 %b = bitcast <8 x i64> %b64 to <16 x i32>
2158 %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
2162 define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
2163 ;CHECK-LABEL: test_mask_mul_epu32_rmbk
2164 ;CHECK: vpmuludq (%rdi){1to8}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x59,0xf4,0x0f]
2165 %q = load i64, i64* %ptr_b
2166 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
2167 %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
2168 %b = bitcast <8 x i64> %b64 to <16 x i32>
2169 %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
2173 define <8 x i64> @test_mask_mul_epu32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
2174 ;CHECK-LABEL: test_mask_mul_epu32_rmbkz
2175 ;CHECK: vpmuludq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xd9,0xf4,0x07]
2176 %q = load i64, i64* %ptr_b
2177 %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
2178 %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
2179 %b = bitcast <8 x i64> %b64 to <16 x i32>
2180 %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
2184 declare <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32>, <16 x i32>, <8 x i64>, i8)
2186 define <16 x i32> @test_mask_mullo_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
2187 ;CHECK-LABEL: test_mask_mullo_epi32_rr_512
2188 ;CHECK: vpmulld %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x40,0xc1]
2189 %res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
2193 define <16 x i32> @test_mask_mullo_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
2194 ;CHECK-LABEL: test_mask_mullo_epi32_rrk_512
2195 ;CHECK: vpmulld %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x40,0xd1]
2196 %res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
2197 ret < 16 x i32> %res
2200 define <16 x i32> @test_mask_mullo_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
2201 ;CHECK-LABEL: test_mask_mullo_epi32_rrkz_512
2202 ;CHECK: vpmulld %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x40,0xc1]
2203 %res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
2204 ret < 16 x i32> %res
2207 define <16 x i32> @test_mask_mullo_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
2208 ;CHECK-LABEL: test_mask_mullo_epi32_rm_512
2209 ;CHECK: vpmulld (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x40,0x07]
2210 %b = load <16 x i32>, <16 x i32>* %ptr_b
2211 %res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
2212 ret < 16 x i32> %res
2215 define <16 x i32> @test_mask_mullo_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
2216 ;CHECK-LABEL: test_mask_mullo_epi32_rmk_512
2217 ;CHECK: vpmulld (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x40,0x0f]
2218 %b = load <16 x i32>, <16 x i32>* %ptr_b
2219 %res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
2220 ret < 16 x i32> %res
2223 define <16 x i32> @test_mask_mullo_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
2224 ;CHECK-LABEL: test_mask_mullo_epi32_rmkz_512
2225 ;CHECK: vpmulld (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x40,0x07]
2226 %b = load <16 x i32>, <16 x i32>* %ptr_b
2227 %res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
2228 ret < 16 x i32> %res
2231 define <16 x i32> @test_mask_mullo_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
2232 ;CHECK-LABEL: test_mask_mullo_epi32_rmb_512
2233 ;CHECK: vpmulld (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf2,0x7d,0x58,0x40,0x07]
2234 %q = load i32, i32* %ptr_b
2235 %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
2236 %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
2237 %res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
2238 ret < 16 x i32> %res
2241 define <16 x i32> @test_mask_mullo_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
2242 ;CHECK-LABEL: test_mask_mullo_epi32_rmbk_512
2243 ;CHECK: vpmulld (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x59,0x40,0x0f]
2244 %q = load i32, i32* %ptr_b
2245 %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
2246 %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
2247 %res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
2248 ret < 16 x i32> %res
2251 define <16 x i32> @test_mask_mullo_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
2252 ;CHECK-LABEL: test_mask_mullo_epi32_rmbkz_512
2253 ;CHECK: vpmulld (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xd9,0x40,0x07]
2254 %q = load i32, i32* %ptr_b
2255 %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
2256 %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
2257 %res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
2258 ret < 16 x i32> %res
2261 declare <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
2263 define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2264 ;CHECK-LABEL: test_mm512_maskz_add_round_ps_rn_sae
2265 ;CHECK: vaddps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
2266 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 0)
2267 ret <16 x float> %res
2269 define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2270 ;CHECK-LABEL: test_mm512_maskz_add_round_ps_rd_sae
2271 ;CHECK: vaddps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
2272 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 1)
2273 ret <16 x float> %res
2275 define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2276 ;CHECK-LABEL: test_mm512_maskz_add_round_ps_ru_sae
2277 ;CHECK: vaddps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
2278 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 2)
2279 ret <16 x float> %res
2282 define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2283 ;CHECK-LABEL: test_mm512_maskz_add_round_ps_rz_sae
2284 ;CHECK: vaddps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
2285 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 3)
2286 ret <16 x float> %res
2290 define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2291 ;CHECK-LABEL: test_mm512_maskz_add_round_ps_current
2292 ;CHECK: vaddps %zmm1, %zmm0, %zmm0 {%k1} {z}
2293 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 4)
2294 ret <16 x float> %res
2297 define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2298 ;CHECK-LABEL: test_mm512_mask_add_round_ps_rn_sae
2299 ;CHECK: vaddps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2300 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 0)
2301 ret <16 x float> %res
2303 define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2304 ;CHECK-LABEL: test_mm512_mask_add_round_ps_rd_sae
2305 ;CHECK: vaddps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2306 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 1)
2307 ret <16 x float> %res
2309 define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2310 ;CHECK-LABEL: test_mm512_mask_add_round_ps_ru_sae
2311 ;CHECK: vaddps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2312 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 2)
2313 ret <16 x float> %res
2316 define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2317 ;CHECK-LABEL: test_mm512_mask_add_round_ps_rz_sae
2318 ;CHECK: vaddps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2319 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 3)
2320 ret <16 x float> %res
2324 define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2325 ;CHECK-LABEL: test_mm512_mask_add_round_ps_current
2326 ;CHECK: vaddps %zmm1, %zmm0, %zmm2 {%k1}
2327 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 4)
2328 ret <16 x float> %res
2332 define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2333 ;CHECK-LABEL: test_mm512_add_round_ps_rn_sae
2334 ;CHECK: vaddps {rn-sae}, %zmm1, %zmm0, %zmm0
2335 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 0)
2336 ret <16 x float> %res
2338 define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2339 ;CHECK-LABEL: test_mm512_add_round_ps_rd_sae
2340 ;CHECK: vaddps {rd-sae}, %zmm1, %zmm0, %zmm0
2341 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 1)
2342 ret <16 x float> %res
2344 define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2345 ;CHECK-LABEL: test_mm512_add_round_ps_ru_sae
2346 ;CHECK: vaddps {ru-sae}, %zmm1, %zmm0, %zmm0
2347 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 2)
2348 ret <16 x float> %res
2351 define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2352 ;CHECK-LABEL: test_mm512_add_round_ps_rz_sae
2353 ;CHECK: vaddps {rz-sae}, %zmm1, %zmm0, %zmm0
2354 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 3)
2355 ret <16 x float> %res
2358 define <16 x float> @test_mm512_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2359 ;CHECK-LABEL: test_mm512_add_round_ps_current
2360 ;CHECK: vaddps %zmm1, %zmm0, %zmm0
2361 %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
2362 ret <16 x float> %res
2364 declare <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
2366 define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2367 ;CHECK-LABEL: test_mm512_mask_sub_round_ps_rn_sae
2368 ;CHECK: vsubps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2369 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 0)
2370 ret <16 x float> %res
2372 define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2373 ;CHECK-LABEL: test_mm512_mask_sub_round_ps_rd_sae
2374 ;CHECK: vsubps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2375 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 1)
2376 ret <16 x float> %res
2378 define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2379 ;CHECK-LABEL: test_mm512_mask_sub_round_ps_ru_sae
2380 ;CHECK: vsubps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2381 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 2)
2382 ret <16 x float> %res
2385 define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2386 ;CHECK-LABEL: test_mm512_mask_sub_round_ps_rz_sae
2387 ;CHECK: vsubps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2388 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 3)
2389 ret <16 x float> %res
2393 define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2394 ;CHECK-LABEL: test_mm512_mask_sub_round_ps_current
2395 ;CHECK: vsubps %zmm1, %zmm0, %zmm2 {%k1}
2396 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 4)
2397 ret <16 x float> %res
2400 define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2401 ;CHECK-LABEL: test_mm512_sub_round_ps_rn_sae
2402 ;CHECK: vsubps {rn-sae}, %zmm1, %zmm0, %zmm0
2403 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 0)
2404 ret <16 x float> %res
2406 define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2407 ;CHECK-LABEL: test_mm512_sub_round_ps_rd_sae
2408 ;CHECK: vsubps {rd-sae}, %zmm1, %zmm0, %zmm0
2409 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 1)
2410 ret <16 x float> %res
2412 define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2413 ;CHECK-LABEL: test_mm512_sub_round_ps_ru_sae
2414 ;CHECK: vsubps {ru-sae}, %zmm1, %zmm0, %zmm0
2415 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 2)
2416 ret <16 x float> %res
2419 define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2420 ;CHECK-LABEL: test_mm512_sub_round_ps_rz_sae
2421 ;CHECK: vsubps {rz-sae}, %zmm1, %zmm0, %zmm0
2422 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 3)
2423 ret <16 x float> %res
2426 define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2427 ;CHECK-LABEL: test_mm512_sub_round_ps_current
2428 ;CHECK: vsubps %zmm1, %zmm0, %zmm0
2429 %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
2430 ret <16 x float> %res
2433 define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2434 ;CHECK-LABEL: test_mm512_maskz_div_round_ps_rn_sae
2435 ;CHECK: vdivps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
2436 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 0)
2437 ret <16 x float> %res
2439 define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2440 ;CHECK-LABEL: test_mm512_maskz_div_round_ps_rd_sae
2441 ;CHECK: vdivps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
2442 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 1)
2443 ret <16 x float> %res
2445 define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2446 ;CHECK-LABEL: test_mm512_maskz_div_round_ps_ru_sae
2447 ;CHECK: vdivps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
2448 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 2)
2449 ret <16 x float> %res
2452 define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2453 ;CHECK-LABEL: test_mm512_maskz_div_round_ps_rz_sae
2454 ;CHECK: vdivps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
2455 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 3)
2456 ret <16 x float> %res
2460 define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2461 ;CHECK-LABEL: test_mm512_maskz_div_round_ps_current
2462 ;CHECK: vdivps %zmm1, %zmm0, %zmm0 {%k1} {z}
2463 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 4)
2464 ret <16 x float> %res
2467 define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2468 ;CHECK-LABEL: test_mm512_mask_div_round_ps_rn_sae
2469 ;CHECK: vdivps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2470 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 0)
2471 ret <16 x float> %res
2473 define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2474 ;CHECK-LABEL: test_mm512_mask_div_round_ps_rd_sae
2475 ;CHECK: vdivps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2476 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 1)
2477 ret <16 x float> %res
2479 define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2480 ;CHECK-LABEL: test_mm512_mask_div_round_ps_ru_sae
2481 ;CHECK: vdivps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2482 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 2)
2483 ret <16 x float> %res
2486 define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2487 ;CHECK-LABEL: test_mm512_mask_div_round_ps_rz_sae
2488 ;CHECK: vdivps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
2489 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 3)
2490 ret <16 x float> %res
2494 define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2495 ;CHECK-LABEL: test_mm512_mask_div_round_ps_current
2496 ;CHECK: vdivps %zmm1, %zmm0, %zmm2 {%k1}
2497 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 4)
2498 ret <16 x float> %res
2502 define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2503 ;CHECK-LABEL: test_mm512_div_round_ps_rn_sae
2504 ;CHECK: vdivps {rn-sae}, %zmm1, %zmm0, %zmm0
2505 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 0)
2506 ret <16 x float> %res
2508 define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2509 ;CHECK-LABEL: test_mm512_div_round_ps_rd_sae
2510 ;CHECK: vdivps {rd-sae}, %zmm1, %zmm0, %zmm0
2511 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 1)
2512 ret <16 x float> %res
2514 define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2515 ;CHECK-LABEL: test_mm512_div_round_ps_ru_sae
2516 ;CHECK: vdivps {ru-sae}, %zmm1, %zmm0, %zmm0
2517 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 2)
2518 ret <16 x float> %res
2521 define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2522 ;CHECK-LABEL: test_mm512_div_round_ps_rz_sae
2523 ;CHECK: vdivps {rz-sae}, %zmm1, %zmm0, %zmm0
2524 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 3)
2525 ret <16 x float> %res
2528 define <16 x float> @test_mm512_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2529 ;CHECK-LABEL: test_mm512_div_round_ps_current
2530 ;CHECK: vdivps %zmm1, %zmm0, %zmm0
2531 %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
2532 ret <16 x float> %res
2534 declare <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
2536 define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2537 ;CHECK-LABEL: test_mm512_maskz_min_round_ps_sae
2538 ;CHECK: vminps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
2539 %res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 8)
2540 ret <16 x float> %res
2543 define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2544 ;CHECK-LABEL: test_mm512_maskz_min_round_ps_current
2545 ;CHECK: vminps %zmm1, %zmm0, %zmm0 {%k1} {z}
2546 %res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 4)
2547 ret <16 x float> %res
2550 define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2551 ;CHECK-LABEL: test_mm512_mask_min_round_ps_sae
2552 ;CHECK: vminps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
2553 %res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 8)
2554 ret <16 x float> %res
2557 define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2558 ;CHECK-LABEL: test_mm512_mask_min_round_ps_current
2559 ;CHECK: vminps %zmm1, %zmm0, %zmm2 {%k1}
2560 %res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 4)
2561 ret <16 x float> %res
2564 define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2565 ;CHECK-LABEL: test_mm512_min_round_ps_sae
2566 ;CHECK: vminps {sae}, %zmm1, %zmm0, %zmm0
2567 %res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 8)
2568 ret <16 x float> %res
2571 define <16 x float> @test_mm512_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2572 ;CHECK-LABEL: test_mm512_min_round_ps_current
2573 ;CHECK: vminps %zmm1, %zmm0, %zmm0
2574 %res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
2575 ret <16 x float> %res
2577 declare <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
2579 define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2580 ;CHECK-LABEL: test_mm512_maskz_max_round_ps_sae
2581 ;CHECK: vmaxps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
2582 %res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 8)
2583 ret <16 x float> %res
2586 define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2587 ;CHECK-LABEL: test_mm512_maskz_max_round_ps_current
2588 ;CHECK: vmaxps %zmm1, %zmm0, %zmm0 {%k1} {z}
2589 %res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 %mask, i32 4)
2590 ret <16 x float> %res
2593 define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2594 ;CHECK-LABEL: test_mm512_mask_max_round_ps_sae
2595 ;CHECK: vmaxps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
2596 %res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 8)
2597 ret <16 x float> %res
2600 define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
2601 ;CHECK-LABEL: test_mm512_mask_max_round_ps_current
2602 ;CHECK: vmaxps %zmm1, %zmm0, %zmm2 {%k1}
2603 %res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask, i32 4)
2604 ret <16 x float> %res
2607 define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2608 ;CHECK-LABEL: test_mm512_max_round_ps_sae
2609 ;CHECK: vmaxps {sae}, %zmm1, %zmm0, %zmm0
2610 %res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 8)
2611 ret <16 x float> %res
2614 define <16 x float> @test_mm512_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
2615 ;CHECK-LABEL: test_mm512_max_round_ps_current
2616 ;CHECK: vmaxps %zmm1, %zmm0, %zmm0
2617 %res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
2618 ret <16 x float> %res
2620 declare <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
; --- vaddss (scalar single) with embedded rounding ---
; Trailing i32 selects the rounding mode checked below:
;   0 = {rn-sae}, 1 = {rd-sae}, 2 = {ru-sae}, 3 = {rz-sae}, 4 = current (none).
; Passthru %a2 + i8 %mask -> merge-masking "{%k1}" into %xmm2; zeroinitializer
; passthru + %mask -> zero-masking "{%k1} {z}"; mask i8 -1 -> unmasked.
2622 declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
; round-to-nearest (imm 0), merge-masked.
2624 define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
2625 ; CHECK-LABEL: test_mask_add_ss_rn
2626 ; CHECK: vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
2627 %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 0)
2628 ret <4 x float> %res
; round-down (imm 1), merge-masked.
2631 define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
2632 ; CHECK-LABEL: test_mask_add_ss_rd
2633 ; CHECK: vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
2634 %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 1)
2635 ret <4 x float> %res
; round-up (imm 2), merge-masked.
2638 define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
2639 ; CHECK-LABEL: test_mask_add_ss_ru
2640 ; CHECK: vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
2641 %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 2)
2642 ret <4 x float> %res
; round-toward-zero (imm 3), merge-masked.
2645 define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
2646 ; CHECK-LABEL: test_mask_add_ss_rz
2647 ; CHECK: vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
2648 %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 3)
2649 ret <4 x float> %res
; current rounding mode (imm 4): no rounding suffix expected.
2652 define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
2653 ; CHECK-LABEL: test_mask_add_ss_current
2654 ; CHECK: vaddss %xmm1, %xmm0, %xmm2 {%k1}
2655 %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
2656 ret <4 x float> %res
; zeroinitializer passthru + mask -> zero-masking {%k1} {z}.
2659 define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
2660 ; CHECK-LABEL: test_maskz_add_ss_rn
2661 ; CHECK: vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
2662 %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 0)
2663 ret <4 x float> %res
; all-ones mask (i8 -1) -> unmasked encoding.
2666 define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) {
2667 ; CHECK-LABEL: test_add_ss_rn
2668 ; CHECK: vaddss {rn-sae}, %xmm1, %xmm0, %xmm0
2669 %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 0)
2670 ret <4 x float> %res
; --- vaddsd (scalar double) with embedded rounding ---
; Mirrors the vaddss tests above for <2 x double>: trailing i32 rounding
; immediate 0/1/2/3 -> {rn-sae}/{rd-sae}/{ru-sae}/{rz-sae}, 4 -> current mode;
; %a2 passthru + %mask -> {%k1}, zeroinitializer + %mask -> {%k1} {z},
; mask i8 -1 -> unmasked.
2673 declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
; round-to-nearest (imm 0), merge-masked.
2675 define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
2676 ; CHECK-LABEL: test_mask_add_sd_rn
2677 ; CHECK: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
2678 %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 0)
2679 ret <2 x double> %res
; round-down (imm 1), merge-masked.
2682 define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
2683 ; CHECK-LABEL: test_mask_add_sd_rd
2684 ; CHECK: vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
2685 %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 1)
2686 ret <2 x double> %res
; round-up (imm 2), merge-masked.
2689 define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
2690 ; CHECK-LABEL: test_mask_add_sd_ru
2691 ; CHECK: vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
2692 %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 2)
2693 ret <2 x double> %res
; round-toward-zero (imm 3), merge-masked.
2696 define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
2697 ; CHECK-LABEL: test_mask_add_sd_rz
2698 ; CHECK: vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
2699 %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 3)
2700 ret <2 x double> %res
; current rounding mode (imm 4): no rounding suffix expected.
2703 define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
2704 ; CHECK-LABEL: test_mask_add_sd_current
2705 ; CHECK: vaddsd %xmm1, %xmm0, %xmm2 {%k1}
2706 %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
2707 ret <2 x double> %res
; zeroinitializer passthru + mask -> zero-masking {%k1} {z}.
2710 define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
2711 ; CHECK-LABEL: test_maskz_add_sd_rn
2712 ; CHECK: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
2713 %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 0)
2714 ret <2 x double> %res
; all-ones mask (i8 -1) -> unmasked encoding.
2717 define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) {
2718 ; CHECK-LABEL: test_add_sd_rn
2719 ; CHECK: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0
2720 %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 0)
2721 ret <2 x double> %res
; --- vmaxss (scalar single) with {sae} / current rounding ---
; For max, only imm 8 ({sae}, exceptions suppressed) and imm 4 (current mode,
; no suffix) are exercised. Masking follows the same pattern as the add tests:
; %a2 passthru + %mask -> {%k1}, zeroinitializer + %mask -> {%k1} {z},
; mask i8 -1 -> unmasked.
2724 declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
; {sae} (imm 8), merge-masked into %xmm2.
2726 define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
2727 ; CHECK-LABEL: test_mask_max_ss_sae
2728 ; CHECK: vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
2729 %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 8)
2730 ret <4 x float> %res
; {sae} (imm 8), zero-masked.
2733 define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
2734 ; CHECK-LABEL: test_maskz_max_ss_sae
2735 ; CHECK: vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
2736 %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 8)
2737 ret <4 x float> %res
; {sae} (imm 8), unmasked (mask i8 -1).
2740 define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) {
2741 ; CHECK-LABEL: test_max_ss_sae
2742 ; CHECK: vmaxss {sae}, %xmm1, %xmm0, %xmm0
2743 %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 8)
2744 ret <4 x float> %res
; current mode (imm 4), merge-masked.
2747 define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
2748 ; CHECK-LABEL: test_mask_max_ss
2749 ; CHECK: vmaxss %xmm1, %xmm0, %xmm2 {%k1}
2750 %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
2751 ret <4 x float> %res
; current mode (imm 4), zero-masked.
2754 define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
2755 ; CHECK-LABEL: test_maskz_max_ss
2756 ; CHECK: vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z}
2757 %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 4)
2758 ret <4 x float> %res
; current mode (imm 4), unmasked.
2761 define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) {
2762 ; CHECK-LABEL: test_max_ss
2763 ; CHECK: vmaxss %xmm1, %xmm0, %xmm0
2764 %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 4)
2765 ret <4 x float> %res
; --- vmaxsd (scalar double) with {sae} / current rounding ---
; Mirrors the vmaxss tests above for <2 x double>: imm 8 -> {sae},
; imm 4 -> current mode (no suffix); merge / zero / no masking variants.
2767 declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
; {sae} (imm 8), merge-masked into %xmm2.
2769 define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
2770 ; CHECK-LABEL: test_mask_max_sd_sae
2771 ; CHECK: vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
2772 %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 8)
2773 ret <2 x double> %res
; {sae} (imm 8), zero-masked.
2776 define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
2777 ; CHECK-LABEL: test_maskz_max_sd_sae
2778 ; CHECK: vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
2779 %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 8)
2780 ret <2 x double> %res
; {sae} (imm 8), unmasked (mask i8 -1).
2783 define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) {
2784 ; CHECK-LABEL: test_max_sd_sae
2785 ; CHECK: vmaxsd {sae}, %xmm1, %xmm0, %xmm0
2786 %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 8)
2787 ret <2 x double> %res
; current mode (imm 4), merge-masked.
2790 define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
2791 ; CHECK-LABEL: test_mask_max_sd
2792 ; CHECK: vmaxsd %xmm1, %xmm0, %xmm2 {%k1}
2793 %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
2794 ret <2 x double> %res
; current mode (imm 4), zero-masked.
2797 define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
2798 ; CHECK-LABEL: test_maskz_max_sd
2799 ; CHECK: vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z}
2800 %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 4)
2801 ret <2 x double> %res
; current mode (imm 4), unmasked.
2804 define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) {
2805 ; CHECK-LABEL: test_max_sd
2806 ; CHECK: vmaxsd %xmm1, %xmm0, %xmm0
2807 %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 4)
2808 ret <2 x double> %res
; --- integer -> scalar FP conversions with embedded rounding ---
; Each test passes rounding imm i32 3, so the emitted instruction carries
; {rz-sae} (round-toward-zero). The l/q mnemonic suffixes track the 32-bit
; (%edi) vs 64-bit (%rdi) integer source operand.
; NOTE(review): CHECK-NEXT appears right after CHECK-LABEL with intervening
; original lines (e.g. 2813, 2815) missing from this extract — presumably a
; "## BB#0:" CHECK line was dropped; verify against the pristine file.
; i32 source -> scalar double, {rz-sae}.
2811 define <2 x double> @test_x86_avx512_cvtsi2sd32(<2 x double> %a, i32 %b) {
2812 ; CHECK-LABEL: test_x86_avx512_cvtsi2sd32:
2814 ; CHECK-NEXT: vcvtsi2sdl %edi, {rz-sae}, %xmm0, %xmm0
2816 %res = call <2 x double> @llvm.x86.avx512.cvtsi2sd32(<2 x double> %a, i32 %b, i32 3) ; <<<2 x double>> [#uses=1]
2817 ret <2 x double> %res
2819 declare <2 x double> @llvm.x86.avx512.cvtsi2sd32(<2 x double>, i32, i32) nounwind readnone
; i64 source -> scalar double, {rz-sae}.
2821 define <2 x double> @test_x86_avx512_cvtsi2sd64(<2 x double> %a, i64 %b) {
2822 ; CHECK-LABEL: test_x86_avx512_cvtsi2sd64:
2824 ; CHECK-NEXT: vcvtsi2sdq %rdi, {rz-sae}, %xmm0, %xmm0
2826 %res = call <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double> %a, i64 %b, i32 3) ; <<<2 x double>> [#uses=1]
2827 ret <2 x double> %res
2829 declare <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double>, i64, i32) nounwind readnone
; i32 source -> scalar single, {rz-sae}.
2831 define <4 x float> @test_x86_avx512_cvtsi2ss32(<4 x float> %a, i32 %b) {
2832 ; CHECK-LABEL: test_x86_avx512_cvtsi2ss32:
2834 ; CHECK-NEXT: vcvtsi2ssl %edi, {rz-sae}, %xmm0, %xmm0
2836 %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float> %a, i32 %b, i32 3) ; <<<4 x float>> [#uses=1]
2837 ret <4 x float> %res
2839 declare <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float>, i32, i32) nounwind readnone
; i64 source -> scalar single, {rz-sae}.
2841 define <4 x float> @test_x86_avx512_cvtsi2ss64(<4 x float> %a, i64 %b) {
2842 ; CHECK-LABEL: test_x86_avx512_cvtsi2ss64:
2844 ; CHECK-NEXT: vcvtsi2ssq %rdi, {rz-sae}, %xmm0, %xmm0
2846 %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float> %a, i64 %b, i32 3) ; <<<4 x float>> [#uses=1]
2847 ret <4 x float> %res
2849 declare <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float>, i64, i32) nounwind readnone