1 ; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
; Vector integer equality (icmp eq) should select NEON CMEQ for each
; element type/width. Each function was missing its ret terminator and
; closing brace, which made the IR unparsable; restored below.
define <8 x i8> @cmeq8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp eq <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmeq16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp eq <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmeq4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp eq <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmeq8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp eq <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmeq2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp eq <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmeq4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp eq <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp eq <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Vector inequality (icmp ne): no direct NEON instruction, so codegen is
; CMEQ followed by NOT (note: NOT always operates on .8b/.16b regardless
; of element type). Restored the missing ret/brace in each function.
define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Signed greater-than (icmp sgt) maps directly to NEON CMGT.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmgt8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp sgt <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmgt16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp sgt <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmgt4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp sgt <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmgt8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp sgt <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmgt2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp sgt <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmgt4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp sgt <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmgt2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp sgt <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Signed less-than (icmp slt): no CMLT-by-register instruction, so codegen
; emits CMGT with the operands swapped; the checks pin v1/v0 operand order.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmlt8xi8(<8 x i8> %A, <8 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.8b, v1.8b, v0.8b
%tmp3 = icmp slt <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmlt16xi8(<16 x i8> %A, <16 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.16b, v1.16b, v0.16b
%tmp3 = icmp slt <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmlt4xi16(<4 x i16> %A, <4 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.4h, v1.4h, v0.4h
%tmp3 = icmp slt <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmlt8xi16(<8 x i16> %A, <8 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.8h, v1.8h, v0.8h
%tmp3 = icmp slt <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmlt2xi32(<2 x i32> %A, <2 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = icmp slt <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmlt4xi32(<4 x i32> %A, <4 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = icmp slt <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmlt2xi64(<2 x i64> %A, <2 x i64> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = icmp slt <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Signed greater-or-equal (icmp sge) maps directly to NEON CMGE.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmge8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp sge <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmge16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp sge <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmge4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp sge <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmge8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp sge <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmge2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp sge <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmge4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp sge <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmge2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp sge <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Signed less-or-equal (icmp sle): no CMLE-by-register instruction, so
; codegen emits CMGE with operands swapped; checks pin v1/v0 order.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmle8xi8(<8 x i8> %A, <8 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.8b, v1.8b, v0.8b
%tmp3 = icmp sle <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmle16xi8(<16 x i8> %A, <16 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.16b, v1.16b, v0.16b
%tmp3 = icmp sle <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmle4xi16(<4 x i16> %A, <4 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.4h, v1.4h, v0.4h
%tmp3 = icmp sle <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmle8xi16(<8 x i16> %A, <8 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.8h, v1.8h, v0.8h
%tmp3 = icmp sle <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmle2xi32(<2 x i32> %A, <2 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = icmp sle <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmle4xi32(<4 x i32> %A, <4 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = icmp sle <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmle2xi64(<2 x i64> %A, <2 x i64> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = icmp sle <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Unsigned greater-than (icmp ugt) maps directly to NEON CMHI.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmhi8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ugt <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmhi16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ugt <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmhi4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp ugt <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmhi8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp ugt <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmhi2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp ugt <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmhi4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp ugt <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmhi2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp ugt <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Unsigned less-than (icmp ult): no CMLO instruction, so codegen emits
; CMHI with operands swapped; checks pin v1/v0 order.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmlo8xi8(<8 x i8> %A, <8 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.8b, v1.8b, v0.8b
%tmp3 = icmp ult <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmlo16xi8(<16 x i8> %A, <16 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
%tmp3 = icmp ult <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmlo4xi16(<4 x i16> %A, <4 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
%tmp3 = icmp ult <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmlo8xi16(<8 x i16> %A, <8 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
%tmp3 = icmp ult <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmlo2xi32(<2 x i32> %A, <2 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = icmp ult <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmlo4xi32(<4 x i32> %A, <4 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = icmp ult <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmlo2xi64(<2 x i64> %A, <2 x i64> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = icmp ult <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Unsigned greater-or-equal (icmp uge) maps directly to NEON CMHS.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmhs8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp uge <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmhs16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp uge <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmhs4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp uge <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmhs8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp uge <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmhs2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp uge <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmhs4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp uge <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmhs2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp uge <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Unsigned less-or-equal (icmp ule): no CMLS-by-register instruction, so
; codegen emits CMHS with operands swapped; checks pin v1/v0 order.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmls8xi8(<8 x i8> %A, <8 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
%tmp3 = icmp ule <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmls16xi8(<16 x i8> %A, <16 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
%tmp3 = icmp ule <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmls4xi16(<4 x i16> %A, <4 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
%tmp3 = icmp ule <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmls8xi16(<8 x i16> %A, <8 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
%tmp3 = icmp ule <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmls2xi32(<2 x i32> %A, <2 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = icmp ule <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmls4xi32(<4 x i32> %A, <4 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = icmp ule <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmls2xi64(<2 x i64> %A, <2 x i64> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = icmp ule <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; (and %A, %B) != 0 should select NEON CMTST (bitwise test).
; Restored the missing ret/brace in each function; these return %tmp5.
define <8 x i8> @cmtst8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmtst {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = and <8 x i8> %A, %B
%tmp4 = icmp ne <8 x i8> %tmp3, zeroinitializer
%tmp5 = sext <8 x i1> %tmp4 to <8 x i8>
ret <8 x i8> %tmp5
}

define <16 x i8> @cmtst16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmtst {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = and <16 x i8> %A, %B
%tmp4 = icmp ne <16 x i8> %tmp3, zeroinitializer
%tmp5 = sext <16 x i1> %tmp4 to <16 x i8>
ret <16 x i8> %tmp5
}

define <4 x i16> @cmtst4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmtst {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = and <4 x i16> %A, %B
%tmp4 = icmp ne <4 x i16> %tmp3, zeroinitializer
%tmp5 = sext <4 x i1> %tmp4 to <4 x i16>
ret <4 x i16> %tmp5
}

define <8 x i16> @cmtst8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmtst {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = and <8 x i16> %A, %B
%tmp4 = icmp ne <8 x i16> %tmp3, zeroinitializer
%tmp5 = sext <8 x i1> %tmp4 to <8 x i16>
ret <8 x i16> %tmp5
}

define <2 x i32> @cmtst2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmtst {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = and <2 x i32> %A, %B
%tmp4 = icmp ne <2 x i32> %tmp3, zeroinitializer
%tmp5 = sext <2 x i1> %tmp4 to <2 x i32>
ret <2 x i32> %tmp5
}

define <4 x i32> @cmtst4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmtst {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = and <4 x i32> %A, %B
%tmp4 = icmp ne <4 x i32> %tmp3, zeroinitializer
%tmp5 = sext <4 x i1> %tmp4 to <4 x i32>
ret <4 x i32> %tmp5
}

define <2 x i64> @cmtst2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmtst {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = and <2 x i64> %A, %B
%tmp4 = icmp ne <2 x i64> %tmp3, zeroinitializer
%tmp5 = sext <2 x i1> %tmp4 to <2 x i64>
ret <2 x i64> %tmp5
}
; Equality against zero should select the CMEQ #0 immediate form.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmeqz8xi8(<8 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
%tmp3 = icmp eq <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmeqz16xi8(<16 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
%tmp3 = icmp eq <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmeqz4xi16(<4 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
%tmp3 = icmp eq <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmeqz8xi16(<8 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
%tmp3 = icmp eq <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmeqz2xi32(<2 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
%tmp3 = icmp eq <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmeqz4xi32(<4 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
%tmp3 = icmp eq <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmeqz2xi64(<2 x i64> %A) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
%tmp3 = icmp eq <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Signed >= 0 should select the CMGE #0 immediate form.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmgez8xi8(<8 x i8> %A) {
;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
%tmp3 = icmp sge <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmgez16xi8(<16 x i8> %A) {
;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
%tmp3 = icmp sge <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmgez4xi16(<4 x i16> %A) {
;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
%tmp3 = icmp sge <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmgez8xi16(<8 x i16> %A) {
;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
%tmp3 = icmp sge <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmgez2xi32(<2 x i32> %A) {
;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
%tmp3 = icmp sge <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmgez4xi32(<4 x i32> %A) {
;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
%tmp3 = icmp sge <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmgez2xi64(<2 x i64> %A) {
;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
%tmp3 = icmp sge <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Signed > 0 should select the CMGT #0 immediate form.
; Restored the missing ret/brace in each function.
define <8 x i8> @cmgtz8xi8(<8 x i8> %A) {
;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
%tmp3 = icmp sgt <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmgtz16xi8(<16 x i8> %A) {
;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
%tmp3 = icmp sgt <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmgtz4xi16(<4 x i16> %A) {
;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
%tmp3 = icmp sgt <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmgtz8xi16(<8 x i16> %A) {
;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
%tmp3 = icmp sgt <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmgtz2xi32(<2 x i32> %A) {
;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
%tmp3 = icmp sgt <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmgtz4xi32(<4 x i32> %A) {
;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
%tmp3 = icmp sgt <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmgtz2xi64(<2 x i64> %A) {
;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
%tmp3 = icmp sgt <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Signed <= 0 should select the CMLE #0 immediate form (exists only
; against zero). Restored the missing ret/brace in each function.
define <8 x i8> @cmlez8xi8(<8 x i8> %A) {
;CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
%tmp3 = icmp sle <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmlez16xi8(<16 x i8> %A) {
;CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
%tmp3 = icmp sle <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmlez4xi16(<4 x i16> %A) {
;CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
%tmp3 = icmp sle <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmlez8xi16(<8 x i16> %A) {
;CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
%tmp3 = icmp sle <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmlez2xi32(<2 x i32> %A) {
;CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
%tmp3 = icmp sle <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmlez4xi32(<4 x i32> %A) {
;CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
%tmp3 = icmp sle <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmlez2xi64(<2 x i64> %A) {
;CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
%tmp3 = icmp sle <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Signed < 0 should select the CMLT #0 immediate form (exists only
; against zero). Restored the missing ret/brace in each function.
define <8 x i8> @cmltz8xi8(<8 x i8> %A) {
;CHECK: cmlt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
%tmp3 = icmp slt <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmltz16xi8(<16 x i8> %A) {
;CHECK: cmlt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
%tmp3 = icmp slt <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmltz4xi16(<4 x i16> %A) {
;CHECK: cmlt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
%tmp3 = icmp slt <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmltz8xi16(<8 x i16> %A) {
;CHECK: cmlt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
%tmp3 = icmp slt <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmltz2xi32(<2 x i32> %A) {
;CHECK: cmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
%tmp3 = icmp slt <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmltz4xi32(<4 x i32> %A) {
;CHECK: cmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
%tmp3 = icmp slt <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
;CHECK: cmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
%tmp3 = icmp slt <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Inequality against zero: CMEQ #0 followed by NOT (NOT is .8b/.16b only).
; Restored the missing ret/brace in each function.
define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Unsigned >= 0: no immediate form exists, so codegen materializes zero
; with MOVI and uses register CMHS. Restored the missing ret/brace.
define <8 x i8> @cmhsz8xi8(<8 x i8> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp uge <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmhsz16xi8(<16 x i8> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp uge <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmhsz4xi16(<4 x i16> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp uge <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmhsz8xi16(<8 x i16> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp uge <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmhsz2xi32(<2 x i32> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp uge <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmhsz4xi32(<4 x i32> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp uge <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp uge <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Unsigned > 0: no immediate form exists, so codegen materializes zero
; with MOVI and uses register CMHI. Restored the missing ret/brace.
define <8 x i8> @cmhiz8xi8(<8 x i8> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ugt <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}

define <16 x i8> @cmhiz16xi8(<16 x i8> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ugt <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}

define <4 x i16> @cmhiz4xi16(<4 x i16> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp ugt <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}

define <8 x i16> @cmhiz8xi16(<8 x i16> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp ugt <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}

define <2 x i32> @cmhiz2xi32(<2 x i32> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp ugt <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}

define <4 x i32> @cmhiz4xi32(<4 x i32> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp ugt <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}

define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp ugt <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
; Unsigned <= zero (icmp ule x, 0): there is no CMLS instruction, so LS is selected
; as CMHS with the operands swapped (zero vector first), verified below per type.
1030 define <8 x i8> @cmlsz8xi8(<8 x i8> %A) {
1031 ; Using registers other than v0, v1 are possible, but would be odd.
1032 ; LS implemented as HS, so check reversed operands.
1033 ;CHECK: movi v1.8b, #0x0
1034 ;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
1035 %tmp3 = icmp ule <8 x i8> %A, zeroinitializer;
1036 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
1040 define <16 x i8> @cmlsz16xi8(<16 x i8> %A) {
1041 ; Using registers other than v0, v1 are possible, but would be odd.
1042 ; LS implemented as HS, so check reversed operands.
1043 ;CHECK: movi v1.16b, #0x0
1044 ;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
1045 %tmp3 = icmp ule <16 x i8> %A, zeroinitializer;
1046 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
1050 define <4 x i16> @cmlsz4xi16(<4 x i16> %A) {
1051 ; Using registers other than v0, v1 are possible, but would be odd.
1052 ; LS implemented as HS, so check reversed operands.
1053 ;CHECK: movi v1.8b, #0x0
1054 ;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
1055 %tmp3 = icmp ule <4 x i16> %A, zeroinitializer;
1056 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
1060 define <8 x i16> @cmlsz8xi16(<8 x i16> %A) {
1061 ; Using registers other than v0, v1 are possible, but would be odd.
1062 ; LS implemented as HS, so check reversed operands.
1063 ;CHECK: movi v1.16b, #0x0
1064 ;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
1065 %tmp3 = icmp ule <8 x i16> %A, zeroinitializer;
1066 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
1070 define <2 x i32> @cmlsz2xi32(<2 x i32> %A) {
1071 ; Using registers other than v0, v1 are possible, but would be odd.
1072 ; LS implemented as HS, so check reversed operands.
1073 ;CHECK: movi v1.8b, #0x0
1074 ;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
1075 %tmp3 = icmp ule <2 x i32> %A, zeroinitializer;
1076 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1080 define <4 x i32> @cmlsz4xi32(<4 x i32> %A) {
1081 ; Using registers other than v0, v1 are possible, but would be odd.
1082 ; LS implemented as HS, so check reversed operands.
1083 ;CHECK: movi v1.16b, #0x0
1084 ;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
1085 %tmp3 = icmp ule <4 x i32> %A, zeroinitializer;
1086 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1090 define <2 x i64> @cmlsz2xi64(<2 x i64> %A) {
1091 ; Using registers other than v0, v1 are possible, but would be odd.
1092 ; LS implemented as HS, so check reversed operands.
1093 ;CHECK: movi v1.16b, #0x0
1094 ;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
1095 %tmp3 = icmp ule <2 x i64> %A, zeroinitializer;
1096 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unsigned < zero (icmp ult x, 0): there is no CMLO instruction, so LO is selected
; as CMHI with the operands swapped (zero vector first), verified below per type.
1100 define <8 x i8> @cmloz8xi8(<8 x i8> %A) {
1101 ; Using registers other than v0, v1 are possible, but would be odd.
1102 ; LO implemented as HI, so check reversed operands.
1103 ;CHECK: movi v1.8b, #0x0
1104 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v1.8b, {{v[0-9]+}}.8b
1105 %tmp3 = icmp ult <8 x i8> %A, zeroinitializer;
1106 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
1110 define <16 x i8> @cmloz16xi8(<16 x i8> %A) {
1111 ; Using registers other than v0, v1 are possible, but would be odd.
1112 ; LO implemented as HI, so check reversed operands.
1113 ;CHECK: movi v1.16b, #0x0
1114 ;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
1115 %tmp3 = icmp ult <16 x i8> %A, zeroinitializer;
1116 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
1120 define <4 x i16> @cmloz4xi16(<4 x i16> %A) {
1121 ; Using registers other than v0, v1 are possible, but would be odd.
1122 ; LO implemented as HI, so check reversed operands.
1123 ;CHECK: movi v1.8b, #0x0
1124 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
1125 %tmp3 = icmp ult <4 x i16> %A, zeroinitializer;
1126 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
1130 define <8 x i16> @cmloz8xi16(<8 x i16> %A) {
1131 ; Using registers other than v0, v1 are possible, but would be odd.
1132 ; LO implemented as HI, so check reversed operands.
1133 ;CHECK: movi v1.16b, #0x0
1134 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
1135 %tmp3 = icmp ult <8 x i16> %A, zeroinitializer;
1136 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
1140 define <2 x i32> @cmloz2xi32(<2 x i32> %A) {
1141 ; Using registers other than v0, v1 are possible, but would be odd.
1142 ; LO implemented as HI, so check reversed operands.
1143 ;CHECK: movi v1.8b, #0x0
1144 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
1145 %tmp3 = icmp ult <2 x i32> %A, zeroinitializer;
1146 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1150 define <4 x i32> @cmloz4xi32(<4 x i32> %A) {
1151 ; Using registers other than v0, v1 are possible, but would be odd.
1152 ; LO implemented as HI, so check reversed operands.
1153 ;CHECK: movi v1.16b, #0x0
1154 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
1155 %tmp3 = icmp ult <4 x i32> %A, zeroinitializer;
1156 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1160 define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
1161 ; Using registers other than v0, v1 are possible, but would be odd.
1162 ; LO implemented as HI, so check reversed operands.
1163 ;CHECK: movi v1.16b, #0x0
1164 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
1165 %tmp3 = icmp ult <2 x i64> %A, zeroinitializer;
1166 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Ordered float compares with a direct instruction: OEQ -> FCMEQ, OGE -> FCMGE,
; OGT -> FCMGT, operands in source order.
1171 define <2 x i32> @fcmoeq2xfloat(<2 x float> %A, <2 x float> %B) {
1172 ;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
1173 %tmp3 = fcmp oeq <2 x float> %A, %B
1174 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1178 define <4 x i32> @fcmoeq4xfloat(<4 x float> %A, <4 x float> %B) {
1179 ;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
1180 %tmp3 = fcmp oeq <4 x float> %A, %B
1181 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1184 define <2 x i64> @fcmoeq2xdouble(<2 x double> %A, <2 x double> %B) {
1185 ;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
1186 %tmp3 = fcmp oeq <2 x double> %A, %B
1187 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1191 define <2 x i32> @fcmoge2xfloat(<2 x float> %A, <2 x float> %B) {
1192 ;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
1193 %tmp3 = fcmp oge <2 x float> %A, %B
1194 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1198 define <4 x i32> @fcmoge4xfloat(<4 x float> %A, <4 x float> %B) {
1199 ;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
1200 %tmp3 = fcmp oge <4 x float> %A, %B
1201 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1204 define <2 x i64> @fcmoge2xdouble(<2 x double> %A, <2 x double> %B) {
1205 ;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
1206 %tmp3 = fcmp oge <2 x double> %A, %B
1207 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1211 define <2 x i32> @fcmogt2xfloat(<2 x float> %A, <2 x float> %B) {
1212 ;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
1213 %tmp3 = fcmp ogt <2 x float> %A, %B
1214 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1218 define <4 x i32> @fcmogt4xfloat(<4 x float> %A, <4 x float> %B) {
1219 ;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
1220 %tmp3 = fcmp ogt <4 x float> %A, %B
1221 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1224 define <2 x i64> @fcmogt2xdouble(<2 x double> %A, <2 x double> %B) {
1225 ;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
1226 %tmp3 = fcmp ogt <2 x double> %A, %B
1227 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Ordered less-than/less-or-equal: no FCMLE/FCMLT register form, so OLE is selected
; as FCMGE and OLT as FCMGT with the operands swapped (v1 before v0).
1231 define <2 x i32> @fcmole2xfloat(<2 x float> %A, <2 x float> %B) {
1232 ; Using registers other than v0, v1 are possible, but would be odd.
1233 ; OLE implemented as OGE, so check reversed operands.
1234 ;CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
1235 %tmp3 = fcmp ole <2 x float> %A, %B
1236 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1240 define <4 x i32> @fcmole4xfloat(<4 x float> %A, <4 x float> %B) {
1241 ; Using registers other than v0, v1 are possible, but would be odd.
1242 ; OLE implemented as OGE, so check reversed operands.
1243 ;CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
1244 %tmp3 = fcmp ole <4 x float> %A, %B
1245 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1248 define <2 x i64> @fcmole2xdouble(<2 x double> %A, <2 x double> %B) {
1249 ; Using registers other than v0, v1 are possible, but would be odd.
1250 ; OLE implemented as OGE, so check reversed operands.
1251 ;CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
1252 %tmp3 = fcmp ole <2 x double> %A, %B
1253 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1257 define <2 x i32> @fcmolt2xfloat(<2 x float> %A, <2 x float> %B) {
1258 ; Using registers other than v0, v1 are possible, but would be odd.
1259 ; OLT implemented as OGT, so check reversed operands.
1260 ;CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1261 %tmp3 = fcmp olt <2 x float> %A, %B
1262 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1266 define <4 x i32> @fcmolt4xfloat(<4 x float> %A, <4 x float> %B) {
1267 ; Using registers other than v0, v1 are possible, but would be odd.
1268 ; OLT implemented as OGT, so check reversed operands.
1269 ;CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1270 %tmp3 = fcmp olt <4 x float> %A, %B
1271 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1274 define <2 x i64> @fcmolt2xdouble(<2 x double> %A, <2 x double> %B) {
1275 ; Using registers other than v0, v1 are possible, but would be odd.
1276 ; OLT implemented as OGT, so check reversed operands.
1277 ;CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1278 %tmp3 = fcmp olt <2 x double> %A, %B
1279 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Composite ordered predicates expanded to two compares combined with ORR:
; ONE = OGT | OLT (OLT lowered as swapped FCMGT), ORD = OGE | OLT.
1283 define <2 x i32> @fcmone2xfloat(<2 x float> %A, <2 x float> %B) {
1284 ; Using registers other than v0, v1 are possible, but would be odd.
1285 ; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
1286 ;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
1287 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1288 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1289 %tmp3 = fcmp one <2 x float> %A, %B
1290 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1294 define <4 x i32> @fcmone4xfloat(<4 x float> %A, <4 x float> %B) {
1295 ; Using registers other than v0, v1 are possible, but would be odd.
1296 ; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
1297 ;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
1298 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1299 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1300 %tmp3 = fcmp one <4 x float> %A, %B
1301 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1304 define <2 x i64> @fcmone2xdouble(<2 x double> %A, <2 x double> %B) {
1305 ; Using registers other than v0, v1 are possible, but would be odd.
1306 ; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
1307 ;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
1308 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1309 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1310 ; (reversed operands are already verified by the patterns above)
1311 %tmp3 = fcmp one <2 x double> %A, %B
1312 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1317 define <2 x i32> @fcmord2xfloat(<2 x float> %A, <2 x float> %B) {
1318 ; Using registers other than v0, v1 are possible, but would be odd.
1319 ; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
1320 ;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
1321 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1322 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1323 %tmp3 = fcmp ord <2 x float> %A, %B
1324 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1329 define <4 x i32> @fcmord4xfloat(<4 x float> %A, <4 x float> %B) {
1330 ; Using registers other than v0, v1 are possible, but would be odd.
1331 ; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
1332 ;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
1333 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1334 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1335 %tmp3 = fcmp ord <4 x float> %A, %B
1336 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1340 define <2 x i64> @fcmord2xdouble(<2 x double> %A, <2 x double> %B) {
1341 ; Using registers other than v0, v1 are possible, but would be odd.
1342 ; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
1343 ;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
1344 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1345 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1346 %tmp3 = fcmp ord <2 x double> %A, %B
1347 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered (UNO): expanded as the complement of ORD — two compares, ORR, then NOT.
1352 define <2 x i32> @fcmuno2xfloat(<2 x float> %A, <2 x float> %B) {
1353 ; Using registers other than v0, v1 are possible, but would be odd.
1354 ; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
1355 ;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
1356 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1357 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1358 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1359 %tmp3 = fcmp uno <2 x float> %A, %B
1360 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1364 define <4 x i32> @fcmuno4xfloat(<4 x float> %A, <4 x float> %B) {
1365 ; Using registers other than v0, v1 are possible, but would be odd.
1366 ; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
1367 ;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
1368 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1369 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1370 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1371 %tmp3 = fcmp uno <4 x float> %A, %B
1372 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1376 define <2 x i64> @fcmuno2xdouble(<2 x double> %A, <2 x double> %B) {
1377 ; Using registers other than v0, v1 are possible, but would be odd.
1378 ; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
1379 ;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
1380 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1381 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1382 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1383 %tmp3 = fcmp uno <2 x double> %A, %B
1384 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered-or-equal (UEQ): expanded as the complement of ONE — two FCMGTs, ORR, NOT.
1388 define <2 x i32> @fcmueq2xfloat(<2 x float> %A, <2 x float> %B) {
1389 ; Using registers other than v0, v1 are possible, but would be odd.
1390 ; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
1391 ;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
1392 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1393 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1394 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1395 %tmp3 = fcmp ueq <2 x float> %A, %B
1396 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1400 define <4 x i32> @fcmueq4xfloat(<4 x float> %A, <4 x float> %B) {
1401 ; Using registers other than v0, v1 are possible, but would be odd.
1402 ; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
1403 ;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
1404 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1405 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1406 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1407 %tmp3 = fcmp ueq <4 x float> %A, %B
1408 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1412 define <2 x i64> @fcmueq2xdouble(<2 x double> %A, <2 x double> %B) {
1413 ; Using registers other than v0, v1 are possible, but would be odd.
1414 ; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
1415 ;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
1416 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1417 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1418 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1419 %tmp3 = fcmp ueq <2 x double> %A, %B
1420 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered >= / > : lowered as a single ordered compare plus NOT.
; UGE = !OLT (FCMGT with swapped operands, then NOT); UGT = !OLE (swapped FCMGE, then NOT).
1424 define <2 x i32> @fcmuge2xfloat(<2 x float> %A, <2 x float> %B) {
1425 ; Using registers other than v0, v1 are possible, but would be odd.
1426 ; UGE = ULE with swapped operands, ULE implemented as !OGT.
1427 ;CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1428 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1429 %tmp3 = fcmp uge <2 x float> %A, %B
1430 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1434 define <4 x i32> @fcmuge4xfloat(<4 x float> %A, <4 x float> %B) {
1435 ; Using registers other than v0, v1 are possible, but would be odd.
1436 ; UGE = ULE with swapped operands, ULE implemented as !OGT.
1437 ;CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1438 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1439 %tmp3 = fcmp uge <4 x float> %A, %B
1440 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1444 define <2 x i64> @fcmuge2xdouble(<2 x double> %A, <2 x double> %B) {
1445 ; Using registers other than v0, v1 are possible, but would be odd.
1446 ; UGE = ULE with swapped operands, ULE implemented as !OGT.
1447 ;CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1448 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1449 %tmp3 = fcmp uge <2 x double> %A, %B
1450 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1454 define <2 x i32> @fcmugt2xfloat(<2 x float> %A, <2 x float> %B) {
1455 ; Using registers other than v0, v1 are possible, but would be odd.
1456 ; UGT = ULT with swapped operands, ULT implemented as !OGE.
1457 ;CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
1458 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1459 %tmp3 = fcmp ugt <2 x float> %A, %B
1460 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1464 define <4 x i32> @fcmugt4xfloat(<4 x float> %A, <4 x float> %B) {
1465 ; Using registers other than v0, v1 are possible, but would be odd.
1466 ; UGT = ULT with swapped operands, ULT implemented as !OGE.
1467 ;CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
1468 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1469 %tmp3 = fcmp ugt <4 x float> %A, %B
1470 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1473 define <2 x i64> @fcmugt2xdouble(<2 x double> %A, <2 x double> %B) {
1474 ;CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
1475 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1476 %tmp3 = fcmp ugt <2 x double> %A, %B
1477 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered <= / < : lowered as a single ordered compare plus NOT, operands in
; source order. ULE = !OGT; ULT = !OGE.
1481 define <2 x i32> @fcmule2xfloat(<2 x float> %A, <2 x float> %B) {
1482 ; Using registers other than v0, v1 are possible, but would be odd.
1483 ; ULE implemented as !OGT.
1484 ;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
1485 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1486 %tmp3 = fcmp ule <2 x float> %A, %B
1487 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1491 define <4 x i32> @fcmule4xfloat(<4 x float> %A, <4 x float> %B) {
1492 ; Using registers other than v0, v1 are possible, but would be odd.
1493 ; ULE implemented as !OGT.
1494 ;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
1495 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1496 %tmp3 = fcmp ule <4 x float> %A, %B
1497 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1500 define <2 x i64> @fcmule2xdouble(<2 x double> %A, <2 x double> %B) {
1501 ; Using registers other than v0, v1 are possible, but would be odd.
1502 ; ULE implemented as !OGT.
1503 ;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
1504 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1505 %tmp3 = fcmp ule <2 x double> %A, %B
1506 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1510 define <2 x i32> @fcmult2xfloat(<2 x float> %A, <2 x float> %B) {
1511 ; Using registers other than v0, v1 are possible, but would be odd.
1512 ; ULT implemented as !OGE.
1513 ;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
1514 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1515 %tmp3 = fcmp ult <2 x float> %A, %B
1516 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1520 define <4 x i32> @fcmult4xfloat(<4 x float> %A, <4 x float> %B) {
1521 ; Using registers other than v0, v1 are possible, but would be odd.
1522 ; ULT implemented as !OGE.
1523 ;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
1524 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1525 %tmp3 = fcmp ult <4 x float> %A, %B
1526 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1529 define <2 x i64> @fcmult2xdouble(<2 x double> %A, <2 x double> %B) {
1530 ; Using registers other than v0, v1 are possible, but would be odd.
1531 ; ULT implemented as !OGE.
1532 ;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
1533 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1534 %tmp3 = fcmp ult <2 x double> %A, %B
1535 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered not-equal (UNE): lowered as !OEQ — FCMEQ followed by NOT.
1539 define <2 x i32> @fcmune2xfloat(<2 x float> %A, <2 x float> %B) {
1540 ; Using registers other than v0, v1 are possible, but would be odd.
1542 ;CHECK: fcmeq {{v[0-9]+}}.2s, v0.2s, v1.2s
1543 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1544 %tmp3 = fcmp une <2 x float> %A, %B
1545 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1549 define <4 x i32> @fcmune4xfloat(<4 x float> %A, <4 x float> %B) {
1550 ; Using registers other than v0, v1 are possible, but would be odd.
1552 ;CHECK: fcmeq {{v[0-9]+}}.4s, v0.4s, v1.4s
1553 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1554 %tmp3 = fcmp une <4 x float> %A, %B
1555 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1558 define <2 x i64> @fcmune2xdouble(<2 x double> %A, <2 x double> %B) {
1559 ; Using registers other than v0, v1 are possible, but would be odd.
1561 ;CHECK: fcmeq {{v[0-9]+}}.2d, v0.2d, v1.2d
1562 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1563 %tmp3 = fcmp une <2 x double> %A, %B
1564 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Ordered compares against zero use the immediate #0.0 forms of FCMEQ/FCMGE/FCMGT,
; and the dedicated zero-only FCMLT/FCMLE instructions for olt/ole.
1568 define <2 x i32> @fcmoeqz2xfloat(<2 x float> %A) {
1569 ;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1570 %tmp3 = fcmp oeq <2 x float> %A, zeroinitializer
1571 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1575 define <4 x i32> @fcmoeqz4xfloat(<4 x float> %A) {
1576 ;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1577 %tmp3 = fcmp oeq <4 x float> %A, zeroinitializer
1578 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1581 define <2 x i64> @fcmoeqz2xdouble(<2 x double> %A) {
1582 ;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1583 %tmp3 = fcmp oeq <2 x double> %A, zeroinitializer
1584 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1589 define <2 x i32> @fcmogez2xfloat(<2 x float> %A) {
1590 ;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1591 %tmp3 = fcmp oge <2 x float> %A, zeroinitializer
1592 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1596 define <4 x i32> @fcmogez4xfloat(<4 x float> %A) {
1597 ;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1598 %tmp3 = fcmp oge <4 x float> %A, zeroinitializer
1599 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1602 define <2 x i64> @fcmogez2xdouble(<2 x double> %A) {
1603 ;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1604 %tmp3 = fcmp oge <2 x double> %A, zeroinitializer
1605 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1609 define <2 x i32> @fcmogtz2xfloat(<2 x float> %A) {
1610 ;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1611 %tmp3 = fcmp ogt <2 x float> %A, zeroinitializer
1612 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1616 define <4 x i32> @fcmogtz4xfloat(<4 x float> %A) {
1617 ;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1618 %tmp3 = fcmp ogt <4 x float> %A, zeroinitializer
1619 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1622 define <2 x i64> @fcmogtz2xdouble(<2 x double> %A) {
1623 ;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1624 %tmp3 = fcmp ogt <2 x double> %A, zeroinitializer
1625 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1629 define <2 x i32> @fcmoltz2xfloat(<2 x float> %A) {
1630 ;CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1631 %tmp3 = fcmp olt <2 x float> %A, zeroinitializer
1632 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1636 define <4 x i32> @fcmoltz4xfloat(<4 x float> %A) {
1637 ;CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1638 %tmp3 = fcmp olt <4 x float> %A, zeroinitializer
1639 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1643 define <2 x i64> @fcmoltz2xdouble(<2 x double> %A) {
1644 ;CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1645 %tmp3 = fcmp olt <2 x double> %A, zeroinitializer
1646 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1650 define <2 x i32> @fcmolez2xfloat(<2 x float> %A) {
1651 ;CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1652 %tmp3 = fcmp ole <2 x float> %A, zeroinitializer
1653 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1657 define <4 x i32> @fcmolez4xfloat(<4 x float> %A) {
1658 ;CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1659 %tmp3 = fcmp ole <4 x float> %A, zeroinitializer
1660 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1664 define <2 x i64> @fcmolez2xdouble(<2 x double> %A) {
1665 ;CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1666 %tmp3 = fcmp ole <2 x double> %A, zeroinitializer
1667 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Composite zero compares: ONE(x, 0.0) = OGT | OLT and ORD(x, 0.0) = OGE | OLT,
; each built from two immediate-#0.0 compares combined with ORR.
1671 define <2 x i32> @fcmonez2xfloat(<2 x float> %A) {
1672 ; ONE with zero = OLT | OGT
1673 ;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1674 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1675 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1676 %tmp3 = fcmp one <2 x float> %A, zeroinitializer
1677 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1681 define <4 x i32> @fcmonez4xfloat(<4 x float> %A) {
1682 ; ONE with zero = OLT | OGT
1683 ;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1684 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1685 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1686 %tmp3 = fcmp one <4 x float> %A, zeroinitializer
1687 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1690 define <2 x i64> @fcmonez2xdouble(<2 x double> %A) {
1691 ; ONE with zero = OLT | OGT
1692 ;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1693 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1694 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1695 %tmp3 = fcmp one <2 x double> %A, zeroinitializer
1696 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1700 define <2 x i32> @fcmordz2xfloat(<2 x float> %A) {
1701 ; ORD with zero = OLT | OGE
1702 ;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1703 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1704 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1705 %tmp3 = fcmp ord <2 x float> %A, zeroinitializer
1706 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1710 define <4 x i32> @fcmordz4xfloat(<4 x float> %A) {
1711 ; ORD with zero = OLT | OGE
1712 ;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1713 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1714 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1715 %tmp3 = fcmp ord <4 x float> %A, zeroinitializer
1716 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1719 define <2 x i64> @fcmordz2xdouble(<2 x double> %A) {
1720 ; ORD with zero = OLT | OGE
1721 ;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1722 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1723 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1724 %tmp3 = fcmp ord <2 x double> %A, zeroinitializer
1725 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered-or-equal against zero: complement of ONE — two zero compares, ORR, NOT.
1729 define <2 x i32> @fcmueqz2xfloat(<2 x float> %A) {
1730 ; UEQ with zero = !ONE = !(OLT | OGT)
1731 ;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1732 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1733 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1734 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1735 %tmp3 = fcmp ueq <2 x float> %A, zeroinitializer
1736 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1740 define <4 x i32> @fcmueqz4xfloat(<4 x float> %A) {
1741 ; UEQ with zero = !ONE = !(OLT | OGT)
1742 ;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1743 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1744 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1745 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1746 %tmp3 = fcmp ueq <4 x float> %A, zeroinitializer
1747 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1751 define <2 x i64> @fcmueqz2xdouble(<2 x double> %A) {
1752 ; UEQ with zero = !ONE = !(OLT | OGT)
1753 ;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1754 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1755 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1756 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1757 %tmp3 = fcmp ueq <2 x double> %A, zeroinitializer
1758 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered >= / > against zero: lowered as a single inverted zero compare.
; UGE(x, 0.0) = !OLT -> FCMLT #0.0 + NOT; UGT(x, 0.0) = !OLE -> FCMLE #0.0 + NOT.
1762 define <2 x i32> @fcmugez2xfloat(<2 x float> %A) {
1763 ; UGE with zero = !OLT
1764 ;CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1765 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1766 %tmp3 = fcmp uge <2 x float> %A, zeroinitializer
1767 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1771 define <4 x i32> @fcmugez4xfloat(<4 x float> %A) {
1772 ; UGE with zero = !OLT
1773 ;CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1774 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1775 %tmp3 = fcmp uge <4 x float> %A, zeroinitializer
1776 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1779 define <2 x i64> @fcmugez2xdouble(<2 x double> %A) {
1780 ; UGE with zero = !OLT
1781 ;CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1782 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1783 %tmp3 = fcmp uge <2 x double> %A, zeroinitializer
1784 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1788 define <2 x i32> @fcmugtz2xfloat(<2 x float> %A) {
1789 ; UGT with zero = !OLE
1790 ;CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1791 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1792 %tmp3 = fcmp ugt <2 x float> %A, zeroinitializer
1793 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1797 define <4 x i32> @fcmugtz4xfloat(<4 x float> %A) {
1798 ; UGT with zero = !OLE
1799 ;CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1800 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1801 %tmp3 = fcmp ugt <4 x float> %A, zeroinitializer
1802 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1805 define <2 x i64> @fcmugtz2xdouble(<2 x double> %A) {
1806 ; UGT with zero = !OLE
1807 ;CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1808 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1809 %tmp3 = fcmp ugt <2 x double> %A, zeroinitializer
1810 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered < / <= against zero: lowered as a single inverted zero compare.
; ULT(x, 0.0) = !OGE -> FCMGE #0.0 + NOT; ULE(x, 0.0) = !OGT -> FCMGT #0.0 + NOT.
1814 define <2 x i32> @fcmultz2xfloat(<2 x float> %A) {
1815 ; ULT with zero = !OGE
1816 ;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1817 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1818 %tmp3 = fcmp ult <2 x float> %A, zeroinitializer
1819 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1823 define <4 x i32> @fcmultz4xfloat(<4 x float> %A) {
1824 ;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1825 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1826 %tmp3 = fcmp ult <4 x float> %A, zeroinitializer
1827 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1831 define <2 x i64> @fcmultz2xdouble(<2 x double> %A) {
1832 ;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1833 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1834 %tmp3 = fcmp ult <2 x double> %A, zeroinitializer
1835 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1840 define <2 x i32> @fcmulez2xfloat(<2 x float> %A) {
1841 ; ULE with zero = !OGT
1842 ;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1843 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1844 %tmp3 = fcmp ule <2 x float> %A, zeroinitializer
1845 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1849 define <4 x i32> @fcmulez4xfloat(<4 x float> %A) {
1850 ; ULE with zero = !OGT
1851 ;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1852 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1853 %tmp3 = fcmp ule <4 x float> %A, zeroinitializer
1854 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1858 define <2 x i64> @fcmulez2xdouble(<2 x double> %A) {
1859 ; ULE with zero = !OGT
1860 ;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1861 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1862 %tmp3 = fcmp ule <2 x double> %A, zeroinitializer
1863 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Remaining unordered zero compares: UNE(x, 0.0) = !OEQ -> FCMEQ #0.0 + NOT;
; UNO(x, 0.0) = !ORD -> (FCMGE #0.0, FCMLT #0.0, ORR) + NOT.
1867 define <2 x i32> @fcmunez2xfloat(<2 x float> %A) {
1868 ; UNE with zero = !OEQ with zero
1869 ;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1870 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1871 %tmp3 = fcmp une <2 x float> %A, zeroinitializer
1872 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1876 define <4 x i32> @fcmunez4xfloat(<4 x float> %A) {
1877 ; UNE with zero = !OEQ with zero
1878 ;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1879 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1880 %tmp3 = fcmp une <4 x float> %A, zeroinitializer
1881 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1884 define <2 x i64> @fcmunez2xdouble(<2 x double> %A) {
1885 ; UNE with zero = !OEQ with zero
1886 ;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1887 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1888 %tmp3 = fcmp une <2 x double> %A, zeroinitializer
1889 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1894 define <2 x i32> @fcmunoz2xfloat(<2 x float> %A) {
1895 ; UNO with zero = !ORD = !(OLT | OGE)
1896 ;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1897 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1898 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1899 ;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1900 %tmp3 = fcmp uno <2 x float> %A, zeroinitializer
1901 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1905 define <4 x i32> @fcmunoz4xfloat(<4 x float> %A) {
1906 ; UNO with zero = !ORD = !(OLT | OGE)
1907 ;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1908 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1909 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1910 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1911 %tmp3 = fcmp uno <4 x float> %A, zeroinitializer
1912 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1916 define <2 x i64> @fcmunoz2xdouble(<2 x double> %A) {
1917 ; UNO with zero = !ORD = !(OLT | OGE)
1918 ;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1919 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1920 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1921 ;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1922 %tmp3 = fcmp uno <2 x double> %A, zeroinitializer
1923 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>