; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
; These duplicate tests in arm64's vmax.ll
declare <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8>, <8 x i8>)

define <8 x i8> @test_smax_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; Using registers other than v0, v1 are possible, but would be odd.
; CHECK-LABEL: test_smax_v8i8:
  %tmp1 = call <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: smax v0.8b, v0.8b, v1.8b
  ret <8 x i8> %tmp1
}

define <8 x i8> @test_umax_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; CHECK-LABEL: test_umax_v8i8:
  %tmp1 = call <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: umax v0.8b, v0.8b, v1.8b
  ret <8 x i8> %tmp1
}
declare <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8>, <16 x i8>)

define <16 x i8> @test_smax_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK-LABEL: test_smax_v16i8:
  %tmp1 = call <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: smax v0.16b, v0.16b, v1.16b
  ret <16 x i8> %tmp1
}

define <16 x i8> @test_umax_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK-LABEL: test_umax_v16i8:
  %tmp1 = call <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: umax v0.16b, v0.16b, v1.16b
  ret <16 x i8> %tmp1
}
declare <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16>, <4 x i16>)

define <4 x i16> @test_smax_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK-LABEL: test_smax_v4i16:
  %tmp1 = call <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: smax v0.4h, v0.4h, v1.4h
  ret <4 x i16> %tmp1
}

define <4 x i16> @test_umax_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK-LABEL: test_umax_v4i16:
  %tmp1 = call <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: umax v0.4h, v0.4h, v1.4h
  ret <4 x i16> %tmp1
}
declare <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16>, <8 x i16>)

define <8 x i16> @test_smax_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK-LABEL: test_smax_v8i16:
  %tmp1 = call <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: smax v0.8h, v0.8h, v1.8h
  ret <8 x i16> %tmp1
}

define <8 x i16> @test_umax_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK-LABEL: test_umax_v8i16:
  %tmp1 = call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: umax v0.8h, v0.8h, v1.8h
  ret <8 x i16> %tmp1
}
declare <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32>, <2 x i32>)

define <2 x i32> @test_smax_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK-LABEL: test_smax_v2i32:
  %tmp1 = call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: smax v0.2s, v0.2s, v1.2s
  ret <2 x i32> %tmp1
}

define <2 x i32> @test_umax_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK-LABEL: test_umax_v2i32:
  %tmp1 = call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: umax v0.2s, v0.2s, v1.2s
  ret <2 x i32> %tmp1
}
declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>)

define <4 x i32> @test_smax_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: test_smax_v4i32:
  %tmp1 = call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: smax v0.4s, v0.4s, v1.4s
  ret <4 x i32> %tmp1
}

define <4 x i32> @test_umax_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: test_umax_v4i32:
  %tmp1 = call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: umax v0.4s, v0.4s, v1.4s
  ret <4 x i32> %tmp1
}
declare <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8>, <8 x i8>)

define <8 x i8> @test_smin_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; Using registers other than v0, v1 are possible, but would be odd.
; CHECK-LABEL: test_smin_v8i8:
  %tmp1 = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: smin v0.8b, v0.8b, v1.8b
  ret <8 x i8> %tmp1
}

define <8 x i8> @test_umin_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; CHECK-LABEL: test_umin_v8i8:
  %tmp1 = call <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: umin v0.8b, v0.8b, v1.8b
  ret <8 x i8> %tmp1
}
declare <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8>, <16 x i8>)

define <16 x i8> @test_smin_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK-LABEL: test_smin_v16i8:
  %tmp1 = call <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: smin v0.16b, v0.16b, v1.16b
  ret <16 x i8> %tmp1
}

define <16 x i8> @test_umin_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK-LABEL: test_umin_v16i8:
  %tmp1 = call <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: umin v0.16b, v0.16b, v1.16b
  ret <16 x i8> %tmp1
}
declare <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16>, <4 x i16>)

define <4 x i16> @test_smin_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK-LABEL: test_smin_v4i16:
  %tmp1 = call <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: smin v0.4h, v0.4h, v1.4h
  ret <4 x i16> %tmp1
}

define <4 x i16> @test_umin_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK-LABEL: test_umin_v4i16:
  %tmp1 = call <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: umin v0.4h, v0.4h, v1.4h
  ret <4 x i16> %tmp1
}
declare <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16>, <8 x i16>)

define <8 x i16> @test_smin_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK-LABEL: test_smin_v8i16:
  %tmp1 = call <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: smin v0.8h, v0.8h, v1.8h
  ret <8 x i16> %tmp1
}

define <8 x i16> @test_umin_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK-LABEL: test_umin_v8i16:
  %tmp1 = call <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: umin v0.8h, v0.8h, v1.8h
  ret <8 x i16> %tmp1
}
declare <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32>, <2 x i32>)

define <2 x i32> @test_smin_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK-LABEL: test_smin_v2i32:
  %tmp1 = call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: smin v0.2s, v0.2s, v1.2s
  ret <2 x i32> %tmp1
}

define <2 x i32> @test_umin_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK-LABEL: test_umin_v2i32:
  %tmp1 = call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: umin v0.2s, v0.2s, v1.2s
  ret <2 x i32> %tmp1
}
declare <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>)

define <4 x i32> @test_smin_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: test_smin_v4i32:
  %tmp1 = call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: smin v0.4s, v0.4s, v1.4s
  ret <4 x i32> %tmp1
}

define <4 x i32> @test_umin_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: test_umin_v4i32:
  %tmp1 = call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: umin v0.4s, v0.4s, v1.4s
  ret <4 x i32> %tmp1
}
declare <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float>, <2 x float>)
declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>)
declare <2 x double> @llvm.arm.neon.vmaxs.v2f64(<2 x double>, <2 x double>)

define <2 x float> @test_fmax_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
; CHECK-LABEL: test_fmax_v2f32:
  %val = call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %lhs, <2 x float> %rhs)
; CHECK: fmax v0.2s, v0.2s, v1.2s
  ret <2 x float> %val
}

define <4 x float> @test_fmax_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
; CHECK-LABEL: test_fmax_v4f32:
  %val = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %lhs, <4 x float> %rhs)
; CHECK: fmax v0.4s, v0.4s, v1.4s
  ret <4 x float> %val
}

define <2 x double> @test_fmax_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
; CHECK-LABEL: test_fmax_v2f64:
  %val = call <2 x double> @llvm.arm.neon.vmaxs.v2f64(<2 x double> %lhs, <2 x double> %rhs)
; CHECK: fmax v0.2d, v0.2d, v1.2d
  ret <2 x double> %val
}
declare <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float>, <2 x float>)
declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>)
declare <2 x double> @llvm.arm.neon.vmins.v2f64(<2 x double>, <2 x double>)

define <2 x float> @test_fmin_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
; CHECK-LABEL: test_fmin_v2f32:
  %val = call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %lhs, <2 x float> %rhs)
; CHECK: fmin v0.2s, v0.2s, v1.2s
  ret <2 x float> %val
}

define <4 x float> @test_fmin_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
; CHECK-LABEL: test_fmin_v4f32:
  %val = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %lhs, <4 x float> %rhs)
; CHECK: fmin v0.4s, v0.4s, v1.4s
  ret <4 x float> %val
}

define <2 x double> @test_fmin_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
; CHECK-LABEL: test_fmin_v2f64:
  %val = call <2 x double> @llvm.arm.neon.vmins.v2f64(<2 x double> %lhs, <2 x double> %rhs)
; CHECK: fmin v0.2d, v0.2d, v1.2d
  ret <2 x double> %val
}
declare <2 x float> @llvm.aarch64.neon.vmaxnm.v2f32(<2 x float>, <2 x float>)
declare <4 x float> @llvm.aarch64.neon.vmaxnm.v4f32(<4 x float>, <4 x float>)
declare <2 x double> @llvm.aarch64.neon.vmaxnm.v2f64(<2 x double>, <2 x double>)

define <2 x float> @test_fmaxnm_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
; CHECK-LABEL: test_fmaxnm_v2f32:
  %val = call <2 x float> @llvm.aarch64.neon.vmaxnm.v2f32(<2 x float> %lhs, <2 x float> %rhs)
; CHECK: fmaxnm v0.2s, v0.2s, v1.2s
  ret <2 x float> %val
}

define <4 x float> @test_fmaxnm_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
; CHECK-LABEL: test_fmaxnm_v4f32:
  %val = call <4 x float> @llvm.aarch64.neon.vmaxnm.v4f32(<4 x float> %lhs, <4 x float> %rhs)
; CHECK: fmaxnm v0.4s, v0.4s, v1.4s
  ret <4 x float> %val
}

define <2 x double> @test_fmaxnm_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
; CHECK-LABEL: test_fmaxnm_v2f64:
  %val = call <2 x double> @llvm.aarch64.neon.vmaxnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
; CHECK: fmaxnm v0.2d, v0.2d, v1.2d
  ret <2 x double> %val
}
declare <2 x float> @llvm.aarch64.neon.vminnm.v2f32(<2 x float>, <2 x float>)
declare <4 x float> @llvm.aarch64.neon.vminnm.v4f32(<4 x float>, <4 x float>)
declare <2 x double> @llvm.aarch64.neon.vminnm.v2f64(<2 x double>, <2 x double>)

define <2 x float> @test_fminnm_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
; CHECK-LABEL: test_fminnm_v2f32:
  %val = call <2 x float> @llvm.aarch64.neon.vminnm.v2f32(<2 x float> %lhs, <2 x float> %rhs)
; CHECK: fminnm v0.2s, v0.2s, v1.2s
  ret <2 x float> %val
}

define <4 x float> @test_fminnm_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
; CHECK-LABEL: test_fminnm_v4f32:
  %val = call <4 x float> @llvm.aarch64.neon.vminnm.v4f32(<4 x float> %lhs, <4 x float> %rhs)
; CHECK: fminnm v0.4s, v0.4s, v1.4s
  ret <4 x float> %val
}

define <2 x double> @test_fminnm_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
; CHECK-LABEL: test_fminnm_v2f64:
  %val = call <2 x double> @llvm.aarch64.neon.vminnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
; CHECK: fminnm v0.2d, v0.2d, v1.2d
  ret <2 x double> %val
}