1 ; Test the MSA intrinsics that are encoded with the VEC instruction format.
3 ; RUN: llc -march=mips -mattr=+msa < %s | FileCheck -check-prefix=ANYENDIAN %s
4 ; RUN: llc -march=mipsel -mattr=+msa < %s | FileCheck -check-prefix=ANYENDIAN %s
6 @llvm_mips_and_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
7 @llvm_mips_and_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
8 @llvm_mips_and_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
10 define void @llvm_mips_and_v_b_test() nounwind {
12 %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
13 %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
14 %2 = bitcast <16 x i8> %0 to <16 x i8>
15 %3 = bitcast <16 x i8> %1 to <16 x i8>
16 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
17 %5 = bitcast <16 x i8> %4 to <16 x i8>
18 store <16 x i8> %5, <16 x i8>* @llvm_mips_and_v_b_RES
22 ; ANYENDIAN: llvm_mips_and_v_b_test:
27 ; ANYENDIAN: .size llvm_mips_and_v_b_test
29 @llvm_mips_and_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
30 @llvm_mips_and_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
31 @llvm_mips_and_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
33 define void @llvm_mips_and_v_h_test() nounwind {
35 %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
36 %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
37 %2 = bitcast <8 x i16> %0 to <16 x i8>
38 %3 = bitcast <8 x i16> %1 to <16 x i8>
39 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
40 %5 = bitcast <16 x i8> %4 to <8 x i16>
41 store <8 x i16> %5, <8 x i16>* @llvm_mips_and_v_h_RES
45 ; ANYENDIAN: llvm_mips_and_v_h_test:
50 ; ANYENDIAN: .size llvm_mips_and_v_h_test
52 @llvm_mips_and_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
53 @llvm_mips_and_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
54 @llvm_mips_and_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
56 define void @llvm_mips_and_v_w_test() nounwind {
58 %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
59 %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
60 %2 = bitcast <4 x i32> %0 to <16 x i8>
61 %3 = bitcast <4 x i32> %1 to <16 x i8>
62 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
63 %5 = bitcast <16 x i8> %4 to <4 x i32>
64 store <4 x i32> %5, <4 x i32>* @llvm_mips_and_v_w_RES
68 ; ANYENDIAN: llvm_mips_and_v_w_test:
73 ; ANYENDIAN: .size llvm_mips_and_v_w_test
75 @llvm_mips_and_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
76 @llvm_mips_and_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
77 @llvm_mips_and_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
79 define void @llvm_mips_and_v_d_test() nounwind {
81 %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
82 %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
83 %2 = bitcast <2 x i64> %0 to <16 x i8>
84 %3 = bitcast <2 x i64> %1 to <16 x i8>
85 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
86 %5 = bitcast <16 x i8> %4 to <2 x i64>
87 store <2 x i64> %5, <2 x i64>* @llvm_mips_and_v_d_RES
91 ; ANYENDIAN: llvm_mips_and_v_d_test:
96 ; ANYENDIAN: .size llvm_mips_and_v_d_test
98 define void @and_v_b_test() nounwind {
100 %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
101 %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
102 %2 = and <16 x i8> %0, %1
103 store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES
107 ; ANYENDIAN: and_v_b_test:
112 ; ANYENDIAN: .size and_v_b_test
114 define void @and_v_h_test() nounwind {
116 %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
117 %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
118 %2 = and <8 x i16> %0, %1
119 store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES
123 ; ANYENDIAN: and_v_h_test:
128 ; ANYENDIAN: .size and_v_h_test
131 define void @and_v_w_test() nounwind {
133 %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
134 %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
135 %2 = and <4 x i32> %0, %1
136 store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES
140 ; ANYENDIAN: and_v_w_test:
145 ; ANYENDIAN: .size and_v_w_test
148 define void @and_v_d_test() nounwind {
150 %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
151 %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
152 %2 = and <2 x i64> %0, %1
153 store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES
157 ; ANYENDIAN: and_v_d_test:
162 ; ANYENDIAN: .size and_v_d_test
164 @llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
165 @llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
166 @llvm_mips_bmnz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
168 define void @llvm_mips_bmnz_v_b_test() nounwind {
170 %0 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
171 %1 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
172 %2 = bitcast <16 x i8> %0 to <16 x i8>
173 %3 = bitcast <16 x i8> %1 to <16 x i8>
174 %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
175 %5 = bitcast <16 x i8> %4 to <16 x i8>
176 store <16 x i8> %5, <16 x i8>* @llvm_mips_bmnz_v_b_RES
180 ; ANYENDIAN: llvm_mips_bmnz_v_b_test:
185 ; ANYENDIAN: .size llvm_mips_bmnz_v_b_test
187 @llvm_mips_bmnz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
188 @llvm_mips_bmnz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
189 @llvm_mips_bmnz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
191 define void @llvm_mips_bmnz_v_h_test() nounwind {
193 %0 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
194 %1 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
195 %2 = bitcast <8 x i16> %0 to <16 x i8>
196 %3 = bitcast <8 x i16> %1 to <16 x i8>
197 %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
198 %5 = bitcast <16 x i8> %4 to <8 x i16>
199 store <8 x i16> %5, <8 x i16>* @llvm_mips_bmnz_v_h_RES
203 ; ANYENDIAN: llvm_mips_bmnz_v_h_test:
208 ; ANYENDIAN: .size llvm_mips_bmnz_v_h_test
210 @llvm_mips_bmnz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
211 @llvm_mips_bmnz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
212 @llvm_mips_bmnz_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
214 define void @llvm_mips_bmnz_v_w_test() nounwind {
216 %0 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
217 %1 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
218 %2 = bitcast <4 x i32> %0 to <16 x i8>
219 %3 = bitcast <4 x i32> %1 to <16 x i8>
220 %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
221 %5 = bitcast <16 x i8> %4 to <4 x i32>
222 store <4 x i32> %5, <4 x i32>* @llvm_mips_bmnz_v_w_RES
226 ; ANYENDIAN: llvm_mips_bmnz_v_w_test:
231 ; ANYENDIAN: .size llvm_mips_bmnz_v_w_test
233 @llvm_mips_bmnz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
234 @llvm_mips_bmnz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
235 @llvm_mips_bmnz_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
237 define void @llvm_mips_bmnz_v_d_test() nounwind {
239 %0 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
240 %1 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
241 %2 = bitcast <2 x i64> %0 to <16 x i8>
242 %3 = bitcast <2 x i64> %1 to <16 x i8>
243 %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
244 %5 = bitcast <16 x i8> %4 to <2 x i64>
245 store <2 x i64> %5, <2 x i64>* @llvm_mips_bmnz_v_d_RES
249 ; ANYENDIAN: llvm_mips_bmnz_v_d_test:
254 ; ANYENDIAN: .size llvm_mips_bmnz_v_d_test
256 @llvm_mips_bmz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
257 @llvm_mips_bmz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
258 @llvm_mips_bmz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
260 define void @llvm_mips_bmz_v_b_test() nounwind {
262 %0 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG1
263 %1 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG2
264 %2 = bitcast <16 x i8> %0 to <16 x i8>
265 %3 = bitcast <16 x i8> %1 to <16 x i8>
266 %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
267 %5 = bitcast <16 x i8> %4 to <16 x i8>
268 store <16 x i8> %5, <16 x i8>* @llvm_mips_bmz_v_b_RES
272 ; ANYENDIAN: llvm_mips_bmz_v_b_test:
277 ; ANYENDIAN: .size llvm_mips_bmz_v_b_test
279 @llvm_mips_bmz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
280 @llvm_mips_bmz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
281 @llvm_mips_bmz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
283 define void @llvm_mips_bmz_v_h_test() nounwind {
285 %0 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG1
286 %1 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG2
287 %2 = bitcast <8 x i16> %0 to <16 x i8>
288 %3 = bitcast <8 x i16> %1 to <16 x i8>
289 %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
290 %5 = bitcast <16 x i8> %4 to <8 x i16>
291 store <8 x i16> %5, <8 x i16>* @llvm_mips_bmz_v_h_RES
295 ; ANYENDIAN: llvm_mips_bmz_v_h_test:
300 ; ANYENDIAN: .size llvm_mips_bmz_v_h_test
302 @llvm_mips_bmz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
303 @llvm_mips_bmz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
304 @llvm_mips_bmz_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
306 define void @llvm_mips_bmz_v_w_test() nounwind {
308 %0 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG1
309 %1 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG2
310 %2 = bitcast <4 x i32> %0 to <16 x i8>
311 %3 = bitcast <4 x i32> %1 to <16 x i8>
312 %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
313 %5 = bitcast <16 x i8> %4 to <4 x i32>
314 store <4 x i32> %5, <4 x i32>* @llvm_mips_bmz_v_w_RES
318 ; ANYENDIAN: llvm_mips_bmz_v_w_test:
323 ; ANYENDIAN: .size llvm_mips_bmz_v_w_test
325 @llvm_mips_bmz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
326 @llvm_mips_bmz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
327 @llvm_mips_bmz_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
329 define void @llvm_mips_bmz_v_d_test() nounwind {
331 %0 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG1
332 %1 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG2
333 %2 = bitcast <2 x i64> %0 to <16 x i8>
334 %3 = bitcast <2 x i64> %1 to <16 x i8>
335 %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
336 %5 = bitcast <16 x i8> %4 to <2 x i64>
337 store <2 x i64> %5, <2 x i64>* @llvm_mips_bmz_v_d_RES
341 ; ANYENDIAN: llvm_mips_bmz_v_d_test:
346 ; ANYENDIAN: .size llvm_mips_bmz_v_d_test
348 @llvm_mips_bsel_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
349 @llvm_mips_bsel_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
350 @llvm_mips_bsel_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
352 define void @llvm_mips_bsel_v_b_test() nounwind {
354 %0 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG1
355 %1 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG2
356 %2 = bitcast <16 x i8> %0 to <16 x i8>
357 %3 = bitcast <16 x i8> %1 to <16 x i8>
358 %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %2, <16 x i8> %3)
359 %5 = bitcast <16 x i8> %4 to <16 x i8>
360 store <16 x i8> %5, <16 x i8>* @llvm_mips_bsel_v_b_RES
364 ; ANYENDIAN: llvm_mips_bsel_v_b_test:
369 ; ANYENDIAN: .size llvm_mips_bsel_v_b_test
371 @llvm_mips_bsel_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
372 @llvm_mips_bsel_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
373 @llvm_mips_bsel_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
375 define void @llvm_mips_bsel_v_h_test() nounwind {
377 %0 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG1
378 %1 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG2
379 %2 = bitcast <8 x i16> %0 to <16 x i8>
380 %3 = bitcast <8 x i16> %1 to <16 x i8>
381 %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %2, <16 x i8> %3)
382 %5 = bitcast <16 x i8> %4 to <8 x i16>
383 store <8 x i16> %5, <8 x i16>* @llvm_mips_bsel_v_h_RES
387 ; ANYENDIAN: llvm_mips_bsel_v_h_test:
392 ; ANYENDIAN: .size llvm_mips_bsel_v_h_test
394 @llvm_mips_bsel_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
395 @llvm_mips_bsel_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
396 @llvm_mips_bsel_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
398 define void @llvm_mips_bsel_v_w_test() nounwind {
400 %0 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG1
401 %1 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG2
402 %2 = bitcast <4 x i32> %0 to <16 x i8>
403 %3 = bitcast <4 x i32> %1 to <16 x i8>
404 %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %2, <16 x i8> %3)
405 %5 = bitcast <16 x i8> %4 to <4 x i32>
406 store <4 x i32> %5, <4 x i32>* @llvm_mips_bsel_v_w_RES
410 ; ANYENDIAN: llvm_mips_bsel_v_w_test:
415 ; ANYENDIAN: .size llvm_mips_bsel_v_w_test
417 @llvm_mips_bsel_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
418 @llvm_mips_bsel_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
419 @llvm_mips_bsel_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
421 define void @llvm_mips_bsel_v_d_test() nounwind {
423 %0 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG1
424 %1 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG2
425 %2 = bitcast <2 x i64> %0 to <16 x i8>
426 %3 = bitcast <2 x i64> %1 to <16 x i8>
427 %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %2, <16 x i8> %3)
428 %5 = bitcast <16 x i8> %4 to <2 x i64>
429 store <2 x i64> %5, <2 x i64>* @llvm_mips_bsel_v_d_RES
433 ; ANYENDIAN: llvm_mips_bsel_v_d_test:
438 ; ANYENDIAN: .size llvm_mips_bsel_v_d_test
440 @llvm_mips_nor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
441 @llvm_mips_nor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
442 @llvm_mips_nor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
444 define void @llvm_mips_nor_v_b_test() nounwind {
446 %0 = load <16 x i8>* @llvm_mips_nor_v_b_ARG1
447 %1 = load <16 x i8>* @llvm_mips_nor_v_b_ARG2
448 %2 = bitcast <16 x i8> %0 to <16 x i8>
449 %3 = bitcast <16 x i8> %1 to <16 x i8>
450 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
451 %5 = bitcast <16 x i8> %4 to <16 x i8>
452 store <16 x i8> %5, <16 x i8>* @llvm_mips_nor_v_b_RES
456 ; ANYENDIAN: llvm_mips_nor_v_b_test:
461 ; ANYENDIAN: .size llvm_mips_nor_v_b_test
463 @llvm_mips_nor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
464 @llvm_mips_nor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
465 @llvm_mips_nor_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
467 define void @llvm_mips_nor_v_h_test() nounwind {
469 %0 = load <8 x i16>* @llvm_mips_nor_v_h_ARG1
470 %1 = load <8 x i16>* @llvm_mips_nor_v_h_ARG2
471 %2 = bitcast <8 x i16> %0 to <16 x i8>
472 %3 = bitcast <8 x i16> %1 to <16 x i8>
473 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
474 %5 = bitcast <16 x i8> %4 to <8 x i16>
475 store <8 x i16> %5, <8 x i16>* @llvm_mips_nor_v_h_RES
479 ; ANYENDIAN: llvm_mips_nor_v_h_test:
484 ; ANYENDIAN: .size llvm_mips_nor_v_h_test
486 @llvm_mips_nor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
487 @llvm_mips_nor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
488 @llvm_mips_nor_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
490 define void @llvm_mips_nor_v_w_test() nounwind {
492 %0 = load <4 x i32>* @llvm_mips_nor_v_w_ARG1
493 %1 = load <4 x i32>* @llvm_mips_nor_v_w_ARG2
494 %2 = bitcast <4 x i32> %0 to <16 x i8>
495 %3 = bitcast <4 x i32> %1 to <16 x i8>
496 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
497 %5 = bitcast <16 x i8> %4 to <4 x i32>
498 store <4 x i32> %5, <4 x i32>* @llvm_mips_nor_v_w_RES
502 ; ANYENDIAN: llvm_mips_nor_v_w_test:
507 ; ANYENDIAN: .size llvm_mips_nor_v_w_test
509 @llvm_mips_nor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
510 @llvm_mips_nor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
511 @llvm_mips_nor_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
513 define void @llvm_mips_nor_v_d_test() nounwind {
515 %0 = load <2 x i64>* @llvm_mips_nor_v_d_ARG1
516 %1 = load <2 x i64>* @llvm_mips_nor_v_d_ARG2
517 %2 = bitcast <2 x i64> %0 to <16 x i8>
518 %3 = bitcast <2 x i64> %1 to <16 x i8>
519 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
520 %5 = bitcast <16 x i8> %4 to <2 x i64>
521 store <2 x i64> %5, <2 x i64>* @llvm_mips_nor_v_d_RES
525 ; ANYENDIAN: llvm_mips_nor_v_d_test:
530 ; ANYENDIAN: .size llvm_mips_nor_v_d_test
532 @llvm_mips_or_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
533 @llvm_mips_or_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
534 @llvm_mips_or_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
536 define void @llvm_mips_or_v_b_test() nounwind {
538 %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
539 %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
540 %2 = bitcast <16 x i8> %0 to <16 x i8>
541 %3 = bitcast <16 x i8> %1 to <16 x i8>
542 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
543 %5 = bitcast <16 x i8> %4 to <16 x i8>
544 store <16 x i8> %5, <16 x i8>* @llvm_mips_or_v_b_RES
548 ; ANYENDIAN: llvm_mips_or_v_b_test:
553 ; ANYENDIAN: .size llvm_mips_or_v_b_test
555 @llvm_mips_or_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
556 @llvm_mips_or_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
557 @llvm_mips_or_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
559 define void @llvm_mips_or_v_h_test() nounwind {
561 %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
562 %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
563 %2 = bitcast <8 x i16> %0 to <16 x i8>
564 %3 = bitcast <8 x i16> %1 to <16 x i8>
565 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
566 %5 = bitcast <16 x i8> %4 to <8 x i16>
567 store <8 x i16> %5, <8 x i16>* @llvm_mips_or_v_h_RES
571 ; ANYENDIAN: llvm_mips_or_v_h_test:
576 ; ANYENDIAN: .size llvm_mips_or_v_h_test
578 @llvm_mips_or_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
579 @llvm_mips_or_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
580 @llvm_mips_or_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
582 define void @llvm_mips_or_v_w_test() nounwind {
584 %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
585 %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
586 %2 = bitcast <4 x i32> %0 to <16 x i8>
587 %3 = bitcast <4 x i32> %1 to <16 x i8>
588 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
589 %5 = bitcast <16 x i8> %4 to <4 x i32>
590 store <4 x i32> %5, <4 x i32>* @llvm_mips_or_v_w_RES
594 ; ANYENDIAN: llvm_mips_or_v_w_test:
599 ; ANYENDIAN: .size llvm_mips_or_v_w_test
601 @llvm_mips_or_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
602 @llvm_mips_or_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
603 @llvm_mips_or_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
605 define void @llvm_mips_or_v_d_test() nounwind {
607 %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
608 %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
609 %2 = bitcast <2 x i64> %0 to <16 x i8>
610 %3 = bitcast <2 x i64> %1 to <16 x i8>
611 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
612 %5 = bitcast <16 x i8> %4 to <2 x i64>
613 store <2 x i64> %5, <2 x i64>* @llvm_mips_or_v_d_RES
617 ; ANYENDIAN: llvm_mips_or_v_d_test:
622 ; ANYENDIAN: .size llvm_mips_or_v_d_test
624 define void @or_v_b_test() nounwind {
626 %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
627 %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
628 %2 = or <16 x i8> %0, %1
629 store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES
633 ; ANYENDIAN: or_v_b_test:
638 ; ANYENDIAN: .size or_v_b_test
640 define void @or_v_h_test() nounwind {
642 %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
643 %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
644 %2 = or <8 x i16> %0, %1
645 store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES
649 ; ANYENDIAN: or_v_h_test:
654 ; ANYENDIAN: .size or_v_h_test
657 define void @or_v_w_test() nounwind {
659 %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
660 %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
661 %2 = or <4 x i32> %0, %1
662 store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES
666 ; ANYENDIAN: or_v_w_test:
671 ; ANYENDIAN: .size or_v_w_test
674 define void @or_v_d_test() nounwind {
676 %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
677 %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
678 %2 = or <2 x i64> %0, %1
679 store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES
683 ; ANYENDIAN: or_v_d_test:
688 ; ANYENDIAN: .size or_v_d_test
690 @llvm_mips_xor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
691 @llvm_mips_xor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
692 @llvm_mips_xor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
694 define void @llvm_mips_xor_v_b_test() nounwind {
696 %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
697 %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
698 %2 = bitcast <16 x i8> %0 to <16 x i8>
699 %3 = bitcast <16 x i8> %1 to <16 x i8>
700 %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
701 %5 = bitcast <16 x i8> %4 to <16 x i8>
702 store <16 x i8> %5, <16 x i8>* @llvm_mips_xor_v_b_RES
706 ; ANYENDIAN: llvm_mips_xor_v_b_test:
711 ; ANYENDIAN: .size llvm_mips_xor_v_b_test
713 @llvm_mips_xor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
714 @llvm_mips_xor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
715 @llvm_mips_xor_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
717 define void @llvm_mips_xor_v_h_test() nounwind {
719 %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
720 %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
721 %2 = bitcast <8 x i16> %0 to <16 x i8>
722 %3 = bitcast <8 x i16> %1 to <16 x i8>
723 %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
724 %5 = bitcast <16 x i8> %4 to <8 x i16>
725 store <8 x i16> %5, <8 x i16>* @llvm_mips_xor_v_h_RES
729 ; ANYENDIAN: llvm_mips_xor_v_h_test:
734 ; ANYENDIAN: .size llvm_mips_xor_v_h_test
736 @llvm_mips_xor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
737 @llvm_mips_xor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
738 @llvm_mips_xor_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
740 define void @llvm_mips_xor_v_w_test() nounwind {
742 %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
743 %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
744 %2 = bitcast <4 x i32> %0 to <16 x i8>
745 %3 = bitcast <4 x i32> %1 to <16 x i8>
746 %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
747 %5 = bitcast <16 x i8> %4 to <4 x i32>
748 store <4 x i32> %5, <4 x i32>* @llvm_mips_xor_v_w_RES
752 ; ANYENDIAN: llvm_mips_xor_v_w_test:
757 ; ANYENDIAN: .size llvm_mips_xor_v_w_test
759 @llvm_mips_xor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
760 @llvm_mips_xor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
761 @llvm_mips_xor_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
763 define void @llvm_mips_xor_v_d_test() nounwind {
765 %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
766 %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
767 %2 = bitcast <2 x i64> %0 to <16 x i8>
768 %3 = bitcast <2 x i64> %1 to <16 x i8>
769 %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
770 %5 = bitcast <16 x i8> %4 to <2 x i64>
771 store <2 x i64> %5, <2 x i64>* @llvm_mips_xor_v_d_RES
775 ; ANYENDIAN: llvm_mips_xor_v_d_test:
780 ; ANYENDIAN: .size llvm_mips_xor_v_d_test
782 define void @xor_v_b_test() nounwind {
784 %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
785 %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
786 %2 = xor <16 x i8> %0, %1
787 store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES
791 ; ANYENDIAN: xor_v_b_test:
796 ; ANYENDIAN: .size xor_v_b_test
798 define void @xor_v_h_test() nounwind {
800 %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
801 %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
802 %2 = xor <8 x i16> %0, %1
803 store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES
807 ; ANYENDIAN: xor_v_h_test:
812 ; ANYENDIAN: .size xor_v_h_test
815 define void @xor_v_w_test() nounwind {
817 %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
818 %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
819 %2 = xor <4 x i32> %0, %1
820 store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES
824 ; ANYENDIAN: xor_v_w_test:
829 ; ANYENDIAN: .size xor_v_w_test
832 define void @xor_v_d_test() nounwind {
834 %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
835 %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
836 %2 = xor <2 x i64> %0, %1
837 store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
841 ; ANYENDIAN: xor_v_d_test:
846 ; ANYENDIAN: .size xor_v_d_test
; Declarations of the MSA VEC-format intrinsics exercised by the tests above.
; These operations treat the vector purely as 128 bits of data, so every
; intrinsic takes and returns <16 x i8> regardless of the element type the
; callers bitcast from/to. bsel.v takes three operands (mask, src1, src2);
; the rest are two-operand bitwise ops.
848 declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
849 declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>) nounwind
850 declare <16 x i8> @llvm.mips.bmz.v(<16 x i8>, <16 x i8>) nounwind
851 declare <16 x i8> @llvm.mips.bsel.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
852 declare <16 x i8> @llvm.mips.nor.v(<16 x i8>, <16 x i8>) nounwind
853 declare <16 x i8> @llvm.mips.or.v(<16 x i8>, <16 x i8>) nounwind
854 declare <16 x i8> @llvm.mips.xor.v(<16 x i8>, <16 x i8>) nounwind