1 ; Test the MSA intrinsics that are encoded with the 3R instruction format.
2 ; There are lots of these so this covers those beginning with 'a'
4 ; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
; add_a.[bhwd]: per-lane addition of absolute values.
@llvm_mips_add_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_add_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_add_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_add_a_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_add_a_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_add_a_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.add.a.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_add_a_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.add.a.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_add_a_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: add_a.b
; CHECK: st.b
; CHECK: .size llvm_mips_add_a_b_test

@llvm_mips_add_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_add_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_add_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_add_a_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_add_a_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_add_a_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.add.a.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_add_a_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.add.a.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_add_a_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: add_a.h
; CHECK: st.h
; CHECK: .size llvm_mips_add_a_h_test

@llvm_mips_add_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_add_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_add_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_add_a_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_add_a_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_add_a_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.add.a.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_add_a_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.add.a.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_add_a_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: add_a.w
; CHECK: st.w
; CHECK: .size llvm_mips_add_a_w_test

@llvm_mips_add_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_add_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_add_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_add_a_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_add_a_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_add_a_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.add.a.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_add_a_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.add.a.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_add_a_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: add_a.d
; CHECK: st.d
; CHECK: .size llvm_mips_add_a_d_test
; adds_a.[bhwd]: saturated addition of absolute values.
@llvm_mips_adds_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_adds_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_adds_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_adds_a_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_adds_a_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_adds_a_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.adds.a.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_a_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.adds.a.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_adds_a_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: adds_a.b
; CHECK: st.b
; CHECK: .size llvm_mips_adds_a_b_test

@llvm_mips_adds_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_adds_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_adds_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_adds_a_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_adds_a_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_adds_a_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.adds.a.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_a_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.adds.a.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_adds_a_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: adds_a.h
; CHECK: st.h
; CHECK: .size llvm_mips_adds_a_h_test

@llvm_mips_adds_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_adds_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_adds_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_adds_a_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_adds_a_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_adds_a_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.adds.a.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_a_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.adds.a.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_adds_a_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: adds_a.w
; CHECK: st.w
; CHECK: .size llvm_mips_adds_a_w_test

@llvm_mips_adds_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_adds_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_adds_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_adds_a_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_adds_a_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_adds_a_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.adds.a.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_a_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.adds.a.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_adds_a_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: adds_a.d
; CHECK: st.d
; CHECK: .size llvm_mips_adds_a_d_test
; adds_s.[bhwd]: signed saturated addition.
@llvm_mips_adds_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_adds_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_adds_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_adds_s_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_adds_s_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_adds_s_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.adds.s.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_s_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.adds.s.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_adds_s_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: adds_s.b
; CHECK: st.b
; CHECK: .size llvm_mips_adds_s_b_test

@llvm_mips_adds_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_adds_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_adds_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_adds_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_adds_s_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_adds_s_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.adds.s.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.adds.s.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_adds_s_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: adds_s.h
; CHECK: st.h
; CHECK: .size llvm_mips_adds_s_h_test

@llvm_mips_adds_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_adds_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_adds_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_adds_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_adds_s_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_adds_s_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.adds.s.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.adds.s.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_adds_s_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: adds_s.w
; CHECK: st.w
; CHECK: .size llvm_mips_adds_s_w_test

@llvm_mips_adds_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_adds_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_adds_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_adds_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_adds_s_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_adds_s_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.adds.s.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.adds.s.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_adds_s_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: adds_s.d
; CHECK: st.d
; CHECK: .size llvm_mips_adds_s_d_test
; adds_u.[bhwd]: unsigned saturated addition.
@llvm_mips_adds_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_adds_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_adds_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_adds_u_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_adds_u_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_adds_u_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.adds.u.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_u_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.adds.u.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_adds_u_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: adds_u.b
; CHECK: st.b
; CHECK: .size llvm_mips_adds_u_b_test

@llvm_mips_adds_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_adds_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_adds_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_adds_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_adds_u_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_adds_u_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.adds.u.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.adds.u.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_adds_u_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: adds_u.h
; CHECK: st.h
; CHECK: .size llvm_mips_adds_u_h_test

@llvm_mips_adds_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_adds_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_adds_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_adds_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_adds_u_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_adds_u_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.adds.u.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.adds.u.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_adds_u_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: adds_u.w
; CHECK: st.w
; CHECK: .size llvm_mips_adds_u_w_test

@llvm_mips_adds_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_adds_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_adds_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_adds_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_adds_u_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_adds_u_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.adds.u.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.adds.u.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_adds_u_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: adds_u.d
; CHECK: st.d
; CHECK: .size llvm_mips_adds_u_d_test
; addv.[bhwd]: modulo (wrapping) vector addition via intrinsic.
@llvm_mips_addv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_addv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_addv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_addv_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_addv_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: addv.b
; CHECK: st.b
; CHECK: .size llvm_mips_addv_b_test

@llvm_mips_addv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_addv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_addv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_addv_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_addv_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: addv.h
; CHECK: st.h
; CHECK: .size llvm_mips_addv_h_test

@llvm_mips_addv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_addv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_addv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_addv_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_addv_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: addv.w
; CHECK: st.w
; CHECK: .size llvm_mips_addv_w_test

@llvm_mips_addv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_addv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_addv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_addv_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_addv_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: addv.d
; CHECK: st.d
; CHECK: .size llvm_mips_addv_d_test
; Plain IR 'add' on vectors must also select to addv.[bhwd].
define void @addv_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2
  %2 = add <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
  ret void
}

; CHECK: addv_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: addv.b
; CHECK: st.b
; CHECK: .size addv_b_test

define void @addv_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2
  %2 = add <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
  ret void
}

; CHECK: addv_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: addv.h
; CHECK: st.h
; CHECK: .size addv_h_test

define void @addv_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2
  %2 = add <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
  ret void
}

; CHECK: addv_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: addv.w
; CHECK: st.w
; CHECK: .size addv_w_test

define void @addv_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2
  %2 = add <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
  ret void
}

; CHECK: addv_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: addv.d
; CHECK: st.d
; CHECK: .size addv_d_test
; asub_s.[bhwd]: absolute difference of signed values.
@llvm_mips_asub_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_asub_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_asub_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_asub_s_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_asub_s_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_asub_s_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.asub.s.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_s_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.asub.s.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_asub_s_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: asub_s.b
; CHECK: st.b
; CHECK: .size llvm_mips_asub_s_b_test

@llvm_mips_asub_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_asub_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_asub_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_asub_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_asub_s_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_asub_s_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.asub.s.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.asub.s.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_asub_s_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: asub_s.h
; CHECK: st.h
; CHECK: .size llvm_mips_asub_s_h_test

@llvm_mips_asub_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_asub_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_asub_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_asub_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_asub_s_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_asub_s_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.asub.s.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.asub.s.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_asub_s_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: asub_s.w
; CHECK: st.w
; CHECK: .size llvm_mips_asub_s_w_test

@llvm_mips_asub_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_asub_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_asub_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_asub_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_asub_s_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_asub_s_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.asub.s.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.asub.s.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_asub_s_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: asub_s.d
; CHECK: st.d
; CHECK: .size llvm_mips_asub_s_d_test
; asub_u.[bhwd]: absolute difference of unsigned values.
@llvm_mips_asub_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_asub_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_asub_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_asub_u_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_asub_u_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_asub_u_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.asub.u.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_u_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.asub.u.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_asub_u_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: asub_u.b
; CHECK: st.b
; CHECK: .size llvm_mips_asub_u_b_test

@llvm_mips_asub_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_asub_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_asub_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_asub_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_asub_u_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_asub_u_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.asub.u.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.asub.u.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_asub_u_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: asub_u.h
; CHECK: st.h
; CHECK: .size llvm_mips_asub_u_h_test

@llvm_mips_asub_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_asub_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_asub_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_asub_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_asub_u_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_asub_u_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.asub.u.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.asub.u.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_asub_u_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: asub_u.w
; CHECK: st.w
; CHECK: .size llvm_mips_asub_u_w_test

@llvm_mips_asub_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_asub_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_asub_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_asub_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_asub_u_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_asub_u_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.asub.u.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.asub.u.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_asub_u_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: asub_u.d
; CHECK: st.d
; CHECK: .size llvm_mips_asub_u_d_test
; ave_s.[bhwd]: signed average (truncating).
@llvm_mips_ave_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_ave_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_ave_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_ave_s_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_ave_s_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_ave_s_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.ave.s.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_s_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.ave.s.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_ave_s_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: ave_s.b
; CHECK: st.b
; CHECK: .size llvm_mips_ave_s_b_test

@llvm_mips_ave_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_ave_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_ave_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_ave_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_ave_s_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_ave_s_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.ave.s.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.ave.s.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_ave_s_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ave_s.h
; CHECK: st.h
; CHECK: .size llvm_mips_ave_s_h_test

@llvm_mips_ave_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_ave_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_ave_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_ave_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_ave_s_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_ave_s_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.ave.s.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.ave.s.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_ave_s_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ave_s.w
; CHECK: st.w
; CHECK: .size llvm_mips_ave_s_w_test

@llvm_mips_ave_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_ave_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_ave_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_ave_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_ave_s_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_ave_s_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.ave.s.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.ave.s.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_ave_s_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: ave_s.d
; CHECK: st.d
; CHECK: .size llvm_mips_ave_s_d_test
; ave_u.[bhwd]: unsigned average (truncating).
@llvm_mips_ave_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_ave_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_ave_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_ave_u_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_ave_u_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_ave_u_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.ave.u.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_u_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.ave.u.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_ave_u_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: ave_u.b
; CHECK: st.b
; CHECK: .size llvm_mips_ave_u_b_test

@llvm_mips_ave_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_ave_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_ave_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_ave_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_ave_u_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_ave_u_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.ave.u.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.ave.u.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_ave_u_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ave_u.h
; CHECK: st.h
; CHECK: .size llvm_mips_ave_u_h_test

@llvm_mips_ave_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_ave_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_ave_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_ave_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_ave_u_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_ave_u_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.ave.u.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.ave.u.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_ave_u_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ave_u.w
; CHECK: st.w
; CHECK: .size llvm_mips_ave_u_w_test

@llvm_mips_ave_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_ave_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_ave_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_ave_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_ave_u_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_ave_u_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.ave.u.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.ave.u.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_ave_u_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: ave_u.d
; CHECK: st.d
; CHECK: .size llvm_mips_ave_u_d_test
; aver_s.[bhwd]: signed average with rounding.
@llvm_mips_aver_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_aver_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_aver_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_aver_s_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_aver_s_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_aver_s_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.aver.s.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_s_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.aver.s.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_aver_s_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: aver_s.b
; CHECK: st.b
; CHECK: .size llvm_mips_aver_s_b_test

@llvm_mips_aver_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_aver_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_aver_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_aver_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_aver_s_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_aver_s_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.aver.s.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.aver.s.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_aver_s_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: aver_s.h
; CHECK: st.h
; CHECK: .size llvm_mips_aver_s_h_test

@llvm_mips_aver_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_aver_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_aver_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_aver_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_aver_s_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_aver_s_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.aver.s.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.aver.s.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_aver_s_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: aver_s.w
; CHECK: st.w
; CHECK: .size llvm_mips_aver_s_w_test

@llvm_mips_aver_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_aver_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_aver_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_aver_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_aver_s_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_aver_s_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.aver.s.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.aver.s.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_aver_s_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: aver_s.d
; CHECK: st.d
; CHECK: .size llvm_mips_aver_s_d_test
; aver_u.[bhwd]: unsigned average with rounding.
@llvm_mips_aver_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_aver_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_aver_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_aver_u_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_aver_u_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_aver_u_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.aver.u.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_u_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.aver.u.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_aver_u_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: aver_u.b
; CHECK: st.b
; CHECK: .size llvm_mips_aver_u_b_test

@llvm_mips_aver_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_aver_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_aver_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_aver_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_aver_u_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_aver_u_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.aver.u.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.aver.u.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_aver_u_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: aver_u.h
; CHECK: st.h
; CHECK: .size llvm_mips_aver_u_h_test

@llvm_mips_aver_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_aver_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_aver_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_aver_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_aver_u_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_aver_u_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.aver.u.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.aver.u.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_aver_u_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: aver_u.w
; CHECK: st.w
; CHECK: .size llvm_mips_aver_u_w_test

@llvm_mips_aver_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_aver_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_aver_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_aver_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_aver_u_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_aver_u_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.aver.u.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.aver.u.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_aver_u_d_test:
; CHECK: ld.d
; CHECK: ld.d
; CHECK: aver_u.d
; CHECK: st.d
; CHECK: .size llvm_mips_aver_u_d_test