; Test the MSA intrinsics that are encoded with the 3R instruction format.
; There are lots of these so this covers those beginning with 'a'

; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s

; It should fail to compile without fp64.
; RUN: not llc -march=mips -mattr=+msa < %s 2>&1 | \
; RUN:     FileCheck -check-prefix=FP32ERROR %s
; FP32ERROR: LLVM ERROR: MSA requires a 64-bit FPU register file (FR=1 mode).
; add_a.[bhwd] — add of absolute values (see MIPS MSA manual, ADD_A.df).

@llvm_mips_add_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_add_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_add_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_add_a_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_add_a_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_add_a_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.add.a.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_add_a_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.add.a.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_add_a_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: add_a.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_add_a_b_test

@llvm_mips_add_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_add_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_add_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_add_a_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_add_a_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_add_a_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.add.a.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_add_a_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.add.a.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_add_a_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: add_a.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_add_a_h_test

@llvm_mips_add_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_add_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_add_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_add_a_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_add_a_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_add_a_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.add.a.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_add_a_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.add.a.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_add_a_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: add_a.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_add_a_w_test

@llvm_mips_add_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_add_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_add_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_add_a_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_add_a_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_add_a_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.add.a.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_add_a_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.add.a.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_add_a_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: add_a.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_add_a_d_test
; adds_a.[bhwd] — saturating add of absolute values (MIPS MSA ADDS_A.df).

@llvm_mips_adds_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_adds_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_adds_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_adds_a_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_adds_a_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_adds_a_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.adds.a.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_a_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.adds.a.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_adds_a_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_a.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_adds_a_b_test

@llvm_mips_adds_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_adds_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_adds_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_adds_a_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_adds_a_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_adds_a_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.adds.a.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_a_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.adds.a.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_adds_a_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_a.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_adds_a_h_test

@llvm_mips_adds_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_adds_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_adds_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_adds_a_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_adds_a_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_adds_a_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.adds.a.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_a_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.adds.a.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_adds_a_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_a.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_adds_a_w_test

@llvm_mips_adds_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_adds_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_adds_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_adds_a_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_adds_a_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_adds_a_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.adds.a.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_a_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.adds.a.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_adds_a_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_a.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_adds_a_d_test
; adds_s.[bhwd] — signed saturating add (MIPS MSA ADDS_S.df).

@llvm_mips_adds_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_adds_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_adds_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_adds_s_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_adds_s_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_adds_s_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.adds.s.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_s_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.adds.s.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_adds_s_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_adds_s_b_test

@llvm_mips_adds_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_adds_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_adds_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_adds_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_adds_s_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_adds_s_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.adds.s.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.adds.s.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_adds_s_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_adds_s_h_test

@llvm_mips_adds_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_adds_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_adds_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_adds_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_adds_s_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_adds_s_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.adds.s.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.adds.s.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_adds_s_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_adds_s_w_test

@llvm_mips_adds_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_adds_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_adds_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_adds_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_adds_s_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_adds_s_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.adds.s.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.adds.s.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_adds_s_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_adds_s_d_test
; adds_u.[bhwd] — unsigned saturating add (MIPS MSA ADDS_U.df).

@llvm_mips_adds_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_adds_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_adds_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_adds_u_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_adds_u_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_adds_u_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.adds.u.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_u_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.adds.u.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_adds_u_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_adds_u_b_test

@llvm_mips_adds_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_adds_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_adds_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_adds_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_adds_u_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_adds_u_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.adds.u.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.adds.u.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_adds_u_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_adds_u_h_test

@llvm_mips_adds_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_adds_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_adds_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_adds_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_adds_u_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_adds_u_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.adds.u.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.adds.u.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_adds_u_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_adds_u_w_test

@llvm_mips_adds_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_adds_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_adds_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_adds_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_adds_u_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_adds_u_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.adds.u.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.adds.u.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_adds_u_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: adds_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_adds_u_d_test
; addv.[bhwd] — modulo (wrapping) vector add (MIPS MSA ADDV.df).

@llvm_mips_addv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_addv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_addv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_addv_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_addv_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: addv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_addv_b_test

@llvm_mips_addv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_addv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_addv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_addv_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_addv_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: addv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_addv_h_test

@llvm_mips_addv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_addv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_addv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_addv_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_addv_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: addv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_addv_w_test

@llvm_mips_addv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_addv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_addv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_addv_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_addv_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: addv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_addv_d_test
; A plain IR vector 'add' should also be selected to addv.[bhwd].
; Reuses the llvm_mips_addv_* globals defined above.

define void @addv_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2
  %2 = add <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
  ret void
}

; CHECK: addv_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: addv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size addv_b_test

define void @addv_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2
  %2 = add <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
  ret void
}

; CHECK: addv_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: addv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size addv_h_test

define void @addv_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2
  %2 = add <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
  ret void
}

; CHECK: addv_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: addv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size addv_w_test

define void @addv_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2
  %2 = add <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
  ret void
}

; CHECK: addv_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: addv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size addv_d_test
; asub_s.[bhwd] — absolute difference, signed operands (MIPS MSA ASUB_S.df).

@llvm_mips_asub_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_asub_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_asub_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_asub_s_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_asub_s_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_asub_s_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.asub.s.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_s_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.asub.s.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_asub_s_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: asub_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_asub_s_b_test

@llvm_mips_asub_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_asub_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_asub_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_asub_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_asub_s_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_asub_s_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.asub.s.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.asub.s.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_asub_s_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: asub_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_asub_s_h_test

@llvm_mips_asub_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_asub_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_asub_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_asub_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_asub_s_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_asub_s_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.asub.s.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.asub.s.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_asub_s_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: asub_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_asub_s_w_test

@llvm_mips_asub_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_asub_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_asub_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_asub_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_asub_s_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_asub_s_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.asub.s.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.asub.s.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_asub_s_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: asub_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_asub_s_d_test
; asub_u.[bhwd] — absolute difference, unsigned operands (MIPS MSA ASUB_U.df).

@llvm_mips_asub_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_asub_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_asub_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_asub_u_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_asub_u_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_asub_u_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.asub.u.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_u_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.asub.u.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_asub_u_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: asub_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_asub_u_b_test

@llvm_mips_asub_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_asub_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_asub_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_asub_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_asub_u_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_asub_u_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.asub.u.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.asub.u.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_asub_u_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: asub_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_asub_u_h_test

@llvm_mips_asub_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_asub_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_asub_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_asub_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_asub_u_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_asub_u_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.asub.u.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.asub.u.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_asub_u_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: asub_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_asub_u_w_test

@llvm_mips_asub_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_asub_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_asub_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_asub_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_asub_u_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_asub_u_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.asub.u.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.asub.u.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_asub_u_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: asub_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_asub_u_d_test
; ave_s.[bhwd] — signed average, truncating (MIPS MSA AVE_S.df).

@llvm_mips_ave_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_ave_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_ave_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_ave_s_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_ave_s_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_ave_s_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.ave.s.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_s_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.ave.s.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_ave_s_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: ave_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_ave_s_b_test

@llvm_mips_ave_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_ave_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_ave_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_ave_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_ave_s_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_ave_s_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.ave.s.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.ave.s.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_ave_s_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: ave_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_ave_s_h_test

@llvm_mips_ave_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_ave_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_ave_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_ave_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_ave_s_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_ave_s_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.ave.s.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.ave.s.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_ave_s_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: ave_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_ave_s_w_test

@llvm_mips_ave_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_ave_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_ave_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_ave_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_ave_s_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_ave_s_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.ave.s.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.ave.s.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_ave_s_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: ave_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_ave_s_d_test
; ave_u.[bhwd] — unsigned average, truncating (MIPS MSA AVE_U.df).

@llvm_mips_ave_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_ave_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_ave_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_ave_u_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_ave_u_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_ave_u_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.ave.u.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_u_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.ave.u.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_ave_u_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: ave_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_ave_u_b_test

@llvm_mips_ave_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_ave_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_ave_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_ave_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_ave_u_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_ave_u_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.ave.u.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.ave.u.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_ave_u_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: ave_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_ave_u_h_test

@llvm_mips_ave_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_ave_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_ave_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_ave_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_ave_u_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_ave_u_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.ave.u.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.ave.u.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_ave_u_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: ave_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_ave_u_w_test

@llvm_mips_ave_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_ave_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_ave_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_ave_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_ave_u_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_ave_u_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.ave.u.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.ave.u.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_ave_u_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: ave_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_ave_u_d_test
; aver_s.[bhwd] — signed average with rounding (MIPS MSA AVER_S.df).

@llvm_mips_aver_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_aver_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_aver_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_aver_s_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_aver_s_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_aver_s_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.aver.s.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_s_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.aver.s.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_aver_s_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: aver_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_aver_s_b_test

@llvm_mips_aver_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_aver_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_aver_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_aver_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_aver_s_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_aver_s_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.aver.s.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.aver.s.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_aver_s_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: aver_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_aver_s_h_test

@llvm_mips_aver_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_aver_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_aver_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_aver_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_aver_s_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_aver_s_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.aver.s.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.aver.s.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_aver_s_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: aver_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_aver_s_w_test

@llvm_mips_aver_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_aver_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_aver_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_aver_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_aver_s_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_aver_s_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.aver.s.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.aver.s.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_aver_s_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: aver_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_aver_s_d_test
; aver_u.[bhwd] — unsigned average with rounding (MIPS MSA AVER_U.df).

@llvm_mips_aver_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_aver_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_aver_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_aver_u_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_aver_u_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_aver_u_b_ARG2
  %2 = tail call <16 x i8> @llvm.mips.aver.u.b(<16 x i8> %0, <16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_u_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.aver.u.b(<16 x i8>, <16 x i8>) nounwind

; CHECK: llvm_mips_aver_u_b_test:
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]]
; CHECK-DAG: aver_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.b [[R3]]
; CHECK: .size llvm_mips_aver_u_b_test

@llvm_mips_aver_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_aver_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_aver_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_aver_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_aver_u_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_aver_u_h_ARG2
  %2 = tail call <8 x i16> @llvm.mips.aver.u.h(<8 x i16> %0, <8 x i16> %1)
  store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.aver.u.h(<8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_aver_u_h_test:
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]]
; CHECK-DAG: aver_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.h [[R3]]
; CHECK: .size llvm_mips_aver_u_h_test

@llvm_mips_aver_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_aver_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_aver_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_aver_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_aver_u_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_aver_u_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.aver.u.w(<4 x i32> %0, <4 x i32> %1)
  store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.aver.u.w(<4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_aver_u_w_test:
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]]
; CHECK-DAG: aver_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.w [[R3]]
; CHECK: .size llvm_mips_aver_u_w_test

@llvm_mips_aver_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_aver_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_aver_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_aver_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_aver_u_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_aver_u_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.aver.u.d(<2 x i64> %0, <2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.aver.u.d(<2 x i64>, <2 x i64>) nounwind

; CHECK: llvm_mips_aver_u_d_test:
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]]
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]]
; CHECK-DAG: aver_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: st.d [[R3]]
; CHECK: .size llvm_mips_aver_u_d_test