; Test the MSA intrinsics that are encoded with the VEC instruction format.

; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=ANYENDIAN %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=ANYENDIAN %s
; llvm.mips.and.v applied to each vector type. The operation is bitwise, so a
; single and.v is expected regardless of the element size; the bitcast loads
; fold to ld.b in every case.

@llvm_mips_and_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_and_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_and_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_and_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_and_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_b_test

@llvm_mips_and_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_and_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_and_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_and_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_and_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_h_test

@llvm_mips_and_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_and_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_and_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_and_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_and_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_w_test

@llvm_mips_and_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_and_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_and_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_and_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_and_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_d_test
; A plain IR 'and' on each vector type must also select and.v. These checks
; previously used the CHECK prefix, which the RUN lines never enable
; (-check-prefix=ANYENDIAN only), so they were dead; they now use ANYENDIAN.

define void @and_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
  %2 = and <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES
  ret void
}

; ANYENDIAN: and_v_b_test:
; ANYENDIAN: and.v
; ANYENDIAN: .size and_v_b_test

define void @and_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
  %2 = and <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES
  ret void
}

; ANYENDIAN: and_v_h_test:
; ANYENDIAN: and.v
; ANYENDIAN: .size and_v_h_test

define void @and_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
  %2 = and <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES
  ret void
}

; ANYENDIAN: and_v_w_test:
; ANYENDIAN: and.v
; ANYENDIAN: .size and_v_w_test

define void @and_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
  %2 = and <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES
  ret void
}

; ANYENDIAN: and_v_d_test:
; ANYENDIAN: and.v
; ANYENDIAN: .size and_v_d_test
; llvm.mips.bmnz.v applied to each vector type. The first operand is both an
; input (wd_in) and the destination, so the check lines verify that the result
; register is the one loaded from ARG1.

@llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_bmnz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmnz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_bmnz_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
  %2 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
  %3 = bitcast <16 x i8> %0 to <16 x i8>
  %4 = bitcast <16 x i8> %1 to <16 x i8>
  %5 = bitcast <16 x i8> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <16 x i8>
  store <16 x i8> %7, <16 x i8>* @llvm_mips_bmnz_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_b_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_b_test

@llvm_mips_bmnz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmnz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_bmnz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmnz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_bmnz_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
  %2 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
  %3 = bitcast <8 x i16> %0 to <16 x i8>
  %4 = bitcast <8 x i16> %1 to <16 x i8>
  %5 = bitcast <8 x i16> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  store <8 x i16> %7, <8 x i16>* @llvm_mips_bmnz_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_h_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_h_test

@llvm_mips_bmnz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmnz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_bmnz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmnz_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_bmnz_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
  %2 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
  %3 = bitcast <4 x i32> %0 to <16 x i8>
  %4 = bitcast <4 x i32> %1 to <16 x i8>
  %5 = bitcast <4 x i32> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  store <4 x i32> %7, <4 x i32>* @llvm_mips_bmnz_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_w_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_w_test

@llvm_mips_bmnz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmnz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_bmnz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmnz_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_bmnz_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
  %2 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
  %3 = bitcast <2 x i64> %0 to <16 x i8>
  %4 = bitcast <2 x i64> %1 to <16 x i8>
  %5 = bitcast <2 x i64> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  store <2 x i64> %7, <2 x i64>* @llvm_mips_bmnz_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_d_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_d_test
; llvm.mips.bmz.v applied to each vector type. The backend canonicalizes bmz.v
; to bmnz.v with the ws and wd_in operands swapped, so the checks expect bmnz.v
; writing into the register loaded from ARG2.

@llvm_mips_bmz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_bmz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_bmz_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG2
  %2 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG3
  %3 = bitcast <16 x i8> %0 to <16 x i8>
  %4 = bitcast <16 x i8> %1 to <16 x i8>
  %5 = bitcast <16 x i8> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <16 x i8>
  store <16 x i8> %7, <16 x i8>* @llvm_mips_bmz_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_b_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_b_test

@llvm_mips_bmz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_bmz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_bmz_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG2
  %2 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG3
  %3 = bitcast <8 x i16> %0 to <16 x i8>
  %4 = bitcast <8 x i16> %1 to <16 x i8>
  %5 = bitcast <8 x i16> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  store <8 x i16> %7, <8 x i16>* @llvm_mips_bmz_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_h_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_h_test

@llvm_mips_bmz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_bmz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmz_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_bmz_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG2
  %2 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG3
  %3 = bitcast <4 x i32> %0 to <16 x i8>
  %4 = bitcast <4 x i32> %1 to <16 x i8>
  %5 = bitcast <4 x i32> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  store <4 x i32> %7, <4 x i32>* @llvm_mips_bmz_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_w_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_w_test

@llvm_mips_bmz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_bmz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmz_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_bmz_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG2
  %2 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG3
  %3 = bitcast <2 x i64> %0 to <16 x i8>
  %4 = bitcast <2 x i64> %1 to <16 x i8>
  %5 = bitcast <2 x i64> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  store <2 x i64> %7, <2 x i64>* @llvm_mips_bmz_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_d_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_d_test
; llvm.mips.bsel.v applied to each vector type. The backend canonicalizes
; bsel.v to bmnz.v with the wt and wd_in operands swapped, so the checks expect
; bmnz.v writing into the register loaded from ARG3.

@llvm_mips_bsel_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bsel_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_bsel_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bsel_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_bsel_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG2
  %2 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG3
  %3 = bitcast <16 x i8> %0 to <16 x i8>
  %4 = bitcast <16 x i8> %1 to <16 x i8>
  %5 = bitcast <16 x i8> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <16 x i8>
  store <16 x i8> %7, <16 x i8>* @llvm_mips_bsel_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_b_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with wt and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
; ANYENDIAN-DAG: st.b [[R6]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_b_test

@llvm_mips_bsel_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bsel_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_bsel_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bsel_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_bsel_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG2
  %2 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG3
  %3 = bitcast <8 x i16> %0 to <16 x i8>
  %4 = bitcast <8 x i16> %1 to <16 x i8>
  %5 = bitcast <8 x i16> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  store <8 x i16> %7, <8 x i16>* @llvm_mips_bsel_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_h_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with wt and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
; ANYENDIAN-DAG: st.b [[R6]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_h_test

@llvm_mips_bsel_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bsel_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_bsel_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bsel_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_bsel_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG2
  %2 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG3
  %3 = bitcast <4 x i32> %0 to <16 x i8>
  %4 = bitcast <4 x i32> %1 to <16 x i8>
  %5 = bitcast <4 x i32> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  store <4 x i32> %7, <4 x i32>* @llvm_mips_bsel_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_w_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with wt and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
; ANYENDIAN-DAG: st.b [[R6]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_w_test

@llvm_mips_bsel_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bsel_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_bsel_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bsel_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_bsel_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG2
  %2 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG3
  %3 = bitcast <2 x i64> %0 to <16 x i8>
  %4 = bitcast <2 x i64> %1 to <16 x i8>
  %5 = bitcast <2 x i64> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  store <2 x i64> %7, <2 x i64>* @llvm_mips_bsel_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_d_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with wt and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
; ANYENDIAN-DAG: st.b [[R6]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_d_test
; llvm.mips.nor.v applied to each vector type. The operation is bitwise, so a
; single nor.v is expected regardless of the element size.

@llvm_mips_nor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_nor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_nor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_nor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_nor_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_nor_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_nor_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_b_test

@llvm_mips_nor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_nor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_nor_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_nor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_nor_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_nor_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_nor_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_h_test

@llvm_mips_nor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_nor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_nor_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_nor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_nor_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_nor_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_nor_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_w_test

@llvm_mips_nor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_nor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_nor_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_nor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_nor_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_nor_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_nor_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_d_test
; llvm.mips.or.v applied to each vector type. The operation is bitwise, so a
; single or.v is expected regardless of the element size.

@llvm_mips_or_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_or_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_or_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_or_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_or_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_b_test

@llvm_mips_or_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_or_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_or_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_or_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_or_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_h_test

@llvm_mips_or_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_or_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_or_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_or_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_or_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_w_test

@llvm_mips_or_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_or_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_or_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_or_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_or_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_d_test
; A plain IR 'or' on each vector type must also select or.v. These checks
; previously used the dead CHECK prefix (never enabled by the RUN lines); they
; now use ANYENDIAN so they are actually verified.

define void @or_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
  %2 = or <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES
  ret void
}

; ANYENDIAN: or_v_b_test:
; ANYENDIAN: or.v
; ANYENDIAN: .size or_v_b_test

define void @or_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
  %2 = or <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES
  ret void
}

; ANYENDIAN: or_v_h_test:
; ANYENDIAN: or.v
; ANYENDIAN: .size or_v_h_test

define void @or_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
  %2 = or <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES
  ret void
}

; ANYENDIAN: or_v_w_test:
; ANYENDIAN: or.v
; ANYENDIAN: .size or_v_w_test

define void @or_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
  %2 = or <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES
  ret void
}

; ANYENDIAN: or_v_d_test:
; ANYENDIAN: or.v
; ANYENDIAN: .size or_v_d_test
; llvm.mips.xor.v applied to each vector type. The operation is bitwise, so a
; single xor.v is expected regardless of the element size.

@llvm_mips_xor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_xor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_xor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_xor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_xor_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_b_test

@llvm_mips_xor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_xor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_xor_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_xor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_xor_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_h_test

@llvm_mips_xor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_xor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_xor_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_xor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_xor_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_w_test

@llvm_mips_xor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_xor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_xor_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_xor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_xor_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_d_test
; A plain IR 'xor' on each vector type must also select xor.v. These checks
; previously used the dead CHECK prefix (never enabled by the RUN lines); they
; now use ANYENDIAN so they are actually verified.

define void @xor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
  %2 = xor <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES
  ret void
}

; ANYENDIAN: xor_v_b_test:
; ANYENDIAN: xor.v
; ANYENDIAN: .size xor_v_b_test

define void @xor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
  %2 = xor <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES
  ret void
}

; ANYENDIAN: xor_v_h_test:
; ANYENDIAN: xor.v
; ANYENDIAN: .size xor_v_h_test

define void @xor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
  %2 = xor <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES
  ret void
}

; ANYENDIAN: xor_v_w_test:
; ANYENDIAN: xor.v
; ANYENDIAN: .size xor_v_w_test

define void @xor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
  %2 = xor <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
  ret void
}

; ANYENDIAN: xor_v_d_test:
; ANYENDIAN: xor.v
; ANYENDIAN: .size xor_v_d_test
; Declarations for the MSA bitwise intrinsics exercised above. All operate on
; the <16 x i8> view of the 128-bit vector registers.
declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bmz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bsel.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.nor.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.or.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.xor.v(<16 x i8>, <16 x i8>) nounwind