; Test the MSA intrinsics that are encoded with the VEC instruction format.

; RUN: llc -march=mips -mattr=+msa < %s | FileCheck -check-prefix=ANYENDIAN %s
; RUN: llc -march=mipsel -mattr=+msa < %s | FileCheck -check-prefix=ANYENDIAN %s

; The ANYENDIAN checks below only match instruction mnemonics, not data, so
; the globals use zeroinitializer as a placeholder initializer.

@llvm_mips_and_v_b_ARG1 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_and_v_b_ARG2 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_and_v_b_RES = global <16 x i8> zeroinitializer, align 16

define void @llvm_mips_and_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_and_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_b_test
;
@llvm_mips_and_v_h_ARG1 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_and_v_h_ARG2 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_and_v_h_RES = global <8 x i16> zeroinitializer, align 16

define void @llvm_mips_and_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_and_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_h_test
;
@llvm_mips_and_v_w_ARG1 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_and_v_w_ARG2 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_and_v_w_RES = global <4 x i32> zeroinitializer, align 16

define void @llvm_mips_and_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_and_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_w_test
;
@llvm_mips_and_v_d_ARG1 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_and_v_d_ARG2 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_and_v_d_RES = global <2 x i64> zeroinitializer, align 16

define void @llvm_mips_and_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_and_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_d_test
;
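; bmnz.v: per the MSA spec, BMNZ.V (Bit Move If Not Zero) copies each bit of
; ws into wd wherever the corresponding bit of wt is set:
;   wd <- (ws & wt) | (wd & ~wt)
; The hardware instruction is destructive (wd is both source and destination);
; the intrinsic is used here in the two-operand form it is declared with at
; the end of this file.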
@llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_bmnz_v_b_RES = global <16 x i8> zeroinitializer, align 16

define void @llvm_mips_bmnz_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_bmnz_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bmnz.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bmnz_v_b_test
;
@llvm_mips_bmnz_v_h_ARG1 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_bmnz_v_h_ARG2 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_bmnz_v_h_RES = global <8 x i16> zeroinitializer, align 16

define void @llvm_mips_bmnz_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_bmnz_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bmnz.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bmnz_v_h_test
;
@llvm_mips_bmnz_v_w_ARG1 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_bmnz_v_w_ARG2 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_bmnz_v_w_RES = global <4 x i32> zeroinitializer, align 16

define void @llvm_mips_bmnz_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_bmnz_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bmnz.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bmnz_v_w_test
;
@llvm_mips_bmnz_v_d_ARG1 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_bmnz_v_d_ARG2 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_bmnz_v_d_RES = global <2 x i64> zeroinitializer, align 16

define void @llvm_mips_bmnz_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_bmnz_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bmnz.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bmnz_v_d_test
;
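; bmz.v: BMZ.V (Bit Move If Zero) is the complement of bmnz.v, copying each
; bit of ws into wd wherever the corresponding bit of wt is clear:
;   wd <- (ws & ~wt) | (wd & wt)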
@llvm_mips_bmz_v_b_ARG1 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_bmz_v_b_ARG2 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_bmz_v_b_RES = global <16 x i8> zeroinitializer, align 16

define void @llvm_mips_bmz_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_bmz_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bmz.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bmz_v_b_test
;
@llvm_mips_bmz_v_h_ARG1 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_bmz_v_h_ARG2 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_bmz_v_h_RES = global <8 x i16> zeroinitializer, align 16

define void @llvm_mips_bmz_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_bmz_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bmz.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bmz_v_h_test
;
@llvm_mips_bmz_v_w_ARG1 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_bmz_v_w_ARG2 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_bmz_v_w_RES = global <4 x i32> zeroinitializer, align 16

define void @llvm_mips_bmz_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_bmz_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bmz.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bmz_v_w_test
;
@llvm_mips_bmz_v_d_ARG1 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_bmz_v_d_ARG2 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_bmz_v_d_RES = global <2 x i64> zeroinitializer, align 16

define void @llvm_mips_bmz_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_bmz_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bmz.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bmz_v_d_test
;
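; bsel.v: BSEL.V (Bit Select) uses the destination register as the mask,
; selecting bits from ws where the mask bit is clear and from wt where it is
; set:
;   wd <- (ws & ~wd) | (wt & wd)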
@llvm_mips_bsel_v_b_ARG1 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_bsel_v_b_ARG2 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_bsel_v_b_RES = global <16 x i8> zeroinitializer, align 16

define void @llvm_mips_bsel_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_bsel_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bsel.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bsel_v_b_test
;
@llvm_mips_bsel_v_h_ARG1 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_bsel_v_h_ARG2 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_bsel_v_h_RES = global <8 x i16> zeroinitializer, align 16

define void @llvm_mips_bsel_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_bsel_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bsel.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bsel_v_h_test
;
@llvm_mips_bsel_v_w_ARG1 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_bsel_v_w_ARG2 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_bsel_v_w_RES = global <4 x i32> zeroinitializer, align 16

define void @llvm_mips_bsel_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_bsel_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bsel.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bsel_v_w_test
;
@llvm_mips_bsel_v_d_ARG1 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_bsel_v_d_ARG2 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_bsel_v_d_RES = global <2 x i64> zeroinitializer, align 16

define void @llvm_mips_bsel_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_bsel_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: bsel.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_bsel_v_d_test
;
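; nor.v: bitwise NOR of the two operands, wd <- ~(ws | wt).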
@llvm_mips_nor_v_b_ARG1 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_nor_v_b_ARG2 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_nor_v_b_RES = global <16 x i8> zeroinitializer, align 16

define void @llvm_mips_nor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_nor_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_nor_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_nor_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_b_test
;
@llvm_mips_nor_v_h_ARG1 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_nor_v_h_ARG2 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_nor_v_h_RES = global <8 x i16> zeroinitializer, align 16

define void @llvm_mips_nor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_nor_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_nor_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_nor_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_h_test
;
@llvm_mips_nor_v_w_ARG1 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_nor_v_w_ARG2 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_nor_v_w_RES = global <4 x i32> zeroinitializer, align 16

define void @llvm_mips_nor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_nor_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_nor_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_nor_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_w_test
;
@llvm_mips_nor_v_d_ARG1 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_nor_v_d_ARG2 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_nor_v_d_RES = global <2 x i64> zeroinitializer, align 16

define void @llvm_mips_nor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_nor_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_nor_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_nor_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_d_test
;
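; or.v and xor.v: bitwise OR (wd <- ws | wt) and XOR (wd <- ws ^ wt) of the
; two operands; the tests follow the same load/op/store pattern as above.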
@llvm_mips_or_v_b_ARG1 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_or_v_b_ARG2 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_or_v_b_RES = global <16 x i8> zeroinitializer, align 16

define void @llvm_mips_or_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_or_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_b_test
;
@llvm_mips_or_v_h_ARG1 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_or_v_h_ARG2 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_or_v_h_RES = global <8 x i16> zeroinitializer, align 16

define void @llvm_mips_or_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_or_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_h_test
;
@llvm_mips_or_v_w_ARG1 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_or_v_w_ARG2 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_or_v_w_RES = global <4 x i32> zeroinitializer, align 16

define void @llvm_mips_or_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_or_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_w_test
;
@llvm_mips_or_v_d_ARG1 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_or_v_d_ARG2 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_or_v_d_RES = global <2 x i64> zeroinitializer, align 16

define void @llvm_mips_or_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_or_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_d_test
;
@llvm_mips_xor_v_b_ARG1 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_xor_v_b_ARG2 = global <16 x i8> zeroinitializer, align 16
@llvm_mips_xor_v_b_RES = global <16 x i8> zeroinitializer, align 16

define void @llvm_mips_xor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_xor_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_b_test
;
@llvm_mips_xor_v_h_ARG1 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_xor_v_h_ARG2 = global <8 x i16> zeroinitializer, align 16
@llvm_mips_xor_v_h_RES = global <8 x i16> zeroinitializer, align 16

define void @llvm_mips_xor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_xor_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_h_test
;
@llvm_mips_xor_v_w_ARG1 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_xor_v_w_ARG2 = global <4 x i32> zeroinitializer, align 16
@llvm_mips_xor_v_w_RES = global <4 x i32> zeroinitializer, align 16

define void @llvm_mips_xor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_xor_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_w_test
;
@llvm_mips_xor_v_d_ARG1 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_xor_v_d_ARG2 = global <2 x i64> zeroinitializer, align 16
@llvm_mips_xor_v_d_RES = global <2 x i64> zeroinitializer, align 16

define void @llvm_mips_xor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_xor_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_d_test
;
declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bmz.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bsel.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.nor.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.or.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.xor.v(<16 x i8>, <16 x i8>) nounwind