1 ; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
3 ; Test the MSA intrinsics that are encoded with the VEC instruction format.
; and.v test: load two <16 x i8> globals, combine them with the MSA and.v
; intrinsic, and store the result into _RES.  The bitcasts are no-ops
; (<16 x i8> to <16 x i8>); presumably kept for symmetry with sibling MSA
; tests at other element widths -- confirm against the full test suite.
; NOTE(review): this listing uses the pre-LLVM-3.8 typed-pointer form
; `load <16 x i8>* @g` (modern IR: `load <16 x i8>, ptr @g`), and the
; function's `ret void` / `}` plus the per-instruction CHECK lines are
; missing from this excerpt (line numbers jump 17 -> 21) -- verify the
; full file before editing.
5 @llvm_mips_and_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
6 @llvm_mips_and_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
7 @llvm_mips_and_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
9 define void @llvm_mips_and_v_b_test() nounwind {
11 %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
12 %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
13 %2 = bitcast <16 x i8> %0 to <16 x i8>
14 %3 = bitcast <16 x i8> %1 to <16 x i8>
15 %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
16 %5 = bitcast <16 x i8> %4 to <16 x i8>
17 store <16 x i8> %5, <16 x i8>* @llvm_mips_and_v_b_RES
21 ; CHECK: llvm_mips_and_v_b_test:
26 ; CHECK: .size llvm_mips_and_v_b_test
; bmnz.v test: same load/intrinsic/store shape as the and.v test above,
; exercising the bmnz.v (bit move if not zero) intrinsic.
; NOTE(review): `ret void` / `}` and the instruction CHECK lines are not
; visible in this excerpt (line numbers jump 40 -> 44) -- confirm against
; the full file.
28 @llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
29 @llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
30 @llvm_mips_bmnz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
32 define void @llvm_mips_bmnz_v_b_test() nounwind {
34 %0 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
35 %1 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
36 %2 = bitcast <16 x i8> %0 to <16 x i8>
37 %3 = bitcast <16 x i8> %1 to <16 x i8>
38 %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
39 %5 = bitcast <16 x i8> %4 to <16 x i8>
40 store <16 x i8> %5, <16 x i8>* @llvm_mips_bmnz_v_b_RES
44 ; CHECK: llvm_mips_bmnz_v_b_test:
49 ; CHECK: .size llvm_mips_bmnz_v_b_test
; bmz.v test: same load/intrinsic/store shape as the tests above,
; exercising the bmz.v (bit move if zero) intrinsic.
; NOTE(review): `ret void` / `}` and the instruction CHECK lines are not
; visible in this excerpt (line numbers jump 63 -> 67) -- confirm against
; the full file.
51 @llvm_mips_bmz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
52 @llvm_mips_bmz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
53 @llvm_mips_bmz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
55 define void @llvm_mips_bmz_v_b_test() nounwind {
57 %0 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG1
58 %1 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG2
59 %2 = bitcast <16 x i8> %0 to <16 x i8>
60 %3 = bitcast <16 x i8> %1 to <16 x i8>
61 %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
62 %5 = bitcast <16 x i8> %4 to <16 x i8>
63 store <16 x i8> %5, <16 x i8>* @llvm_mips_bmz_v_b_RES
67 ; CHECK: llvm_mips_bmz_v_b_test:
72 ; CHECK: .size llvm_mips_bmz_v_b_test
; <8 x i16> operand/result globals for a bmz.v halfword variant.
; NOTE(review): no @llvm_mips_bmz_v_h_test function is visible in this
; excerpt, so these globals appear unused here -- either the matching test
; was elided from this listing or it was never added; check the full file
; before removing them.
74 @llvm_mips_bmz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
75 @llvm_mips_bmz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
76 @llvm_mips_bmz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
; bsel.v test: same load/intrinsic/store shape as the tests above,
; exercising the bsel.v (bit select) intrinsic.
; NOTE(review): `ret void` / `}` and the instruction CHECK lines are not
; visible in this excerpt (line numbers jump 90 -> 94) -- confirm against
; the full file.
78 @llvm_mips_bsel_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
79 @llvm_mips_bsel_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
80 @llvm_mips_bsel_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
82 define void @llvm_mips_bsel_v_b_test() nounwind {
84 %0 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG1
85 %1 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG2
86 %2 = bitcast <16 x i8> %0 to <16 x i8>
87 %3 = bitcast <16 x i8> %1 to <16 x i8>
88 %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %3)
89 %5 = bitcast <16 x i8> %4 to <16 x i8>
90 store <16 x i8> %5, <16 x i8>* @llvm_mips_bsel_v_b_RES
94 ; CHECK: llvm_mips_bsel_v_b_test:
99 ; CHECK: .size llvm_mips_bsel_v_b_test
; nor.v test: same load/intrinsic/store shape as the tests above,
; exercising the nor.v intrinsic.
; NOTE(review): `ret void` / `}` and the instruction CHECK lines are not
; visible in this excerpt (line numbers jump 113 -> 117) -- confirm
; against the full file.
101 @llvm_mips_nor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
102 @llvm_mips_nor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
103 @llvm_mips_nor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
105 define void @llvm_mips_nor_v_b_test() nounwind {
107 %0 = load <16 x i8>* @llvm_mips_nor_v_b_ARG1
108 %1 = load <16 x i8>* @llvm_mips_nor_v_b_ARG2
109 %2 = bitcast <16 x i8> %0 to <16 x i8>
110 %3 = bitcast <16 x i8> %1 to <16 x i8>
111 %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
112 %5 = bitcast <16 x i8> %4 to <16 x i8>
113 store <16 x i8> %5, <16 x i8>* @llvm_mips_nor_v_b_RES
117 ; CHECK: llvm_mips_nor_v_b_test:
122 ; CHECK: .size llvm_mips_nor_v_b_test
; or.v test: same load/intrinsic/store shape as the tests above,
; exercising the or.v intrinsic.
; NOTE(review): `ret void` / `}` and the instruction CHECK lines are not
; visible in this excerpt (line numbers jump 136 -> 140) -- confirm
; against the full file.
124 @llvm_mips_or_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
125 @llvm_mips_or_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
126 @llvm_mips_or_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
128 define void @llvm_mips_or_v_b_test() nounwind {
130 %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
131 %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
132 %2 = bitcast <16 x i8> %0 to <16 x i8>
133 %3 = bitcast <16 x i8> %1 to <16 x i8>
134 %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
135 %5 = bitcast <16 x i8> %4 to <16 x i8>
136 store <16 x i8> %5, <16 x i8>* @llvm_mips_or_v_b_RES
140 ; CHECK: llvm_mips_or_v_b_test:
145 ; CHECK: .size llvm_mips_or_v_b_test
; xor.v test: same load/intrinsic/store shape as the tests above,
; exercising the xor.v intrinsic.
; NOTE(review): `ret void` / `}` and the instruction CHECK lines are not
; visible in this excerpt (line numbers jump 159 -> 163) -- confirm
; against the full file.
147 @llvm_mips_xor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
148 @llvm_mips_xor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
149 @llvm_mips_xor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
151 define void @llvm_mips_xor_v_b_test() nounwind {
153 %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
154 %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
155 %2 = bitcast <16 x i8> %0 to <16 x i8>
156 %3 = bitcast <16 x i8> %1 to <16 x i8>
157 %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
158 %5 = bitcast <16 x i8> %4 to <16 x i8>
159 store <16 x i8> %5, <16 x i8>* @llvm_mips_xor_v_b_RES
163 ; CHECK: llvm_mips_xor_v_b_test:
168 ; CHECK: .size llvm_mips_xor_v_b_test
; Declarations of the two-operand MSA bitwise intrinsics exercised by the
; tests above.  All operate on <16 x i8>; the "_b/_h/..." element-width
; variants in the test names map onto these single untyped-vector (.v)
; intrinsics via the no-op bitcasts in each test body.
170 declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
171 declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>) nounwind
172 declare <16 x i8> @llvm.mips.bmz.v(<16 x i8>, <16 x i8>) nounwind
173 declare <16 x i8> @llvm.mips.bsel.v(<16 x i8>, <16 x i8>) nounwind
174 declare <16 x i8> @llvm.mips.nor.v(<16 x i8>, <16 x i8>) nounwind
175 declare <16 x i8> @llvm.mips.or.v(<16 x i8>, <16 x i8>) nounwind
176 declare <16 x i8> @llvm.mips.xor.v(<16 x i8>, <16 x i8>) nounwind