; Test the MSA floating point to integer intrinsics that are encoded with the
; 2RF instruction format. This includes conversions but other instructions such
; as fclass are also here.
; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
7 @llvm_mips_fclass_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
8 @llvm_mips_fclass_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
; fclass.w: classify each f32 lane; result stored as <4 x i32> bitmask.
define void @llvm_mips_fclass_w_test() nounwind {
entry:
  %0 = load <4 x float>* @llvm_mips_fclass_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.fclass.w(<4 x float> %0)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_fclass_w_RES
  ret void
}
18 declare <4 x i32> @llvm.mips.fclass.w(<4 x float>) nounwind
; CHECK: llvm_mips_fclass_w_test:
; CHECK: ld.w
; CHECK: fclass.w
; CHECK: st.w
; CHECK: .size llvm_mips_fclass_w_test
26 @llvm_mips_fclass_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
27 @llvm_mips_fclass_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
; fclass.d: classify each f64 lane; result stored as <2 x i64> bitmask.
define void @llvm_mips_fclass_d_test() nounwind {
entry:
  %0 = load <2 x double>* @llvm_mips_fclass_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.fclass.d(<2 x double> %0)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_fclass_d_RES
  ret void
}
37 declare <2 x i64> @llvm.mips.fclass.d(<2 x double>) nounwind
; CHECK: llvm_mips_fclass_d_test:
; CHECK: ld.d
; CHECK: fclass.d
; CHECK: st.d
; CHECK: .size llvm_mips_fclass_d_test
45 @llvm_mips_ftrunc_s_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
46 @llvm_mips_ftrunc_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
; ftrunc_s.w: f32 -> signed i32, truncating toward zero.
define void @llvm_mips_ftrunc_s_w_test() nounwind {
entry:
  %0 = load <4 x float>* @llvm_mips_ftrunc_s_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float> %0)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_s_w_RES
  ret void
}
56 declare <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float>) nounwind
; CHECK: llvm_mips_ftrunc_s_w_test:
; CHECK: ld.w
; CHECK: ftrunc_s.w
; CHECK: st.w
; CHECK: .size llvm_mips_ftrunc_s_w_test
64 @llvm_mips_ftrunc_s_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
65 @llvm_mips_ftrunc_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
; ftrunc_s.d: f64 -> signed i64, truncating toward zero.
define void @llvm_mips_ftrunc_s_d_test() nounwind {
entry:
  %0 = load <2 x double>* @llvm_mips_ftrunc_s_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double> %0)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_s_d_RES
  ret void
}
75 declare <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double>) nounwind
; CHECK: llvm_mips_ftrunc_s_d_test:
; CHECK: ld.d
; CHECK: ftrunc_s.d
; CHECK: st.d
; CHECK: .size llvm_mips_ftrunc_s_d_test
83 @llvm_mips_ftrunc_u_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
84 @llvm_mips_ftrunc_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
; ftrunc_u.w: f32 -> unsigned i32, truncating toward zero.
define void @llvm_mips_ftrunc_u_w_test() nounwind {
entry:
  %0 = load <4 x float>* @llvm_mips_ftrunc_u_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float> %0)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_u_w_RES
  ret void
}
94 declare <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float>) nounwind
; CHECK: llvm_mips_ftrunc_u_w_test:
; CHECK: ld.w
; CHECK: ftrunc_u.w
; CHECK: st.w
; CHECK: .size llvm_mips_ftrunc_u_w_test
102 @llvm_mips_ftrunc_u_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
103 @llvm_mips_ftrunc_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
; ftrunc_u.d: f64 -> unsigned i64, truncating toward zero.
define void @llvm_mips_ftrunc_u_d_test() nounwind {
entry:
  %0 = load <2 x double>* @llvm_mips_ftrunc_u_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double> %0)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_u_d_RES
  ret void
}
113 declare <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double>) nounwind
; CHECK: llvm_mips_ftrunc_u_d_test:
; CHECK: ld.d
; CHECK: ftrunc_u.d
; CHECK: st.d
; CHECK: .size llvm_mips_ftrunc_u_d_test
121 @llvm_mips_ftint_s_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
122 @llvm_mips_ftint_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
; ftint_s.w: f32 -> signed i32, rounding per current rounding mode.
define void @llvm_mips_ftint_s_w_test() nounwind {
entry:
  %0 = load <4 x float>* @llvm_mips_ftint_s_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.ftint.s.w(<4 x float> %0)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_s_w_RES
  ret void
}
132 declare <4 x i32> @llvm.mips.ftint.s.w(<4 x float>) nounwind
; CHECK: llvm_mips_ftint_s_w_test:
; CHECK: ld.w
; CHECK: ftint_s.w
; CHECK: st.w
; CHECK: .size llvm_mips_ftint_s_w_test
140 @llvm_mips_ftint_s_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
141 @llvm_mips_ftint_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
; ftint_s.d: f64 -> signed i64, rounding per current rounding mode.
define void @llvm_mips_ftint_s_d_test() nounwind {
entry:
  %0 = load <2 x double>* @llvm_mips_ftint_s_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.ftint.s.d(<2 x double> %0)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_s_d_RES
  ret void
}
151 declare <2 x i64> @llvm.mips.ftint.s.d(<2 x double>) nounwind
; CHECK: llvm_mips_ftint_s_d_test:
; CHECK: ld.d
; CHECK: ftint_s.d
; CHECK: st.d
; CHECK: .size llvm_mips_ftint_s_d_test
159 @llvm_mips_ftint_u_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
160 @llvm_mips_ftint_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
; ftint_u.w: f32 -> unsigned i32, rounding per current rounding mode.
define void @llvm_mips_ftint_u_w_test() nounwind {
entry:
  %0 = load <4 x float>* @llvm_mips_ftint_u_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.ftint.u.w(<4 x float> %0)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_u_w_RES
  ret void
}
170 declare <4 x i32> @llvm.mips.ftint.u.w(<4 x float>) nounwind
; CHECK: llvm_mips_ftint_u_w_test:
; CHECK: ld.w
; CHECK: ftint_u.w
; CHECK: st.w
; CHECK: .size llvm_mips_ftint_u_w_test
178 @llvm_mips_ftint_u_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
179 @llvm_mips_ftint_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
; ftint_u.d: f64 -> unsigned i64, rounding per current rounding mode.
define void @llvm_mips_ftint_u_d_test() nounwind {
entry:
  %0 = load <2 x double>* @llvm_mips_ftint_u_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.ftint.u.d(<2 x double> %0)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_u_d_RES
  ret void
}
189 declare <2 x i64> @llvm.mips.ftint.u.d(<2 x double>) nounwind
; CHECK: llvm_mips_ftint_u_d_test:
; CHECK: ld.d
; CHECK: ftint_u.d
; CHECK: st.d
; CHECK: .size llvm_mips_ftint_u_d_test