1 ; RUN: llc < %s -march=arm -mattr=+neon,+fp16 | FileCheck %s
; D-register conversions between <2 x float> and <2 x i32> using the plain
; IR casts fptosi / fptoui / sitofp / uitofp; each should lower to a single
; 64-bit NEON vcvt instruction.
; NOTE(review): this chunk appears to have lines elided (missing `ret`,
; closing `}`, and the first CHECK label) -- confirm against the upstream
; ARM/vcvt.ll test before relying on exact line content.
3 define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind {
6 %tmp1 = load <2 x float>* %A
7 %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
11 define <2 x i32> @vcvt_f32tou32(<2 x float>* %A) nounwind {
12 ;CHECK: vcvt_f32tou32:
14 %tmp1 = load <2 x float>* %A
15 %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
19 define <2 x float> @vcvt_s32tof32(<2 x i32>* %A) nounwind {
20 ;CHECK: vcvt_s32tof32:
22 %tmp1 = load <2 x i32>* %A
23 %tmp2 = sitofp <2 x i32> %tmp1 to <2 x float>
27 define <2 x float> @vcvt_u32tof32(<2 x i32>* %A) nounwind {
28 ;CHECK: vcvt_u32tof32:
30 %tmp1 = load <2 x i32>* %A
31 %tmp2 = uitofp <2 x i32> %tmp1 to <2 x float>
; Q-register (128-bit) versions of the same four conversions, on <4 x float>
; and <4 x i32> vectors.
; NOTE(review): `ret` and closing `}` lines appear elided from this chunk --
; confirm against the upstream test file.
35 define <4 x i32> @vcvtQ_f32tos32(<4 x float>* %A) nounwind {
36 ;CHECK: vcvtQ_f32tos32:
38 %tmp1 = load <4 x float>* %A
39 %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
43 define <4 x i32> @vcvtQ_f32tou32(<4 x float>* %A) nounwind {
44 ;CHECK: vcvtQ_f32tou32:
46 %tmp1 = load <4 x float>* %A
47 %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
51 define <4 x float> @vcvtQ_s32tof32(<4 x i32>* %A) nounwind {
52 ;CHECK: vcvtQ_s32tof32:
54 %tmp1 = load <4 x i32>* %A
55 %tmp2 = sitofp <4 x i32> %tmp1 to <4 x float>
59 define <4 x float> @vcvtQ_u32tof32(<4 x i32>* %A) nounwind {
60 ;CHECK: vcvtQ_u32tof32:
62 %tmp1 = load <4 x i32>* %A
63 %tmp2 = uitofp <4 x i32> %tmp1 to <4 x float>
; Fixed-point conversions (d-register form) via the ARM NEON vcvt intrinsics;
; the trailing i32 1 operand is the number of fraction bits (#fbits for the
; vcvt "_n" instruction variants).
; NOTE(review): `ret` and closing `}` lines appear elided from this chunk.
67 define <2 x i32> @vcvt_n_f32tos32(<2 x float>* %A) nounwind {
68 ;CHECK: vcvt_n_f32tos32:
70 %tmp1 = load <2 x float>* %A
71 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float> %tmp1, i32 1)
75 define <2 x i32> @vcvt_n_f32tou32(<2 x float>* %A) nounwind {
76 ;CHECK: vcvt_n_f32tou32:
78 %tmp1 = load <2 x float>* %A
79 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float> %tmp1, i32 1)
83 define <2 x float> @vcvt_n_s32tof32(<2 x i32>* %A) nounwind {
84 ;CHECK: vcvt_n_s32tof32:
86 %tmp1 = load <2 x i32>* %A
87 %tmp2 = call <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %tmp1, i32 1)
91 define <2 x float> @vcvt_n_u32tof32(<2 x i32>* %A) nounwind {
92 ;CHECK: vcvt_n_u32tof32:
94 %tmp1 = load <2 x i32>* %A
95 %tmp2 = call <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %tmp1, i32 1)
; Declarations for the d-register fixed-point conversion intrinsics used above.
99 declare <2 x i32> @llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float>, i32) nounwind readnone
100 declare <2 x i32> @llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float>, i32) nounwind readnone
101 declare <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
102 declare <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
; Fixed-point conversions (q-register form) via the v4 variants of the same
; intrinsics; again i32 1 is the fraction-bit count.
; NOTE(review): some `ret` and `}` lines appear elided from this chunk.
104 define <4 x i32> @vcvtQ_n_f32tos32(<4 x float>* %A) nounwind {
105 ;CHECK: vcvtQ_n_f32tos32:
107 %tmp1 = load <4 x float>* %A
108 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float> %tmp1, i32 1)
112 define <4 x i32> @vcvtQ_n_f32tou32(<4 x float>* %A) nounwind {
113 ;CHECK: vcvtQ_n_f32tou32:
115 %tmp1 = load <4 x float>* %A
116 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float> %tmp1, i32 1)
120 define <4 x float> @vcvtQ_n_s32tof32(<4 x i32>* %A) nounwind {
121 ;CHECK: vcvtQ_n_s32tof32:
123 %tmp1 = load <4 x i32>* %A
124 %tmp2 = call <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %tmp1, i32 1)
125 ret <4 x float> %tmp2
128 define <4 x float> @vcvtQ_n_u32tof32(<4 x i32>* %A) nounwind {
129 ;CHECK: vcvtQ_n_u32tof32:
131 %tmp1 = load <4 x i32>* %A
132 %tmp2 = call <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %tmp1, i32 1)
133 ret <4 x float> %tmp2
; Declarations for the q-register fixed-point conversion intrinsics used above.
136 declare <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float>, i32) nounwind readnone
137 declare <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float>, i32) nounwind readnone
138 declare <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
139 declare <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
; Half <-> single precision conversions via the vcvthf2fp / vcvtfp2hf
; intrinsics (requires +fp16 from the RUN line; half values travel as
; <4 x i16> bit patterns).
; NOTE(review): the `ret`/`}` of @vcvt_f32tof16 appears elided in this chunk.
141 define <4 x float> @vcvt_f16tof32(<4 x i16>* %A) nounwind {
142 ;CHECK: vcvt_f16tof32:
144 %tmp1 = load <4 x i16>* %A
145 %tmp2 = call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %tmp1)
146 ret <4 x float> %tmp2
149 define <4 x i16> @vcvt_f32tof16(<4 x float>* %A) nounwind {
150 ;CHECK: vcvt_f32tof16:
152 %tmp1 = load <4 x float>* %A
153 %tmp2 = call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %tmp1)
157 declare <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16>) nounwind readnone
158 declare <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float>) nounwind readnone
160 ; We currently estimate the cost of sext/zext/trunc v8(v16)i32 <-> v8(v16)i8
161 ; instructions as expensive. If lowering is improved the cost model needs to change.
163 ; RUN: opt < %s -cost-model -analyze -mtriple=thumbv7-apple-ios6.0.0 -march=arm -mcpu=cortex-a8 | FileCheck %s --check-prefix=COST
; Cost-model check: sext <8 x i8> -> <8 x i32> is expected to cost 3
; (see the COST directive below, checked by the opt -cost-model RUN line).
; NOTE(review): closing `}` of this function appears elided in this chunk.
164 %T0_5 = type <8 x i8>
165 %T1_5 = type <8 x i32>
167 define void @func_cvt5(%T0_5* %loadaddr, %T1_5* %storeaddr) {
171 %v0 = load %T0_5* %loadaddr
173 ; COST: cost of 3 {{.*}} sext
174 %r = sext %T0_5 %v0 to %T1_5
175 store %T1_5 %r, %T1_5* %storeaddr
; Cost-model check: zext <8 x i8> -> <8 x i32> is expected to cost 3.
; NOTE(review): closing `}` of this function appears elided in this chunk.
178 ;; We currently estimate the cost of this instruction as expensive. If lowering
179 ;; is improved the cost needs to change.
180 %TA0_5 = type <8 x i8>
181 %TA1_5 = type <8 x i32>
183 define void @func_cvt1(%TA0_5* %loadaddr, %TA1_5* %storeaddr) {
187 %v0 = load %TA0_5* %loadaddr
189 ; COST: cost of 3 {{.*}} zext
190 %r = zext %TA0_5 %v0 to %TA1_5
191 store %TA1_5 %r, %TA1_5* %storeaddr
; Cost-model check: trunc <8 x i32> -> <8 x i8> is expected to cost 19
; (truncation of wide vectors is modeled as expensive; see comment below).
; NOTE(review): closing `}` of this function appears elided in this chunk.
194 ;; We currently estimate the cost of this instruction as expensive. If lowering
195 ;; is improved the cost needs to change.
196 %T0_51 = type <8 x i32>
197 %T1_51 = type <8 x i8>
199 define void @func_cvt51(%T0_51* %loadaddr, %T1_51* %storeaddr) {
208 %v0 = load %T0_51* %loadaddr
210 ; COST: cost of 19 {{.*}} trunc
211 %r = trunc %T0_51 %v0 to %T1_51
212 store %T1_51 %r, %T1_51* %storeaddr
; Cost-model check: sext <16 x i8> -> <16 x i32> is expected to cost 6
; (double the <8 x> case above, consistent with splitting in half).
; NOTE(review): closing `}` of this function appears elided in this chunk.
215 ;; We currently estimate the cost of this instruction as expensive. If lowering
216 ;; is improved the cost needs to change.
217 %TT0_5 = type <16 x i8>
218 %TT1_5 = type <16 x i32>
220 define void @func_cvt52(%TT0_5* %loadaddr, %TT1_5* %storeaddr) {
225 %v0 = load %TT0_5* %loadaddr
227 ; COST: cost of 6 {{.*}} sext
228 %r = sext %TT0_5 %v0 to %TT1_5
229 store %TT1_5 %r, %TT1_5* %storeaddr
; Cost-model check: zext <16 x i8> -> <16 x i32> is expected to cost 6.
; NOTE(review): closing `}` of this function appears elided in this chunk.
232 ;; We currently estimate the cost of this instruction as expensive. If lowering
233 ;; is improved the cost needs to change.
234 %TTA0_5 = type <16 x i8>
235 %TTA1_5 = type <16 x i32>
237 define void @func_cvt12(%TTA0_5* %loadaddr, %TTA1_5* %storeaddr) {
242 %v0 = load %TTA0_5* %loadaddr
244 ; COST: cost of 6 {{.*}} zext
245 %r = zext %TTA0_5 %v0 to %TTA1_5
246 store %TTA1_5 %r, %TTA1_5* %storeaddr
; Cost-model check: trunc <16 x i32> -> <16 x i8> is expected to cost 38.
; This function also carries a CHECK label, so its codegen is inspected by
; the FileCheck RUN line as well as the COST prefix.
; NOTE(review): closing `}` of this function appears elided in this chunk.
249 ;; We currently estimate the cost of this instruction as expensive. If lowering
250 ;; is improved the cost needs to change.
251 %TT0_51 = type <16 x i32>
252 %TT1_51 = type <16 x i8>
253 ; CHECK: func_cvt512:
254 define void @func_cvt512(%TT0_51* %loadaddr, %TT1_51* %storeaddr) {
271 %v0 = load %TT0_51* %loadaddr
273 ; COST: cost of 38 {{.*}} trunc
274 %r = trunc %TT0_51 %v0 to %TT1_51
275 store %TT1_51 %r, %TT1_51* %storeaddr
; Cost-model check: sext <4 x i16> -> <4 x i64> is expected to cost 3.
; NOTE(review): closing `}` of this function appears elided in this chunk.
279 ; CHECK: sext_v4i16_v4i64:
280 define void @sext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
283 %v0 = load <4 x i16>* %loadaddr
284 ; COST: sext_v4i16_v4i64
285 ; COST: cost of 3 {{.*}} sext
286 %r = sext <4 x i16> %v0 to <4 x i64>
287 store <4 x i64> %r, <4 x i64>* %storeaddr
; Cost-model check: zext <4 x i16> -> <4 x i64> is expected to cost 3.
; NOTE(review): closing `}` of this function appears elided in this chunk.
291 ; CHECK: zext_v4i16_v4i64:
292 define void @zext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
295 %v0 = load <4 x i16>* %loadaddr
296 ; COST: zext_v4i16_v4i64
297 ; COST: cost of 3 {{.*}} zext
298 %r = zext <4 x i16> %v0 to <4 x i64>
299 store <4 x i64> %r, <4 x i64>* %storeaddr
; Cost-model check: sext <8 x i16> -> <8 x i64> is expected to cost 6
; (double the <4 x> case above).
; NOTE(review): closing `}` of this function appears elided in this chunk.
303 ; CHECK: sext_v8i16_v8i64:
304 define void @sext_v8i16_v8i64(<8 x i16>* %loadaddr, <8 x i64>* %storeaddr) {
309 %v0 = load <8 x i16>* %loadaddr
310 ; COST: sext_v8i16_v8i64
311 ; COST: cost of 6 {{.*}} sext
312 %r = sext <8 x i16> %v0 to <8 x i64>
313 store <8 x i64> %r, <8 x i64>* %storeaddr
; Cost-model check: zext <8 x i16> -> <8 x i64> is expected to cost 6.
; NOTE(review): closing `}` of this function (and possibly end-of-file
; content) appears elided in this chunk.
317 ; CHECK: zext_v8i16_v8i64:
318 define void @zext_v8i16_v8i64(<8 x i16>* %loadaddr, <8 x i64>* %storeaddr) {
323 %v0 = load <8 x i16>* %loadaddr
324 ; COST: zext_v8i16_v8i64
325 ; COST: cost of 6 {{.*}} zext
326 %r = zext <8 x i16> %v0 to <8 x i64>
327 store <8 x i64> %r, <8 x i64>* %storeaddr