1 ; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
2 ; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
3 ; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
4 ; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN
6 ; Very basic fast-isel functionality.
; Spills both incoming arguments to stack slots, reloads them, and adds —
; exercises fast-isel's alloca/store/load lowering plus a basic ALU op.
; NOTE(review): the 'ret'/'}' terminating this function is not visible in
; this chunk; the tail of the body appears to have been elided.
7 define i32 @add(i32 %a, i32 %b) nounwind {
9 %a.addr = alloca i32, align 4
10 %b.addr = alloca i32, align 4
11 store i32 %a, i32* %a.addr
12 store i32 %b, i32* %b.addr
13 %tmp = load i32* %a.addr
14 %tmp1 = load i32* %b.addr
15 %add = add nsw i32 %tmp, %tmp1
19 ; Check truncate to bool
; Truncate i32 -> i1 and branch on it; the taken arm makes a (self-)call.
; Exercises fast-isel's handling of i1 truncation feeding a conditional
; branch. NOTE(review): block bodies and the closing brace are elided here.
20 define void @test1(i32 %tmp) nounwind {
22 %tobool = trunc i32 %tmp to i1
23 br i1 %tobool, label %if.then, label %if.end
25 if.then: ; preds = %entry
26 call void @test1(i32 0)
29 if.end: ; preds = %if.then, %entry
37 ; Check some simple operations with immediates
; Adds with immediates chosen to straddle encoding boundaries:
; 4096 fits a rotated-immediate (ARM mode) / add.w (Thumb2), while 4095
; needs addw (Thumb2) or a movw-materialized constant (ARM mode).
; NOTE(review): the instruction defining %c (stored below) is on an elided
; line — presumably an 'or' given the final check — confirm against the
; original test.
38 define void @test2(i32 %tmp, i32* %ptr) nounwind {
43 %a = add i32 %tmp, 4096
44 store i32 %a, i32* %ptr
47 ; THUMB: add.w {{.*}} #4096
48 ; ARM: add {{.*}} #4096
51 %b = add i32 %tmp, 4095
52 store i32 %b, i32* %ptr
54 ; THUMB: addw {{.*}} #4095
55 ; ARM: movw {{.*}} #4095
60 store i32 %c, i32* %ptr
63 ; THUMB: orr {{.*}} #4
; Chains of truncations and extensions through i16/i8/i1, each result
; stored through the matching-width pointer: first a trunc+zext chain,
; then a trunc+sext chain, then loads that are zext/sext'ed back to i32
; and combined. Exercises fast-isel's integer extend/truncate selection.
; NOTE(review): the load of %c1 (used below) and the function terminator
; are on elided lines.
67 define void @test3(i32 %tmp, i32* %ptr1, i16* %ptr2, i8* %ptr3) nounwind {
72 %a1 = trunc i32 %tmp to i16
73 %a2 = trunc i16 %a1 to i8
74 %a3 = trunc i8 %a2 to i1
75 %a4 = zext i1 %a3 to i8
76 store i8 %a4, i8* %ptr3
77 %a5 = zext i8 %a4 to i16
78 store i16 %a5, i16* %ptr2
79 %a6 = zext i16 %a5 to i32
80 store i32 %a6, i32* %ptr1
95 %b1 = trunc i32 %tmp to i16
96 %b2 = trunc i16 %b1 to i8
97 store i8 %b2, i8* %ptr3
98 %b3 = sext i8 %b2 to i16
99 store i16 %b3, i16* %ptr2
100 %b4 = sext i16 %b3 to i32
101 store i32 %b4, i32* %ptr1
115 %c2 = load i16* %ptr2
116 %c3 = load i32* %ptr1
117 %c4 = zext i8 %c1 to i32
118 %c5 = sext i16 %c2 to i32
119 %c6 = add i32 %c4, %c5
120 %c7 = sub i32 %c3, %c6
121 store i32 %c7, i32* %ptr1
138 ; Check loads/stores with globals
139 @test4g = external global i32
; Load a global, modify it, store it back; the checks accept either a
; movw/movt pair or a constant-pool load for the non-lazy-pointer address.
; NOTE(review): %b (stored below) is defined on an elided line — the
; ARM-mode check 'add r1, r1, #1' suggests it was '%b = add i32 %a, 1';
; confirm against the original test.
141 define void @test4() {
142 %a = load i32* @test4g
144 store i32 %b, i32* @test4g
148 ; Note that relocations are either movw/movt or constant pool
149 ; loads. Different platforms will select different approaches.
151 ; THUMB: {{(movw r0, :lower16:L_test4g\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
152 ; THUMB: {{(movt r0, :upper16:L_test4g\$non_lazy_ptr)?}}
153 ; THUMB: ldr r0, [r0]
154 ; THUMB: ldr r1, [r0]
156 ; THUMB: str r1, [r0]
158 ; ARM: {{(movw r0, :lower16:L_test4g\$non_lazy_ptr)|(ldr r0, .LCPI)}}
159 ; ARM: {{(movt r0, :upper16:L_test4g\$non_lazy_ptr)?}}
162 ; ARM: add r1, r1, #1
166 ; Check unaligned stores
167 %struct.anon = type <{ float }>
169 @a = common global %struct.anon* null, align 4
; Store a float result with only byte alignment (packed struct member);
; on these non-strict-align targets a plain str is still expected.
171 define void @unaligned_store(float %x, float %y) nounwind {
173 ; ARM: @unaligned_store
177 ; THUMB: @unaligned_store
179 ; THUMB: str r1, [r0]
181 %add = fadd float %x, %y
182 %0 = load %struct.anon** @a, align 4
183 %x1 = getelementptr inbounds %struct.anon* %0, i32 0, i32 0
184 store float %add, float* %x1, align 1
188 ; Doublewords require only word-alignment.
190 %struct.anon.0 = type { double }
192 @foo_unpacked = common global %struct.anon.0 zeroinitializer, align 4
; A double store to a 4-byte-aligned global must still select vstr,
; since doubleword accesses only need word alignment on this target.
194 define void @test5(double %a, double %b) nounwind {
198 %add = fadd double %a, %b
199 store double %add, double* getelementptr inbounds (%struct.anon.0* @foo_unpacked, i32 0, i32 0), align 4
200 ; ARM: vstr d16, [r0]
201 ; THUMB: vstr d16, [r0]
205 ; Check unaligned loads of floats
206 %class.TAlignTest = type <{ i16, float }>
; Load a float member at offset 2 of a packed class (align 1) and compare
; it against zero; the checks expect an integer ldr at [r0, #2] feeding
; the VFP compare rather than a vldr, since vldr cannot take that offset
; alignment here.
208 define zeroext i1 @test6(%class.TAlignTest* %this) nounwind align 2 {
212 %0 = alloca %class.TAlignTest*, align 4
213 store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4
214 %1 = load %class.TAlignTest** %0
215 %2 = getelementptr inbounds %class.TAlignTest* %1, i32 0, i32 1
216 %3 = load float* %2, align 1
217 %4 = fcmp une float %3, 0.000000e+00
218 ; ARM: ldr r0, [r0, #2]
220 ; ARM: vcmpe.f32 s0, #0
221 ; THUMB: ldr.w r0, [r0, #2]
223 ; THUMB: vcmpe.f32 s0, #0
; urem by the power-of-two 32 should fold to a bitwise and with 31
; in both encodings, never a runtime division/modulo sequence.
229 ; ARM: and r0, r0, #31
230 ; THUMB: and r0, r0, #31
231 define i32 @urem_fold(i32 %a) nounwind {
232 %rem = urem i32 %a, 32
; A noreturn function whose body tail-calls the trap intrinsic.
; NOTE(review): the unreachable/terminator lines and closing brace are
; elided in this chunk.
236 define i32 @test7() noreturn nounwind {
242 tail call void @llvm.trap( )
246 declare void @llvm.trap() nounwind
; With -arm-strict-align, a halfword store at align 1 must be split
; into two byte stores (checked for both ARM and Thumb strict-align runs).
248 define void @unaligned_i16_store(i16 %x, i16* %y) nounwind {
250 ; ARM-STRICT-ALIGN: @unaligned_i16_store
251 ; ARM-STRICT-ALIGN: strb
252 ; ARM-STRICT-ALIGN: strb
254 ; THUMB-STRICT-ALIGN: @unaligned_i16_store
255 ; THUMB-STRICT-ALIGN: strb
256 ; THUMB-STRICT-ALIGN: strb
258 store i16 %x, i16* %y, align 1
; With -arm-strict-align, a halfword load at align 1 must be split
; into two byte loads.
262 define i16 @unaligned_i16_load(i16* %x) nounwind {
264 ; ARM-STRICT-ALIGN: @unaligned_i16_load
265 ; ARM-STRICT-ALIGN: ldrb
266 ; ARM-STRICT-ALIGN: ldrb
268 ; THUMB-STRICT-ALIGN: @unaligned_i16_load
269 ; THUMB-STRICT-ALIGN: ldrb
270 ; THUMB-STRICT-ALIGN: ldrb
272 %0 = load i16* %x, align 1
; With -arm-strict-align, a word store at align 1 must be split
; into four byte stores.
276 define void @unaligned_i32_store(i32 %x, i32* %y) nounwind {
278 ; ARM-STRICT-ALIGN: @unaligned_i32_store
279 ; ARM-STRICT-ALIGN: strb
280 ; ARM-STRICT-ALIGN: strb
281 ; ARM-STRICT-ALIGN: strb
282 ; ARM-STRICT-ALIGN: strb
284 ; THUMB-STRICT-ALIGN: @unaligned_i32_store
285 ; THUMB-STRICT-ALIGN: strb
286 ; THUMB-STRICT-ALIGN: strb
287 ; THUMB-STRICT-ALIGN: strb
288 ; THUMB-STRICT-ALIGN: strb
290 store i32 %x, i32* %y, align 1
; With -arm-strict-align, a word load at align 1 must be split
; into four byte loads. NOTE(review): the 'ret' and closing brace are
; past the end of this chunk.
294 define i32 @unaligned_i32_load(i32* %x) nounwind {
296 ; ARM-STRICT-ALIGN: @unaligned_i32_load
297 ; ARM-STRICT-ALIGN: ldrb
298 ; ARM-STRICT-ALIGN: ldrb
299 ; ARM-STRICT-ALIGN: ldrb
300 ; ARM-STRICT-ALIGN: ldrb
302 ; THUMB-STRICT-ALIGN: @unaligned_i32_load
303 ; THUMB-STRICT-ALIGN: ldrb
304 ; THUMB-STRICT-ALIGN: ldrb
305 ; THUMB-STRICT-ALIGN: ldrb
306 ; THUMB-STRICT-ALIGN: ldrb
308 %0 = load i32* %x, align 1