; RUN: llc -mtriple=thumb-eabi < %s -o - | FileCheck %s

; Check that stack addresses are generated using a single ADD
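; (The Thumb1 add rd, sp, #imm form encodes the offset as imm8*4, covering 0-1020, so each address here fits in a single instruction.)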
define void @test1(i8** %p) {
  %x = alloca i8, align 1
  %y = alloca i8, align 1
  %z = alloca i8, align 1
; CHECK: add r1, sp, #8
; CHECK: str r1, [r0]
  store i8* %x, i8** %p, align 4
; CHECK: add r1, sp, #4
; CHECK: str r1, [r0]
  store i8* %y, i8** %p, align 4
; CHECK: mov r1, sp
; CHECK: str r1, [r0]
  store i8* %z, i8** %p, align 4
  ret void
}

; Stack offsets larger than 1020 still need two ADDs
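; (%arr1 ends up just past that range, at sp+1024, so its address takes an add of #1020 plus an adds of #4.)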
define void @test2([1024 x i8]** %p) {
  %arr1 = alloca [1024 x i8], align 1
  %arr2 = alloca [1024 x i8], align 1
; CHECK: add r1, sp, #1020
; CHECK: adds r1, #4
; CHECK: str r1, [r0]
  store [1024 x i8]* %arr1, [1024 x i8]** %p, align 4
; CHECK: mov r1, sp
; CHECK: str r1, [r0]
  store [1024 x i8]* %arr2, [1024 x i8]** %p, align 4
  ret void
}

; If possible, stack-based ldrb/ldrh are widened to use SP-based addressing
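; (Thumb1 has no sp-relative ldrb/ldrh encodings, so a narrow load would need the stack address moved into a register first; a widened ldr can use [sp, #imm] directly.)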
define i32 @test3() #0 {
  %x = alloca i8, align 1
  %y = alloca i8, align 1
; CHECK: ldr r0, [sp]
  %1 = load i8, i8* %x, align 1
; CHECK: ldr r1, [sp, #4]
  %2 = load i8, i8* %y, align 1
  %3 = add nsw i8 %1, %2
  %4 = zext i8 %3 to i32
  ret i32 %4
}

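; The same widening applies to halfword loads.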
define i32 @test4() #0 {
  %x = alloca i16, align 2
  %y = alloca i16, align 2
; CHECK: ldr r0, [sp]
  %1 = load i16, i16* %x, align 2
; CHECK: ldr r1, [sp, #4]
  %2 = load i16, i16* %y, align 2
  %3 = add nsw i16 %1, %2
  %4 = zext i16 %3 to i32
  ret i32 %4
}

; Don't widen if the value needs to be zero-extended
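; (On a little-endian target a widened word load would pull the neighbouring stack bytes into the upper bits of r0, so the zero-extending ldrb/ldrh is kept even though its base has to be materialized.)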
define zeroext i8 @test5() {
  %x = alloca i8, align 1
; CHECK: mov r0, sp
; CHECK: ldrb r0, [r0]
  %1 = load i8, i8* %x, align 1
  ret i8 %1
}

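; As above, but for a zero-extended halfword load.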
define zeroext i16 @test6() {
  %x = alloca i16, align 2
; CHECK: mov r0, sp
; CHECK: ldrh r0, [r0]
  %1 = load i16, i16* %x, align 2
  ret i16 %1
}

; Accessing the bottom of a large array shouldn't require materializing a base
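; (Elements 1 and 4 sit at sp+4 and sp+16, well inside the reach of an sp-relative str.)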
define void @test7() {
  %arr = alloca [200 x i32], align 4

; CHECK: movs [[REG:r[0-9]+]], #1
; CHECK: str [[REG]], [sp, #4]
  %arrayidx = getelementptr inbounds [200 x i32], [200 x i32]* %arr, i32 0, i32 1
  store i32 1, i32* %arrayidx, align 4

; CHECK: str [[REG]], [sp, #16]
  %arrayidx1 = getelementptr inbounds [200 x i32], [200 x i32]* %arr, i32 0, i32 4
  store i32 1, i32* %arrayidx1, align 4
  ret void
}

; Check that loads/stores with out-of-range offsets are handled correctly
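; (An sp-relative str reaches offsets up to #1020; a register-based str immediate only reaches #124. Anything beyond those limits needs the base materialized in a register.)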
define void @test8() {
  %arr3 = alloca [224 x i32], align 4
  %arr2 = alloca [224 x i32], align 4
  %arr1 = alloca [224 x i32], align 4

; CHECK: movs [[REG:r[0-9]+]], #1
; CHECK: str [[REG]], [sp]
  %arr1idx1 = getelementptr inbounds [224 x i32], [224 x i32]* %arr1, i32 0, i32 0
  store i32 1, i32* %arr1idx1, align 4

; Offset in range for sp-based store, but not for non-sp-based store
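; (#128 exceeds the #124 register-based limit but is still within the #1020 sp-relative limit.)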
; CHECK: str [[REG]], [sp, #128]
  %arr1idx2 = getelementptr inbounds [224 x i32], [224 x i32]* %arr1, i32 0, i32 32
  store i32 1, i32* %arr1idx2, align 4

; CHECK: str [[REG]], [sp, #896]
  %arr2idx1 = getelementptr inbounds [224 x i32], [224 x i32]* %arr2, i32 0, i32 0
  store i32 1, i32* %arr2idx1, align 4

; %arr2 is in range, but this element of it is not
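; (896 + 128 = 1024, just past the #1020 sp-relative limit.)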
; CHECK: str [[REG]], [{{r[0-9]+}}]
  %arr2idx2 = getelementptr inbounds [224 x i32], [224 x i32]* %arr2, i32 0, i32 32
  store i32 1, i32* %arr2idx2, align 4

; %arr3 is not in range
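; (The two 896-byte arrays below it leave %arr3's base at sp+1792.)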
; CHECK: str [[REG]], [{{r[0-9]+}}]
  %arr3idx1 = getelementptr inbounds [224 x i32], [224 x i32]* %arr3, i32 0, i32 0
  store i32 1, i32* %arr3idx1, align 4

; CHECK: str [[REG]], [{{r[0-9]+}}]
  %arr3idx2 = getelementptr inbounds [224 x i32], [224 x i32]* %arr3, i32 0, i32 32
  store i32 1, i32* %arr3idx2, align 4
  ret void
}