1 ; RUN: llc < %s -march=x86-64 -mattr=+avx -mtriple=i686-apple-darwin10 | FileCheck %s
2 ; RUN: llc < %s -force-align-stack -stack-alignment=32 -march=x86-64 -mattr=+avx -mtriple=i686-apple-darwin10 | FileCheck %s -check-prefix=FORCE-ALIGN
5 ; no VLAs or dynamic alignment
; t1: a single 4-byte static alloca needs no stack realignment.
; The CHECK-NOT below asserts that no `andq $-N, %rsp` realignment is
; emitted, and that the local is addressed directly off %rsp (the same
; [[OFFSET]] is used for both the leaq argument setup and the reload).
6 define i32 @t1() nounwind uwtable ssp {
8 %a = alloca i32, align 4
9 call void @t1_helper(i32* %a) nounwind
10 %0 = load i32* %a, align 4
11 %add = add nsw i32 %0, 13
15 ; CHECK-NOT: andq $-{{[0-9]+}}, %rsp
16 ; CHECK: leaq [[OFFSET:[0-9]*]](%rsp), %rdi
17 ; CHECK: callq _t1_helper
18 ; CHECK: movl [[OFFSET]](%rsp), %eax
19 ; CHECK: addl $13, %eax
22 declare void @t1_helper(i32*)
; t2: a 32-byte-aligned <8 x float> alloca (align 32 exceeds the ABI
; stack alignment) forces dynamic realignment: set up a frame pointer,
; `andq $-32, %rsp`, then a fixed-size subq. No VLA is present, so
; locals are still addressed off %rsp, and the epilogue restores %rsp
; from %rbp.
25 define i32 @t2() nounwind uwtable ssp {
27 %a = alloca i32, align 4
28 %v = alloca <8 x float>, align 32
29 call void @t2_helper(i32* %a, <8 x float>* %v) nounwind
30 %0 = load i32* %a, align 4
31 %add = add nsw i32 %0, 13
36 ; CHECK: movq %rsp, %rbp
37 ; CHECK: andq $-32, %rsp
38 ; CHECK: subq ${{[0-9]+}}, %rsp
40 ; CHECK: leaq {{[0-9]*}}(%rsp), %rdi
41 ; CHECK: leaq {{[0-9]*}}(%rsp), %rsi
42 ; CHECK: callq _t2_helper
44 ; CHECK: movq %rbp, %rsp
48 declare void @t2_helper(i32*, <8 x float>*)
; t3: a VLA with only 16-byte alignment requires a frame pointer (so
; the dynamic allocation can be unwound via `leaq -N(%rbp), %rsp`) but
; no over-aligned object exists, so the CHECK-NOT asserts that no
; `andq` realignment is emitted.
51 define i32 @t3(i64 %sz) nounwind uwtable ssp {
53 %a = alloca i32, align 4
54 %vla = alloca i32, i64 %sz, align 16
55 call void @t3_helper(i32* %a, i32* %vla) nounwind
56 %0 = load i32* %a, align 4
57 %add = add nsw i32 %0, 13
62 ; CHECK: movq %rsp, %rbp
64 ; CHECK-NOT: andq $-{{[0-9]+}}, %rsp
65 ; CHECK: subq ${{[0-9]+}}, %rsp
67 ; CHECK: leaq -{{[0-9]+}}(%rbp), %rsp
72 declare void @t3_helper(i32*, i32*)
74 ; VLAs + Dynamic realignment
; t4: combining a VLA with a 32-byte-aligned object needs all three
; pointers: %rbp (frame pointer for unwind), realigned %rsp, and a
; base pointer %rbx saved right after the fixed-size allocation so the
; fixed locals stay addressable while %rsp moves for the VLA. The
; [[STACKADJ]] capture verifies that the epilogue undoes exactly the
; prologue's fixed subq before restoring from %rbp.
75 define i32 @t4(i64 %sz) nounwind uwtable ssp {
77 %a = alloca i32, align 4
78 %v = alloca <8 x float>, align 32
79 %vla = alloca i32, i64 %sz, align 16
80 call void @t4_helper(i32* %a, i32* %vla, <8 x float>* %v) nounwind
81 %0 = load i32* %a, align 4
82 %add = add nsw i32 %0, 13
87 ; CHECK: movq %rsp, %rbp
88 ; CHECK: andq $-32, %rsp
91 ; CHECK: subq $[[STACKADJ:[0-9]+]], %rsp
92 ; CHECK: movq %rsp, %rbx
94 ; CHECK: leaq {{[0-9]*}}(%rbx), %rdi
95 ; CHECK: leaq {{[0-9]*}}(%rbx), %rdx
96 ; CHECK: callq _t4_helper
98 ; CHECK: addq $[[STACKADJ]], %rsp
101 ; CHECK: movq %rbp, %rsp
105 declare void @t4_helper(i32*, i32*, <8 x float>*)
107 ; Dynamic realignment + Spill
; t5: the 32-byte loaded vector %1 is live across the t5_helper1 call,
; so it must be spilled. The CHECKs verify the spill slot (%rsp) on the
; realigned stack is used with the *aligned* vmovaps both for the store
; and the reload into %ymm0 before the vector-argument call — i.e. the
; realignment makes a 32-byte-aligned spill slot legal.
108 define i32 @t5(float* nocapture %f) nounwind uwtable ssp {
110 %a = alloca i32, align 4
111 %0 = bitcast float* %f to <8 x float>*
112 %1 = load <8 x float>* %0, align 32
113 call void @t5_helper1(i32* %a) nounwind
114 call void @t5_helper2(<8 x float> %1) nounwind
115 %2 = load i32* %a, align 4
116 %add = add nsw i32 %2, 13
121 ; CHECK: movq %rsp, %rbp
122 ; CHECK: andq $-32, %rsp
123 ; CHECK: subq ${{[0-9]+}}, %rsp
125 ; CHECK: vmovaps (%rdi), [[AVXREG:%ymm[0-9]+]]
126 ; CHECK: vmovaps [[AVXREG]], (%rsp)
127 ; CHECK: leaq {{[0-9]+}}(%rsp), %rdi
128 ; CHECK: callq _t5_helper1
129 ; CHECK: vmovaps (%rsp), %ymm0
130 ; CHECK: callq _t5_helper2
131 ; CHECK: movl {{[0-9]+}}(%rsp), %eax
133 ; CHECK: movq %rbp, %rsp
137 declare void @t5_helper1(i32*)
139 declare void @t5_helper2(<8 x float>)
141 ; VLAs + Dynamic realignment + Spill
142 ; FIXME: RA has already reserved RBX, so we can't do dynamic realignment.
; t6: same shape as t5 plus a VLA, so both a spill and a base pointer
; would be needed. Note there are intentionally no CHECK lines for this
; function (see the FIXME above) — it only asserts that compilation
; succeeds for this combination.
143 define i32 @t6(i64 %sz, float* nocapture %f) nounwind uwtable {
146 %a = alloca i32, align 4
147 %0 = bitcast float* %f to <8 x float>*
148 %1 = load <8 x float>* %0, align 32
149 %vla = alloca i32, i64 %sz, align 16
150 call void @t6_helper1(i32* %a, i32* %vla) nounwind
151 call void @t6_helper2(<8 x float> %1) nounwind
152 %2 = load i32* %a, align 4
153 %add = add nsw i32 %2, 13
157 declare void @t6_helper1(i32*, i32*)
159 declare void @t6_helper2(<8 x float>)
161 ; VLAs + Dynamic realignment + byval
162 ; The byval adjust the sp after the prolog, but if we're restoring the sp from
163 ; the base pointer we use the original adjustment.
; t7: the byval argument to @bar is materialized by a call-site subq on
; %rsp after the prologue. The CHECK-NOT asserts no matching addq is
; emitted to undo it; instead %rsp is restored from the base pointer
; %rbx and then the original [[ADJ]] prologue adjustment is re-applied,
; before the final restore from %rbp.
164 %struct.struct_t = type { [5 x i32] }
166 define void @t7(i32 %size, %struct.struct_t* byval align 8 %arg1) nounwind uwtable {
168 %x = alloca i32, align 32
169 store i32 0, i32* %x, align 32
170 %0 = zext i32 %size to i64
171 %vla = alloca i32, i64 %0, align 16
172 %1 = load i32* %x, align 32
173 call void @bar(i32 %1, i32* %vla, %struct.struct_t* byval align 8 %arg1)
178 ; CHECK: movq %rsp, %rbp
179 ; CHECK: andq $-32, %rsp
181 ; CHECK: subq $[[ADJ:[0-9]+]], %rsp
182 ; CHECK: movq %rsp, %rbx
184 ; Stack adjustment for byval
185 ; CHECK: subq {{.*}}, %rsp
187 ; CHECK-NOT: addq {{.*}}, %rsp
188 ; CHECK: movq %rbx, %rsp
189 ; CHECK: addq $[[ADJ]], %rsp
191 ; CHECK: movq %rbp, %rsp
195 declare i8* @llvm.stacksave() nounwind
197 declare void @bar(i32, i32*, %struct.struct_t* byval align 8)
199 declare void @llvm.stackrestore(i8*) nounwind
202 ; Test when forcing stack alignment
; t8: same IR as t1 (static alloca only), but checked under the second
; RUN line (-force-align-stack -stack-alignment=32, FORCE-ALIGN
; prefix): even with no over-aligned object the prologue must realign
; with `andq $-32, %rsp`, immediately followed (-NEXT) by the fixed
; subq, and the epilogue restores %rsp from %rbp.
203 define i32 @t8() nounwind uwtable {
205 %a = alloca i32, align 4
206 call void @t1_helper(i32* %a) nounwind
207 %0 = load i32* %a, align 4
208 %add = add nsw i32 %0, 13
212 ; FORCE-ALIGN: movq %rsp, %rbp
213 ; FORCE-ALIGN: andq $-32, %rsp
214 ; FORCE-ALIGN-NEXT: subq $32, %rsp
215 ; FORCE-ALIGN: movq %rbp, %rsp
216 ; FORCE-ALIGN: popq %rbp
; t9: same IR as t3 (VLA, no over-aligned object), checked under the
; forced-alignment RUN line. Forcing 32-byte alignment together with a
; VLA requires the full prologue sequence: push/copy %rbp, `andq $-32`,
; save %rbx, fixed subq, then %rbx as base pointer; the epilogue undoes
; each step in reverse order (restore %rsp from %rbx, addq the fixed
; amount, pop %rbx, restore %rsp from %rbp, pop %rbp).
220 define i32 @t9(i64 %sz) nounwind uwtable {
222 %a = alloca i32, align 4
223 %vla = alloca i32, i64 %sz, align 16
224 call void @t3_helper(i32* %a, i32* %vla) nounwind
225 %0 = load i32* %a, align 4
226 %add = add nsw i32 %0, 13
230 ; FORCE-ALIGN: pushq %rbp
231 ; FORCE-ALIGN: movq %rsp, %rbp
232 ; FORCE-ALIGN: andq $-32, %rsp
233 ; FORCE-ALIGN: pushq %rbx
234 ; FORCE-ALIGN: subq $24, %rsp
235 ; FORCE-ALIGN: movq %rsp, %rbx
237 ; FORCE-ALIGN: movq %rbx, %rsp
238 ; FORCE-ALIGN: addq $24, %rsp
239 ; FORCE-ALIGN: popq %rbx
240 ; FORCE-ALIGN: movq %rbp, %rsp
241 ; FORCE-ALIGN: popq %rbp