-; RUN: llc < %s -march=x86-64 -mattr=+avx -mtriple=i686-apple-darwin10 | FileCheck %s
-; RUN: llc < %s -force-align-stack -stack-alignment=32 -march=x86-64 -mattr=+avx -mtriple=i686-apple-darwin10 | FileCheck %s -check-prefix=FORCE-ALIGN
+; RUN: llc < %s -mcpu=generic -march=x86-64 -mattr=+avx -mtriple=i686-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mcpu=generic -stackrealign -stack-alignment=32 -march=x86-64 -mattr=+avx -mtriple=i686-apple-darwin10 | FileCheck %s -check-prefix=FORCE-ALIGN
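;
; The second RUN line forces realignment of every frame: -stackrealign (the
; current spelling of the old -force-align-stack flag) realigns each
; function's stack to the 32 bytes requested by -stack-alignment, and its
; output is matched with the FORCE-ALIGN check prefix. -mcpu=generic keeps
; the generated instruction sequences independent of the host CPU, so the
; CHECK lines below stay stable.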
; rdar://11496434
; no VLAs or dynamic alignment
define i32 @t1() nounwind uwtable ssp {
entry:
%a = alloca i32, align 4
call void @t1_helper(i32* %a) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
}

declare void @t1_helper(i32*)

; dynamic realignment: the 32-byte aligned alloca forces realignment
define i32 @t2() nounwind uwtable ssp {
entry:
%a = alloca i32, align 4
%v = alloca <8 x float>, align 32
call void @t2_helper(i32* %a, <8 x float>* %v) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
}

declare void @t2_helper(i32*, <8 x float>*)

; VLA, no dynamic realignment
define i32 @t3(i64 %sz) nounwind uwtable ssp {
entry:
%a = alloca i32, align 4
%vla = alloca i32, i64 %sz, align 16
call void @t3_helper(i32* %a, i32* %vla) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
}

declare void @t3_helper(i32*, i32*)

; dynamic realignment + VLA
define i32 @t4(i64 %sz) nounwind uwtable ssp {
entry:
  %a = alloca i32, align 4
%v = alloca <8 x float>, align 32
%vla = alloca i32, i64 %sz, align 16
call void @t4_helper(i32* %a, i32* %vla, <8 x float>* %v) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
}

declare void @t4_helper(i32*, i32*, <8 x float>*)
-; Dynamic realignment + Spill
+; Spilling an AVX register shouldn't cause dynamic realignment
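; Without dynamic realignment the spill slot for the <8 x float> value is
; only guaranteed the ABI's 16-byte stack alignment, so the spill and reload
; are expected to use unaligned vmovups instead of vmovaps, and no
; "andq $-32, %rsp" realignment sequence should appear in the prologue
; (see the CHECK lines below).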
define i32 @t5(float* nocapture %f) nounwind uwtable ssp {
entry:
%a = alloca i32, align 4
%0 = bitcast float* %f to <8 x float>*
- %1 = load <8 x float>* %0, align 32
+ %1 = load <8 x float>, <8 x float>* %0, align 32
call void @t5_helper1(i32* %a) nounwind
call void @t5_helper2(<8 x float> %1) nounwind
- %2 = load i32* %a, align 4
+ %2 = load i32, i32* %a, align 4
%add = add nsw i32 %2, 13
ret i32 %add
; CHECK: _t5
-; CHECK: pushq %rbp
-; CHECK: movq %rsp, %rbp
-; CHECK: andq $-32, %rsp
; CHECK: subq ${{[0-9]+}}, %rsp
;
; CHECK: vmovaps (%rdi), [[AVXREG:%ymm[0-9]+]]
-; CHECK: vmovaps [[AVXREG]], (%rsp)
+; CHECK: vmovups [[AVXREG]], (%rsp)
; CHECK: leaq {{[0-9]+}}(%rsp), %rdi
; CHECK: callq _t5_helper1
-; CHECK: vmovaps (%rsp), %ymm0
+; CHECK: vmovups (%rsp), %ymm0
; CHECK: callq _t5_helper2
; CHECK: movl {{[0-9]+}}(%rsp), %eax
-;
-; CHECK: movq %rbp, %rsp
-; CHECK: popq %rbp
}
declare void @t5_helper1(i32*)
declare void @t5_helper2(<8 x float>)

; VLA + spill of an AVX register across calls
define i32 @t6(i64 %sz, float* nocapture %f) nounwind uwtable ssp {
entry:
; CHECK: _t6
%a = alloca i32, align 4
%0 = bitcast float* %f to <8 x float>*
- %1 = load <8 x float>* %0, align 32
+ %1 = load <8 x float>, <8 x float>* %0, align 32
%vla = alloca i32, i64 %sz, align 16
call void @t6_helper1(i32* %a, i32* %vla) nounwind
call void @t6_helper2(<8 x float> %1) nounwind
- %2 = load i32* %a, align 4
+ %2 = load i32, i32* %a, align 4
%add = add nsw i32 %2, 13
ret i32 %add
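;
; Note: the VLA makes the frame size dynamic, so locals cannot live at fixed
; offsets from %rsp and a frame pointer is needed; the <8 x float> value live
; across the helper calls is what could additionally require 32-byte
; realignment, as in @t5 above.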
}

declare void @t6_helper1(i32*, i32*)
declare void @t6_helper2(<8 x float>)

; byval argument + VLA under forced stack alignment (FORCE-ALIGN run)
define void @t7(i32 %size, %struct.struct_t* byval align 8 %arg1) nounwind uwtable {
entry:
  %x = alloca i32, align 32
store i32 0, i32* %x, align 32
%0 = zext i32 %size to i64
%vla = alloca i32, i64 %0, align 16
- %1 = load i32* %x, align 32
+ %1 = load i32, i32* %x, align 32
call void @bar(i32 %1, i32* %vla, %struct.struct_t* byval align 8 %arg1)
  ret void
}

declare void @bar(i32, i32*, %struct.struct_t* byval align 8)
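
; Under the second RUN line, -stackrealign with -stack-alignment=32 is
; expected to realign even this frame to 32 bytes (matched with the
; FORCE-ALIGN prefix); the byval copy of %arg1 and the VLA are then laid
; out against the realigned stack.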