; RUN: llc < %s -mcpu=generic -mtriple=x86_64-pc-win32 | FileCheck %s
; Verify that the var arg parameters which are passed in registers are stored
; in home stack slots allocated by the caller and that the va_list pointer
; (%ap) is correctly calculated.
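;
; A rough C equivalent of @average_va (an illustrative sketch, not part of
; the original test):
;
;   void average_va(int count, ...) {
;     va_list ap;
;     va_start(ap, count);
;   }
;
; Since %rcx holds the fixed %count argument, the first variadic argument
; lives in %rdx's home slot, so va_start points %ap at the same 24(%rsp)
; offset that %rdx is spilled to (compare the movq and leaq checks below).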
define void @average_va(i32 %count, ...) nounwind {
entry:
; CHECK: movq %r9, 40(%rsp)
; CHECK: movq %r8, 32(%rsp)
; CHECK: movq %rdx, 24(%rsp)
; CHECK: leaq 24(%rsp), %rax
  %ap = alloca i8*, align 8                       ; <i8**> [#uses=1]
  %ap1 = bitcast i8** %ap to i8*                  ; <i8*> [#uses=1]
  call void @llvm.va_start(i8* %ap1)
  ret void
}

declare void @llvm.va_start(i8*) nounwind
declare void @llvm.va_copy(i8*, i8*) nounwind
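
; @f5, @f4, and @f3 below return their va_list pointer. Each additional fixed
; i64 argument moves the start of the variadic area up by one 8-byte home
; slot, which is what the leaq offsets verify: 40(%rsp) with three fixed
; arguments, 48(%rsp) with four, and 56(%rsp) with five.
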
; CHECK-LABEL: f5:
; CHECK: leaq 56(%rsp),
define i8* @f5(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap1)
  ret i8* %ap1
}

; CHECK-LABEL: f4:
; CHECK: leaq 48(%rsp),
define i8* @f4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap1)
  ret i8* %ap1
}

; CHECK-LABEL: f3:
; CHECK: leaq 40(%rsp),
define i8* @f3(i64 %a0, i64 %a1, i64 %a2, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap1)
  ret i8* %ap1
}

; WinX86_64 uses char* for va_list. Verify that the correct number of bytes
; is copied using va_copy.
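;
; A rough C equivalent of @copy1 (an illustrative sketch, not part of the
; original test):
;
;   void copy1(long long a0, ...) {
;     va_list ap, cp;
;     va_start(ap, a0);
;     va_copy(cp, ap);
;   }
;
; Because va_list is a plain char*, va_copy lowers to a single 8-byte pointer
; store: the same leaq result is written to both the %ap and %cp slots, which
; the paired movq checks below verify.
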
; CHECK-LABEL: copy1:
; CHECK: leaq 32(%rsp), [[REG_copy1:%[a-z]+]]
; CHECK: movq [[REG_copy1]], 8(%rsp)
; CHECK: movq [[REG_copy1]], (%rsp)
define void @copy1(i64 %a0, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %cp = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  %cp1 = bitcast i8** %cp to i8*
  call void @llvm.va_start(i8* %ap1)
  call void @llvm.va_copy(i8* %cp1, i8* %ap1)
  ret void
}

; CHECK-LABEL: copy4:
; CHECK: leaq 56(%rsp), [[REG_copy4:%[a-z]+]]
; CHECK: movq [[REG_copy4]], 8(%rsp)
; CHECK: movq [[REG_copy4]], (%rsp)
define void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %cp = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  %cp1 = bitcast i8** %cp to i8*
  call void @llvm.va_start(i8* %ap1)
  call void @llvm.va_copy(i8* %cp1, i8* %ap1)
  ret void
}

; CHECK-LABEL: arg4:
; va_start:
; CHECK: leaq 48(%rsp), [[REG_arg4_1:%[a-z]+]]
; CHECK: movq [[REG_arg4_1]], (%rsp)
; va_arg:
; CHECK: leaq 52(%rsp), [[REG_arg4_2:%[a-z]+]]
; CHECK: movq [[REG_arg4_2]], (%rsp)
; CHECK: movl 48(%rsp), %eax
define i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap1)
  %tmp = va_arg i8** %ap, i32
  ret i32 %tmp
}

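; @sret_arg receives the sret pointer in %rcx and %format in %rdx, so only
; %r8 and %r9 carry register-passed variadic arguments that need home-slot
; spills. The first va_arg value is therefore loaded from %r8's slot at
; 32(%rsp), and the sret pointer comes back in %rax, as the checks after the
; function show.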
define void @sret_arg(i32* sret %agg.result, i8* nocapture readnone %format, ...) {
entry:
  %ap = alloca i8*, align 8
  %ap_i8 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap_i8)
  %tmp = va_arg i8** %ap, i32
  store i32 %tmp, i32* %agg.result
  ret void
}
; CHECK-LABEL: sret_arg:
; CHECK-DAG: movq %r9, 40(%rsp)
; CHECK-DAG: movq %r8, 32(%rsp)
; CHECK: movl 32(%rsp), %[[tmp:[^ ]*]]
; CHECK: movl %[[tmp]], (%[[sret:[^ ]*]])
; CHECK: movq %[[sret]], %rax