return false;
// Do not sibcall optimize vararg calls unless all arguments are passed via
- // registers
+ // registers.
if (isVarArg && !Outs.empty()) {
+
+ // Optimizing for varargs on Win64 is unlikely to be safe without
+ // additional testing.
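+ // (A plausible reason: the Win64 convention reserves a 32-byte shadow
+ // area on the caller's stack, and a vararg callee may home its register
+ // arguments into that area, so even a call whose arguments all travel in
+ // registers can end up touching the caller's frame.)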
+ if (Subtarget->isTargetWin64())
+ return false;
+
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
- // Allocate shadow area for Win64
- if (Subtarget->isTargetWin64()) {
- CCInfo.AllocateStack(32, 8);
- }
-
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
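// CC_X86 (generated from X86CallingConv.td) assigns each outgoing operand
// either a register or a stack slot. A stack-assigned operand would have to
// be stored into the caller's own frame before the jump, so a single
// memory location in ArgLocs is enough to rule out the sibcall.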
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
if (!ArgLocs[i].isRegLoc())
        return false;
  }
; X64: @foo
; X64: jmp
; WIN64: @foo
-; WIN64: jmp
+; WIN64: callq
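; On x86-64 SysV the single i64 argument travels in a register, so the
; vararg tail call to printf below can still be emitted as a sibcall (jmp).
; With this change Win64 never sibcall-optimizes vararg calls, hence the
; WIN64 run now expects a plain callq.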
define void @foo(i64 %arg) nounwind optsize ssp noredzone {
entry:
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i64 0, i64 0), i64 %arg) nounwind optsize noredzone
  ret void
}
; X64: @foo2
; X64: jmp
; WIN64: @foo2
-; WIN64: jmp
+; WIN64: callq
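; Same expectation for @foo2: its vararg tail call remains a jmp on X64 but
; becomes a callq on WIN64 now that vararg sibcalls are disabled there.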
define i8* @foo2(i8* %arg) nounwind optsize ssp noredzone {
entry:
%tmp1 = load i8** @sel, align 8, !tbaa !0