return true;
// Look for obvious safe cases to perform tail call optimization.
- // For now, only consider callees which take no arguments.
- if (!Outs.empty())
- return false;
+  // If the callee takes no arguments, then go on to check the results of the
+  // call.
+  if (!Outs.empty()) {
+    // Check if stack adjustment is needed. For now, do not do this if any
+    // argument is passed on the stack.
+    SmallVector<CCValAssign, 16> ArgLocs;
+    CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
+                   ArgLocs, *DAG.getContext());
+    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
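+    // After AnalyzeCallOperands, getNextStackOffset() is the number of bytes
+    // of stack the outgoing arguments would occupy; a non-zero value means at
+    // least one argument is not passed in a register.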
+    if (CCInfo.getNextStackOffset())
+      return false;
+  }
// If the caller does not return a value, then this is obviously safe.
// This is one case where it's safe to perform this optimization even
entry:
; CHECK: leal 15(%rsi), %edi
; CHECK-NOT: movl
-; CHECK: callq _foo
+; CHECK: jmp _foo
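+; The leal materializes the argument directly in %edi, so no extra movl is
+; needed and the register-only call can now be emitted as a sibling-call jmp.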
%0 = add i32 %a, 15 ; <i32> [#uses=1]
%1 = zext i32 %0 to i64 ; <i64> [#uses=1]
tail call void @foo(i64 %1) nounwind
-; RUN: llc < %s -march=x86 -asm-verbose=false | FileCheck %s
-; RUN: llc < %s -march=x86-64 -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -march=x86 -asm-verbose=false | FileCheck %s -check-prefix=32
+; RUN: llc < %s -march=x86-64 -asm-verbose=false | FileCheck %s -check-prefix=64
define void @t1(i32 %x) nounwind ssp {
entry:
-; CHECK: t1:
-; CHECK: jmp {{_?}}foo
+; 32: t1:
+; 32: jmp {{_?}}foo
+
+; 64: t1:
+; 64: jmp {{_?}}foo
tail call void @foo() nounwind
ret void
}
define void @t2() nounwind ssp {
entry:
-; CHECK: t2:
-; CHECK: jmp {{_?}}foo2
+; 32: t2:
+; 32: jmp {{_?}}foo2
+
+; 64: t2:
+; 64: jmp {{_?}}foo2
%0 = tail call i32 @foo2() nounwind
ret void
}
define void @t3() nounwind ssp {
entry:
-; CHECK: t3:
-; CHECK: jmp {{_?}}foo3
+; 32: t3:
+; 32: jmp {{_?}}foo3
+
+; 64: t3:
+; 64: jmp {{_?}}foo3
%0 = tail call i32 @foo3() nounwind
ret void
}
declare i32 @foo3()
+
+define void @t4(void (i32)* nocapture %x) nounwind ssp {
+entry:
+; 32: t4:
+; 32: call *
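+; On x86-32 the i32 argument would be passed on the stack, so the new
+; getNextStackOffset() check rejects the tail call and a normal call remains.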
+; FIXME: gcc can generate a tail call for this, but it's tricky.
+
+; 64: t4:
+; 64-NOT: call
+; 64: jmpq *
+ tail call void %x(i32 0) nounwind
+ ret void
+}
+
+define void @t5(void ()* nocapture %x) nounwind ssp {
+entry:
+; 32: t5:
+; 32-NOT: call
+; 32: jmpl *
+
+; 64: t5:
+; 64-NOT: call
+; 64: jmpq *
+ tail call void %x() nounwind
+ ret void
+}
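+
+; A possible companion test (a sketch, not part of the original patch; @bar is
+; a hypothetical callee): a direct call whose only argument fits in a register
+; should likewise become a tail call on x86-64, since getNextStackOffset()
+; stays zero.
+define void @t6(i32 %x) nounwind ssp {
+entry:
+; 64: t6:
+; 64: jmp {{_?}}bar
+  tail call void @bar(i32 %x) nounwind
+  ret void
+}
+
+declare void @bar(i32)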