// Look for obvious safe cases to perform tail call optimization that does not
// require ABI changes. This is what gcc calls sibcall.
- // Do not sibcall optimize vararg calls for now.
- if (isVarArg)
+ // Do not sibcall optimize vararg calls unless the call site passes no
+ // arguments.
+ if (isVarArg && !Outs.empty())
return false;
// Also avoid sibcall optimization if either caller or callee uses struct
; LINUX: .LJTI8_0:
; LINUX: .long .LBB8_2@GOTOFF
-; LINUX: .long .LBB8_2@GOTOFF
-; LINUX: .long .LBB8_7@GOTOFF
-; LINUX: .long .LBB8_3@GOTOFF
-; LINUX: .long .LBB8_7@GOTOFF
+; LINUX: .long .LBB8_8@GOTOFF
+; LINUX: .long .LBB8_14@GOTOFF
+; LINUX: .long .LBB8_9@GOTOFF
+; LINUX: .long .LBB8_10@GOTOFF
}
declare void @foo1(...)
}
declare double @bar4()
+
+; rdar://6283267
+define void @t17() nounwind ssp {
+entry:
+; 32: t17:
+; 32: jmp {{_?}}bar5
+
+; 64: t17:
+; 64: xorb %al, %al
+; 64: jmp {{_?}}bar5
+ tail call void (...)* @bar5() nounwind
+ ret void
+}
+
+declare void @bar5(...)
+
+; rdar://7774847
+define void @t18() nounwind ssp {
+entry:
+; 32: t18:
+; 32: call {{_?}}bar6
+; 32: fstp %st(0)
+
+; 64: t18:
+; 64: xorb %al, %al
+; 64: jmp {{_?}}bar6
+ %0 = tail call double (...)* @bar6() nounwind
+ ret void
+}
+
+declare double @bar6(...)
; X32: cmpl
; X32: sete
; X32-NOT: xor
-; X32: je
+; X32: jne
; X64: t2:
; X64: testl
; X64: testl
; X64: sete
; X64-NOT: xor
-; X64: je
+; X64: jne
entry:
%0 = icmp eq i32 %x, 0 ; <i1> [#uses=1]
%1 = icmp eq i32 %y, 0 ; <i1> [#uses=1]