1 ; RUN: llc -mtriple=x86_64-apple-macosx -mcpu=core2 < %s | FileCheck %s
5 define i64 @test_trivial() {
6 %A = tail call i64 @testi()
10 ; CHECK: jmp _testi ## TAILCALL
; A no-op bitcast of the call result must not block tail-call formation.
define i64 @test_noop_bitcast() {
  %A = tail call i64 @testi()
  %B = bitcast i64 %A to i64
  ret i64 %B
}
; CHECK: test_noop_bitcast:
; CHECK: jmp _testi ## TAILCALL
; Tail call shouldn't be blocked by no-op inttoptr.
define i8* @test_inttoptr() {
  %A = tail call i64 @testi()
  %B = inttoptr i64 %A to i8*
  ret i8* %B
}
; CHECK: test_inttoptr:
; CHECK: jmp _testi ## TAILCALL
declare <4 x float> @testv()

; A no-op vector bitcast of the returned value must not block the tail call.
define <4 x i32> @test_vectorbitcast() {
  %A = tail call <4 x float> @testv()
  %B = bitcast <4 x float> %A to <4 x i32>
  ret <4 x i32> %B
}
; CHECK: test_vectorbitcast:
; CHECK: jmp _testv ## TAILCALL
declare { i64, i64 } @testp()

; Returning a two-register aggregate straight through must tail call.
define { i64, i64 } @test_pair_trivial() {
  %A = tail call { i64, i64 } @testp()
  ret { i64, i64 } %A
}
; CHECK: test_pair_trivial:
; CHECK: jmp _testp ## TAILCALL
; Extracting both members and re-inserting them in the same order is a no-op
; reshuffle of the aggregate, so the tail call must still be formed.
define { i64, i64 } @test_pair_trivial_extract() {
  %A = tail call { i64, i64 } @testp()
  %x = extractvalue { i64, i64 } %A, 0
  %y = extractvalue { i64, i64 } %A, 1

  %b = insertvalue { i64, i64 } undef, i64 %x, 0
  %c = insertvalue { i64, i64 } %b, i64 %y, 1

  ret { i64, i64 } %c
}
; CHECK: test_pair_trivial_extract:
; CHECK: jmp _testp ## TAILCALL
; Same as test_pair_trivial_extract, but with a no-op inttoptr on the first
; member; the conversion must not block the tail call.
define { i8*, i64 } @test_pair_conv_extract() {
  %A = tail call { i64, i64 } @testp()
  %x = extractvalue { i64, i64 } %A, 0
  %y = extractvalue { i64, i64 } %A, 1

  %x1 = inttoptr i64 %x to i8*

  %b = insertvalue { i8*, i64 } undef, i8* %x1, 0
  %c = insertvalue { i8*, i64 } %b, i64 %y, 1

  ret { i8*, i64 } %c
}
; CHECK: test_pair_conv_extract:
; CHECK: jmp _testp ## TAILCALL
; Overwriting one member of the returned aggregate with undef used to crash
; the backend; this only checks that compilation succeeds.
define { i64, i64 } @crash(i8* %this) {
  %c = tail call { i64, i64 } @testp()
  %mrv7 = insertvalue { i64, i64 } %c, i64 undef, 1
  ret { i64, i64 } %mrv7
}
; Check that we can fold an indexed load into a tail call instruction.
; CHECK: fold_indexed_load
; CHECK: leaq (%rsi,%rsi,4), %[[RAX:r..]]
; CHECK: jmpq *16(%{{r..}},%[[RAX]],8) # TAILCALL
%struct.funcs = type { i32 (i8*, i32*, i32)*, i32 (i8*)*, i32 (i8*)*, i32 (i8*, i32)*, i32 }
@func_table = external global [0 x %struct.funcs]
define void @fold_indexed_load(i8* %mbstr, i64 %idxprom) nounwind uwtable ssp {
entry:
  %dsplen = getelementptr inbounds [0 x %struct.funcs]* @func_table, i64 0, i64 %idxprom, i32 2
  %x1 = load i32 (i8*)** %dsplen, align 8
  %call = tail call i32 %x1(i8* %mbstr) nounwind
  ret void
}
; <rdar://problem/12282281> Fold an indexed load into the tail call instruction.
; Calling a varargs function with 6 arguments requires 7 registers (%al is the
; vector count for varargs functions). This leaves %r11 as the only available
; scratch register.
;
; It is not possible to fold an indexed load into TCRETURNmi64 in that case.
;
; typedef int (*funcptr)(void*, ...);
; extern const funcptr funcs[];
; int rdar12282281(int n) {
;   return funcs[n](0, 0, 0, 0, 0, 0);
; }
;
; CHECK: rdar12282281
; CHECK: jmpq *%r11 # TAILCALL
@funcs = external constant [0 x i32 (i8*, ...)*]

define i32 @rdar12282281(i32 %n) nounwind uwtable ssp {
entry:
  %idxprom = sext i32 %n to i64
  %arrayidx = getelementptr inbounds [0 x i32 (i8*, ...)*]* @funcs, i64 0, i64 %idxprom
  %0 = load i32 (i8*, ...)** %arrayidx, align 8
  %call = tail call i32 (i8*, ...)* %0(i8* null, i32 0, i32 0, i32 0, i32 0, i32 0) nounwind
  ret i32 %call
}
; An x86_fp80 value returned straight through must still tail call.
define x86_fp80 @fp80_call(x86_fp80 %x) nounwind {
; CHECK: fp80_call:
; CHECK: jmp _fp80_callee
  %call = tail call x86_fp80 @fp80_callee(x86_fp80 %x) nounwind
  ret x86_fp80 %call
}

declare x86_fp80 @fp80_callee(x86_fp80)
; The fptrunc/fpext around the call change the returned type, so this call
; must NOT be lowered as a tail call.
define x86_fp80 @trunc_fp80(x86_fp80 %x) nounwind {
; CHECK: trunc_fp80
; CHECK: callq _trunc
; CHECK-NOT: jmp _trunc
  %conv = fptrunc x86_fp80 %x to double
  %call = tail call double @trunc(double %conv) nounwind readnone
  %conv1 = fpext double %call to x86_fp80
  ret x86_fp80 %conv1
}

declare double @trunc(double) nounwind readnone