1 //===- ARM64CallingConv.td - Calling Conventions for ARM64 -*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This describes the calling conventions for ARM64 architecture.
12 //===----------------------------------------------------------------------===//
14 /// CCIfAlign - Match of the original alignment of the arg
// Expands to a CCIf whose C++ predicate string compares the argument's
// original alignment (ArgFlags.getOrigAlign()) against the textual Align
// value spliced in via !strconcat; applies action A on a match.
15 class CCIfAlign<string Align, CCAction A> :
16 CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
18 //===----------------------------------------------------------------------===//
19 // ARM AAPCS64 Calling Convention
20 //===----------------------------------------------------------------------===//
22 def CC_ARM64_AAPCS : CallingConv<[
// Canonicalize small FP vectors to same-width integer vectors so the
// integer-vector rules below cover them.
23 CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
24 CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
26 // An SRet is passed in X8, not X0 like a normal pointer parameter.
27 CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,
29 // Put ByVal arguments directly on the stack. Minimum size and alignment of a
// slot is 64-bit (hence CCPassByVal<8, 8>: 8-byte size, 8-byte alignment).
31 CCIfByVal<CCPassByVal<8, 8>>,
33 // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
34 // up to eight each of GPR and FPR.
35 CCIfType<[i1, i8, i16], CCCustom<"CC_ARM64_Custom_i1i8i16_Reg">>,
36 CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
37 [X0, X1, X2, X3, X4, X5, X6, X7]>>,
38 // i128 is split to two i64s, we can't fit half to register X7.
// Only even-numbered X registers are candidates for the split pair's start
// (contrast with CC_ARM64_DarwinPCS below, whose header notes that split
// i64s "don't need even registers" there).
// NOTE(review): the shadow-register list and closing of this rule appear to
// be missing from this listing — confirm against the full file.
39 CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
42 // i128 is split to two i64s, and its stack alignment is 16 bytes.
43 CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,
45 CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
46 [W0, W1, W2, W3, W4, W5, W6, W7]>>,
// FP scalars take S/D registers while shadowing the overlapping Q register
// so it is not handed out again by the vector rules below.
47 CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
48 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
49 CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
50 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
51 CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
52 CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
53 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
54 CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
55 CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
57 // If more than will fit in registers, pass them on the stack instead.
// Stack slots are at least 64-bit here (even for sub-32-bit types);
// 128-bit types get 16-byte slots with 16-byte alignment.
58 CCIfType<[i1, i8, i16], CCAssignToStack<8, 8>>,
59 CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
60 CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8],
61 CCAssignToStack<8, 8>>,
62 CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
63 CCAssignToStack<16, 16>>
// Return-value convention for AAPCS64: mirrors the argument rules above but
// with no stack fallback — returns use only the eight GPR/FPR pairs.
// NOTE(review): the closing "]>;" of this def appears to be missing from
// this listing.
66 def RetCC_ARM64_AAPCS : CallingConv<[
// Same small-FP-vector canonicalization as CC_ARM64_AAPCS.
67 CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
68 CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
70 CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
71 [X0, X1, X2, X3, X4, X5, X6, X7]>>,
72 CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
73 [W0, W1, W2, W3, W4, W5, W6, W7]>>,
// FP/vector returns shadow the overlapping Q register, as in the argument
// convention above.
74 CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
75 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
76 CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
77 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
78 CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
79 CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
80 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
81 CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
82 CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
86 // Darwin uses a calling convention which differs in only two ways
87 // from the standard one at this level:
88 // + i128s (i.e. split i64s) don't need even registers.
89 // + Stack slots are sized as needed rather than being at least 64-bit.
90 def CC_ARM64_DarwinPCS : CallingConv<[
// Note: unlike the AAPCS variant, f128 is also bit-converted to v2i64 here.
91 CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
92 CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,
94 // An SRet is passed in X8, not X0 like a normal pointer parameter.
95 CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,
97 // Put ByVal arguments directly on the stack. Minimum size and alignment of a
// slot is 64-bit (hence CCPassByVal<8, 8>: 8-byte size, 8-byte alignment).
99 CCIfByVal<CCPassByVal<8, 8>>,
101 // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
102 // up to eight each of GPR and FPR.
103 CCIfType<[i1, i8, i16], CCCustom<"CC_ARM64_Custom_i1i8i16_Reg">>,
104 CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
105 [X0, X1, X2, X3, X4, X5, X6, X7]>>,
106 // i128 is split to two i64s, we can't fit half to register X7.
// Per the header comment above, split i128s do NOT need to start in an even
// register on Darwin, so X0..X6 are all candidates.
// NOTE(review): the "CCIfType<[i64]," opening of this rule appears to be
// missing from this listing — confirm against the full file.
108 CCIfSplit<CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6],
109 [W0, W1, W2, W3, W4, W5, W6]>>>,
110 // i128 is split to two i64s, and its stack alignment is 16 bytes.
111 CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,
113 CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
114 [W0, W1, W2, W3, W4, W5, W6, W7]>>,
115 CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
116 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
117 CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
118 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
119 CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
120 CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
121 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
122 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
123 CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
125 // If more than will fit in registers, pass them on the stack instead.
// Darwin difference: stack slots are sized as needed (4 bytes for i32/f32,
// small ints via the custom handler) rather than a 64-bit minimum.
126 CCIfType<[i1, i8, i16], CCCustom<"CC_ARM64_Custom_i1i8i16_Stack">>,
127 CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
128 CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8],
129 CCAssignToStack<8, 8>>,
130 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
// Darwin variadic-tail convention: anonymous (variadic) arguments are never
// passed in registers — everything goes on the stack.
133 def CC_ARM64_DarwinPCS_VarArg : CallingConv<[
134 CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
135 CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,
137 // Handle all scalar types as either i64 or f64.
138 CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
139 CCIfType<[f32], CCPromoteToType<f64>>,
141 // Everything is on the stack.
142 // i128 is split to two i64s, and its stack alignment is 16 bytes.
143 CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
144 CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32], CCAssignToStack<8, 8>>,
145 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
148 // The WebKit_JS calling convention only passes the first argument (the callee)
149 // in register and the remaining arguments on stack. We allow 32bit stack slots,
150 // so that WebKit can write partial values in the stack and define the other
151 // 32bit quantity as undef.
152 def CC_ARM64_WebKit_JS : CallingConv<[
153 // Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
// Only a single GPR (X0/W0) is available, so only the first integer
// argument (the callee) lands in a register; everything after it falls
// through to the stack rules below.
154 CCIfType<[i1, i8, i16], CCCustom<"CC_ARM64_WebKit_JS_i1i8i16_Reg">>,
155 CCIfType<[i32], CCAssignToRegWithShadow<[W0], [X0]>>,
156 CCIfType<[i64], CCAssignToRegWithShadow<[X0], [W0]>>,
158 // Pass the remaining arguments on the stack instead.
// 32-bit slots are deliberate (see the comment above this def): WebKit may
// write a partial value and leave the other 32-bit half undef.
159 CCIfType<[i1, i8, i16], CCAssignToStack<4, 4>>,
160 CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
161 CCIfType<[i64, f64], CCAssignToStack<8, 8>>
// WebKit_JS return convention: scalar returns in up to eight GPR/FPR pairs,
// with the overlapping sub/super-register shadowed (no vector rules here).
164 def RetCC_ARM64_WebKit_JS : CallingConv<[
165 CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
166 [X0, X1, X2, X3, X4, X5, X6, X7]>>,
167 CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
168 [W0, W1, W2, W3, W4, W5, W6, W7]>>,
169 CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
170 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
171 CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
172 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
175 // FIXME: LR is only callee-saved in the sense that *we* preserve it and are
176 // presumably a callee to someone. External functions may not do so, but this
177 // is currently safe since BL has LR as an implicit-def and what happens after a
178 // tail call doesn't matter.
180 // It would be better to model its preservation semantics properly (create a
181 // vreg on entry, use it in RET & tail call generation; make that vreg def if we
182 // end up saving LR as part of a call frame). Watch this space...
// Standard AAPCS64 callee-saved set: LR, FP, X19-X28 plus FP callee-saved
// D registers (see the FIXME above regarding LR's modelling).
// NOTE(review): this listing jumps from X28 to D12 — a line (presumably
// D8-D11, the remaining AAPCS callee-saved FP registers) appears to be
// missing; confirm against the full file.
183 def CSR_ARM64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
184 X23, X24, X25, X26, X27, X28,
186 D12, D13, D14, D15)>;
188 // Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
189 // 'this' and the pointer return value are both passed in X0 in these cases,
190 // this can be partially modelled by treating X0 as a callee-saved register;
191 // only the resulting RegMask is used; the SaveList is ignored
193 // (For generic ARM 64-bit ABI code, clang will not generate constructors or
194 // destructors with 'this' returns, so this RegMask will not be used in that case.)
// Adds X0 to the normal AAPCS set so 'this'-returning calls (iOS 64-bit C++
// ctors/dtors, see comment above) treat the return register as preserved;
// only the RegMask produced from this is consumed.
196 def CSR_ARM64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_ARM64_AAPCS, X0)>;
198 // The function used by Darwin to obtain the address of a thread-local variable
199 // guarantees more than a normal AAPCS function. x16 and x17 are used on the
200 // fast path for calculation, but other registers except X0 (argument/return)
201 // and LR (it is a call, after all) are preserved.
// Preserved set for Darwin's TLS-address helper: all of X1-X28 except the
// fast-path scratch registers X16/X17, plus every Q register. X0 and LR are
// deliberately absent (argument/return and call linkage, per comment above).
// NOTE(review): a continuation line between the GPR and Q-register operands
// appears to be missing from this listing — confirm against the full file.
202 def CSR_ARM64_TLS_Darwin
203 : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
205 (sequence "Q%u", 0, 31))>;
207 // The ELF stub used for TLS-descriptor access saves every feasible
208 // register. Only X0 and LR are clobbered.
// ELF TLS-descriptor stub preserved set: X1-X28, FP, and all Q registers.
// X0 and LR are omitted — they are the only clobbers (see comment above).
209 def CSR_ARM64_TLS_ELF
210 : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
211 (sequence "Q%u", 0, 31))>;
// Every register, listed in both its 32-bit (W/S/H/B) and 64/128-bit
// (X/D/Q) views, plus the stack pointers — the "preserve everything" set.
213 def CSR_ARM64_AllRegs
214 : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
215 (sequence "X%u", 0, 28), FP, LR, SP,
216 (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
217 (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
218 (sequence "Q%u", 0, 31))>;