//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for AArch64 architecture.
//
//===----------------------------------------------------------------------===//
/// CCIfAlign - Match if the argument's original alignment equals Align.
class CCIfAlign<string Align, CCAction A> :
  CCIf<"ArgFlags.getOrigAlign() == " # Align, A>;
/// CCIfBigEndian - Match only if we're in big endian mode. Used below to pass
/// vectors as single scalars so their lane numbering stays consistent.
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getTarget().getDataLayout()->isBigEndian()", A>;
/// CCIfUnallocated - Match only if the named AArch64 register has not yet been
/// allocated by the CCState (i.e. there is still room in that register class).
class CCIfUnallocated<string Reg, CCAction A> :
  CCIf<"!State.isAllocated(AArch64::" # Reg # ")", A>;
24 //===----------------------------------------------------------------------===//
25 // ARM AAPCS64 Calling Convention
26 //===----------------------------------------------------------------------===//
// Standard AAPCS64 argument-passing convention.
def CC_AArch64_AAPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCIfUnallocated<"X7", CCPromoteToType<i32>>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // i128 is split to two i64s, we can't fit half to register X7.
  // Shadowing the preceding odd register keeps the pair even-aligned.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,
  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  // AAPCS64 stack slots are at least 64-bit.
  CCIfType<[i1, i8, i16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
           CCAssignToStack<16, 16>>
]>;
// AAPCS64 return-value convention: the same register assignments as the
// argument convention, without the SRet/ByVal/stack cases.
def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;
106 // Darwin uses a calling convention which differs in only two ways
107 // from the standard one at this level:
108 // + i128s (i.e. split i64s) don't need even registers.
109 // + Stack slots are sized as needed rather than being at least 64-bit.
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCIfUnallocated<"X7", CCPromoteToType<i32>>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // i128 is split to two i64s, we can't fit half to register X7.
  // (Unlike generic AAPCS, the halves need not start at an even register.)
  CCIfType<[i64],
           CCIfSplit<CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6],
                                             [W0, W1, W2, W3, W4, W5, W6]>>>,
  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  // Darwin sizes stack slots as needed rather than at least 64-bit.
  CCIfType<[i1, i8], CCAssignToStack<1, 1>>,
  CCIfType<[i16], CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
]>;
// Darwin variadic arguments: everything anonymous goes on the stack.
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f32], CCPromoteToType<f64>>,

  // Everything is on the stack.
  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
]>;
169 // The WebKit_JS calling convention only passes the first argument (the callee)
170 // in register and the remaining arguments on stack. We allow 32bit stack slots,
171 // so that WebKit can write partial values in the stack and define the other
172 // 32bit quantity as undef.
def CC_AArch64_WebKit_JS : CallingConv<[
  // Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
  CCIfType<[i1, i8, i16], CCIfUnallocated<"X0", CCPromoteToType<i32>>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0], [X0]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0], [W0]>>,

  // Pass the remaining arguments on the stack instead. 32-bit slots are
  // allowed so WebKit can write partial values (see comment above the def).
  CCIfType<[i1, i8, i16], CCAssignToStack<4, 4>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;
// WebKit JS return convention: same register assignments as AAPCS returns
// for the scalar types WebKit uses.
def RetCC_AArch64_WebKit_JS : CallingConv<[
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;
196 // FIXME: LR is only callee-saved in the sense that *we* preserve it and are
197 // presumably a callee to someone. External functions may not do so, but this
198 // is currently safe since BL has LR as an implicit-def and what happens after a
199 // tail call doesn't matter.
201 // It would be better to model its preservation semantics properly (create a
202 // vreg on entry, use it in RET & tail call generation; make that vreg def if we
203 // end up saving LR as part of a call frame). Watch this space...
// AAPCS64 callee-saved registers: X19-X28, FP, LR, and the low 64 bits of
// v8-v15 (D8-D15). The dropped D8-D11 line is restored here — AAPCS64
// requires all of d8-d15 to be preserved, not just d12-d15.
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                           X23, X24, X25, X26, X27, X28,
                                           D8,  D9,  D10, D11,
                                           D12, D13, D14, D15)>;
209 // Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
210 // 'this' and the pointer return value are both passed in X0 in these cases,
211 // this can be partially modelled by treating X0 as a callee-saved register;
212 // only the resulting RegMask is used; the SaveList is ignored
214 // (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case)
217 def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;
219 // The function used by Darwin to obtain the address of a thread-local variable
220 // guarantees more than a normal AAPCS function. x16 and x17 are used on the
221 // fast path for calculation, but other registers except X0 (argument/return)
222 // and LR (it is a call, after all) are preserved.
// X1-X28 minus the fast-path scratch registers X16/X17, plus FP and all
// vector registers, are preserved. The dropped "FP," line is restored here —
// per the comment above, only X0 and LR may be clobbered, so the frame
// pointer must be in the saved set.
def CSR_AArch64_TLS_Darwin
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;
228 // The ELF stub used for TLS-descriptor access saves every feasible
229 // register. Only X0 and LR are clobbered.
// X1-X28 plus the frame pointer and all 32 vector registers are preserved;
// X0 (argument/return) and LR (clobbered by the call itself) are omitted.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;
// Every register in every width class: the W/X GPR views (including the
// stack pointers WSP/SP, FP and LR) and the B/H/S/D/Q views of the vector
// registers.
def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;