//===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the X86-32 and X86-64
// architectures.
//
//===----------------------------------------------------------------------===//
/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
    : CCIf<!strconcat("static_cast<const X86Subtarget&>"
                      "(State.getMachineFunction().getSubtarget()).", F),
           A>;
//===----------------------------------------------------------------------===//
// Return Value Calling Conventions
//===----------------------------------------------------------------------===//
// Return-value conventions common to all X86 CC's.
def RetCC_X86Common : CallingConv<[
  // Scalar values are returned in AX first, then DX. For i8, the ABI
  // requires the values to be in AL and AH, however this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  //
  // For code that doesn't care about the ABI, we allow returning more than two
  // integer values in registers.
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,

  // Boolean vectors of AVX-512 are returned in SIMD registers.
  // The call from AVX to AVX-512 function should work,
  // since the boolean types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and
  // YMM3 can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX target feature.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and
  // ZMM3 can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX-512 target feature.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // MMX vector types are always returned in MM0. If the target doesn't have
  // MM0, it doesn't support these vector types.
  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,

  // Long double types are always returned in FP0 (even with SSE).
  CCIfType<[f80], CCAssignToReg<[FP0, FP1]>>
]>;
// X86-32 C return-value convention.
def RetCC_X86_32_C : CallingConv<[
  // The X86-32 calling convention returns FP values in FP0, unless marked
  // with "inreg" (used here to distinguish one kind of reg from another,
  // weirdly; this is really the sse-regparm calling convention) in which
  // case they use XMM0, otherwise it is the same as the common X86 calling
  // conv.
  CCIfInReg<CCIfSubtarget<"hasSSE2()",
    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  CCIfType<[f32,f64], CCAssignToReg<[FP0, FP1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;
// X86-32 FastCC return-value convention.
def RetCC_X86_32_Fast : CallingConv<[
  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
  // SSE2.
  // This can happen when a float, 2 x float, or 3 x float vector is split by
  // target lowering, and is returned in 1-3 sse regs.
  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,

  // For integers, ECX can be used as an extra return register
  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,

  // Otherwise, it is the same as the common X86 calling convention.
  CCDelegateTo<RetCC_X86Common>
]>;
// Intel_OCL_BI return-value convention.
def RetCC_Intel_OCL_BI : CallingConv<[
  // Vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit FP vectors
  // No more than 4 registers
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit FP vectors
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // i32, i64 in the standard way
  CCDelegateTo<RetCC_X86Common>
]>;
// X86-32 HiPE return-value convention.
def RetCC_X86_32_HiPE : CallingConv<[
  // Promote all types to i32
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX]>>
]>;
// X86-32 Vectorcall return-value convention.
def RetCC_X86_32_VectorCall : CallingConv<[
  // Vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit FP vectors
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit FP vectors
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // Return integers in the standard way.
  CCDelegateTo<RetCC_X86Common>
]>;
// X86-64 C return-value convention.
def RetCC_X86_64_C : CallingConv<[
  // The X86-64 calling convention always returns FP values in XMM0.
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,

  // MMX vector types are always returned in XMM0.
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;
// X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.
  CCDelegateTo<RetCC_X86_64_C>
]>;
// X86-64 HiPE return-value convention.
def RetCC_X86_64_HiPE : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RAX, RDX]>>
]>;
// X86-64 WebKit_JS return-value convention.
def RetCC_X86_64_WebKit_JS : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: RAX
  CCIfType<[i64], CCAssignToReg<[RAX]>>
]>;
// X86-64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the X86 C calling convention.
def RetCC_X86_64_AnyReg : CallingConv<[
  CCCustom<"CC_X86_AnyReg_Error">
]>;
// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
  // If FastCC, use RetCC_X86_32_Fast.
  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
  // If HiPE, use RetCC_X86_32_HiPE.
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_32_HiPE>>,
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_32_VectorCall>>,

  // Otherwise, use RetCC_X86_32_C.
  CCDelegateTo<RetCC_X86_32_C>
]>;
// This is the root return-value convention for the X86-64 backend.
def RetCC_X86_64 : CallingConv<[
  // HiPE uses RetCC_X86_64_HiPE
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,

  // Handle JavaScript calls.
  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>,
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,

  // Handle explicit CC selection
  CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<RetCC_X86_64_C>
]>;
// This is the return-value convention used for the entire X86 backend.
def RetCC_X86 : CallingConv<[

  // Check if this is the Intel OpenCL built-ins calling convention
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<RetCC_Intel_OCL_BI>>,

  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
  CCDelegateTo<RetCC_X86_32>
]>;
//===----------------------------------------------------------------------===//
// X86-64 Argument Calling Conventions
//===----------------------------------------------------------------------===//
def CC_X86_64_C : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCIfSubtarget<"isTarget64BitILP32()", CCAssignToReg<[R10D]>>>,
  CCIfNest<CCAssignToReg<[R10]>>,

  // The first 6 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,

  // The first 8 MMX vector arguments are passed in XMM registers on Darwin.
  CCIfType<[x86mmx],
            CCIfSubtarget<"isTargetDarwin()",
            CCIfSubtarget<"hasSSE2()",
            CCPromoteToType<v2i64>>>>,

  // Boolean vectors of AVX-512 are passed in SIMD registers.
  // The call from AVX to AVX-512 function should work,
  // since the boolean types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // The first 8 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

  // The first 8 256-bit vector arguments are passed in YMM registers, unless
  // this is a vararg function.
  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
  // fixed arguments to vararg functions are supposed to be passed in
  // registers. Actually modeling that would be a lot of work, though.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                          CCIfSubtarget<"hasFp256()",
                          CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                         YMM4, YMM5, YMM6, YMM7]>>>>,

  // The first 8 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
            CCIfSubtarget<"hasAVX512()",
            CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depends on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v16i32, v8i64, v16f32, v8f64],
           CCAssignToStack<64, 64>>
]>;
// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
  // FIXME: Handle byval stuff.
  // FIXME: Handle varargs.

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // 128 bit vectors are passed by pointer
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,

  // 256 bit vectors are passed by pointer
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,

  // 512 bit vectors are passed by pointer
  CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,

  // The first 4 MMX vector arguments are passed in GPRs.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // The first 4 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Do not pass the sret argument in RCX, the Win64 thiscall calling
  // convention requires "this" to be passed in RCX.
  CCIfCC<"CallingConv::X86_ThisCall",
    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8 , R9 ],
                                                     [XMM1, XMM2, XMM3]>>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8 , R9 ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // The first 4 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
                                   [RCX , RDX , R8 , R9 ]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depends on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>
]>;
def CC_X86_Win64_VectorCall : CallingConv<[
  // The first 6 floating point and vector types of 128 bits or less use
  // XMM0-XMM5.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5]>>,

  // 256-bit vectors use YMM registers.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5]>>,

  // 512-bit vectors use ZMM registers.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5]>>,

  // Delegate to fastcall to handle integer types.
  CCDelegateTo<CC_X86_Win64_C>
]>;
def CC_X86_64_GHC : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64],
            CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,

  // Pass in STG registers: F1, F2, F3, F4, D1, D2
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
]>;
def CC_X86_64_HiPE : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2, ARG3
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RSI, RDX, RCX, R8]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;
def CC_X86_64_WebKit_JS : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Only the first integer argument is passed in register.
  CCIfType<[i32], CCAssignToReg<[EAX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX]>>,

  // The remaining integer arguments are passed on the stack. 32bit integer and
  // floating-point arguments are aligned to 4 byte and stored in 4 byte slots.
  // 64bit integer and floating-point arguments are aligned to 8 byte and stored
  // in 8 byte stack slots.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;
// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the X86 C calling convention.
def CC_X86_64_AnyReg : CallingConv<[
  CCCustom<"CC_X86_AnyReg_Error">
]>;
//===----------------------------------------------------------------------===//
// X86 C Calling Convention
//===----------------------------------------------------------------------===//
/// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
/// values are spilled on the stack, and the first 4 vector values go in XMM
/// regs.
def CC_X86_32_Common : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<4, 4>>,

  // The first 3 float or double arguments, if marked 'inreg' and if the call
  // is not a vararg call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,

  // The first 3 __m64 vector arguments are passed in mmx registers if the
  // call is not a vararg call.
  CCIfNotVarArg<CCIfType<[x86mmx],
                CCAssignToReg<[MM0, MM1, MM2]>>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Doubles get 8-byte slots that are 4-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 4>>,

  // Long doubles get slots whose size depends on the subtarget.
  CCIfType<[f80], CCAssignToStack<0, 4>>,

  // The first 4 SSE vector arguments are passed in XMM registers.
  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,

  // The first 4 AVX 256-bit vector arguments are passed in YMM registers.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                CCIfSubtarget<"hasFp256()",
                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,

  // The first 4 AVX 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
                CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>>,

  // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit AVX vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
           CCAssignToStack<64, 64>>,

  // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
  // passed in the parameter area.
  CCIfType<[x86mmx], CCAssignToStack<8, 4>>]>;
def CC_X86_32_C : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in ECX.
  CCIfNest<CCAssignToReg<[ECX]>>,

  // The first 3 integer arguments, if marked 'inreg' and if the call is not
  // a vararg call, are passed in integer registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
def CC_X86_32_FastCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfInReg<CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
def CC_X86_32_VectorCall : CallingConv<[
  // The first 6 floating point and vector types of 128 bits or less use
  // XMM0-XMM5.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5]>>,

  // 256-bit vectors use YMM registers.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5]>>,

  // 512-bit vectors use ZMM registers.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5]>>,

  // Otherwise, pass it indirectly.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64,
            v32i8, v16i16, v8i32, v4i64, v8f32, v4f64,
            v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
           CCCustom<"CC_X86_32_VectorCallIndirect">>,

  // Delegate to fastcall to handle integer types.
  CCDelegateTo<CC_X86_32_FastCall>
]>;
def CC_X86_32_ThisCall_Common : CallingConv<[
  // The first integer argument is passed in ECX
  CCIfType<[i32], CCAssignToReg<[ECX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
def CC_X86_32_ThisCall_Mingw : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  CCDelegateTo<CC_X86_32_ThisCall_Common>
]>;
def CC_X86_32_ThisCall_Win : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass sret arguments indirectly through stack.
  CCIfSRet<CCAssignToStack<4, 4>>,

  CCDelegateTo<CC_X86_32_ThisCall_Common>
]>;
def CC_X86_32_ThisCall : CallingConv<[
  CCIfSubtarget<"isTargetCygMing()", CCDelegateTo<CC_X86_32_ThisCall_Mingw>>,
  CCDelegateTo<CC_X86_32_ThisCall_Win>
]>;
def CC_X86_32_FastCC : CallingConv<[
  // Handles byval parameters. Note that we can't rely on the delegation
  // to CC_X86_32_Common for this because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // The first 3 float or double arguments, if the call is not a vararg
  // call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,

  // Doubles get 8-byte slots that are 8-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
def CC_X86_32_GHC : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in STG registers: Base, Sp, Hp, R1
  CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
]>;
def CC_X86_32_HiPE : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX, ECX]>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>
]>;
// X86-64 Intel OpenCL built-ins calling convention.
def CC_Intel_OCL_BI : CallingConv<[

  CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>,
  CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8, R9 ]>>>,

  CCIfType<[i32], CCIfSubtarget<"is64Bit()", CCAssignToReg<[EDI, ESI, EDX, ECX]>>>,
  CCIfType<[i64], CCIfSubtarget<"is64Bit()", CCAssignToReg<[RDI, RSI, RDX, RCX]>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  // The SSE vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // The 256-bit vector arguments are passed in YMM registers.
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,

  // The 512-bit vector arguments are passed in ZMM registers.
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,

  // Pass masks in mask registers
  CCIfType<[v16i1, v8i1], CCAssignToReg<[K1]>>,

  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64_C>>,
  CCDelegateTo<CC_X86_32_C>
]>;
//===----------------------------------------------------------------------===//
// X86 Root Argument Calling Conventions
//===----------------------------------------------------------------------===//
// This is the root argument convention for the X86-32 backend.
def CC_X86_32 : CallingConv<[
  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_32_VectorCall>>,
  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,

  // Otherwise, drop to normal X86-32 CC
  CCDelegateTo<CC_X86_32_C>
]>;
// This is the root argument convention for the X86-64 backend.
def CC_X86_64 : CallingConv<[
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<CC_X86_64_WebKit_JS>>,
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
  CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<CC_X86_64_C>
]>;
// This is the argument convention used for the entire X86 backend.
def CC_X86 : CallingConv<[
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
  CCDelegateTo<CC_X86_32>
]>;
//===----------------------------------------------------------------------===//
// Callee-saved Registers.
//===----------------------------------------------------------------------===//
def CSR_NoRegs : CalleeSavedRegs<(add)>;

def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;

def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;

def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15,
                                     (sequence "XMM%u", 6, 15))>;
// All GPRs - except r11
def CSR_64_RT_MostRegs : CalleeSavedRegs<(add CSR_64, RAX, RCX, RDX, RSI, RDI,
                                              R8, R9, R10, RSP)>;

// All registers - except r11
def CSR_64_RT_AllRegs     : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
                                                 (sequence "XMM%u", 0, 15))>;
def CSR_64_RT_AllRegs_AVX : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
                                                 (sequence "YMM%u", 0, 15))>;
def CSR_64_MostRegs : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
                                           R11, R12, R13, R14, R15, RBP,
                                           (sequence "XMM%u", 0, 15))>;

def CSR_64_AllRegs     : CalleeSavedRegs<(add CSR_64_MostRegs, RAX, RSP,
                                              (sequence "XMM%u", 16, 31))>;
def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX, RSP,
                                                   (sequence "YMM%u", 0, 31)),
                                              (sequence "XMM%u", 0, 15))>;
// Standard C + YMM6-15
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
                                                      R13, R14, R15,
                                                      (sequence "YMM%u", 6, 15))>;

def CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI,
                                                         R12, R13, R14, R15,
                                                         (sequence "ZMM%u", 6, 21),
                                                         K4, K5, K6, K7)>;
// Standard C + XMM 8-15
def CSR_64_Intel_OCL_BI     : CalleeSavedRegs<(add CSR_64,
                                                   (sequence "XMM%u", 8, 15))>;

// Standard C + YMM 8-15
def CSR_64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add CSR_64,
                                                   (sequence "YMM%u", 8, 15))>;
758 def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RDI, RSI, R14, R15,
759 (sequence "ZMM%u", 16, 31),