diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index a5774e1f241..56863d9492e 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -33,16 +33,22 @@ def RetCC_X86Common : CallingConv<[
   CCIfType<[i16], CCAssignToReg<[AX, DX]>>,
   CCIfType<[i32], CCAssignToReg<[EAX, EDX]>>,
   CCIfType<[i64], CCAssignToReg<[RAX, RDX]>>,
-
-  // Vector types are returned in XMM0 and XMM1, when they fit. XMMM2 and XMM3
+
+  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
   // can only be used by ABI non-compliant code. If the target doesn't have XMM
   // registers, it won't have vector types.
   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
             CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
 
+  // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
+  // can only be used by ABI non-compliant code. This vector type is only
+  // supported while using the AVX target feature.
+  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+            CCIfSubtarget<"hasAVX()", CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>>,
+
   // MMX vector types are always returned in MM0. If the target doesn't have
   // MM0, it doesn't support these vector types.
-  CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToReg<[MM0]>>,
+  CCIfType<[x86mmx, v1i64], CCAssignToReg<[MM0]>>,
 
   // Long double types are always returned in ST0 (even with SSE).
   CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
@@ -89,14 +95,14 @@ def RetCC_X86_64_C : CallingConv<[
   // returned in RAX. This disagrees with ABI documentation but is bug
   // compatible with gcc.
   CCIfType<[v1i64], CCAssignToReg<[RAX]>>,
-  CCIfType<[v8i8, v4i16, v2i32, v2f32], CCAssignToReg<[XMM0, XMM1]>>,
+  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
   CCDelegateTo<RetCC_X86Common>
 ]>;
 
 // X86-Win64 C return-value convention.
 def RetCC_X86_Win64_C : CallingConv<[
   // The X86-Win64 calling convention always returns __m64 values in RAX.
-  CCIfType<[v8i8, v4i16, v2i32, v1i64], CCBitConvertToType<i64>>,
+  CCIfType<[x86mmx, v1i64], CCBitConvertToType<i64>>,
 
   // And FP in XMM0 only.
   CCIfType<[f32], CCAssignToReg<[XMM0]>>,
@@ -155,7 +161,7 @@ def CC_X86_64_C : CallingConv<[
 
   // The first 8 MMX (except for v1i64) vector arguments are passed in XMM
   // registers on Darwin.
-  CCIfType<[v8i8, v4i16, v2i32, v2f32],
+  CCIfType<[x86mmx],
             CCIfSubtarget<"isTargetDarwin()",
             CCIfSubtarget<"hasSSE2()",
             CCPromoteToType<v2i64>>>>,
@@ -164,11 +170,16 @@ def CC_X86_64_C : CallingConv<[
   CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
             CCIfSubtarget<"hasSSE1()",
             CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
-
+
+  // The first 8 256-bit vector arguments are passed in YMM registers.
+  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+            CCIfSubtarget<"hasAVX()",
+            CCAssignToReg<[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7]>>>,
+
   // Integer/FP values get stored in stack slots that are 8 bytes in size and
   // 8-byte aligned if there are no more registers to hold them.
   CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
-
+
   // Long doubles get stack slots whose size and alignment depends on the
   // subtarget.
   CCIfType<[f80], CCAssignToStack<0, 0>>,
@@ -176,8 +187,12 @@ def CC_X86_64_C : CallingConv<[
 
   // Vectors get 16-byte stack slots that are 16-byte aligned.
   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
 
+  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
+  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+           CCAssignToStack<32, 32>>,
+
   // __m64 vectors get 8-byte stack slots that are 8-byte aligned.
-  CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToStack<8, 8>>
+  CCIfType<[x86mmx,v1i64], CCAssignToStack<8, 8>>
 ]>;
 
 // Calling convention used on Win64
@@ -195,8 +210,7 @@ def CC_X86_Win64_C : CallingConv<[
   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
 
   // The first 4 MMX vector arguments are passed in GPRs.
-  CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32],
-           CCBitConvertToType<i64>>,
+  CCIfType<[x86mmx, v1i64], CCBitConvertToType<i64>>,
 
   // The first 4 integer arguments are passed in integer registers.
   CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                           [XMM0, XMM1, XMM2, XMM3]>>,
@@ -218,7 +232,7 @@ def CC_X86_Win64_C : CallingConv<[
   CCIfType<[f80], CCAssignToStack<0, 0>>,
 
   // __m64 vectors get 8-byte stack slots that are 8-byte aligned.
-  CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>>
+  CCIfType<[x86mmx,v1i64], CCAssignToStack<8, 8>>
 ]>;
 
 def CC_X86_64_GHC : CallingConv<[
@@ -254,7 +268,7 @@ def CC_X86_32_Common : CallingConv<[
 
   // The first 3 __m64 (except for v1i64) vector arguments are passed in mmx
   // registers if the call is not a vararg call.
-  CCIfNotVarArg<CCIfType<[v8i8, v4i16, v2i32, v2f32],
-                CCAssignToReg<[MM0, MM1, MM2]>>>,
+  CCIfNotVarArg<CCIfType<[x86mmx],
+                CCAssignToReg<[MM0, MM1, MM2]>>>,
 
   // Integer/Float values get stored in stack slots that are 4 bytes in
@@ -271,12 +285,21 @@ def CC_X86_32_Common : CallingConv<[
   CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                 CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,
 
+  // The first 4 AVX 256-bit vector arguments are passed in YMM registers.
+  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+                CCIfSubtarget<"hasAVX()",
+                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
+
   // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
 
+  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
+  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+           CCAssignToStack<32, 32>>,
+
   // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
   // passed in the parameter area.
-  CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 4>>]>;
+  CCIfType<[x86mmx,v1i64], CCAssignToStack<8, 4>>]>;
 
 def CC_X86_32_C : CallingConv<[
   // Promote i8/i16 arguments to i32.
@@ -356,3 +379,35 @@ def CC_X86_32_GHC : CallingConv<[
   // Pass in STG registers: Base, Sp, Hp, R1
   CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
 ]>;
+
+//===----------------------------------------------------------------------===//
+// X86 Root Argument Calling Conventions
+//===----------------------------------------------------------------------===//
+
+// This is the root argument convention for the X86-32 backend.
+def CC_X86_32 : CallingConv<[
+  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
+  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
+  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
+  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
+
+  // Otherwise, drop to normal X86-32 CC
+  CCDelegateTo<CC_X86_32_C>
+]>;
+
+// This is the root argument convention for the X86-64 backend.
+def CC_X86_64 : CallingConv<[
+  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
+
+  // Mingw64 and native Win64 use Win64 CC
+  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
+
+  // Otherwise, drop to normal X86-64 CC
+  CCDelegateTo<CC_X86_64_C>
+]>;
+
+// This is the argument convention used for the entire X86 backend.
+def CC_X86 : CallingConv<[
+  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
+  CCDelegateTo<CC_X86_32>
+]>;
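
As context for the AVX additions, here is a minimal C illustration of what
the new rules mean at the source level. It is not part of the patch, and the
function and file names are hypothetical. Assuming an x86-64 System V target
compiled with -mavx, the new CC_X86_64_C entry assigns the first eight
256-bit vector arguments to YMM0..YMM7, and the new RetCC_X86Common entry
returns the 256-bit result in YMM0:

    /* avx_cc.c -- illustrative sketch only; compile with: cc -O2 -mavx -S
     * avx_cc.c and inspect the assembly to see the register assignments
     * described by the .td rules above. */
    #include <immintrin.h>

    __m256 scale(__m256 v, __m256 w)  /* v arrives in YMM0, w in YMM1 */
    {
        return _mm256_mul_ps(v, w);   /* result is returned in YMM0 */
    }

When the eight YMM argument registers are exhausted, a 256-bit argument
instead falls through to the 32-byte, 32-byte-aligned stack slot rule added
in the same hunk.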