/// \brief MSVC calling convention that passes vectors and vector aggregates
/// in SSE registers.
- X86_VectorCall = 80
+ X86_VectorCall = 80,
+
+ /// \brief Calling convention used by the HipHop Virtual Machine (HHVM) to
+ /// perform calls to and from the translation cache, and for calling PHP
+ /// functions. The HHVM calling convention supports tail/sibling call
+ /// elimination.
+ HHVM = 81,
+
+ /// \brief HHVM calling convention for invoking C/C++ helpers.
+ HHVM_C = 82
};
} // End CallingConv namespace
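For orientation, here is a minimal sketch (hypothetical frontend code, not part of this patch) of how a client such as HHVM's JIT could apply the new IDs through the existing C++ API; `tagAsHHVM` and its parameters are illustrative names:

```cpp
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Tag a PHP function and one of its call sites with the HHVM convention.
// The callee and every call site must agree on the convention.
static void tagAsHHVM(Function &PHPFunc, CallInst &PHPCall) {
  PHPFunc.setCallingConv(CallingConv::HHVM);
  PHPCall.setCallingConv(CallingConv::HHVM);
  // HHVM relies on tail/sibling call elimination for transfers between
  // translations, so request a guaranteed tail call where it is legal.
  PHPCall.setTailCallKind(CallInst::TCK_MustTail);
}
```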
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
+/// If a non-zero \p Skew is specified, the return value will be the smallest
+/// integer that is greater than or equal to \p Value and equal to
+/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
+/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
+///
/// Examples:
/// \code
/// RoundUpToAlignment(5, 8) = 8
/// RoundUpToAlignment(17, 8) = 24
/// RoundUpToAlignment(~0LL, 8) = 0
/// RoundUpToAlignment(321, 255) = 510
+///
+/// RoundUpToAlignment(5, 8, 7) = 7
+/// RoundUpToAlignment(17, 8, 1) = 17
+/// RoundUpToAlignment(~0LL, 8, 3) = 3
+/// RoundUpToAlignment(321, 255, 42) = 552
/// \endcode
-inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) {
- return (Value + Align - 1) / Align * Align;
+inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align,
+ uint64_t Skew = 0) {
+ Skew %= Align;
+ return (Value + Align - 1 - Skew) / Align * Align + Skew;
}
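A quick usage sketch (assuming `RoundUpToAlignment` above is in scope) that restates the documented examples as assertions:

```cpp
#include <cassert>

int main() {
  // Without skew: the next multiple of the alignment.
  assert(RoundUpToAlignment(5, 8) == 8);
  // With skew: the smallest value >= Value of the form Align * N + Skew.
  assert(RoundUpToAlignment(5, 8, 7) == 7);         // 8 * 0 + 7
  assert(RoundUpToAlignment(17, 8, 1) == 17);       // 8 * 2 + 1, unchanged
  assert(RoundUpToAlignment(321, 255, 42) == 552);  // 255 * 2 + 42
  return 0;
}
```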
/// Returns the offset to the next integer (mod 2**64) that is greater than
return StackRealignable;
}
+ /// Return the skew that has to be applied to stack alignment under
+ /// certain conditions (e.g. the stack was adjusted before function \p MF
+ /// was called).
+ virtual unsigned getStackAlignmentSkew(const MachineFunction &MF) const;
+
/// getOffsetOfLocalArea - This method returns the offset of the local area
/// from the stack pointer on entrance to a function.
///
KEYWORD(preserve_mostcc);
KEYWORD(preserve_allcc);
KEYWORD(ghccc);
+ KEYWORD(hhvmcc);
+ KEYWORD(hhvm_ccc);
KEYWORD(cc);
KEYWORD(c);
/// ::= 'preserve_mostcc'
/// ::= 'preserve_allcc'
/// ::= 'ghccc'
+/// ::= 'hhvmcc'
+/// ::= 'hhvm_ccc'
/// ::= 'cc' UINT
///
bool LLParser::ParseOptionalCallingConv(unsigned &CC) {
case lltok::kw_preserve_mostcc:CC = CallingConv::PreserveMost; break;
case lltok::kw_preserve_allcc: CC = CallingConv::PreserveAll; break;
case lltok::kw_ghccc: CC = CallingConv::GHC; break;
+ case lltok::kw_hhvmcc: CC = CallingConv::HHVM; break;
+ case lltok::kw_hhvm_ccc: CC = CallingConv::HHVM_C; break;
case lltok::kw_cc: {
Lex.Lex();
return ParseUInt32(CC);
kw_webkit_jscc, kw_anyregcc,
kw_preserve_mostcc, kw_preserve_allcc,
kw_ghccc,
+ kw_hhvmcc, kw_hhvm_ccc,
// Attributes:
kw_attributes,
static inline void
AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
bool StackGrowsDown, int64_t &Offset,
- unsigned &MaxAlign) {
+ unsigned &MaxAlign, unsigned Skew) {
// If the stack grows down, add the object size to find the lowest address.
if (StackGrowsDown)
Offset += MFI->getObjectSize(FrameIdx);
MaxAlign = std::max(MaxAlign, Align);
// Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align);
+ Offset = RoundUpToAlignment(Offset, Align, Skew);
if (StackGrowsDown) {
DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
SmallSet<int, 16> &ProtectedObjs,
MachineFrameInfo *MFI, bool StackGrowsDown,
- int64_t &Offset, unsigned &MaxAlign) {
+ int64_t &Offset, unsigned &MaxAlign, unsigned Skew) {
for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
E = UnassignedObjs.end(); I != E; ++I) {
int i = *I;
- AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
+ AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
ProtectedObjs.insert(i);
}
}
&& "Local area offset should be in direction of stack growth");
int64_t Offset = LocalAreaOffset;
+ // Skew to be applied to alignment.
+ unsigned Skew = TFI.getStackAlignmentSkew(Fn);
+
// If there are fixed sized objects that are preallocated in the local area,
// non-fixed objects can't be allocated right at the start of local area.
// We currently don't support filling in holes in between fixed sized
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
- Offset = RoundUpToAlignment(Offset, Align);
+ Offset = RoundUpToAlignment(Offset, Align, Skew);
MFI->setObjectOffset(i, -Offset); // Set the computed offset
}
for (int i = MaxCSFI; i >= MinCSFI ; --i) {
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
- Offset = RoundUpToAlignment(Offset, Align);
+ Offset = RoundUpToAlignment(Offset, Align, Skew);
MFI->setObjectOffset(i, Offset);
Offset += MFI->getObjectSize(i);
RS->getScavengingFrameIndices(SFIs);
for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
IE = SFIs.end(); I != IE; ++I)
- AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign);
+ AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
}
// FIXME: Once this is working, then enable flag will change to a target
unsigned Align = MFI->getLocalFrameMaxAlign();
// Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align);
+ Offset = RoundUpToAlignment(Offset, Align, Skew);
DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
StackObjSet AddrOfObjs;
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
- Offset, MaxAlign);
+ Offset, MaxAlign, Skew);
// Assign large stack objects first.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
}
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
- Offset, MaxAlign);
+ Offset, MaxAlign, Skew);
AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
- Offset, MaxAlign);
+ Offset, MaxAlign, Skew);
AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
- Offset, MaxAlign);
+ Offset, MaxAlign, Skew);
}
// Then assign frame offsets to stack objects that are not used to spill
if (ProtectedObjs.count(i))
continue;
- AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
+ AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
}
// Make sure the special register scavenging spill slot is closest to the
RS->getScavengingFrameIndices(SFIs);
for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
IE = SFIs.end(); I != IE; ++I)
- AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign);
+ AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
}
if (!TFI.targetHandlesStackFrameRounding()) {
// If the frame pointer is eliminated, all frame offsets will be relative to
// SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
- Offset = RoundUpToAlignment(Offset, StackAlign);
+ Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
}
// Update frame info to pretend that this is part of the stack...
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
SavedRegs.set(Reg);
}
}
+
+unsigned TargetFrameLowering::getStackAlignmentSkew(
+ const MachineFunction &MF) const {
+ // When an HHVM function is called, the stack is skewed, as the return
+ // address is removed from the stack before we enter the function.
+ if (LLVM_UNLIKELY(MF.getFunction()->getCallingConv() == CallingConv::HHVM))
+ return MF.getTarget().getPointerSize();
+
+ return 0;
+}
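To illustrate the interaction (a simplified standalone model, not the PrologEpilogInserter code itself): with 16-byte stack alignment and the 8-byte skew returned above, frame offsets land on values of the form 16 * N + 8, compensating for the return-address slot that is missing on entry:

```cpp
#include <cassert>
#include <cstdint>

// Same computation as RoundUpToAlignment(Value, Align, Skew) above.
static uint64_t roundUpSkewed(uint64_t Value, uint64_t Align, uint64_t Skew) {
  Skew %= Align;
  return (Value + Align - 1 - Skew) / Align * Align + Skew;
}

int main() {
  const uint64_t StackAlign = 16;
  const uint64_t Skew = 8;  // pointer size on x86-64
  // An ordinary frame rounds an offset of 20 up to 32; a skewed (HHVM)
  // frame rounds it to the next 16 * N + 8 instead.
  assert(roundUpSkewed(20, StackAlign, 0) == 32);
  assert(roundUpSkewed(20, StackAlign, Skew) == 24);  // 16 * 1 + 8
  return 0;
}
```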
case CallingConv::X86_64_Win64: Out << "x86_64_win64cc"; break;
case CallingConv::SPIR_FUNC: Out << "spir_func"; break;
case CallingConv::SPIR_KERNEL: Out << "spir_kernel"; break;
+ case CallingConv::HHVM: Out << "hhvmcc"; break;
+ case CallingConv::HHVM_C: Out << "hhvm_ccc"; break;
}
}
CCCustom<"CC_X86_AnyReg_Error">
]>;
+// X86-64 HHVM return-value convention.
+def RetCC_X86_64_HHVM: CallingConv<[
+ // Promote all types to i64
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
+
+ // Values may be returned in any GP register except RSP and R12.
+ CCIfType<[i64], CCAssignToReg<[RBX, RBP, RDI, RSI, RDX, RCX, R8, R9,
+ RAX, R10, R11, R13, R14, R15]>>
+]>;
+
// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
// If FastCC, use RetCC_X86_32_Fast.
CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
+ // Handle HHVM calls.
+ CCIfCC<"CallingConv::HHVM", CCDelegateTo<RetCC_X86_64_HHVM>>,
+
// Mingw64 and native Win64 use Win64 CC
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,
CCAssignToStack<64, 64>>
]>;
+// Calling convention for X86-64 HHVM.
+def CC_X86_64_HHVM : CallingConv<[
+ // Pass arguments in any GP register except RSP.
+ CCIfType<[i64], CCAssignToReg<[RBX, R12, RBP, R15,
+ RDI, RSI, RDX, RCX, R8, R9,
+ RAX, R10, R11, R13, R14]>>
+]>;
+
+// Calling convention for helper functions in HHVM.
+def CC_X86_64_HHVM_C : CallingConv<[
+ // Pass the first argument in RBP.
+ CCIfType<[i64], CCAssignToReg<[RBP]>>,
+
+ // Otherwise it's the same as the regular C calling convention.
+ CCDelegateTo<CC_X86_64_C>
+]>;
+
// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
// FIXME: Handle byval stuff.
CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<CC_X86_Win64_C>>,
CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
+ CCIfCC<"CallingConv::HHVM", CCDelegateTo<CC_X86_64_HHVM>>,
+ CCIfCC<"CallingConv::HHVM_C", CCDelegateTo<CC_X86_64_HHVM_C>>,
// Mingw64 and native Win64 use Win64 CC
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RDI, RSI, R14, R15,
(sequence "ZMM%u", 16, 31),
K4, K5, K6, K7)>;
+
+// Only R12 is preserved for PHP calls in HHVM.
+def CSR_64_HHVM : CalleeSavedRegs<(add R12)>;
/// supports tail call optimization.
static bool IsTailCallConvention(CallingConv::ID CC) {
return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
- CC == CallingConv::HiPE);
+ CC == CallingConv::HiPE || CC == CallingConv::HHVM);
}
/// \brief Return true if the calling convention is a C calling convention.
/// Callee pop is necessary to support tail calls.
bool X86::isCalleePop(CallingConv::ID CallingConv,
bool is64Bit, bool IsVarArg, bool TailCallOpt) {
+
+ if (IsTailCallConvention(CallingConv))
+ return IsVarArg ? false : TailCallOpt;
+
switch (CallingConv) {
default:
return false;
case CallingConv::X86_FastCall:
case CallingConv::X86_ThisCall:
return !is64Bit;
- case CallingConv::Fast:
- case CallingConv::GHC:
- case CallingConv::HiPE:
- if (IsVarArg)
- return false;
- return TailCallOpt;
}
}
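Illustrative expectations for the refactored predicate (a sketch, not taken from this patch's tests; the header location of the `X86::isCalleePop` declaration is assumed): tail-call conventions pop their stack arguments only when guaranteed tail call optimization is on and the function is not variadic:

```cpp
#include "X86ISelLowering.h"  // assumed in-tree location of X86::isCalleePop
#include "llvm/IR/CallingConv.h"
#include <cassert>

using namespace llvm;

int main() {
  // HHVM is a tail-call convention, so it takes the early return above.
  assert(X86::isCalleePop(CallingConv::HHVM, /*is64Bit=*/true,
                          /*IsVarArg=*/false, /*TailCallOpt=*/true));
  // Variadic functions never use callee-pop here.
  assert(!X86::isCalleePop(CallingConv::HHVM, /*is64Bit=*/true,
                           /*IsVarArg=*/true, /*TailCallOpt=*/true));
  // Without guaranteed TCO, tail-call conventions are caller-pop as well.
  assert(!X86::isCalleePop(CallingConv::HHVM, /*is64Bit=*/true,
                           /*IsVarArg=*/false, /*TailCallOpt=*/false));
  return 0;
}
```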
return CSR_64_Intel_OCL_BI_SaveList;
break;
}
+ case CallingConv::HHVM:
+ return CSR_64_HHVM_SaveList;
case CallingConv::Cold:
if (Is64Bit)
return CSR_64_MostRegs_SaveList;
return CSR_64_Intel_OCL_BI_RegMask;
break;
}
+ case CallingConv::HHVM:
+ return CSR_64_HHVM_RegMask;
case CallingConv::Cold:
if (Is64Bit)
return CSR_64_MostRegs_RegMask;
--- /dev/null
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare hhvmcc i64 @bar(i64, i64, i64) nounwind
+
+; Check that we can modify %rbx and %rbp before returning via a tail call
+; to bar.
+define hhvmcc i64 @foo(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: foo:
+; CHECK-DAG: movl $1, %ebx
+; CHECK-DAG: movl $3, %ebp
+; CHECK: jmp bar
+ %ret = musttail call hhvmcc i64 @bar(i64 1, i64 %b, i64 3)
+ ret i64 %ret
+}
+
+; Check that we can read and modify %rbx returned from a PHP function.
+define hhvmcc i64 @mod_return(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: mod_return:
+; CHECK-NEXT: {{^#.*}}
+; CHECK-NEXT: callq bar
+; CHECK-NEXT: incq %rbx
+ %tmp = call hhvmcc i64 @bar(i64 %a, i64 %b, i64 %c)
+ %retval = add i64 %tmp, 1
+ ret i64 %retval
+}
+
+%rettype = type { i64, i64, i64, i64, i64, i64, i64,
+ i64, i64, i64, i64, i64, i64, i64
+}
+
+; Check that we can return up to 14 64-bit values in registers.
+define hhvmcc %rettype @return_all(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: return_all:
+; CHECK-DAG: movl $1, %ebx
+; CHECK-DAG: movl $2, %ebp
+; CHECK-DAG: movl $3, %edi
+; CHECK-DAG: movl $4, %esi
+; CHECK-DAG: movl $5, %edx
+; CHECK-DAG: movl $6, %ecx
+; CHECK-DAG: movl $7, %r8
+; CHECK-DAG: movl $8, %r9
+; CHECK-DAG: movl $9, %eax
+; CHECK-DAG: movl $10, %r10
+; CHECK-DAG: movl $11, %r11
+; CHECK-DAG: movl $12, %r13
+; CHECK-DAG: movl $13, %r14
+; CHECK-DAG: movl $14, %r15
+; CHECK: retq
+ %r1 = insertvalue %rettype zeroinitializer, i64 1, 0
+ %r2 = insertvalue %rettype %r1, i64 2, 1
+ %r3 = insertvalue %rettype %r2, i64 3, 2
+ %r4 = insertvalue %rettype %r3, i64 4, 3
+ %r5 = insertvalue %rettype %r4, i64 5, 4
+ %r6 = insertvalue %rettype %r5, i64 6, 5
+ %r7 = insertvalue %rettype %r6, i64 7, 6
+ %r8 = insertvalue %rettype %r7, i64 8, 7
+ %r9 = insertvalue %rettype %r8, i64 9, 8
+ %r10 = insertvalue %rettype %r9, i64 10, 9
+ %r11 = insertvalue %rettype %r10, i64 11, 10
+ %r12 = insertvalue %rettype %r11, i64 12, 11
+ %r13 = insertvalue %rettype %r12, i64 13, 12
+ %r14 = insertvalue %rettype %r13, i64 14, 13
+ ret %rettype %r14
+}
+
+declare hhvmcc void @return_all_tc(i64, i64, i64, i64, i64, i64, i64, i64,
+ i64, i64, i64, i64, i64, i64, i64)
+
+; Check that we can pass 15 64-bit args in registers via tail call.
+define hhvmcc void @test_return_all_tc(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: test_return_all_tc:
+; CHECK-NEXT: {{^#.*}}
+; CHECK-DAG: movl $1, %ebx
+; CHECK-DAG: movl $3, %ebp
+; CHECK-DAG: movl $4, %r15
+; CHECK-DAG: movl $5, %edi
+; CHECK-DAG: movl $6, %esi
+; CHECK-DAG: movl $7, %edx
+; CHECK-DAG: movl $8, %ecx
+; CHECK-DAG: movl $9, %r8
+; CHECK-DAG: movl $10, %r9
+; CHECK-DAG: movl $11, %eax
+; CHECK-DAG: movl $12, %r10
+; CHECK-DAG: movl $13, %r11
+; CHECK-DAG: movl $14, %r13
+; CHECK-DAG: movl $15, %r14
+; CHECK: jmp return_all_tc
+ tail call hhvmcc void @return_all_tc(
+ i64 1, i64 %b, i64 3, i64 4, i64 5, i64 6, i64 7,
+ i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15)
+ ret void
+}
+
+declare hhvmcc {i64, i64} @php_short(i64, i64, i64, i64)
+
+define hhvmcc i64 @test_php_short(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: test_php_short:
+; CHECK-NEXT: {{^#.*}}
+; CHECK-NEXT: movl $42, %r15
+; CHECK-NEXT: callq php_short
+; CHECK-NEXT: leaq (%rbp,%r12), %rbx
+; CHECK-NEXT: retq
+ %pair = call hhvmcc {i64, i64} @php_short(i64 %a, i64 %b, i64 %c, i64 42)
+ %fp = extractvalue {i64, i64} %pair, 1
+ %rv = add i64 %fp, %b
+ ret i64 %rv
+}
+
+declare hhvmcc %rettype @php_all(i64, i64, i64, i64, i64, i64, i64,
+ i64, i64, i64, i64, i64, i64, i64, i64)
+
+; Check that we can pass 15 arguments in registers.
+; Also check that %r12 (2nd arg) is not spilled.
+define hhvmcc i64 @test_php_all(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: test_php_all:
+; CHECK-NEXT: {{^#.*}}
+; CHECK-NOT: sub
+; CHECK-NOT: sub
+; CHECK-DAG: movl $1, %ebx
+; CHECK-DAG: movl $3, %ebp
+; CHECK-DAG: movl $4, %r15
+; CHECK-DAG: movl $5, %edi
+; CHECK-DAG: movl $6, %esi
+; CHECK-DAG: movl $7, %edx
+; CHECK-DAG: movl $8, %ecx
+; CHECK-DAG: movl $9, %r8
+; CHECK-DAG: movl $10, %r9
+; CHECK-DAG: movl $11, %eax
+; CHECK-DAG: movl $12, %r10
+; CHECK-DAG: movl $13, %r11
+; CHECK-DAG: movl $14, %r13
+; CHECK-DAG: movl $15, %r14
+; CHECK: callq php_all
+ %pair = call hhvmcc %rettype @php_all(
+ i64 1, i64 %b, i64 3, i64 4, i64 5, i64 6, i64 7,
+ i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15)
+ %fp = extractvalue %rettype %pair, 1
+ %rv = add i64 %fp, %b
+ ret i64 %rv
+}
+
+declare hhvmcc void @svcreq(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
+ i64, i64)
+
+define hhvmcc void @test_svcreq(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: test_svcreq:
+; CHECK-DAG: movl $42, %r10
+; CHECK-DAG: movl $1, %edi
+; CHECK-DAG: movl $2, %esi
+; CHECK-DAG: movl $3, %edx
+; CHECK-DAG: movl $4, %ecx
+; CHECK-DAG: movl $5, %r8
+; CHECK-DAG: movl $6, %r9
+; CHECK: jmp svcreq
+ tail call hhvmcc void @svcreq(i64 %a, i64 %b, i64 %c, i64 undef, i64 1,
+ i64 2, i64 3, i64 4, i64 5, i64 6, i64 undef,
+ i64 42)
+ ret void
+}
+
+declare hhvm_ccc void @helper_short(i64, i64, i64, i64, i64, i64, i64)
+
+; Pass all arguments in registers and check that we don't adjust the stack
+; for the call.
+define hhvmcc void @test_helper_short(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: test_helper_short:
+; CHECK-NOT: push
+; CHECK-NOT: sub
+; CHECK-DAG: movl $1, %edi
+; CHECK-DAG: movl $2, %esi
+; CHECK-DAG: movl $3, %edx
+; CHECK-DAG: movl $4, %ecx
+; CHECK-DAG: movl $5, %r8
+; CHECK-DAG: movl $6, %r9
+; CHECK: callq helper_short
+ call hhvm_ccc void @helper_short(i64 %c, i64 1, i64 2, i64 3, i64 4,
+ i64 5, i64 6)
+ ret void
+}
+
+declare hhvm_ccc void @helper(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64)
+
+define hhvmcc void @test_helper(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: test_helper:
+; CHECK-DAG: movl $1, %edi
+; CHECK-DAG: movl $2, %esi
+; CHECK-DAG: movl $3, %edx
+; CHECK-DAG: movl $4, %ecx
+; CHECK-DAG: movl $5, %r8
+; CHECK-DAG: movl $6, %r9
+; CHECK: callq helper
+ call hhvm_ccc void @helper(i64 %c, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6,
+ i64 7, i64 8, i64 9)
+ ret void
+}
+
+; When we enter a function with the HHVM calling convention, the stack is
+; 16-byte aligned. This means we align objects on the stack differently and
+; adjust the stack differently for calls.
+declare hhvm_ccc void @stack_helper(i64, i64, i64)
+declare hhvm_ccc void @stack_helper2(<2 x double>, i64)
+
+define hhvmcc void @test_stack_helper(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: test_stack_helper:
+; CHECK-NOT: push
+; CHECK: subq $32, %rsp
+; CHECK: movaps 16(%rsp), %xmm0
+; CHECK: callq stack_helper2
+ %t1 = alloca <2 x double>, align 16
+ %t2 = alloca i64, align 8
+ %t3 = alloca i64, align 8
+ %load3 = load i64, i64 *%t3
+ call hhvm_ccc void @stack_helper(i64 %c, i64 %load3, i64 42)
+ %load = load <2 x double>, <2 x double> *%t1
+ %load2 = load i64, i64 *%t2
+ call hhvm_ccc void @stack_helper2(<2 x double> %load, i64 %load2)
+ ret void
+}
+
+; Check that we are not adjusting the stack before calling the helper.
+define hhvmcc void @test_stack_helper2(i64 %a, i64 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: test_stack_helper2:
+; CHECK-NOT: push
+; CHECK-NOT: subq
+ call hhvm_ccc void @stack_helper(i64 %c, i64 7, i64 42)
+ ret void
+}
+
ret void
}
+declare hhvm_ccc void @hhvm_c_callee()
+
+define hhvmcc void @hhvm_caller() {
+ call hhvm_ccc void @hhvm_c_callee()
+ ret void
+}
+
declare i32 @__gxx_personality_v0(...)
EXPECT_EQ(8u, RoundUpToAlignment(5, 8));
EXPECT_EQ(24u, RoundUpToAlignment(17, 8));
EXPECT_EQ(0u, RoundUpToAlignment(~0LL, 8));
+
+ EXPECT_EQ(7u, RoundUpToAlignment(5, 8, 7));
+ EXPECT_EQ(17u, RoundUpToAlignment(17, 8, 1));
+ EXPECT_EQ(3u, RoundUpToAlignment(~0LL, 8, 3));
+ EXPECT_EQ(552u, RoundUpToAlignment(321, 255, 42));
}
}
syn keyword llvmKeyword blockaddress byval c catch cc ccc cleanup coldcc common
syn keyword llvmKeyword constant datalayout declare default define deplibs
syn keyword llvmKeyword distinct dllexport dllimport except extern_weak external
-syn keyword llvmKeyword externally_initialized fastcc filter gc global hidden
-syn keyword llvmKeyword initialexec inlinehint inreg intel_ocl_bicc inteldialect
-syn keyword llvmKeyword internal linkonce linkonce_odr localdynamic localexec
-syn keyword llvmKeyword minsize module monotonic msp430_intrcc naked nest
+syn keyword llvmKeyword externally_initialized fastcc filter gc global hhvmcc
+syn keyword llvmKeyword hhvm_ccc hidden initialexec inlinehint inreg
+syn keyword llvmKeyword intel_ocl_bicc inteldialect internal linkonce
+syn keyword llvmKeyword linkonce_odr localdynamic localexec minsize module
+syn keyword llvmKeyword monotonic msp430_intrcc musttail naked nest
syn keyword llvmKeyword noalias nocapture noimplicitfloat noinline nonlazybind
syn keyword llvmKeyword noredzone noreturn nounwind optnone optsize personality
syn keyword llvmKeyword private protected ptx_device ptx_kernel readnone