/// \brief HHVM calling convention for invoking C/C++ helpers.
HHVM_C = 82,
+ /// X86_INTR - x86 hardware interrupt context. The callee may take one or
+ /// two parameters: the first is a pointer to the hardware context frame,
+ /// and the second is the hardware error code, whose presence depends on
+ /// the interrupt vector taken. Valid for both 32- and 64-bit subtargets.
+ X86_INTR = 83,
+
/// The highest possible calling convention ID. Must be some 2^k - 1.
MaxID = 1023
};
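; Illustration only (not part of the patch): a minimal sketch of the two
; forms the x86_intrcc convention accepts, using a hypothetical %hw_frame
; layout. The handler must return void; the optional second parameter
; carries the hardware error code and must match the target pointer width.
%hw_frame = type { i32, i32, i32, i32, i32 }

define x86_intrcc void @isr_no_error_code(%hw_frame* %frame) {
  ret void
}

define x86_intrcc void @isr_with_error_code(%hw_frame* %frame, i32 %ecode) {
  ret void
}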
KEYWORD(preserve_mostcc);
KEYWORD(preserve_allcc);
KEYWORD(ghccc);
+ KEYWORD(x86_intrcc);
KEYWORD(hhvmcc);
KEYWORD(hhvm_ccc);
KEYWORD(cxx_fast_tlscc);
/// ::= 'preserve_mostcc'
/// ::= 'preserve_allcc'
/// ::= 'ghccc'
+/// ::= 'x86_intrcc'
/// ::= 'hhvmcc'
/// ::= 'hhvm_ccc'
/// ::= 'cxx_fast_tlscc'
case lltok::kw_preserve_mostcc:CC = CallingConv::PreserveMost; break;
case lltok::kw_preserve_allcc: CC = CallingConv::PreserveAll; break;
case lltok::kw_ghccc: CC = CallingConv::GHC; break;
+ case lltok::kw_x86_intrcc: CC = CallingConv::X86_INTR; break;
case lltok::kw_hhvmcc: CC = CallingConv::HHVM; break;
case lltok::kw_hhvm_ccc: CC = CallingConv::HHVM_C; break;
case lltok::kw_cxx_fast_tlscc: CC = CallingConv::CXX_FAST_TLS; break;
kw_webkit_jscc, kw_anyregcc,
kw_preserve_mostcc, kw_preserve_allcc,
kw_ghccc,
+ kw_x86_intrcc,
kw_hhvmcc, kw_hhvm_ccc,
kw_cxx_fast_tlscc,
// in the various CC lowering callbacks.
Flags.setByVal();
}
+ if (F.getCallingConv() == CallingConv::X86_INTR) {
+ // x86 interrupts pass the hardware context frame (the first parameter)
+ // by value on the stack.
+ if (Idx == 1)
+ Flags.setByVal();
+ }
if (Flags.isByVal() || Flags.isInAlloca()) {
PointerType *Ty = cast<PointerType>(I->getType());
Type *ElementTy = Ty->getElementType();
case CallingConv::X86_64_Win64: Out << "x86_64_win64cc"; break;
case CallingConv::SPIR_FUNC: Out << "spir_func"; break;
case CallingConv::SPIR_KERNEL: Out << "spir_kernel"; break;
+ case CallingConv::X86_INTR: Out << "x86_intrcc"; break;
case CallingConv::HHVM: Out << "hhvmcc"; break;
case CallingConv::HHVM_C: Out << "hhvm_ccc"; break;
}
CCDelegateTo<CC_X86_32_C>
]>;
+// X86 interrupt handlers: every argument is passed on the stack.
+def CC_X86_32_Intr : CallingConv<[
+  CCAssignToStack<4, 4>
+]>;
+
+def CC_X86_64_Intr : CallingConv<[
+  CCAssignToStack<8, 8>
+]>;
+
//===----------------------------------------------------------------------===//
// X86 Root Argument Calling Conventions
//===----------------------------------------------------------------------===//
CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,
+ CCIfCC<"CallingConv::X86_INTR", CCDelegateTo<CC_X86_32_Intr>>,
// Otherwise, drop to normal X86-32 CC
CCDelegateTo<CC_X86_32_C>
CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
CCIfCC<"CallingConv::HHVM", CCDelegateTo<CC_X86_64_HHVM>>,
CCIfCC<"CallingConv::HHVM_C", CCDelegateTo<CC_X86_64_HHVM_C>>,
+ CCIfCC<"CallingConv::X86_INTR", CCDelegateTo<CC_X86_64_Intr>>,
// Mingw64 and native Win64 use Win64 CC
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
R11, R12, R13, R14, R15, RBP,
(sequence "XMM%u", 0, 15))>;
+def CSR_32_AllRegs : CalleeSavedRegs<(add EAX, EBX, ECX, EDX, EBP, ESI,
+ EDI, ESP)>;
+def CSR_32_AllRegs_SSE : CalleeSavedRegs<(add CSR_32_AllRegs,
+ (sequence "XMM%u", 0, 7))>;
+
def CSR_64_AllRegs : CalleeSavedRegs<(add CSR_64_MostRegs, RAX, RSP,
(sequence "XMM%u", 16, 31))>;
def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX, RSP,
#include "llvm/Analysis/EHPersonalities.h"
-#include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved.
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved.
#include "llvm/IR/GlobalValue.h"
// The EH_RETURN pseudo is really removed during the MC Lowering.
return true;
}
+ case X86::IRET: {
+ // Adjust the stack to erase the error code, if present.
+ int64_t StackAdj = MBBI->getOperand(0).getImm();
+ X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
+ // Replace the pseudo with a machine iret.
+ BuildMI(MBB, MBBI, DL,
+         TII->get(STI->is64Bit() ? X86::IRET64 : X86::IRET32));
+ MBB.erase(MBBI);
+ return true;
+ }
case X86::EH_RESTORE: {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+ if (CallConv == CallingConv::X86_INTR && !Outs.empty())
+ report_fatal_error("X86 interrupts may not return any value");
+
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Outs, RetCC_X86);
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
+ X86ISD::NodeType Opcode = X86ISD::RET_FLAG;
+ if (CallConv == CallingConv::X86_INTR)
+   Opcode = X86ISD::IRET;
+ return DAG.getNode(Opcode, dl, MVT::Other, RetOps);
}
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
else
ValVT = VA.getValVT();
+ // Compute the SP offset of an interrupt parameter, reusing the slot that
+ // a return address would normally occupy.
+ int Offset = 0;
+ if (CallConv == CallingConv::X86_INTR) {
+ const X86Subtarget& Subtarget =
+ static_cast<const X86Subtarget&>(DAG.getSubtarget());
+ // X86 interrupts may take one or two arguments. Unlike a regular call,
+ // there is no return address on the stack, so the last argument lands at
+ // offset -4 (32-bit) or -8 (64-bit), and the first of two arguments lands
+ // at offset 0.
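+ // Worked example (illustrative): on a 32-bit target with two incoming
+ // arguments, the frame pointer (i == 0) gets ((0+1) % 2 - 1) * 4 == 0 and
+ // the error code (i == 1) gets ((1+1) % 2 - 1) * 4 == -4, the slot a
+ // return address would normally occupy; a lone frame argument likewise
+ // gets ((0+1) % 1 - 1) * 4 == -4.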
+ Offset = (Subtarget.is64Bit() ? 8 : 4) * ((i + 1) % Ins.size() - 1);
+ }
+
// FIXME: For now, all byval parameter objects are marked mutable. This can be
// changed with more analysis.
// In case of tail call optimization mark all arguments mutable. Since they
unsigned Bytes = Flags.getByValSize();
if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
+ // Adjust SP offset of interrupt parameter.
+ if (CallConv == CallingConv::X86_INTR) {
+ MFI->setObjectOffset(FI, Offset);
+ }
return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
} else {
int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), isImmutable);
+ // Adjust SP offset of interrupt parameter.
+ if (CallConv == CallingConv::X86_INTR) {
+ MFI->setObjectOffset(FI, Offset);
+ }
+
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
SDValue Val = DAG.getLoad(
ValVT, dl, Chain, FIN,
assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
"Var args not supported with calling convention fastcc, ghc or hipe");
+ if (CallConv == CallingConv::X86_INTR) {
+ bool isLegal = Ins.size() == 1 ||
+ (Ins.size() == 2 && ((Is64Bit && Ins[1].VT == MVT::i64) ||
+ (!Is64Bit && Ins[1].VT == MVT::i32)));
+ if (!isLegal)
+ report_fatal_error("X86 interrupts may take one or two arguments");
+ }
+
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
MF.getTarget().Options.GuaranteedTailCallOpt)) {
FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
+ } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
+ // X86 interrupts must pop the error code, if present, on return.
+ FuncInfo->setBytesToPopOnReturn(Is64Bit ? 8 : 4);
} else {
FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+ if (CallConv == CallingConv::X86_INTR)
+ report_fatal_error("X86 interrupts may not be called directly");
+
if (Attr.getValueAsString() == "true")
isTailCall = false;
case X86ISD::CMOV: return "X86ISD::CMOV";
case X86ISD::BRCOND: return "X86ISD::BRCOND";
case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
+ case X86ISD::IRET: return "X86ISD::IRET";
case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
/// 1 is the number of bytes of stack to pop.
RET_FLAG,
+ /// Return from interrupt. Like RET_FLAG, operand 1 is the number of bytes
+ /// of stack to pop.
+ IRET,
+
/// Repeat fill, corresponds to X86::REP_STOSx.
REP_STOS,
"{l}ret{|f}q\t$amt", [], IIC_RET>, Requires<[In64BitMode]>;
def LRETIW : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
"{l}ret{w|f}\t$amt", [], IIC_RET>, OpSize16;
+
+ // The machine return-from-interrupt instructions. Codegen emits the pseudo
+ // form below, which expands to include an SP adjustment (to pop a hardware
+ // error code) when necessary.
+ def IRET16 : I <0xcf, RawFrm, (outs), (ins), "iret{w}", [], IIC_IRET>,
+ OpSize16;
+ def IRET32 : I <0xcf, RawFrm, (outs), (ins), "iret{l|d}", [],
+ IIC_IRET>, OpSize32;
+ def IRET64 : RI <0xcf, RawFrm, (outs), (ins), "iretq", [],
+ IIC_IRET>, Requires<[In64BitMode]>;
+ let isCodeGenOnly = 1 in
+ def IRET : PseudoI<(outs), (ins i16imm:$adj), [(X86iret timm:$adj)]>;
+
}
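; Illustration only (not part of the patch): for a handler that receives an
; error code, the return lowers to the IRET pseudo with $adj equal to the
; pointer size, so expansion emits the stack adjustment before the hardware
; iret, e.g. "addl $4, %esp; iretl" on a 32-bit target.
define x86_intrcc void @isr_pops_error_code(i8* %frame, i32 %ecode) {
  ret void
}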
// Unconditional branches.
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
+ [SDNPHasChain, SDNPOptInGlue]>;
def X86vastart_save_xmm_regs :
SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
IIC_SYS_ENTER_EXIT>, TB;
def SYSEXIT64 :RI<0x35, RawFrm, (outs), (ins), "sysexit{q}", [],
IIC_SYS_ENTER_EXIT>, TB, Requires<[In64BitMode]>;
-
-def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", [], IIC_IRET>, OpSize16;
-def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l|d}", [], IIC_IRET>,
- OpSize32;
-def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", [], IIC_IRET>,
- Requires<[In64BitMode]>;
} // SchedRW
def : Pat<(debugtrap),
const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
+ bool HasSSE = Subtarget.hasSSE1();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
bool CallsEHReturn = MF->getMMI().callsEHReturn();
if (CallsEHReturn)
return CSR_64EHRet_SaveList;
return CSR_64_SaveList;
+ case CallingConv::X86_INTR:
+ if (Is64Bit) {
+ if (HasAVX)
+ return CSR_64_AllRegs_AVX_SaveList;
+ else
+ return CSR_64_AllRegs_SaveList;
+ } else {
+ if (HasSSE)
+ return CSR_32_AllRegs_SSE_SaveList;
+ else
+ return CSR_32_AllRegs_SaveList;
+ }
default:
break;
}
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
+ bool HasSSE = Subtarget.hasSSE1();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
if (Is64Bit)
return CSR_64_MostRegs_RegMask;
break;
- default:
- break;
case CallingConv::X86_64_Win64:
return CSR_Win64_RegMask;
case CallingConv::X86_64_SysV:
return CSR_64_RegMask;
+ case CallingConv::X86_INTR:
+ if (Is64Bit) {
+ if (HasAVX)
+ return CSR_64_AllRegs_AVX_RegMask;
+ else
+ return CSR_64_AllRegs_RegMask;
+ } else {
+ if (HasSSE)
+ return CSR_32_AllRegs_SSE_RegMask;
+ else
+ return CSR_32_AllRegs_RegMask;
+ }
+ default:
+ break;
}
// Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
--- /dev/null
+; RUN: llc -mtriple=i686-unknown-unknown < %s | FileCheck %s
+; RUN: llc -mtriple=i686-unknown-unknown -O0 < %s | FileCheck %s -check-prefix=CHECK0
+
+%struct.interrupt_frame = type { i32, i32, i32, i32, i32 }
+
+@llvm.used = appending global [3 x i8*] [i8* bitcast (void (%struct.interrupt_frame*)* @test_isr_no_ecode to i8*), i8* bitcast (void (%struct.interrupt_frame*, i32)* @test_isr_ecode to i8*), i8* bitcast (void (%struct.interrupt_frame*, i32)* @test_isr_clobbers to i8*)], section "llvm.metadata"
+
+; Spills eax, putting original esp at +4.
+; No stack adjustment if declared with no error code.
+define x86_intrcc void @test_isr_no_ecode(%struct.interrupt_frame* %frame) {
+  ; CHECK-LABEL: test_isr_no_ecode:
+  ; CHECK: pushl %eax
+  ; CHECK: movl 12(%esp), %eax
+  ; CHECK: popl %eax
+  ; CHECK: iretl
+  ; CHECK0-LABEL: test_isr_no_ecode:
+  ; CHECK0: pushl %eax
+  ; CHECK0: leal 4(%esp), %eax
+  ; CHECK0: movl 8(%eax), %eax
+  ; CHECK0: popl %eax
+  ; CHECK0: iretl
+  %pflags = getelementptr inbounds %struct.interrupt_frame, %struct.interrupt_frame* %frame, i32 0, i32 2
+  %flags = load i32, i32* %pflags, align 4
+  call void asm sideeffect "", "r"(i32 %flags)
+  ret void
+}
+
+; Spills eax and ecx, putting original esp at +8. Stack is adjusted up another
+; 4 bytes before return, popping the error code.
+define x86_intrcc void @test_isr_ecode(%struct.interrupt_frame* %frame, i32 %ecode) {
+  ; CHECK-LABEL: test_isr_ecode
+  ; CHECK: pushl %ecx
+  ; CHECK: pushl %eax
+  ; CHECK: movl 8(%esp), %eax
+  ; CHECK: movl 20(%esp), %ecx
+  ; CHECK: popl %eax
+  ; CHECK: popl %ecx
+  ; CHECK: addl $4, %esp
+  ; CHECK: iretl
+  ; CHECK0-LABEL: test_isr_ecode
+  ; CHECK0: pushl %ecx
+  ; CHECK0: pushl %eax
+  ; CHECK0: movl 8(%esp), %eax
+  ; CHECK0: leal 12(%esp), %ecx
+  ; CHECK0: movl 8(%ecx), %ecx
+  ; CHECK0: popl %eax
+  ; CHECK0: popl %ecx
+  ; CHECK0: addl $4, %esp
+  ; CHECK0: iretl
+  %pflags = getelementptr inbounds %struct.interrupt_frame, %struct.interrupt_frame* %frame, i32 0, i32 2
+  %flags = load i32, i32* %pflags, align 4
+  call x86_fastcallcc void asm sideeffect "", "r,r"(i32 %flags, i32 %ecode)
+  ret void
+}
+
+; All clobbered registers must be saved.
+define x86_intrcc void @test_isr_clobbers(%struct.interrupt_frame* %frame, i32 %ecode) {
+  call void asm sideeffect "", "~{eax},~{ebx},~{ebp}"()
+  ; CHECK-LABEL: test_isr_clobbers
+  ; CHECK: pushl %ebp
+  ; CHECK: pushl %ebx
+  ; CHECK: pushl %eax
+  ; CHECK: popl %eax
+  ; CHECK: popl %ebx
+  ; CHECK: popl %ebp
+  ; CHECK: addl $4, %esp
+  ; CHECK: iretl
+  ; CHECK0-LABEL: test_isr_clobbers
+  ; CHECK0: pushl %ebp
+  ; CHECK0: pushl %ebx
+  ; CHECK0: pushl %eax
+  ; CHECK0: popl %eax
+  ; CHECK0: popl %ebx
+  ; CHECK0: popl %ebp
+  ; CHECK0: addl $4, %esp
+  ; CHECK0: iretl
+  ret void
+}
+
--- /dev/null
+; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -O0 < %s | FileCheck %s -check-prefix=CHECK0
+
+%struct.interrupt_frame = type { i64, i64, i64, i64, i64 }
+
+@llvm.used = appending global [3 x i8*] [i8* bitcast (void (%struct.interrupt_frame*)* @test_isr_no_ecode to i8*), i8* bitcast (void (%struct.interrupt_frame*, i64)* @test_isr_ecode to i8*), i8* bitcast (void (%struct.interrupt_frame*, i64)* @test_isr_clobbers to i8*)], section "llvm.metadata"
+
+; Spills rax, putting original rsp at +8.
+; No stack adjustment if declared with no error code.
+define x86_intrcc void @test_isr_no_ecode(%struct.interrupt_frame* %frame) {
+  ; CHECK-LABEL: test_isr_no_ecode:
+  ; CHECK: pushq %rax
+  ; CHECK: movq 24(%rsp), %rax
+  ; CHECK: popq %rax
+  ; CHECK: iretq
+  ; CHECK0-LABEL: test_isr_no_ecode:
+  ; CHECK0: pushq %rax
+  ; CHECK0: leaq 8(%rsp), %rax
+  ; CHECK0: movq 16(%rax), %rax
+  ; CHECK0: popq %rax
+  ; CHECK0: iretq
+  %pflags = getelementptr inbounds %struct.interrupt_frame, %struct.interrupt_frame* %frame, i32 0, i32 2
+  %flags = load i64, i64* %pflags, align 4
+  call void asm sideeffect "", "r"(i64 %flags)
+  ret void
+}
+
+; Spills rax and rcx, putting original rsp at +16. Stack is adjusted up another
+; 8 bytes before return, popping the error code.
+define x86_intrcc void @test_isr_ecode(%struct.interrupt_frame* %frame, i64 %ecode) {
+  ; CHECK-LABEL: test_isr_ecode
+  ; CHECK: pushq %rax
+  ; CHECK: pushq %rcx
+  ; CHECK: movq 16(%rsp), %rax
+  ; CHECK: movq 40(%rsp), %rcx
+  ; CHECK: popq %rcx
+  ; CHECK: popq %rax
+  ; CHECK: addq $8, %rsp
+  ; CHECK: iretq
+  ; CHECK0-LABEL: test_isr_ecode
+  ; CHECK0: pushq %rax
+  ; CHECK0: pushq %rcx
+  ; CHECK0: movq 16(%rsp), %rax
+  ; CHECK0: leaq 24(%rsp), %rcx
+  ; CHECK0: movq 16(%rcx), %rcx
+  ; CHECK0: popq %rcx
+  ; CHECK0: popq %rax
+  ; CHECK0: addq $8, %rsp
+  ; CHECK0: iretq
+  %pflags = getelementptr inbounds %struct.interrupt_frame, %struct.interrupt_frame* %frame, i32 0, i32 2
+  %flags = load i64, i64* %pflags, align 4
+  call void asm sideeffect "", "r,r"(i64 %flags, i64 %ecode)
+  ret void
+}
+
+; All clobbered registers must be saved.
+define x86_intrcc void @test_isr_clobbers(%struct.interrupt_frame* %frame, i64 %ecode) {
+  call void asm sideeffect "", "~{rax},~{rbx},~{rbp},~{r11},~{xmm0}"()
+  ; CHECK-LABEL: test_isr_clobbers
+  ; CHECK: pushq %rax
+  ; CHECK: pushq %r11
+  ; CHECK: pushq %rbp
+  ; CHECK: pushq %rbx
+  ; CHECK: movaps %xmm0
+  ; CHECK: movaps {{.*}}%xmm0
+  ; CHECK: popq %rbx
+  ; CHECK: popq %rbp
+  ; CHECK: popq %r11
+  ; CHECK: popq %rax
+  ; CHECK: addq $8, %rsp
+  ; CHECK: iretq
+  ; CHECK0-LABEL: test_isr_clobbers
+  ; CHECK0: pushq %rax
+  ; CHECK0: pushq %r11
+  ; CHECK0: pushq %rbp
+  ; CHECK0: pushq %rbx
+  ; CHECK0: movaps %xmm0
+  ; CHECK0: movaps {{.*}}%xmm0
+  ; CHECK0: popq %rbx
+  ; CHECK0: popq %rbp
+  ; CHECK0: popq %r11
+  ; CHECK0: popq %rax
+  ; CHECK0: addq $8, %rsp
+  ; CHECK0: iretq
+  ret void
+}
\ No newline at end of file