X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FCallingConvLower.cpp;h=034ffb34b9cc28d4130d8a489161704fc5670a9b;hb=2267264ceb135ddca3beee193c47496d05a74e23;hp=62ad8171a9d4f04230135a7ad6f31ed0ed8cfa8d;hpb=84023e0fbefc406a4c611d3d64a10df5d3a97dd7;p=oota-llvm.git diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp index 62ad8171a9d..034ffb34b9c 100644 --- a/lib/CodeGen/CallingConvLower.cpp +++ b/lib/CodeGen/CallingConvLower.cpp @@ -13,29 +13,35 @@ //===----------------------------------------------------------------------===// #include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/DataLayout.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/SaveAndRestore.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Target/TargetData.h" -#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetSubtargetInfo.h" using namespace llvm; -CCState::CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &tm, - SmallVector<CCValAssign, 16> &locs, LLVMContext &C) - : CallingConv(CC), IsVarArg(isVarArg), TM(tm), - TRI(*TM.getRegisterInfo()), Locs(locs), Context(C) { +CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf, + SmallVectorImpl<CCValAssign> &locs, LLVMContext &C) + : CallingConv(CC), IsVarArg(isVarArg), MF(mf), + TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C), + CallOrPrologue(Unknown) { // No stack is used. StackOffset = 0; - + + clearByValRegsInfo(); UsedRegs.resize((TRI.getNumRegs()+31)/32); } -// HandleByVal - Allocate a stack slot large enough to pass an argument by -// value. The size and alignment information of the argument is encoded in its -// parameter attribute. 
-void CCState::HandleByVal(unsigned ValNo, EVT ValVT, - EVT LocVT, CCValAssign::LocInfo LocInfo, +// HandleByVal - Allocate space on the stack large enough to pass an argument +// by value. The size and alignment information of the argument is encoded in +// its parameter attribute. +void CCState::HandleByVal(unsigned ValNo, MVT ValVT, + MVT LocVT, CCValAssign::LocInfo LocInfo, int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags) { unsigned Align = ArgFlags.getByValAlign(); @@ -44,18 +50,17 @@ void CCState::HandleByVal(unsigned ValNo, EVT ValVT, Size = MinSize; if (MinAlign > (int)Align) Align = MinAlign; + MF.getFrameInfo()->ensureMaxAlignment(Align); + MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align); + Size = unsigned(RoundUpToAlignment(Size, MinAlign)); unsigned Offset = AllocateStack(Size, Align); - addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); } /// MarkAllocated - Mark a register and all of its aliases as allocated. void CCState::MarkAllocated(unsigned Reg) { - UsedRegs[Reg/32] |= 1 << (Reg&31); - - if (const unsigned *RegAliases = TRI.getAliasSet(Reg)) - for (; (Reg = *RegAliases); ++RegAliases) - UsedRegs[Reg/32] |= 1 << (Reg&31); + for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI) + UsedRegs[*AI/32] |= 1 << (*AI&31); } /// AnalyzeFormalArguments - Analyze an array of argument values, @@ -66,14 +71,14 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins, unsigned NumArgs = Ins.size(); for (unsigned i = 0; i != NumArgs; ++i) { - EVT ArgVT = Ins[i].VT; + MVT ArgVT = Ins[i].VT; ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) { #ifndef NDEBUG dbgs() << "Formal argument #" << i << " has unhandled type " - << ArgVT.getEVTString(); + << EVT(ArgVT).getEVTString() << '\n'; #endif - llvm_unreachable(0); + llvm_unreachable(nullptr); } } } @@ -84,7 +89,7 @@ bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs, CCAssignFn Fn) { // Determine which 
register each value should be copied into. for (unsigned i = 0, e = Outs.size(); i != e; ++i) { - EVT VT = Outs[i].VT; + MVT VT = Outs[i].VT; ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) return false; @@ -98,14 +103,14 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs, CCAssignFn Fn) { // Determine which register each value should be copied into. for (unsigned i = 0, e = Outs.size(); i != e; ++i) { - EVT VT = Outs[i].VT; + MVT VT = Outs[i].VT; ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) { #ifndef NDEBUG dbgs() << "Return operand #" << i << " has unhandled type " - << VT.getEVTString(); + << EVT(VT).getEVTString() << '\n'; #endif - llvm_unreachable(0); + llvm_unreachable(nullptr); } } } @@ -116,33 +121,33 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs, CCAssignFn Fn) { unsigned NumOps = Outs.size(); for (unsigned i = 0; i != NumOps; ++i) { - EVT ArgVT = Outs[i].VT; + MVT ArgVT = Outs[i].VT; ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) { #ifndef NDEBUG dbgs() << "Call operand #" << i << " has unhandled type " - << ArgVT.getEVTString(); + << EVT(ArgVT).getEVTString() << '\n'; #endif - llvm_unreachable(0); + llvm_unreachable(nullptr); } } } /// AnalyzeCallOperands - Same as above except it takes vectors of types /// and argument flags. 
-void CCState::AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs, +void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs, SmallVectorImpl<ISD::ArgFlagsTy> &Flags, CCAssignFn Fn) { unsigned NumOps = ArgVTs.size(); for (unsigned i = 0; i != NumOps; ++i) { - EVT ArgVT = ArgVTs[i]; + MVT ArgVT = ArgVTs[i]; ISD::ArgFlagsTy ArgFlags = Flags[i]; if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) { #ifndef NDEBUG dbgs() << "Call operand #" << i << " has unhandled type " - << ArgVT.getEVTString(); + << EVT(ArgVT).getEVTString() << '\n'; #endif - llvm_unreachable(0); + llvm_unreachable(nullptr); } } } @@ -152,26 +157,93 @@ void CCState::AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs, void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins, CCAssignFn Fn) { for (unsigned i = 0, e = Ins.size(); i != e; ++i) { - EVT VT = Ins[i].VT; + MVT VT = Ins[i].VT; ISD::ArgFlagsTy Flags = Ins[i].Flags; if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) { #ifndef NDEBUG dbgs() << "Call result #" << i << " has unhandled type " - << VT.getEVTString(); + << EVT(VT).getEVTString() << '\n'; #endif - llvm_unreachable(0); + llvm_unreachable(nullptr); } } } /// AnalyzeCallResult - Same as above except it's specialized for calls which /// produce a single value. -void CCState::AnalyzeCallResult(EVT VT, CCAssignFn Fn) { +void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) { if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) { #ifndef NDEBUG dbgs() << "Call result has unhandled type " - << VT.getEVTString(); + << EVT(VT).getEVTString() << '\n'; #endif - llvm_unreachable(0); + llvm_unreachable(nullptr); + } +} + +static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) { + if (VT.isVector()) + return true; // Assume -msse-regparm might be in effect. 
+ if (!VT.isInteger()) + return false; + if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall) + return true; + return false; +} + +void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs, + MVT VT, CCAssignFn Fn) { + unsigned SavedStackOffset = StackOffset; + unsigned NumLocs = Locs.size(); + + // Set the 'inreg' flag if it is used for this calling convention. + ISD::ArgFlagsTy Flags; + if (isValueTypeInRegForCC(CallingConv, VT)) + Flags.setInReg(); + + // Allocate something of this value type repeatedly until we get assigned a + // location in memory. + bool HaveRegParm = true; + while (HaveRegParm) { + if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) { +#ifndef NDEBUG + dbgs() << "Call has unhandled type " << EVT(VT).getEVTString() + << " while computing remaining regparms\n"; +#endif + llvm_unreachable(nullptr); + } + HaveRegParm = Locs.back().isRegLoc(); + } + + // Copy all the registers from the value locations we added. + assert(NumLocs < Locs.size() && "CC assignment failed to add location"); + for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I) + if (Locs[I].isRegLoc()) + Regs.push_back(MCPhysReg(Locs[I].getLocReg())); + + // Clear the assigned values and stack memory. We leave the registers marked + // as allocated so that future queries don't return the same registers, i.e. + // when i64 and f64 are both passed in GPRs. + StackOffset = SavedStackOffset; + Locs.resize(NumLocs); +} + +void CCState::analyzeMustTailForwardedRegisters( + SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes, + CCAssignFn Fn) { + // Oftentimes calling conventions will not user register parameters for + // variadic functions, so we need to assume we're not variadic so that we get + // all the registers that might be used in a non-variadic call. 
+ SaveAndRestore<bool> SavedVarArg(IsVarArg, false); + + for (MVT RegVT : RegParmTypes) { + SmallVector<MCPhysReg, 8> RemainingRegs; + getRemainingRegParmsForType(RemainingRegs, RegVT, Fn); + const TargetLowering *TL = MF.getSubtarget().getTargetLowering(); + const TargetRegisterClass *RC = TL->getRegClassFor(RegVT); + for (MCPhysReg PReg : RemainingRegs) { + unsigned VReg = MF.addLiveIn(PReg, RC); + Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT)); + } } }