bool FastLowerArguments() override;
bool FastLowerCall(CallLoweringInfo &CLI) override;
+ bool FastLowerIntrinsicCall(const IntrinsicInst *II) override;
#include "X86GenFastISel.inc"
bool X86SelectFPExt(const Instruction *I);
bool X86SelectFPTrunc(const Instruction *I);
- bool X86VisitIntrinsicCall(const IntrinsicInst &I);
- bool X86SelectCall(const Instruction *I);
-
- bool DoSelectCall(const Instruction *I, const char *MemIntName);
-
const X86InstrInfo *getInstrInfo() const {
- return getTargetMachine()->getInstrInfo();
+ return getTargetMachine()->getSubtargetImpl()->getInstrInfo();
}
const X86TargetMachine *getTargetMachine() const {
return static_cast<const X86TargetMachine *>(&TM);
bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
+ unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
+ unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
+ unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
unsigned TargetMaterializeConstant(const Constant *C) override;
unsigned TargetMaterializeAlloca(const AllocaInst *C) override;
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;
- CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
- I->getContext());
+ CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
CCInfo.AnalyzeReturn(Outs, RetCC_X86);
const Value *RV = Ret->getOperand(0);
// The calling-convention tables for x87 returns don't tell
// the whole story.
- if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1)
+ if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
return false;
unsigned SrcReg = Reg + VA.getValNo();
return true;
}
-static bool isCommutativeIntrinsic(IntrinsicInst const &I) {
- switch (I.getIntrinsicID()) {
+static bool isCommutativeIntrinsic(IntrinsicInst const *II) {
+ switch (II->getIntrinsicID()) {
 case Intrinsic::sadd_with_overflow:
 case Intrinsic::uadd_with_overflow:
 case Intrinsic::smul_with_overflow:
 case Intrinsic::umul_with_overflow:
 return true;
 default:
 return false;
 }
}

-bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
+bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
// FIXME: Handle more intrinsics.
- switch (I.getIntrinsicID()) {
+ switch (II->getIntrinsicID()) {
default: return false;
case Intrinsic::frameaddress: {
- Type *RetTy = I.getCalledFunction()->getReturnType();
+ Type *RetTy = II->getCalledFunction()->getReturnType();
MVT VT;
if (!isTypeLegal(RetTy, VT))
MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
MFI->setFrameAddressIsTaken(true);
- const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
+ const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
+ TM.getSubtargetImpl()->getRegisterInfo());
unsigned FrameReg = RegInfo->getFrameRegister(*(FuncInfo.MF));
assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
(FrameReg == X86::EBP && VT == MVT::i32)) &&
// movq (%rax), %rax
// ...
unsigned DestReg;
- unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
+ unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
while (Depth--) {
DestReg = createResultReg(RC);
addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
SrcReg = DestReg;
}
- UpdateValueMap(&I, SrcReg);
+ UpdateValueMap(II, SrcReg);
return true;
}
case Intrinsic::memcpy: {
- const MemCpyInst &MCI = cast<MemCpyInst>(I);
+ const MemCpyInst *MCI = cast<MemCpyInst>(II);
// Don't handle volatile or variable length memcpys.
- if (MCI.isVolatile())
+ if (MCI->isVolatile())
return false;
- if (isa<ConstantInt>(MCI.getLength())) {
+ if (isa<ConstantInt>(MCI->getLength())) {
// Small memcpy's are common enough that we want to do them
// without a call if possible.
- uint64_t Len = cast<ConstantInt>(MCI.getLength())->getZExtValue();
+ uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
if (IsMemcpySmall(Len)) {
X86AddressMode DestAM, SrcAM;
- if (!X86SelectAddress(MCI.getRawDest(), DestAM) ||
- !X86SelectAddress(MCI.getRawSource(), SrcAM))
+ if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
+ !X86SelectAddress(MCI->getRawSource(), SrcAM))
return false;
TryEmitSmallMemcpy(DestAM, SrcAM, Len);
return true;
}
unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
- if (!MCI.getLength()->getType()->isIntegerTy(SizeWidth))
+ if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
return false;
- if (MCI.getSourceAddressSpace() > 255 || MCI.getDestAddressSpace() > 255)
+ if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
return false;
- return DoSelectCall(&I, "memcpy");
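+ // Lower to the memcpy libcall, skipping the last two intrinsic operands,
+ // which are not passed to the underlying function.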
+ return LowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
}
case Intrinsic::memset: {
- const MemSetInst &MSI = cast<MemSetInst>(I);
+ const MemSetInst *MSI = cast<MemSetInst>(II);
- if (MSI.isVolatile())
+ if (MSI->isVolatile())
return false;
unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
- if (!MSI.getLength()->getType()->isIntegerTy(SizeWidth))
+ if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
return false;
- if (MSI.getDestAddressSpace() > 255)
+ if (MSI->getDestAddressSpace() > 255)
return false;
- return DoSelectCall(&I, "memset");
+ return LowerCallTo(II, "memset", II->getNumArgOperands() - 2);
}
case Intrinsic::stackprotector: {
// Emit code to store the stack guard onto the stack.
EVT PtrTy = TLI.getPointerTy();
- const Value *Op1 = I.getArgOperand(0); // The guard's value.
- const AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
+ const Value *Op1 = II->getArgOperand(0); // The guard's value.
+ const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));
MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);
return true;
}
case Intrinsic::dbg_declare: {
- const DbgDeclareInst *DI = cast<DbgDeclareInst>(&I);
+ const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
X86AddressMode AM;
assert(DI->getAddress() && "Null address should be checked earlier!");
if (!X86SelectAddress(DI->getAddress(), AM))
if (!Subtarget->hasSSE1())
return false;
- Type *RetTy = I.getCalledFunction()->getReturnType();
+ Type *RetTy = II->getCalledFunction()->getReturnType();
MVT VT;
if (!isTypeLegal(RetTy, VT))
case MVT::f64: Opc = SqrtOpc[1][HasAVX]; RC = &X86::FR64RegClass; break;
}
- const Value *SrcVal = I.getArgOperand(0);
+ const Value *SrcVal = II->getArgOperand(0);
unsigned SrcReg = getRegForValue(SrcVal);
if (SrcReg == 0)
MIB.addReg(SrcReg);
- UpdateValueMap(&I, ResultReg);
+ UpdateValueMap(II, ResultReg);
return true;
}
case Intrinsic::sadd_with_overflow:
case Intrinsic::umul_with_overflow: {
// This implements the basic lowering of the xalu with overflow intrinsics
// into add/sub/mul followed by either seto or setb.
- const Function *Callee = I.getCalledFunction();
+ const Function *Callee = II->getCalledFunction();
auto *Ty = cast<StructType>(Callee->getReturnType());
Type *RetTy = Ty->getTypeAtIndex(0U);
Type *CondTy = Ty->getTypeAtIndex(1);
if (VT < MVT::i8 || VT > MVT::i64)
return false;
- const Value *LHS = I.getArgOperand(0);
- const Value *RHS = I.getArgOperand(1);
+ const Value *LHS = II->getArgOperand(0);
+ const Value *RHS = II->getArgOperand(1);
// Canonicalize immediate to the RHS.
if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
- isCommutativeIntrinsic(I))
+ isCommutativeIntrinsic(II))
std::swap(LHS, RHS);
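+ // An add/sub of constant 1 can be lowered to INC/DEC below. INC and DEC
+ // set OF (tested with SETO) but leave CF untouched, so this shortcut only
+ // applies to the signed with-overflow intrinsics.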
+ bool UseIncDec = false;
+ if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isOne())
+ UseIncDec = true;
+
unsigned BaseOpc, CondOpc;
- switch (I.getIntrinsicID()) {
+ switch (II->getIntrinsicID()) {
default: llvm_unreachable("Unexpected intrinsic!");
case Intrinsic::sadd_with_overflow:
- BaseOpc = ISD::ADD; CondOpc = X86::SETOr; break;
+ BaseOpc = UseIncDec ? unsigned(X86ISD::INC) : unsigned(ISD::ADD);
+ CondOpc = X86::SETOr;
+ break;
case Intrinsic::uadd_with_overflow:
BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break;
case Intrinsic::ssub_with_overflow:
- BaseOpc = ISD::SUB; CondOpc = X86::SETOr; break;
+ BaseOpc = UseIncDec ? unsigned(X86ISD::DEC) : unsigned(ISD::SUB);
+ CondOpc = X86::SETOr;
+ break;
case Intrinsic::usub_with_overflow:
BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break;
case Intrinsic::smul_with_overflow:
unsigned ResultReg = 0;
// Check if we have an immediate version.
- if (auto const *C = dyn_cast<ConstantInt>(RHS)) {
- ResultReg = FastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
- C->getZExtValue());
+ if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
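+ // INC/DEC opcodes, indexed by [Is64BitMode][IsDec][i8/i16/i32/i64]; the
+ // 16- and 32-bit forms differ between 32-bit and 64-bit mode.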
+ static const unsigned Opc[2][2][4] = {
+ { { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
+ { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r } },
+ { { X86::INC8r, X86::INC64_16r, X86::INC64_32r, X86::INC64r },
+ { X86::DEC8r, X86::DEC64_16r, X86::DEC64_32r, X86::DEC64r } }
+ };
+
+ if (BaseOpc == X86ISD::INC || BaseOpc == X86ISD::DEC) {
+ ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ bool Is64Bit = Subtarget->is64Bit();
+ bool IsDec = BaseOpc == X86ISD::DEC;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc[Is64Bit][IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
+ .addReg(LHSReg, getKillRegState(LHSIsKill));
+ } else
+ ResultReg = FastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
+ CI->getZExtValue());
}
unsigned RHSReg;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc),
ResultReg2);
- UpdateValueMap(&I, ResultReg, 2);
+ UpdateValueMap(II, ResultReg, 2);
return true;
}
case Intrinsic::x86_sse_cvttss2si:
case Intrinsic::x86_sse2_cvttsd2si:
case Intrinsic::x86_sse2_cvttsd2si64: {
bool IsInputDouble;
- switch (I.getIntrinsicID()) {
+ switch (II->getIntrinsicID()) {
default: llvm_unreachable("Unexpected intrinsic.");
case Intrinsic::x86_sse_cvttss2si:
case Intrinsic::x86_sse_cvttss2si64:
break;
}
- Type *RetTy = I.getCalledFunction()->getReturnType();
+ Type *RetTy = II->getCalledFunction()->getReturnType();
MVT VT;
if (!isTypeLegal(RetTy, VT))
return false;
}
// Check if we can fold insertelement instructions into the convert.
- const Value *Op = I.getArgOperand(0);
+ const Value *Op = II->getArgOperand(0);
while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
const Value *Index = IE->getOperand(2);
if (!isa<ConstantInt>(Index))
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(Reg);
- UpdateValueMap(&I, ResultReg);
+ UpdateValueMap(II, ResultReg);
return true;
}
}
return true;
}
-bool X86FastISel::X86SelectCall(const Instruction *I) {
- const CallInst *CI = cast<CallInst>(I);
- const Value *Callee = CI->getCalledValue();
-
- // Can't handle inline asm yet.
- if (isa<InlineAsm>(Callee))
- return false;
-
- // Handle intrinsic calls.
- if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
- return X86VisitIntrinsicCall(*II);
-
- // Allow SelectionDAG isel to handle tail calls.
- if (cast<CallInst>(I)->isTailCall())
- return false;
-
- return DoSelectCall(I, nullptr);
-}
-
static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget,
CallingConv::ID CC,
ImmutableCallSite *CS) {
return 4;
}
-// Select either a call, or an llvm.memcpy/memmove/memset intrinsic
-bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
- const CallInst *CI = cast<CallInst>(I);
- const Value *Callee = CI->getCalledValue();
-
- // Handle only C and fastcc calling conventions for now.
- ImmutableCallSite CS(CI);
- CallingConv::ID CC = CS.getCallingConv();
- bool isWin64 = Subtarget->isCallingConvWin64(CC);
- if (CC != CallingConv::C && CC != CallingConv::Fast &&
- CC != CallingConv::X86_FastCall && CC != CallingConv::X86_64_Win64 &&
- CC != CallingConv::X86_64_SysV)
- return false;
-
- // fastcc with -tailcallopt is intended to provide a guaranteed
- // tail call optimization. Fastisel doesn't know how to do that.
- if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
- return false;
-
- PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
- FunctionType *FTy = cast<FunctionType>(PT->getElementType());
- bool isVarArg = FTy->isVarArg();
-
- // Don't know how to handle Win64 varargs yet. Nothing special needed for
- // x86-32. Special handling for x86-64 is implemented.
- if (isVarArg && isWin64)
- return false;
-
- // Don't know about inalloca yet.
- if (CS.hasInAllocaArgument())
- return false;
-
- // Fast-isel doesn't know about callee-pop yet.
- if (X86::isCalleePop(CC, Subtarget->is64Bit(), isVarArg,
- TM.Options.GuaranteedTailCallOpt))
- return false;
-
- // Check whether the function can return without sret-demotion.
- SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(I->getType(), CS.getAttributes(), Outs, TLI);
- bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
- *FuncInfo.MF, FTy->isVarArg(),
- Outs, FTy->getContext());
- if (!CanLowerReturn)
- return false;
-
- // Materialize callee address in a register. FIXME: GV address can be
- // handled with a CALLpcrel32 instead.
- X86AddressMode CalleeAM;
- if (!X86SelectCallAddress(Callee, CalleeAM))
- return false;
- unsigned CalleeOp = 0;
- const GlobalValue *GV = nullptr;
- if (CalleeAM.GV != nullptr) {
- GV = CalleeAM.GV;
- } else if (CalleeAM.Base.Reg != 0) {
- CalleeOp = CalleeAM.Base.Reg;
- } else
- return false;
-
- // Deal with call operands first.
- SmallVector<const Value *, 8> ArgVals;
- SmallVector<unsigned, 8> Args;
- SmallVector<MVT, 8> ArgVTs;
- SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
- unsigned arg_size = CS.arg_size();
- Args.reserve(arg_size);
- ArgVals.reserve(arg_size);
- ArgVTs.reserve(arg_size);
- ArgFlags.reserve(arg_size);
- for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
- i != e; ++i) {
- // If we're lowering a mem intrinsic instead of a regular call, skip the
- // last two arguments, which should not passed to the underlying functions.
- if (MemIntName && e-i <= 2)
- break;
- Value *ArgVal = *i;
- ISD::ArgFlagsTy Flags;
- unsigned AttrInd = i - CS.arg_begin() + 1;
- if (CS.paramHasAttr(AttrInd, Attribute::SExt))
- Flags.setSExt();
- if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
- Flags.setZExt();
-
- if (CS.paramHasAttr(AttrInd, Attribute::ByVal)) {
- PointerType *Ty = cast<PointerType>(ArgVal->getType());
- Type *ElementTy = Ty->getElementType();
- unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
- unsigned FrameAlign = CS.getParamAlignment(AttrInd);
- if (!FrameAlign)
- FrameAlign = TLI.getByValTypeAlignment(ElementTy);
- Flags.setByVal();
- Flags.setByValSize(FrameSize);
- Flags.setByValAlign(FrameAlign);
- if (!IsMemcpySmall(FrameSize))
- return false;
- }
-
- if (CS.paramHasAttr(AttrInd, Attribute::InReg))
- Flags.setInReg();
- if (CS.paramHasAttr(AttrInd, Attribute::Nest))
- Flags.setNest();
-
- // If this is an i1/i8/i16 argument, promote to i32 to avoid an extra
- // instruction. This is safe because it is common to all fastisel supported
- // calling conventions on x86.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(ArgVal)) {
- if (CI->getBitWidth() == 1 || CI->getBitWidth() == 8 ||
- CI->getBitWidth() == 16) {
- if (Flags.isSExt())
- ArgVal = ConstantExpr::getSExt(CI,Type::getInt32Ty(CI->getContext()));
- else
- ArgVal = ConstantExpr::getZExt(CI,Type::getInt32Ty(CI->getContext()));
- }
- }
-
- unsigned ArgReg;
-
- // Passing bools around ends up doing a trunc to i1 and passing it.
- // Codegen this as an argument + "and 1".
- if (ArgVal->getType()->isIntegerTy(1) && isa<TruncInst>(ArgVal) &&
- cast<TruncInst>(ArgVal)->getParent() == I->getParent() &&
- ArgVal->hasOneUse()) {
- ArgVal = cast<TruncInst>(ArgVal)->getOperand(0);
- ArgReg = getRegForValue(ArgVal);
- if (ArgReg == 0) return false;
-
- MVT ArgVT;
- if (!isTypeLegal(ArgVal->getType(), ArgVT)) return false;
-
- ArgReg = FastEmit_ri(ArgVT, ArgVT, ISD::AND, ArgReg,
- ArgVal->hasOneUse(), 1);
- } else {
- ArgReg = getRegForValue(ArgVal);
- }
-
- if (ArgReg == 0) return false;
-
- Type *ArgTy = ArgVal->getType();
- MVT ArgVT;
- if (!isTypeLegal(ArgTy, ArgVT))
- return false;
- if (ArgVT == MVT::x86mmx)
- return false;
- unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
- Flags.setOrigAlign(OriginalAlignment);
-
- Args.push_back(ArgReg);
- ArgVals.push_back(ArgVal);
- ArgVTs.push_back(ArgVT);
- ArgFlags.push_back(Flags);
- }
-
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs,
- I->getParent()->getContext());
-
- // Allocate shadow area for Win64
- if (isWin64)
- CCInfo.AllocateStack(32, 8);
-
- CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86);
-
- // Get a count of how many bytes are to be pushed on the stack.
- unsigned NumBytes = CCInfo.getNextStackOffset();
-
- // Issue CALLSEQ_START
- unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
- .addImm(NumBytes);
-
- // Process argument: walk the register/memloc assignments, inserting
- // copies / loads.
- SmallVector<unsigned, 4> RegArgs;
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
- unsigned Arg = Args[VA.getValNo()];
- EVT ArgVT = ArgVTs[VA.getValNo()];
-
- // Promote the value if needed.
- switch (VA.getLocInfo()) {
- case CCValAssign::Full: break;
- case CCValAssign::SExt: {
- assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
- "Unexpected extend");
- bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
- assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
- ArgVT = VA.getLocVT();
- break;
- }
- case CCValAssign::ZExt: {
- assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
- "Unexpected extend");
- bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
- assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
- ArgVT = VA.getLocVT();
- break;
- }
- case CCValAssign::AExt: {
- assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
- "Unexpected extend");
- bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
- if (!Emitted)
- Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
- if (!Emitted)
- Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
-
- assert(Emitted && "Failed to emit a aext!"); (void)Emitted;
- ArgVT = VA.getLocVT();
- break;
- }
- case CCValAssign::BCvt: {
- unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(),
- ISD::BITCAST, Arg, /*TODO: Kill=*/false);
- assert(BC != 0 && "Failed to emit a bitcast!");
- Arg = BC;
- ArgVT = VA.getLocVT();
- break;
- }
- case CCValAssign::VExt:
- // VExt has not been implemented, so this should be impossible to reach
- // for now. However, fallback to Selection DAG isel once implemented.
- return false;
- case CCValAssign::Indirect:
- // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
- // support this.
- return false;
- case CCValAssign::FPExt:
- llvm_unreachable("Unexpected loc info!");
- }
-
- if (VA.isRegLoc()) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
- RegArgs.push_back(VA.getLocReg());
- } else {
- unsigned LocMemOffset = VA.getLocMemOffset();
- X86AddressMode AM;
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo*>(
- getTargetMachine()->getRegisterInfo());
- AM.Base.Reg = RegInfo->getStackRegister();
- AM.Disp = LocMemOffset;
- const Value *ArgVal = ArgVals[VA.getValNo()];
- ISD::ArgFlagsTy Flags = ArgFlags[VA.getValNo()];
-
- if (Flags.isByVal()) {
- X86AddressMode SrcAM;
- SrcAM.Base.Reg = Arg;
- bool Res = TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize());
- assert(Res && "memcpy length already checked!"); (void)Res;
- } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
- // If this is a really simple value, emit this with the Value* version
- // of X86FastEmitStore. If it isn't simple, we don't want to do this,
- // as it can cause us to reevaluate the argument.
- if (!X86FastEmitStore(ArgVT, ArgVal, AM))
- return false;
- } else {
- if (!X86FastEmitStore(ArgVT, Arg, /*ValIsKill=*/false, AM))
- return false;
- }
- }
- }
-
- // ELF / PIC requires GOT in the EBX register before function calls via PLT
- // GOT pointer.
- if (Subtarget->isPICStyleGOT()) {
- unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
- }
-
- if (Subtarget->is64Bit() && isVarArg && !isWin64) {
- // Count the number of XMM registers allocated.
- static const MCPhysReg XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
- };
- unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
- X86::AL).addImm(NumXMMRegs);
- }
-
- // Issue the call.
- MachineInstrBuilder MIB;
- if (CalleeOp) {
- // Register-indirect call.
- unsigned CallOpc;
- if (Subtarget->is64Bit())
- CallOpc = X86::CALL64r;
- else
- CallOpc = X86::CALL32r;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))
- .addReg(CalleeOp);
-
- } else {
- // Direct call.
- assert(GV && "Not a direct call");
- unsigned CallOpc;
- if (Subtarget->is64Bit())
- CallOpc = X86::CALL64pcrel32;
- else
- CallOpc = X86::CALLpcrel32;
-
- // See if we need any target-specific flags on the GV operand.
- unsigned char OpFlags = 0;
-
- // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
- // external symbols most go through the PLT in PIC mode. If the symbol
- // has hidden or protected visibility, or if it is static or local, then
- // we don't need to use the PLT - we can directly call it.
- if (Subtarget->isTargetELF() &&
- TM.getRelocationModel() == Reloc::PIC_ &&
- GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
- OpFlags = X86II::MO_PLT;
- } else if (Subtarget->isPICStyleStubAny() &&
- (GV->isDeclaration() || GV->isWeakForLinker()) &&
- (!Subtarget->getTargetTriple().isMacOSX() ||
- Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
- // PC-relative references to external symbols should go through $stub,
- // unless we're building with the leopard linker or later, which
- // automatically synthesizes these stubs.
- OpFlags = X86II::MO_DARWIN_STUB;
- }
-
-
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
- if (MemIntName)
- MIB.addExternalSymbol(MemIntName, OpFlags);
- else
- MIB.addGlobalAddress(GV, 0, OpFlags);
- }
-
- // Add a register mask with the call-preserved registers.
- // Proper defs for return values will be added by setPhysRegsDeadExcept().
- MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
-
- // Add an implicit use GOT pointer in EBX.
- if (Subtarget->isPICStyleGOT())
- MIB.addReg(X86::EBX, RegState::Implicit);
-
- if (Subtarget->is64Bit() && isVarArg && !isWin64)
- MIB.addReg(X86::AL, RegState::Implicit);
-
- // Add implicit physical register uses to the call.
- for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
- MIB.addReg(RegArgs[i], RegState::Implicit);
-
- // Issue CALLSEQ_END
- unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
- unsigned NumBytesCallee = computeBytesPoppedByCallee(Subtarget, CC, &CS);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
- .addImm(NumBytes).addImm(NumBytesCallee);
-
- // Build info for return calling conv lowering code.
- // FIXME: This is practically a copy-paste from TargetLowering::LowerCallTo.
- SmallVector<ISD::InputArg, 32> Ins;
- SmallVector<EVT, 4> RetTys;
- ComputeValueVTs(TLI, I->getType(), RetTys);
- for (unsigned i = 0, e = RetTys.size(); i != e; ++i) {
- EVT VT = RetTys[i];
- MVT RegisterVT = TLI.getRegisterType(I->getParent()->getContext(), VT);
- unsigned NumRegs = TLI.getNumRegisters(I->getParent()->getContext(), VT);
- for (unsigned j = 0; j != NumRegs; ++j) {
- ISD::InputArg MyFlags;
- MyFlags.VT = RegisterVT;
- MyFlags.Used = !CS.getInstruction()->use_empty();
- if (CS.paramHasAttr(0, Attribute::SExt))
- MyFlags.Flags.setSExt();
- if (CS.paramHasAttr(0, Attribute::ZExt))
- MyFlags.Flags.setZExt();
- if (CS.paramHasAttr(0, Attribute::InReg))
- MyFlags.Flags.setInReg();
- Ins.push_back(MyFlags);
- }
- }
-
- // Now handle call return values.
- SmallVector<unsigned, 4> UsedRegs;
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCRetInfo(CC, false, *FuncInfo.MF, TM, RVLocs,
- I->getParent()->getContext());
- unsigned ResultReg = FuncInfo.CreateRegs(I->getType());
- CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- EVT CopyVT = RVLocs[i].getValVT();
- unsigned CopyReg = ResultReg + i;
-
- // If this is a call to a function that returns an fp value on the x87 fp
- // stack, but where we prefer to use the value in xmm registers, copy it
- // out as F80 and use a truncate to move it from fp stack reg to xmm reg.
- if ((RVLocs[i].getLocReg() == X86::ST0 ||
- RVLocs[i].getLocReg() == X86::ST1)) {
- if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
- CopyVT = MVT::f80;
- CopyReg = createResultReg(&X86::RFP80RegClass);
- }
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(X86::FpPOP_RETVAL), CopyReg);
- } else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(TargetOpcode::COPY),
- CopyReg).addReg(RVLocs[i].getLocReg());
- UsedRegs.push_back(RVLocs[i].getLocReg());
- }
-
- if (CopyVT != RVLocs[i].getValVT()) {
- // Round the F80 the right size, which also moves to the appropriate xmm
- // register. This is accomplished by storing the F80 value in memory and
- // then loading it back. Ewww...
- EVT ResVT = RVLocs[i].getValVT();
- unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
- unsigned MemSize = ResVT.getSizeInBits()/8;
- int FI = MFI.CreateStackObject(MemSize, MemSize, false);
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(Opc)), FI)
- .addReg(CopyReg);
- Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(Opc), ResultReg + i), FI);
- }
- }
-
- if (RVLocs.size())
- UpdateValueMap(I, ResultReg, RVLocs.size());
-
- // Set all unused physreg defs as dead.
- static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
-
- return true;
-}
-
bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
auto &OutVals = CLI.OutVals;
auto &OutFlags = CLI.OutFlags;
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, ArgLocs,
- CLI.RetTy->getContext());
+ CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());
// Allocate shadow area for Win64
if (IsWin64)
.addImm(NumBytes);
// Walk the register/memloc assignments, inserting copies/loads.
- const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo *>(TM.getRegisterInfo());
+ const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
+ TM.getSubtargetImpl()->getRegisterInfo());
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign const &VA = ArgLocs[i];
const Value *ArgVal = OutVals[VA.getValNo()];
OutRegs.push_back(VA.getLocReg());
} else {
assert(VA.isMemLoc());
+
+ // Don't emit stores for undef values.
+ if (isa<UndefValue>(ArgVal))
+ continue;
+
unsigned LocMemOffset = VA.getLocMemOffset();
X86AddressMode AM;
AM.Base.Reg = RegInfo->getStackRegister();
MachineInstrBuilder MIB;
if (CalleeOp) {
// Register-indirect call.
- unsigned CallOpc = Is64Bit ? X86::CALL64r : CallOpc = X86::CALL32r;
+ unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))
.addReg(CalleeOp);
} else {
// Now handle call return values.
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, TM, RVLocs,
+ CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
CLI.RetTy->getContext());
CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
report_fatal_error("SSE register return with SSE disabled");
}
- // If this is a call to a function that returns an fp value on the floating
- // point stack, we must guarantee the value is popped from the stack, so
- // a COPY is not good enough - the copy instruction may be eliminated if the
- // return value is not used. We use the FpPOP_RETVAL instruction instead.
- if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
- // If we prefer to use the value in xmm registers, copy it out as f80 and
- // use a truncate to move it from fp stack reg to xmm reg.
- if (isScalarFPTypeInSSEReg(VA.getValVT())) {
- CopyVT = MVT::f80;
- CopyReg = createResultReg(&X86::RFP80RegClass);
- }
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(X86::FpPOP_RETVAL), CopyReg);
-
- // Round the f80 to the right size, which also moves it to the appropriate
- // xmm register. This is accomplished by storing the f80 value in memory
- // and then loading it back.
- if (CopyVT != VA.getValVT()) {
- EVT ResVT = VA.getValVT();
- unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
- unsigned MemSize = ResVT.getSizeInBits()/8;
- int FI = MFI.CreateStackObject(MemSize, MemSize, false);
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(Opc)), FI)
- .addReg(CopyReg);
- Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(Opc), ResultReg + i), FI);
- }
- } else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.getLocReg());
- InRegs.push_back(VA.getLocReg());
+ // If we prefer to use the value in xmm registers, copy it out as f80 and
+ // use a truncate to move it from fp stack reg to xmm reg.
+ if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
+ isScalarFPTypeInSSEReg(VA.getValVT())) {
+ CopyVT = MVT::f80;
+ CopyReg = createResultReg(&X86::RFP80RegClass);
+ }
+
+ // Copy out the result.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.getLocReg());
+ InRegs.push_back(VA.getLocReg());
+
+ // Round the f80 to the right size, which also moves it to the appropriate
+ // xmm register. This is accomplished by storing the f80 value in memory
+ // and then loading it back.
+ if (CopyVT != VA.getValVT()) {
+ EVT ResVT = VA.getValVT();
+ unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
+ unsigned MemSize = ResVT.getSizeInBits()/8;
+ int FI = MFI.CreateStackObject(MemSize, MemSize, false);
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc)), FI)
+ .addReg(CopyReg);
+ Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg + i), FI);
}
}
return X86SelectZExt(I);
case Instruction::Br:
return X86SelectBranch(I);
- case Instruction::Call:
- return X86SelectCall(I);
case Instruction::LShr:
case Instruction::AShr:
case Instruction::Shl:
return false;
}
-unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
- MVT VT;
- if (!isTypeLegal(C->getType(), VT))
+unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
+ if (VT > MVT::i64)
return 0;
+ uint64_t Imm = CI->getZExtValue();
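+ // Zero is materialized with the MOV32r0 pseudo and then narrowed or
+ // widened to the requested width below.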
+ if (Imm == 0) {
+ unsigned SrcReg = FastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type");
+ case MVT::i1:
+ case MVT::i8:
+ return FastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
+ X86::sub_8bit);
+ case MVT::i16:
+ return FastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
+ X86::sub_16bit);
+ case MVT::i32:
+ return SrcReg;
+ case MVT::i64: {
+ unsigned ResultReg = createResultReg(&X86::GR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
+ .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
+ return ResultReg;
+ }
+ }
+ }
+
+ unsigned Opc = 0;
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type");
+ case MVT::i1: VT = MVT::i8; // fall-through
+ case MVT::i8: Opc = X86::MOV8ri; break;
+ case MVT::i16: Opc = X86::MOV16ri; break;
+ case MVT::i32: Opc = X86::MOV32ri; break;
+ case MVT::i64: {
+ if (isUInt<32>(Imm))
+ Opc = X86::MOV32ri;
+ else if (isInt<32>(Imm))
+ Opc = X86::MOV64ri32;
+ else
+ Opc = X86::MOV64ri;
+ break;
+ }
+ }
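+ // A 64-bit constant that fits in 32 unsigned bits can be built with a
+ // 32-bit move; writing the 32-bit subregister zeroes the upper half, so a
+ // SUBREG_TO_REG is enough to widen the result.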
+ if (VT == MVT::i64 && Opc == X86::MOV32ri) {
+ unsigned SrcReg = FastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
+ unsigned ResultReg = createResultReg(&X86::GR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
+ .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
+ return ResultReg;
+ }
+ return FastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
+}
+
+unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
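+ // All-zero FP constants have their own materialization path and don't
+ // need a constant-pool load.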
+ if (CFP->isNullValue())
+ return TargetMaterializeFloatZero(CFP);
+
// Can't handle alternate code models yet.
if (TM.getCodeModel() != CodeModel::Small)
return 0;
const TargetRegisterClass *RC = nullptr;
switch (VT.SimpleTy) {
default: return 0;
- case MVT::i8:
- Opc = X86::MOV8rm;
- RC = &X86::GR8RegClass;
- break;
- case MVT::i16:
- Opc = X86::MOV16rm;
- RC = &X86::GR16RegClass;
- break;
- case MVT::i32:
- Opc = X86::MOV32rm;
- RC = &X86::GR32RegClass;
- break;
- case MVT::i64:
- // Must be in x86-64 mode.
- Opc = X86::MOV64rm;
- RC = &X86::GR64RegClass;
- break;
case MVT::f32:
if (X86ScalarSSEf32) {
Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
return 0;
}
- // Materialize addresses with LEA/MOV instructions.
- if (isa<GlobalValue>(C)) {
- X86AddressMode AM;
- if (X86SelectAddress(C, AM)) {
- // If the expression is just a basereg, then we're done, otherwise we need
- // to emit an LEA.
- if (AM.BaseType == X86AddressMode::RegBase &&
- AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
- return AM.Base.Reg;
-
- unsigned ResultReg = createResultReg(RC);
- if (TM.getRelocationModel() == Reloc::Static &&
- TLI.getPointerTy() == MVT::i64) {
- // The displacement code be more than 32 bits away so we need to use
- // an instruction with a 64 bit immediate
- Opc = X86::MOV64ri;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(Opc), ResultReg).addGlobalAddress(cast<GlobalValue>(C));
- } else {
- Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(Opc), ResultReg), AM);
- }
- return ResultReg;
- }
- return 0;
- }
-
// MachineConstantPool wants an explicit alignment.
- unsigned Align = DL.getPrefTypeAlignment(C->getType());
+ unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
if (Align == 0) {
- // Alignment of vector types. FIXME!
- Align = DL.getTypeAllocSize(C->getType());
+ // Alignment of vector types. FIXME!
+ Align = DL.getTypeAllocSize(CFP->getType());
}
// x86-32 PIC requires a PIC base register for constant pools.
}
// Create the load from the constant pool.
- unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
+ unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);
unsigned ResultReg = createResultReg(RC);
+
addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg),
- MCPOffset, PICBase, OpFlag);
-
+ CPI, PICBase, OpFlag);
return ResultReg;
}
+unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
+ // Can't handle alternate code models yet.
+ if (TM.getCodeModel() != CodeModel::Small)
+ return 0;
+
+ // Materialize addresses with LEA/MOV instructions.
+ X86AddressMode AM;
+ if (X86SelectAddress(GV, AM)) {
+ // If the expression is just a basereg, then we're done, otherwise we need
+ // to emit an LEA.
+ if (AM.BaseType == X86AddressMode::RegBase &&
+ AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
+ return AM.Base.Reg;
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ if (TM.getRelocationModel() == Reloc::Static &&
+ TLI.getPointerTy() == MVT::i64) {
+ // The displacement could be more than 32 bits away, so we need to use
+ // an instruction with a 64-bit immediate.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
+ ResultReg)
+ .addGlobalAddress(GV);
+ } else {
+ unsigned Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg), AM);
+ }
+ return ResultReg;
+ }
+ return 0;
+}
+
+unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
+ EVT CEVT = TLI.getValueType(C->getType(), true);
+
+ // Only handle simple types.
+ if (!CEVT.isSimple())
+ return 0;
+ MVT VT = CEVT.getSimpleVT();
+
+ if (const auto *CI = dyn_cast<ConstantInt>(C))
+ return X86MaterializeInt(CI, VT);
+ else if (const auto *CFP = dyn_cast<ConstantFP>(C))
+ return X86MaterializeFP(CFP, VT);
+ else if (const auto *GV = dyn_cast<GlobalValue>(C))
+ return X86MaterializeGV(GV, VT);
+
+ return 0;
+}
+
unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
// Fail on dynamic allocas. At this point, getRegForValue has already
// checked its CSE maps, so if we're here trying to handle a dynamic