#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
+#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
-#include "llvm/IR/GCStrategy.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
AA = &aa;
GFI = gfi;
LibInfo = li;
- DL = DAG.getSubtarget().getDataLayout();
+ DL = DAG.getTarget().getDataLayout();
Context = DAG.getContext();
LPadToCallSiteMap.clear();
}
SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
Reg, VT);
SDValue Cmp;
- unsigned PopCount = CountPopulation_64(B.Mask);
+ unsigned PopCount = countPopulation(B.Mask);
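// countPopulation and countTrailingOnes are the templated MathExtras
// replacements for the old width-suffixed helpers; the argument type picks
// the width, e.g. (illustrative values):
//   uint64_t M = 0xff;  // countPopulation(M) == 8, countTrailingOnes(M) == 8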
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (PopCount == 1) {
// Testing for a single bit; just compare the shift count with what it
// would need to be to shift a 1 bit in that position.
Cmp = DAG.getSetCC(
getCurSDLoc(), TLI.getSetCCResultType(*DAG.getContext(), VT), ShiftOp,
DAG.getConstant(countTrailingZeros(B.Mask), VT), ISD::SETEQ);
} else if (PopCount == BB.Range) {
// There is only one zero bit in the range, test for it directly.
Cmp = DAG.getSetCC(
getCurSDLoc(), TLI.getSetCCResultType(*DAG.getContext(), VT), ShiftOp,
- DAG.getConstant(CountTrailingOnes_64(B.Mask), VT), ISD::SETNE);
+ DAG.getConstant(countTrailingOnes(B.Mask), VT), ISD::SETNE);
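// Roughly, for this branch: with BB.Range == 3 the mask spans positions
// 0..3, so a mask of 0b1101 has PopCount == 3 == BB.Range and exactly one
// zero bit; countTrailingOnes(0b1101) == 1 recovers its position, and the
// SETNE above tests "shift amount != 1" (values chosen for illustration).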
} else {
// Make desired shift
SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurSDLoc(), VT,
DAG.getConstant(1, VT), ShiftOp);
DEBUG(dbgs() << "Selecting best pivot: \n"
<< "First: " << First << ", Last: " << Last <<'\n'
<< "LSize: " << LSize << ", RSize: " << RSize << '\n');
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
J!=E; ++I, ++J) {
const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
APInt Range = ComputeRange(LEnd, RBegin);
assert((Range - 2ULL).isNonNegative() && "Invalid case distance");
// Use volatile double here to avoid excess precision issues on some hosts,
// e.g. that use 80-bit X87 registers.
+ // Only consider the density of sub-ranges that actually have sufficient
+ // entries to be lowered as a jump table.
volatile double LDensity =
- LSize.roundToDouble() / (LEnd - First + 1ULL).roundToDouble();
+ LSize.ult(TLI.getMinimumJumpTableEntries())
+ ? 0.0
+ : LSize.roundToDouble() / (LEnd - First + 1ULL).roundToDouble();
volatile double RDensity =
- RSize.roundToDouble() / (Last - RBegin + 1ULL).roundToDouble();
+ RSize.ult(TLI.getMinimumJumpTableEntries())
+ ? 0.0
+ : RSize.roundToDouble() / (Last - RBegin + 1ULL).roundToDouble();
volatile double Metric = Range.logBase2() * (LDensity + RDensity);
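// Worked example (assuming getMinimumJumpTableEntries() == 4, the usual
// default): a left half holding 2 cases over a width of 2 has a raw density
// of 1.0 and would previously have dominated the metric, yet it can never
// be emitted as a jump table; clamping its density to 0 steers the pivot
// toward splits whose halves are actually lowerable.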
// Should always split in some non-trivial place
DEBUG(dbgs() <<"=>Step\n"
RSize -= J->size();
}
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (areJTsAllowed(TLI)) {
- // If our case is dense we *really* should handle it earlier!
- assert((FMetric > 0) && "Should handle dense range earlier!");
- } else {
+ if (FMetric == 0 || !areJTsAllowed(TLI))
Pivot = CR.Range.first + Size/2;
- }
splitSwitchCase(CR, Pivot, WorkList, SV, SwitchBB);
return true;
}
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(PtrOperand),
MachineMemOperand::MOStore, VT.getStoreSize(),
Alignment, AAInfo);
- SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, MMO);
+ SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
+ MMO, false);
DAG.setRoot(StoreNode);
setValue(&I, StoreNode);
}
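// Sketch of the widened getMaskedStore signature (types illustrative): a
// truncating store of v8i32 data into i16 memory would pass the memory VT
// and set the flag, e.g.
//   DAG.getMaskedStore(Chain, sdl, WideVal, Ptr, Mask,
//                      MVT::v8i16 /*MemVT*/, MMO, /*isTruncating=*/true);
// Passing VT and false above preserves the old non-truncating behaviour.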
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(PtrOperand),
MachineMemOperand::MOLoad, VT.getStoreSize(),
Alignment, AAInfo, Ranges);
- SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, MMO);
+ SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
+ ISD::NON_EXTLOAD);
SDValue OutChain = Load.getValue(1);
DAG.setRoot(OutChain);
setValue(&I, Load);
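// The matching getMaskedLoad sketch (again illustrative): an extending load
// from i16 memory into a v8i32 result would be
//   DAG.getMaskedLoad(MVT::v8i32, sdl, InChain, Ptr, Mask, Src0,
//                     MVT::v8i16 /*MemVT*/, MMO, ISD::SEXTLOAD);
// ISD::NON_EXTLOAD above keeps the original same-width semantics.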
// powi(x, 0) -> 1.0
if (Val == 0)
return DAG.getConstantFP(1.0, LHS.getValueType());
const Function *F = DAG.getMachineFunction().getFunction();
- if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize) ||
+ if (!F->hasFnAttribute(Attribute::OptimizeForSize) ||
// If optimizing for size, don't insert too many multiplies. This
// inserts up to 5 multiplies.
- CountPopulation_32(Val)+Log2_32(Val) < 7) {
+ countPopulation(Val) + Log2_32(Val) < 7) {
// We use the simple binary decomposition method to generate the multiply
// sequence. There are more optimal ways to do this (for example,
// powi(x,15) generates one more multiply than it should), but this has
// the benefit of being both really simple and much less expensive in the
// common case of exponents below 32.
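// Worked example of the size heuristic: for powi(x, 5),
// countPopulation(5) + Log2_32(5) == 2 + 2 == 4 < 7, so the expansion
// (x2 = x*x; x4 = x2*x2; res = x4*x) is emitted even under OptimizeForSize,
// while an exponent such as 0xffff (16 + 15 == 31) is not expanded when
// optimizing for size.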
case Intrinsic::longjmp:
return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
case Intrinsic::memcpy: {
+ // FIXME: this definition of "user defined address space" is x86-specific
// Assert for address < 256 since we support only user defined address
// spaces.
assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
< 256 &&
cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
< 256 &&
"Unknown address space");
return nullptr;
}
case Intrinsic::memset: {
+ // FIXME: this definition of "user defined address space" is x86-specific
// Assert for address < 256 since we support only user defined address
// spaces.
assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
< 256 &&
"Unknown address space");
return nullptr;
}
case Intrinsic::memmove: {
+ // FIXME: this definition of "user defined address space" is x86-specific
// Assert for address < 256 since we support only user defined address
// spaces.
assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
< 256 &&
cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
< 256 &&
"Unknown address space");
}
case Intrinsic::experimental_gc_result_int:
case Intrinsic::experimental_gc_result_float:
- case Intrinsic::experimental_gc_result_ptr: {
+ case Intrinsic::experimental_gc_result_ptr:
+ case Intrinsic::experimental_gc_result: {
visitGCResult(I);
return nullptr;
}
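// The three typed gc_result variants fold into the single overloaded
// intrinsic, which mangles on its return type, e.g. (illustrative):
//   %val = call i32 @llvm.experimental.gc.result.i32(i32 %statepoint_token)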
return nullptr;
}
+ case Intrinsic::eh_begincatch:
+ case Intrinsic::eh_endcatch:
+ llvm_unreachable("begin/end catch intrinsics not lowered in codegen");
}
}
CLI.setChain(getRoot());
}
-
- const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
- std::pair<SDValue, SDValue> Result = TLI->LowerCallTo(CLI);
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
assert((CLI.IsTailCall || Result.second.getNode()) &&
"Non-null chain expected with non-tail call!");
// If this is a constraint for a single physreg, or a constraint for a
// register class, find it.
- std::pair<unsigned, const TargetRegisterClass*> PhysReg =
- TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
- OpInfo.ConstraintVT);
+ std::pair<unsigned, const TargetRegisterClass *> PhysReg =
+ TLI.getRegForInlineAsmConstraint(MF.getSubtarget().getRegisterInfo(),
+ OpInfo.ConstraintCode,
+ OpInfo.ConstraintVT);
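// Target overrides now receive the register info explicitly; the hook is
// roughly (sketch of the updated signature):
//   std::pair<unsigned, const TargetRegisterClass *>
//   getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
//                                const std::string &Constraint, MVT VT) const;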
unsigned NumRegs = 1;
if (OpInfo.ConstraintVT != MVT::Other) {
SDISelAsmOperandInfoVector ConstraintOperands;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- TargetLowering::AsmOperandInfoVector
- TargetConstraints = TLI.ParseConstraints(CS);
+ TargetLowering::AsmOperandInfoVector TargetConstraints =
+ TLI.ParseConstraints(DAG.getSubtarget().getRegisterInfo(), CS);
bool hasMemory = false;
SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
if (OpInfo.ConstraintVT != Input.ConstraintVT) {
- std::pair<unsigned, const TargetRegisterClass*> MatchRC =
- TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
- OpInfo.ConstraintVT);
- std::pair<unsigned, const TargetRegisterClass*> InputRC =
- TLI.getRegForInlineAsmConstraint(Input.ConstraintCode,
- Input.ConstraintVT);
+ const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
+ std::pair<unsigned, const TargetRegisterClass *> MatchRC =
+ TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
+ OpInfo.ConstraintVT);
+ std::pair<unsigned, const TargetRegisterClass *> InputRC =
+ TLI.getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
+ Input.ConstraintVT);
if ((OpInfo.ConstraintVT.isInteger() !=
Input.ConstraintVT.isInteger()) ||
(MatchRC.second != InputRC.second)) {
// Push the arguments from the call instruction up to the register mask.
SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
- for (SDNode::op_iterator i = Call->op_begin()+2; i != e; ++i)
- Ops.push_back(*i);
+ Ops.append(Call->op_begin() + 2, e);
// Push live variables for the stack map.
addStackMapLiveVars(CS, NumMetaOpers + NumArgs, Ops, *this);
}
if (Args[i].isNest)
Flags.setNest();
- if (NeedsRegBlock) {
+ if (NeedsRegBlock)
Flags.setInConsecutiveRegs();
- if (Value == NumValues - 1)
- Flags.setInConsecutiveRegsLast();
- }
Flags.setOrigAlign(OriginalAlignment);
MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
CLI.Outs.push_back(MyFlags);
CLI.OutVals.push_back(Parts[j]);
}
+
+ if (NeedsRegBlock && Value == NumValues - 1)
+ CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
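// Setting the flag on the final pushed entry marks only the last register
// of the block: e.g. if the last value is split into four i32 parts, only
// the fourth part's Outs entry is tagged now, where the old per-value code
// tagged every part of that value.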
}
}
ISD::ArgFlagsTy Flags;
Flags.setSRet();
MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
- ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true, 0, 0);
+ ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
+ ISD::InputArg::NoArgIndex, 0);
Ins.push_back(RetArg);
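// NoArgIndex is a sentinel marking this synthesized sret slot as belonging
// to no IR argument, rather than silently aliasing argument index 0.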
}
}
if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
Flags.setNest();
- if (NeedsRegBlock) {
+ if (NeedsRegBlock)
Flags.setInConsecutiveRegs();
- if (Value == NumValues - 1)
- Flags.setInConsecutiveRegsLast();
- }
Flags.setOrigAlign(OriginalAlignment);
MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
MyFlags.Flags.setOrigAlign(1);
Ins.push_back(MyFlags);
}
+ if (NeedsRegBlock && Value == NumValues - 1)
+ Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
PartBase += VT.getStoreSize();
}
}
assert(i == InVals.size() && "Argument register count mismatch!");
// Finally, if the target has anything special to do, allow it to do so.
- // FIXME: this should insert code into the DAG!
EmitFunctionEntryCode();
}