#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include <map>
#include <set>
#include <iostream>
+#include <algorithm>
using namespace llvm;
#ifndef NDEBUG
static const bool ViewSchedDAGs = 0;
#endif
+// Scheduling heuristics
+enum SchedHeuristics {
+ defaultScheduling, // Let the target specify its preference.
+ noScheduling, // No scheduling, emit breadth first sequence.
+ simpleScheduling, // Two pass, min. critical path, max. utilization.
+ simpleNoItinScheduling, // Same as above, except using generic latency.
+ listSchedulingBURR, // Bottom up reg reduction list scheduling.
+ listSchedulingTD // Top-down list scheduler.
+};
+
namespace {
cl::opt<SchedHeuristics>
ISHeuristic(
"except using generic latency"),
clEnumValN(listSchedulingBURR, "list-burr",
"Bottom up register reduction list scheduling"),
+ clEnumValN(listSchedulingTD, "list-td",
+ "Top-down list scheduler"),
clEnumValEnd));
} // namespace
+namespace {
+ /// RegsForValue - This struct represents the physical registers that a
+ /// particular value is assigned and the type information about the value.
+ /// This is needed because values can be promoted into larger registers and
+ /// expanded into multiple smaller registers than the value.
+ struct RegsForValue {
+ /// Regs - This list hold the register (for legal and promoted values)
+ /// or register set (for expanded values) that the value should be assigned
+ /// to.
+ std::vector<unsigned> Regs;
+
+ /// RegVT - The value type of each register.
+ ///
+ MVT::ValueType RegVT;
+
+ /// ValueVT - The value type of the LLVM value, which may be promoted from
+ /// RegVT or made from merging the two expanded parts.
+ MVT::ValueType ValueVT;
+
+ RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}
+
+ RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
+ : RegVT(regvt), ValueVT(valuevt) {
+ Regs.push_back(Reg);
+ }
+ RegsForValue(const std::vector<unsigned> ®s,
+ MVT::ValueType regvt, MVT::ValueType valuevt)
+ : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
+ }
+
+ /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
+ /// this value and returns the result as a ValueVT value. This uses
+ /// Chain/Flag as the input and updates them for the output Chain/Flag.
+ SDOperand getCopyFromRegs(SelectionDAG &DAG,
+ SDOperand &Chain, SDOperand &Flag) const;
+
+ /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
+ /// specified value into the registers specified by this object. This uses
+ /// Chain/Flag as the input and updates them for the output Chain/Flag.
+ void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
+ SDOperand &Chain, SDOperand &Flag) const;
+
+ /// AddInlineAsmOperands - Add this value to the specified inlineasm node
+ /// operand list. This adds the code marker and includes the number of
+ /// values added into it.
+ void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
+ std::vector<SDOperand> &Ops) const;
+ };
+}
namespace llvm {
//===--------------------------------------------------------------------===//
return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
}
- unsigned CreateRegForValue(const Value *V) {
- MVT::ValueType VT = TLI.getValueType(V->getType());
- // The common case is that we will only create one register for this
- // value. If we have that case, create and return the virtual register.
- unsigned NV = TLI.getNumElements(VT);
- if (NV == 1) {
- // If we are promoting this value, pick the next largest supported type.
- return MakeReg(TLI.getTypeToTransformTo(VT));
- }
-
- // If this value is represented with multiple target registers, make sure
- // to create enough consequtive registers of the right (smaller) type.
- unsigned NT = VT-1; // Find the type to use.
- while (TLI.getNumElements((MVT::ValueType)NT) != 1)
- --NT;
-
- unsigned R = MakeReg((MVT::ValueType)NT);
- for (unsigned i = 1; i != NV; ++i)
- MakeReg((MVT::ValueType)NT);
- return R;
- }
-
+ unsigned CreateRegForValue(const Value *V);
+
unsigned InitializeRegForValue(const Value *V) {
unsigned &R = ValueMap[V];
assert(R == 0 && "Already initialized this value register!");
}
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
-/// PHI nodes or outside of the basic block that defines it.
+/// PHI nodes or outside of the basic block that defines it, or used by a
+/// switch instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
if (isa<PHINode>(I)) return true;
BasicBlock *BB = I->getParent();
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
+ if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
+ isa<SwitchInst>(*UI))
return true;
return false;
}
/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
-/// entry block, return true.
+/// entry block, return true. This includes arguments used by switches, since
+/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A) {
BasicBlock *Entry = A->getParent()->begin();
for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != Entry)
+ if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
return false; // Use not in entry block.
return true;
}
for (BasicBlock::iterator I = BB->begin();
(PN = dyn_cast<PHINode>(I)); ++I)
if (!PN->use_empty()) {
- unsigned NumElements =
- TLI.getNumElements(TLI.getValueType(PN->getType()));
+ MVT::ValueType VT = TLI.getValueType(PN->getType());
+ unsigned NumElements;
+ if (VT != MVT::Vector)
+ NumElements = TLI.getNumElements(VT);
+ else {
+ MVT::ValueType VT1,VT2;
+ NumElements =
+ TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
+ VT1, VT2);
+ }
unsigned PHIReg = ValueMap[PN];
assert(PHIReg &&"PHI node does not have an assigned virtual register!");
for (unsigned i = 0; i != NumElements; ++i)
}
}
-
+/// CreateRegForValue - Allocate the appropriate number of virtual registers of
+/// the correctly promoted or expanded types. Assign these registers
+/// consecutive vreg numbers and return the first assigned number.
+unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
+ MVT::ValueType VT = TLI.getValueType(V->getType());
+
+ // The number of multiples of registers that we need to, e.g., split up
+ // a <2 x int64> -> 4 x i32 registers.
+ unsigned NumVectorRegs = 1;
+
+ // If this is a packed type, figure out what type it will decompose into
+ // and how many of the elements it will use.
+ if (VT == MVT::Vector) {
+ const PackedType *PTy = cast<PackedType>(V->getType());
+ unsigned NumElts = PTy->getNumElements();
+ MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());
+
+ // Divide the input until we get to a supported size. This will always
+ // end with a scalar if the target doesn't support vectors.
+ while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
+ NumElts >>= 1; // NOTE(review): halving assumes a power-of-two element count — verify
+ NumVectorRegs <<= 1;
+ }
+ if (NumElts == 1)
+ VT = EltTy;
+ else
+ VT = getVectorType(EltTy, NumElts);
+ }
+
+ // The common case is that we will only create one register for this
+ // value. If we have that case, create and return the virtual register.
+ unsigned NV = TLI.getNumElements(VT);
+ if (NV == 1) {
+ // If we are promoting this value, pick the next largest supported type.
+ MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
+ unsigned Reg = MakeReg(PromotedType);
+ // If this is a vector of supported or promoted types (e.g. 4 x i16),
+ // create all of the registers.
+ for (unsigned i = 1; i != NumVectorRegs; ++i)
+ MakeReg(PromotedType);
+ return Reg;
+ }
+
+ // If this value is represented with multiple target registers, make sure
+ // to create enough consecutive registers of the right (smaller) type.
+ unsigned NT = VT-1; // Scan downward from VT for the largest single-register type.
+ while (TLI.getNumElements((MVT::ValueType)NT) != 1)
+ --NT;
+
+ unsigned R = MakeReg((MVT::ValueType)NT);
+ for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
+ MakeReg((MVT::ValueType)NT);
+ return R;
+}
//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// analysis.
std::vector<SDOperand> PendingLoads;
+ /// Case - A pair of values to record the Value for a switch case, and the
+ /// case's target basic block.
+ typedef std::pair<Constant*, MachineBasicBlock*> Case;
+ typedef std::vector<Case>::iterator CaseItr;
+ typedef std::pair<CaseItr, CaseItr> CaseRange;
+
+ /// CaseRec - A struct with ctor used in lowering switches to a binary tree
+ /// of conditional branches.
+ struct CaseRec {
+ CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
+ CaseBB(bb), LT(lt), GE(ge), Range(r) {}
+
+ /// CaseBB - The MBB in which to emit the compare and branch
+ MachineBasicBlock *CaseBB;
+ /// LT, GE - If non-null, we know the current case value must be less-than
+ /// or greater-than-or-equal-to these Constants.
+ Constant *LT;
+ Constant *GE;
+ /// Range - A pair of iterators representing the range of case values to be
+ /// processed at this point in the binary search tree.
+ CaseRange Range;
+ };
+
+ /// The comparison function for sorting Case values.
+ struct CaseCmp {
+ bool operator () (const Case& C1, const Case& C2) {
+ if (const ConstantUInt* U1 = dyn_cast<const ConstantUInt>(C1.first))
+ return U1->getValue() < cast<const ConstantUInt>(C2.first)->getValue();
+
+ const ConstantSInt* S1 = dyn_cast<const ConstantSInt>(C1.first);
+ return S1->getValue() < cast<const ConstantSInt>(C2.first)->getValue();
+ }
+ };
+
public:
// TLI - This is information that describes the available target features we
// need for lowering. This indicates when operations are unavailable,
SelectionDAG &DAG;
const TargetData &TD;
+ /// SwitchCases - Vector of CaseBlock structures used to communicate
+ /// SwitchInst code generation information.
+ std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
+
/// FuncInfo - Information about the function as a whole.
///
FunctionLoweringInfo &FuncInfo;
void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
+ SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
+ SDOperand SrcValue, SDOperand Root,
+ bool isVolatile);
/// getIntPtrConstant - Return Val as a DAG constant of the target's
/// pointer value type.
SDOperand getIntPtrConstant(uint64_t Val) {
return DAG.getConstant(Val, TLI.getPointerTy());
}
- SDOperand getValue(const Value *V) {
- SDOperand &N = NodeMap[V];
- if (N.Val) return N;
-
- const Type *VTy = V->getType();
- MVT::ValueType VT = TLI.getValueType(VTy);
- if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V)))
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
- visit(CE->getOpcode(), *CE);
- assert(N.Val && "visit didn't populate the ValueMap!");
- return N;
- } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
- return N = DAG.getGlobalAddress(GV, VT);
- } else if (isa<ConstantPointerNull>(C)) {
- return N = DAG.getConstant(0, TLI.getPointerTy());
- } else if (isa<UndefValue>(C)) {
- return N = DAG.getNode(ISD::UNDEF, VT);
- } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
- return N = DAG.getConstantFP(CFP->getValue(), VT);
- } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
- unsigned NumElements = PTy->getNumElements();
- MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
- MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);
-
- // Now that we know the number and type of the elements, push a
- // Constant or ConstantFP node onto the ops list for each element of
- // the packed constant.
- std::vector<SDOperand> Ops;
- if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
- if (MVT::isFloatingPoint(PVT)) {
- for (unsigned i = 0; i != NumElements; ++i) {
- const ConstantFP *El = cast<ConstantFP>(CP->getOperand(i));
- Ops.push_back(DAG.getConstantFP(El->getValue(), PVT));
- }
- } else {
- for (unsigned i = 0; i != NumElements; ++i) {
- const ConstantIntegral *El =
- cast<ConstantIntegral>(CP->getOperand(i));
- Ops.push_back(DAG.getConstant(El->getRawValue(), PVT));
- }
- }
- } else {
- assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
- SDOperand Op;
- if (MVT::isFloatingPoint(PVT))
- Op = DAG.getConstantFP(0, PVT);
- else
- Op = DAG.getConstant(0, PVT);
- Ops.assign(NumElements, Op);
- }
-
- // Handle the case where we have a 1-element vector, in which
- // case we want to immediately turn it into a scalar constant.
- if (Ops.size() == 1) {
- return N = Ops[0];
- } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
- return N = DAG.getNode(ISD::ConstantVec, TVT, Ops);
- } else {
- // If the packed type isn't legal, then create a ConstantVec node with
- // generic Vector type instead.
- return N = DAG.getNode(ISD::ConstantVec, MVT::Vector, Ops);
- }
- } else {
- // Canonicalize all constant ints to be unsigned.
- return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
- }
-
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
- std::map<const AllocaInst*, int>::iterator SI =
- FuncInfo.StaticAllocaMap.find(AI);
- if (SI != FuncInfo.StaticAllocaMap.end())
- return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
- }
-
- std::map<const Value*, unsigned>::const_iterator VMI =
- FuncInfo.ValueMap.find(V);
- assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
-
- unsigned InReg = VMI->second;
-
- // If this type is not legal, make it so now.
- MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
-
- N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
- if (DestVT < VT) {
- // Source must be expanded. This input value is actually coming from the
- // register pair VMI->second and VMI->second+1.
- N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
- DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
- } else {
- if (DestVT > VT) { // Promotion case
- if (MVT::isFloatingPoint(VT))
- N = DAG.getNode(ISD::FP_ROUND, VT, N);
- else
- N = DAG.getNode(ISD::TRUNCATE, VT, N);
- }
- }
-
- return N;
- }
+ SDOperand getValue(const Value *V);
/// setValue - Record NewN as the SDOperand computed for V in the per-block
/// NodeMap, overwriting any previous entry, and return the stored operand.
const SDOperand &setValue(const Value *V, SDOperand NewN) {
SDOperand &N = NodeMap[V];
return N = NewN;
}
- unsigned GetAvailableRegister(bool OutReg, bool InReg,
- const std::vector<unsigned> &RegChoices,
- std::set<unsigned> &OutputRegs,
- std::set<unsigned> &InputRegs);
+ RegsForValue GetRegistersForValue(const std::string &ConstrCode,
+ MVT::ValueType VT,
+ bool OutReg, bool InReg,
+ std::set<unsigned> &OutputRegs,
+ std::set<unsigned> &InputRegs);
// Terminator instructions.
void visitRet(ReturnInst &I);
void visitBr(BranchInst &I);
+ void visitSwitch(SwitchInst &I);
void visitUnreachable(UnreachableInst &I) { /* noop */ }
+ // Helper for visitSwitch
+ void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
+
// These all get lowered before this pass.
- void visitExtractElement(ExtractElementInst &I) { assert(0 && "TODO"); }
- void visitInsertElement(InsertElementInst &I) { assert(0 && "TODO"); }
- void visitSwitch(SwitchInst &I) { assert(0 && "TODO"); }
void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }
- //
void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
void visitShift(User &I, unsigned Opcode);
void visitAdd(User &I) {
}
// visitDiv - Lower an LLVM div: signed/unsigned integer opcode is chosen
// from the LLVM type, FP uses FDIV, and packed operands use the matching
// generic vector opcode (VSDIV/VUDIV). (Span resolved from interleaved
// '-'/'+' diff lines to the post-patch version.)
void visitDiv(User &I) {
  const Type *Ty = I.getType();
  visitBinary(I,
              Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV,
              Ty->isSigned() ? ISD::VSDIV : ISD::VUDIV);
}
/// visitRem - Lower an LLVM rem: signed/unsigned integer opcode from the
/// LLVM type, FREM for floating point. The vector opcode is 0, i.e. packed
/// remainder is not handled by this path.
void visitRem(User &I) {
const Type *Ty = I.getType();
visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
}
- void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, 0); }
- void visitOr (User &I) { visitBinary(I, ISD::OR, 0, 0); }
- void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, 0); }
+ void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, ISD::VAND); }
+ void visitOr (User &I) { visitBinary(I, ISD::OR, 0, ISD::VOR); }
+ void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, ISD::VXOR); }
void visitShl(User &I) { visitShift(I, ISD::SHL); }
void visitShr(User &I) {
visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }
+ void visitExtractElement(User &I);
+ void visitInsertElement(User &I);
+
void visitGetElementPtr(User &I);
void visitCast(User &I);
void visitSelect(User &I);
- //
void visitMalloc(MallocInst &I);
void visitFree(FreeInst &I);
void visitCall(CallInst &I);
void visitInlineAsm(CallInst &I);
const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
+ void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);
void visitVAStart(CallInst &I);
void visitVAArg(VAArgInst &I);
};
} // end namespace llvm
+SDOperand SelectionDAGLowering::getValue(const Value *V) {
+ SDOperand &N = NodeMap[V];
+ if (N.Val) return N;
+
+ const Type *VTy = V->getType();
+ MVT::ValueType VT = TLI.getValueType(VTy);
+ if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ visit(CE->getOpcode(), *CE);
+ assert(N.Val && "visit didn't populate the ValueMap!");
+ return N;
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
+ return N = DAG.getGlobalAddress(GV, VT);
+ } else if (isa<ConstantPointerNull>(C)) {
+ return N = DAG.getConstant(0, TLI.getPointerTy());
+ } else if (isa<UndefValue>(C)) {
+ if (!isa<PackedType>(VTy))
+ return N = DAG.getNode(ISD::UNDEF, VT);
+
+ // Create a VBUILD_VECTOR of undef nodes.
+ const PackedType *PTy = cast<PackedType>(VTy);
+ unsigned NumElements = PTy->getNumElements();
+ MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
+
+ std::vector<SDOperand> Ops;
+ Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));
+
+ // Create a VConstant node with generic Vector type.
+ Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
+ Ops.push_back(DAG.getValueType(PVT));
+ return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
+ } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+ return N = DAG.getConstantFP(CFP->getValue(), VT);
+ } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
+ unsigned NumElements = PTy->getNumElements();
+ MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
+
+ // Now that we know the number and type of the elements, push a
+ // Constant or ConstantFP node onto the ops list for each element of
+ // the packed constant.
+ std::vector<SDOperand> Ops;
+ if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
+ for (unsigned i = 0; i != NumElements; ++i)
+ Ops.push_back(getValue(CP->getOperand(i)));
+ } else {
+ assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
+ SDOperand Op;
+ if (MVT::isFloatingPoint(PVT))
+ Op = DAG.getConstantFP(0, PVT);
+ else
+ Op = DAG.getConstant(0, PVT);
+ Ops.assign(NumElements, Op);
+ }
+
+ // Create a VBUILD_VECTOR node with generic Vector type.
+ Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
+ Ops.push_back(DAG.getValueType(PVT));
+ return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
+ } else {
+ // Canonicalize all constant ints to be unsigned.
+ return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
+ }
+ }
+
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
+ std::map<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end())
+ return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
+ }
+
+ std::map<const Value*, unsigned>::const_iterator VMI =
+ FuncInfo.ValueMap.find(V);
+ assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
+
+ unsigned InReg = VMI->second;
+
+ // If this type is not legal, make it so now.
+ if (VT != MVT::Vector) {
+ MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
+
+ N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
+ if (DestVT < VT) {
+ // Source must be expanded. This input value is actually coming from the
+ // register pair VMI->second and VMI->second+1.
+ N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
+ DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
+ } else if (DestVT > VT) { // Promotion case
+ if (MVT::isFloatingPoint(VT))
+ N = DAG.getNode(ISD::FP_ROUND, VT, N);
+ else
+ N = DAG.getNode(ISD::TRUNCATE, VT, N);
+ }
+ } else {
+ // Otherwise, if this is a vector, make it available as a generic vector
+ // here.
+ MVT::ValueType PTyElementVT, PTyLegalElementVT;
+ unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(VTy),PTyElementVT,
+ PTyLegalElementVT);
+
+ // Build a VBUILD_VECTOR with the input registers.
+ std::vector<SDOperand> Ops;
+ if (PTyElementVT == PTyLegalElementVT) {
+ // If the value types are legal, just VBUILD the CopyFromReg nodes.
+ for (unsigned i = 0; i != NE; ++i)
+ Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
+ PTyElementVT));
+ } else if (PTyElementVT < PTyLegalElementVT) {
+ // If the register was promoted, use TRUNCATE or FP_ROUND as appropriate.
+ for (unsigned i = 0; i != NE; ++i) {
+ SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
+ PTyElementVT); // NOTE(review): copies with the promoted-away element VT, then rounds to the same VT below — verify
+ if (MVT::isFloatingPoint(PTyElementVT))
+ Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
+ else
+ Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
+ Ops.push_back(Op);
+ }
+ } else {
+ // If the register was expanded, use BUILD_PAIR.
+ assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
+ for (unsigned i = 0; i != NE/2; ++i) {
+ SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
+ PTyElementVT);
+ SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
+ PTyElementVT);
+ Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1)); // NOTE(review): VT is MVT::Vector in this branch; an element VT looks intended — verify
+ }
+ }
+
+ Ops.push_back(DAG.getConstant(NE, MVT::i32));
+ Ops.push_back(DAG.getValueType(PTyLegalElementVT));
+ N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
+ }
+
+ return N;
+}
+
+
void SelectionDAGLowering::visitRet(ReturnInst &I) {
if (I.getNumOperands() == 0) {
DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
void SelectionDAGLowering::visitBr(BranchInst &I) {
// Update machine-CFG edges.
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
+ CurMBB->addSuccessor(Succ0MBB);
// Figure out which block is immediately after the current one.
MachineBasicBlock *NextBlock = 0;
DAG.getBasicBlock(Succ0MBB)));
} else {
MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
+ CurMBB->addSuccessor(Succ1MBB);
SDOperand Cond = getValue(I.getCondition());
if (Succ1MBB == NextBlock) {
SDOperand True = DAG.getConstant(1, Cond.getValueType());
Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
}
- Ops.push_back(Cond);
- Ops.push_back(DAG.getBasicBlock(Succ0MBB));
- Ops.push_back(DAG.getBasicBlock(Succ1MBB));
- DAG.setRoot(DAG.getNode(ISD::BRCONDTWOWAY, MVT::Other, Ops));
+ SDOperand True = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
+ DAG.getBasicBlock(Succ0MBB));
+ DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, True,
+ DAG.getBasicBlock(Succ1MBB)));
+ }
+ }
+}
+
+/// visitSwitchCase - Emits the necessary code to represent a single node in
+/// the binary search tree resulting from lowering a switch instruction.
+void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
+ SDOperand SwitchOp = getValue(CB.SwitchV);
+ SDOperand CaseOp = getValue(CB.CaseC);
+ SDOperand Cond = DAG.getSetCC(MVT::i1, SwitchOp, CaseOp, CB.CC);
+
+ // Set NextBlock to be the MBB immediately after the current one, if any.
+ // This is used to avoid emitting unnecessary branches to the next block.
+ MachineBasicBlock *NextBlock = 0;
+ MachineFunction::iterator BBI = CurMBB;
+ if (++BBI != CurMBB->getParent()->end())
+ NextBlock = BBI;
+
+ // If the lhs block is the next block, invert the condition so that we can
+ // fall through to the lhs instead of the rhs block.
+ if (CB.LHSBB == NextBlock) {
+ std::swap(CB.LHSBB, CB.RHSBB);
+ SDOperand True = DAG.getConstant(1, Cond.getValueType());
+ Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
+ }
+ SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
+ DAG.getBasicBlock(CB.LHSBB));
+ if (CB.RHSBB == NextBlock)
+ DAG.setRoot(BrCond);
+ else
+ DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
+ DAG.getBasicBlock(CB.RHSBB)));
+ // Update successor info
+ CurMBB->addSuccessor(CB.LHSBB);
+ CurMBB->addSuccessor(CB.RHSBB);
+}
+
+void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
+ // Figure out which block is immediately after the current one.
+ MachineBasicBlock *NextBlock = 0;
+ MachineFunction::iterator BBI = CurMBB;
+ if (++BBI != CurMBB->getParent()->end())
+ NextBlock = BBI;
+
+ // If there is only the default destination, branch to it if it is not the
+ // next basic block. Otherwise, just fall through.
+ if (I.getNumOperands() == 2) {
+ // Update machine-CFG edges.
+ MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[I.getDefaultDest()];
+ // If this is not a fall-through branch, emit the branch.
+ if (DefaultMBB != NextBlock)
+ DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
+ DAG.getBasicBlock(DefaultMBB)));
+ return;
+ }
+
+ // If there are any non-default case statements, create a vector of Cases
+ // representing each one, and sort the vector so that we can efficiently
+ // create a binary search tree from them.
+ std::vector<Case> Cases;
+ for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
+ MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
+ Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
+ }
+ std::sort(Cases.begin(), Cases.end(), CaseCmp());
+
+ // Get the Value to be switched on and default basic blocks, which will be
+ // inserted into CaseBlock records, representing basic blocks in the binary
+ // search tree.
+ Value *SV = I.getOperand(0);
+ MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];
+
+ // Get the current MachineFunction and LLVM basic block, for use in creating
+ // and inserting new MBBs during the creation of the binary search tree.
+ MachineFunction *CurMF = CurMBB->getParent();
+ const BasicBlock *LLVMBB = CurMBB->getBasicBlock();
+
+ // Push the initial CaseRec (whole case range, no known bounds) onto the
+ // worklist.
+ std::vector<CaseRec> CaseVec;
+ CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
+
+ while (!CaseVec.empty()) {
+ // Grab a record representing a case range to process off the worklist
+ CaseRec CR = CaseVec.back();
+ CaseVec.pop_back();
+
+ // Size is the number of Cases represented by this range. If Size is 1,
+ // then we are processing a leaf of the binary search tree. Otherwise,
+ // we need to pick a pivot, and push left and right ranges onto the
+ // worklist.
+ unsigned Size = CR.Range.second - CR.Range.first;
+
+ if (Size == 1) {
+ // Create a CaseBlock record representing a conditional branch to
+ // the Case's target mbb if the value being switched on SV is equal
+ // to C. Otherwise, branch to default.
+ Constant *C = CR.Range.first->first;
+ MachineBasicBlock *Target = CR.Range.first->second;
+ SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
+ CR.CaseBB);
+ // If the MBB representing the leaf node is the current MBB, then just
+ // call visitSwitchCase to emit the code into the current block.
+ // Otherwise, push the CaseBlock onto the vector to be later processed
+ // by SDISel, and insert the node's MBB before the next MBB.
+ if (CR.CaseBB == CurMBB)
+ visitSwitchCase(CB);
+ else {
+ SwitchCases.push_back(CB);
+ CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
+ }
+ } else {
+ // split case range at pivot
+ CaseItr Pivot = CR.Range.first + (Size / 2);
+ CaseRange LHSR(CR.Range.first, Pivot);
+ CaseRange RHSR(Pivot, CR.Range.second);
+ Constant *C = Pivot->first;
+ MachineBasicBlock *RHSBB = 0, *LHSBB = 0;
+ // We know that we branch to the LHS if the Value being switched on is
+ // less than the Pivot value, C. We use this to optimize our binary
+ // tree a bit, by recognizing that if SV is greater than or equal to the
+ // LHS's Case Value, and that Case Value is exactly one less than the
+ // Pivot's Value, then we can branch directly to the LHS's Target,
+ // rather than creating a leaf node for it.
+ if ((LHSR.second - LHSR.first) == 1 &&
+ LHSR.first->first == CR.GE &&
+ cast<ConstantIntegral>(C)->getRawValue() ==
+ (cast<ConstantIntegral>(CR.GE)->getRawValue() + 1ULL)) {
+ LHSBB = LHSR.first->second;
+ } else {
+ LHSBB = new MachineBasicBlock(LLVMBB);
+ CaseVec.push_back(CaseRec(LHSBB,C,CR.GE,LHSR));
+ }
+ // Similar to the optimization above, if the Value being switched on is
+ // known to be less than the Constant CR.LT, and the current Case Value
+ // is CR.LT - 1, then we can branch directly to the target block for
+ // the current Case Value, rather than emitting a RHS leaf node for it.
+ if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
+ cast<ConstantIntegral>(RHSR.first->first)->getRawValue() ==
+ (cast<ConstantIntegral>(CR.LT)->getRawValue() - 1ULL)) {
+ RHSBB = RHSR.first->second;
+ } else {
+ RHSBB = new MachineBasicBlock(LLVMBB);
+ CaseVec.push_back(CaseRec(RHSBB,CR.LT,C,RHSR));
+ }
+ // Create a CaseBlock record representing a conditional branch to
+ // the LHS node if the value being switched on SV is less than C.
+ // Otherwise, branch to the RHS node.
+ ISD::CondCode CC = C->getType()->isSigned() ? ISD::SETLT : ISD::SETULT;
+ SelectionDAGISel::CaseBlock CB(CC, SV, C, LHSBB, RHSBB, CR.CaseBB);
+ if (CR.CaseBB == CurMBB)
+ visitSwitchCase(CB);
+ else {
+ SwitchCases.push_back(CB);
+ CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
+ }
+ }
+ }
+}
setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
} else {
const PackedType *PTy = cast<PackedType>(Ty);
- unsigned NumElements = PTy->getNumElements();
- MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
- MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);
-
- // Immediately scalarize packed types containing only one element, so that
- // the Legalize pass does not have to deal with them. Similarly, if the
- // abstract vector is going to turn into one that the target natively
- // supports, generate that type now so that Legalize doesn't have to deal
- // with that either. These steps ensure that Legalize only has to handle
- // vector types in its Expand case.
- unsigned Opc = MVT::isFloatingPoint(PVT) ? FPOp : IntOp;
- if (NumElements == 1) {
- setValue(&I, DAG.getNode(Opc, PVT, Op1, Op2));
- } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
- setValue(&I, DAG.getNode(Opc, TVT, Op1, Op2));
- } else {
- SDOperand Num = DAG.getConstant(NumElements, MVT::i32);
- SDOperand Typ = DAG.getValueType(PVT);
- setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
- }
+ SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
+ SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
+ setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
}
}
void SelectionDAGLowering::visitCast(User &I) {
SDOperand N = getValue(I.getOperand(0));
- MVT::ValueType SrcTy = TLI.getValueType(I.getOperand(0)->getType());
- MVT::ValueType DestTy = TLI.getValueType(I.getType());
-
- if (N.getValueType() == DestTy) {
+ MVT::ValueType SrcVT = N.getValueType();
+ MVT::ValueType DestVT = TLI.getValueType(I.getType());
+
+ if (DestVT == MVT::Vector) {
+ // This is a cast to a vector from something else. This is always a bit
+ // convert. Get information about the input vector.
+ const PackedType *DestTy = cast<PackedType>(I.getType());
+ MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
+ setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
+ DAG.getConstant(DestTy->getNumElements(),MVT::i32),
+ DAG.getValueType(EltVT)));
+ } else if (SrcVT == DestVT) {
setValue(&I, N); // noop cast.
- } else if (DestTy == MVT::i1) {
+ } else if (DestVT == MVT::i1) {
// Cast to bool is a comparison against zero, not truncation to zero.
- SDOperand Zero = isInteger(SrcTy) ? DAG.getConstant(0, N.getValueType()) :
+ SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) :
DAG.getConstantFP(0.0, N.getValueType());
setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
- } else if (isInteger(SrcTy)) {
- if (isInteger(DestTy)) { // Int -> Int cast
- if (DestTy < SrcTy) // Truncating cast?
- setValue(&I, DAG.getNode(ISD::TRUNCATE, DestTy, N));
+ } else if (isInteger(SrcVT)) {
+ if (isInteger(DestVT)) { // Int -> Int cast
+ if (DestVT < SrcVT) // Truncating cast?
+ setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
else if (I.getOperand(0)->getType()->isSigned())
- setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestTy, N));
+ setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
else
- setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestTy, N));
- } else { // Int -> FP cast
+ setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
+ } else if (isFloatingPoint(DestVT)) { // Int -> FP cast
if (I.getOperand(0)->getType()->isSigned())
- setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestTy, N));
+ setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
else
- setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestTy, N));
+ setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
+ } else {
+ assert(0 && "Unknown cast!");
}
- } else {
- assert(isFloatingPoint(SrcTy) && "Unknown value type!");
- if (isFloatingPoint(DestTy)) { // FP -> FP cast
- if (DestTy < SrcTy) // Rounding cast?
- setValue(&I, DAG.getNode(ISD::FP_ROUND, DestTy, N));
+ } else if (isFloatingPoint(SrcVT)) {
+ if (isFloatingPoint(DestVT)) { // FP -> FP cast
+ if (DestVT < SrcVT) // Rounding cast?
+ setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
else
- setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestTy, N));
- } else { // FP -> Int cast.
+ setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
+ } else if (isInteger(DestVT)) { // FP -> Int cast.
if (I.getType()->isSigned())
- setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestTy, N));
+ setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
else
- setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestTy, N));
+ setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
+ } else {
+ assert(0 && "Unknown cast!");
}
+ } else {
+ assert(SrcVT == MVT::Vector && "Unknown cast!");
+ assert(DestVT != MVT::Vector && "Casts to vector already handled!");
+ // This is a cast from a vector to something else. This is always a bit
+ // convert. Get information about the input vector.
+ setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
}
}
+/// visitInsertElement - Lower an insertelement instruction to a
+/// VINSERT_VECTOR_ELT node.  Abstract MVT::Vector nodes carry their element
+/// count and element type as their last two operands (see the VBIT_CONVERT
+/// construction in visitCast), and those are propagated onto the new node.
+void SelectionDAGLowering::visitInsertElement(User &I) {
+  SDOperand InVec = getValue(I.getOperand(0));
+  SDOperand InVal = getValue(I.getOperand(1));
+  // The element index is zero-extended to pointer width.
+  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
+                                getValue(I.getOperand(2)));
+
+  // Copy the element count and element value type from the input vector node
+  // (its last two operands) so the result stays a well-formed MVT::Vector.
+  SDOperand Num = *(InVec.Val->op_end()-2);
+  SDOperand Typ = *(InVec.Val->op_end()-1);
+  setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
+                           InVec, InVal, InIdx, Num, Typ));
+}
+
+/// visitExtractElement - Lower an extractelement instruction to a
+/// VEXTRACT_VECTOR_ELT node whose value type is the scalar result type.
+void SelectionDAGLowering::visitExtractElement(User &I) {
+  SDOperand InVec = getValue(I.getOperand(0));
+  // The element index is zero-extended to pointer width.
+  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
+                                getValue(I.getOperand(1)));
+  // Unlike visitInsertElement, the input vector's trailing count/type
+  // operands are not needed here: the scalar result type fully determines
+  // the node.  (Removed a dead 'Typ' local that was computed but unused.)
+  setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
+                           TLI.getValueType(I.getType()), InVec, InIdx));
+}
+
void SelectionDAGLowering::visitGetElementPtr(User &I) {
SDOperand N = getValue(I.getOperand(0));
const Type *Ty = I.getOperand(0)->getType();
CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}
-/// getStringValue - Turn an LLVM constant pointer that eventually points to a
-/// global into a string value. Return an empty string if we can't do it.
-///
-static std::string getStringValue(GlobalVariable *GV, unsigned Offset = 0) {
- if (GV->hasInitializer() && isa<ConstantArray>(GV->getInitializer())) {
- ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
- if (Init->isString()) {
- std::string Result = Init->getAsString();
- if (Offset < Result.size()) {
- // If we are pointing INTO The string, erase the beginning...
- Result.erase(Result.begin(), Result.begin()+Offset);
- return Result;
- }
- }
- }
- return "";
-}
-
void SelectionDAGLowering::visitLoad(LoadInst &I) {
SDOperand Ptr = getValue(I.getOperand(0));
// Do not serialize non-volatile loads against each other.
Root = DAG.getRoot();
}
-
- const Type *Ty = I.getType();
+
+ setValue(&I, getLoadFrom(I.getType(), Ptr, DAG.getSrcValue(I.getOperand(0)),
+ Root, I.isVolatile()));
+}
+
+SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
+ SDOperand SrcValue, SDOperand Root,
+ bool isVolatile) {
SDOperand L;
-
if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
- unsigned NumElements = PTy->getNumElements();
MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
- MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);
-
- // Immediately scalarize packed types containing only one element, so that
- // the Legalize pass does not have to deal with them.
- if (NumElements == 1) {
- L = DAG.getLoad(PVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
- } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
- L = DAG.getLoad(TVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
- } else {
- L = DAG.getVecLoad(NumElements, PVT, Root, Ptr,
- DAG.getSrcValue(I.getOperand(0)));
- }
+ L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, SrcValue);
} else {
- L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr,
- DAG.getSrcValue(I.getOperand(0)));
+ L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SrcValue);
}
- setValue(&I, L);
- if (I.isVolatile())
+ if (isVolatile)
DAG.setRoot(L.getValue(1));
else
PendingLoads.push_back(L.getValue(1));
+
+ return L;
}
DAG.getSrcValue(I.getOperand(1))));
}
+/// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
+/// access memory and has no other side effects at all.
+///
+/// The body is generated by tablegen: the GET_NO_MEMORY_INTRINSICS fragment of
+/// Intrinsics.gen is expected to expand to code that returns true for the
+/// known side-effect-free intrinsic IDs (TODO: confirm the generated shape);
+/// any ID it does not match falls through to the 'return false' below.
+static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
+#define GET_NO_MEMORY_INTRINSICS
+#include "llvm/Intrinsics.gen"
+#undef GET_NO_MEMORY_INTRINSICS
+  return false;
+}
+
+/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
+/// node.  The opcode is chosen by whether the intrinsic may access memory
+/// (chained vs. unchained) and whether it produces a result:
+/// INTRINSIC_WO_CHAIN, INTRINSIC_W_CHAIN, or INTRINSIC_VOID.
+void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
+                                                unsigned Intrinsic) {
+  bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
+
+  // Build the operand list.
+  std::vector<SDOperand> Ops;
+  if (HasChain)  // If this intrinsic has side-effects, chainify it.
+    Ops.push_back(getRoot());
+
+  // Add the intrinsic ID as an integer operand.
+  Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
+
+  // Add all operands of the call to the operand list.
+  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
+    SDOperand Op = getValue(I.getOperand(i));
+
+    // If this is a vector type, force it from the abstract MVT::Vector form
+    // to the concrete packed value type the target node must see.
+    if (Op.getValueType() == MVT::Vector) {
+      const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
+      MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());
+
+      MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
+      assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
+      Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
+    }
+
+    assert(TLI.isTypeLegal(Op.getValueType()) &&
+           "Intrinsic uses a non-legal type?");
+    Ops.push_back(Op);
+  }
+
+  // Build the list of result value types: the intrinsic's result (if any)
+  // first, then MVT::Other for the output chain (if chained).
+  std::vector<MVT::ValueType> VTs;
+  if (I.getType() != Type::VoidTy) {
+    MVT::ValueType VT = TLI.getValueType(I.getType());
+    if (VT == MVT::Vector) {
+      // Vector results likewise use the concrete packed type on the node.
+      const PackedType *DestTy = cast<PackedType>(I.getType());
+      MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
+
+      VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
+      assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
+    }
+
+    assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
+    VTs.push_back(VT);
+  }
+  if (HasChain)
+    VTs.push_back(MVT::Other);
+
+  // Create the node.
+  SDOperand Result;
+  if (!HasChain)
+    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTs, Ops);
+  else if (I.getType() != Type::VoidTy)
+    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTs, Ops);
+  else
+    Result = DAG.getNode(ISD::INTRINSIC_VOID, VTs, Ops);
+
+  // When chained, the output chain is always the node's last result value.
+  if (HasChain)
+    DAG.setRoot(Result.getValue(Result.Val->getNumValues()-1));
+  if (I.getType() != Type::VoidTy) {
+    // Convert a concrete packed result back to the abstract MVT::Vector form
+    // the rest of lowering expects, re-attaching element count and type.
+    if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
+      MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
+      Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
+                           DAG.getConstant(PTy->getNumElements(), MVT::i32),
+                           DAG.getValueType(EVT));
+    }
+    setValue(&I, Result);
+  }
+}
+
/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
/// we want to emit this as a call to a named external function, return the name
/// otherwise lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
switch (Intrinsic) {
+ default:
+ // By default, turn this into a target intrinsic node.
+ visitTargetIntrinsic(I, Intrinsic);
+ return 0;
case Intrinsic::vastart: visitVAStart(I); return 0;
case Intrinsic::vaend: visitVAEnd(I); return 0;
case Intrinsic::vacopy: visitVACopy(I); return 0;
case Intrinsic::longjmp:
return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
break;
- case Intrinsic::memcpy: visitMemIntrinsic(I, ISD::MEMCPY); return 0;
- case Intrinsic::memset: visitMemIntrinsic(I, ISD::MEMSET); return 0;
- case Intrinsic::memmove: visitMemIntrinsic(I, ISD::MEMMOVE); return 0;
-
- case Intrinsic::readport:
- case Intrinsic::readio: {
- std::vector<MVT::ValueType> VTs;
- VTs.push_back(TLI.getValueType(I.getType()));
- VTs.push_back(MVT::Other);
- std::vector<SDOperand> Ops;
- Ops.push_back(getRoot());
- Ops.push_back(getValue(I.getOperand(1)));
- SDOperand Tmp = DAG.getNode(Intrinsic == Intrinsic::readport ?
- ISD::READPORT : ISD::READIO, VTs, Ops);
-
- setValue(&I, Tmp);
- DAG.setRoot(Tmp.getValue(1));
+ case Intrinsic::memcpy_i32:
+ case Intrinsic::memcpy_i64:
+ visitMemIntrinsic(I, ISD::MEMCPY);
return 0;
- }
- case Intrinsic::writeport:
- case Intrinsic::writeio:
- DAG.setRoot(DAG.getNode(Intrinsic == Intrinsic::writeport ?
- ISD::WRITEPORT : ISD::WRITEIO, MVT::Other,
- getRoot(), getValue(I.getOperand(1)),
- getValue(I.getOperand(2))));
+ case Intrinsic::memset_i32:
+ case Intrinsic::memset_i64:
+ visitMemIntrinsic(I, ISD::MEMSET);
+ return 0;
+ case Intrinsic::memmove_i32:
+ case Intrinsic::memmove_i64:
+ visitMemIntrinsic(I, ISD::MEMMOVE);
return 0;
case Intrinsic::dbg_stoppoint: {
- if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
- return "llvm_debugger_stop";
-
MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
- if (DebugInfo && DebugInfo->Verify(I.getOperand(4))) {
+ DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
+ if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
std::vector<SDOperand> Ops;
- // Input Chain
Ops.push_back(getRoot());
-
- // line number
- Ops.push_back(getValue(I.getOperand(2)));
-
- // column
- Ops.push_back(getValue(I.getOperand(3)));
+ Ops.push_back(getValue(SPI.getLineValue()));
+ Ops.push_back(getValue(SPI.getColumnValue()));
- DebugInfoDesc *DD = DebugInfo->getDescFor(I.getOperand(4));
+ DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
assert(DD && "Not a debug information descriptor");
- CompileUnitDesc *CompileUnit = dyn_cast<CompileUnitDesc>(DD);
- assert(CompileUnit && "Not a compile unit");
+ CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
+
Ops.push_back(DAG.getString(CompileUnit->getFileName()));
Ops.push_back(DAG.getString(CompileUnit->getDirectory()));
- if (Ops.size() == 5) // Found filename/workingdir.
- DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
+ DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
}
-
- setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
+
return 0;
}
- case Intrinsic::dbg_region_start:
- if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
- return "llvm_dbg_region_start";
- if (I.getType() != Type::VoidTy)
- setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
+ case Intrinsic::dbg_region_start: {
+ MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
+ if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
+ std::vector<SDOperand> Ops;
+
+ unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());
+
+ Ops.push_back(getRoot());
+ Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
+
+ DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
+ }
+
return 0;
- case Intrinsic::dbg_region_end:
- if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
- return "llvm_dbg_region_end";
- if (I.getType() != Type::VoidTy)
- setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
+ }
+ case Intrinsic::dbg_region_end: {
+ MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
+ if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
+ std::vector<SDOperand> Ops;
+
+ unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());
+
+ Ops.push_back(getRoot());
+ Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
+
+ DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
+ }
+
return 0;
- case Intrinsic::dbg_func_start:
- if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
- return "llvm_dbg_subprogram";
- if (I.getType() != Type::VoidTy)
- setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
+ }
+ case Intrinsic::dbg_func_start: {
+ MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
+ if (DebugInfo && FSI.getSubprogram() &&
+ DebugInfo->Verify(FSI.getSubprogram())) {
+ std::vector<SDOperand> Ops;
+
+ unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());
+
+ Ops.push_back(getRoot());
+ Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
+
+ DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
+ }
+
return 0;
- case Intrinsic::dbg_declare:
- if (I.getType() != Type::VoidTy)
- setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
+ }
+ case Intrinsic::dbg_declare: {
+ MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
+ if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
+ std::vector<SDOperand> Ops;
+
+ SDOperand AddressOp = getValue(DI.getAddress());
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp)) {
+ DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex());
+ }
+ }
+
return 0;
+ }
case Intrinsic::isunordered_f32:
case Intrinsic::isunordered_f64:
case Intrinsic::prefetch:
// FIXME: Currently discarding prefetches.
return 0;
- default:
- std::cerr << I;
- assert(0 && "This intrinsic is not implemented yet!");
- return 0;
}
}
return;
} else { // Not an LLVM intrinsic.
const std::string &Name = F->getName();
- if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
+ if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) {
+ if (I.getNumOperands() == 3 && // Basic sanity checks.
+ I.getOperand(1)->getType()->isFloatingPoint() &&
+ I.getType() == I.getOperand(1)->getType() &&
+ I.getType() == I.getOperand(2)->getType()) {
+ SDOperand LHS = getValue(I.getOperand(1));
+ SDOperand RHS = getValue(I.getOperand(2));
+ setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
+ LHS, RHS));
+ return;
+ }
+ } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
if (I.getNumOperands() == 2 && // Basic sanity checks.
I.getOperand(1)->getType()->isFloatingPoint() &&
I.getType() == I.getOperand(1)->getType()) {
DAG.setRoot(Result.second);
}
-/// GetAvailableRegister - Pick a register from RegChoices that is available
-/// for input and/or output as specified by isOutReg/isInReg. If an allocatable
-/// register is found, it is returned and added to the specified set of used
-/// registers. If not, zero is returned.
-unsigned SelectionDAGLowering::
-GetAvailableRegister(bool isOutReg, bool isInReg,
- const std::vector<unsigned> &RegChoices,
- std::set<unsigned> &OutputRegs,
+/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies the value
+/// assigned to this RegsForValue out of its register(s), merging expanded
+/// parts and truncating promoted values back to ValueVT.  Chain/Flag are
+/// threaded through every copy and updated for the caller.
+SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
+                                        SDOperand &Chain, SDOperand &Flag)const{
+  // Copy the low (or only) part.
+  SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
+  Chain = Val.getValue(1);
+  Flag = Val.getValue(2);
+
+  // If the result was expanded, copy from the top part.
+  if (Regs.size() > 1) {
+    assert(Regs.size() == 2 &&
+           "Cannot expand to more than 2 elts yet!");
+    SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
+    // BUGFIX: thread the chain/flag of the *second* copy (Hi), not the first
+    // (Val), so this CopyFromReg is ordered into the chain/flag sequence.
+    Chain = Hi.getValue(1);
+    Flag = Hi.getValue(2);
+    if (DAG.getTargetLoweringInfo().isLittleEndian())
+      return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
+    else
+      return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
+  }
+
+  // Otherwise, if the return value was promoted, truncate it to the
+  // appropriate type.
+  if (RegVT == ValueVT)
+    return Val;
+
+  if (MVT::isInteger(RegVT))
+    return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
+  else
+    return DAG.getNode(ISD::FP_ROUND, ValueVT, Val);
+}
+
+/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
+/// specified value into the registers specified by this object.  This uses
+/// Chain/Flag as the input and updates them for the output Chain/Flag.
+void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
+                                 SDOperand &Chain, SDOperand &Flag) const {
+  if (Regs.size() == 1) {
+    // If there is a single register and the types differ, this must be
+    // a promotion.
+    if (RegVT != ValueVT) {
+      if (MVT::isInteger(RegVT))
+        Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val);
+      else
+        Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val);
+    }
+    Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag);
+    Flag = Chain.getValue(1);
+  } else {
+    // Expanded value: copy one register-sized piece into each register.
+    // Regs holds the low part first (see the BUILD_PAIR ordering in
+    // getCopyFromRegs), so reverse the walk on big-endian targets to keep
+    // each piece paired with the correct register.
+    std::vector<unsigned> R(Regs);
+    if (!DAG.getTargetLoweringInfo().isLittleEndian())
+      std::reverse(R.begin(), R.end());
+
+    for (unsigned i = 0, e = R.size(); i != e; ++i) {
+      // EXTRACT_ELEMENT pulls out the i'th register-sized piece of Val.
+      SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val,
+                                   DAG.getConstant(i, MVT::i32));
+      Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag);
+      Flag = Chain.getValue(1);
+    }
+  }
+}
+
+/// AddInlineAsmOperands - Add this value to the specified inlineasm node
+/// operand list.  This adds the code marker and includes the number of
+/// values added into it.
+void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
+                                        std::vector<SDOperand> &Ops) const {
+  // The flag word packs the operand code in the low 3 bits and the register
+  // count above them; consumers decode it as (Flags & 7) and (Flags >> 3).
+  Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32));
+  for (unsigned i = 0, e = Regs.size(); i != e; ++i)
+    Ops.push_back(DAG.getRegister(Regs[i], RegVT));
+}
+
+/// isAllocatableRegister - If the specified register is safe to allocate,
+/// i.e. it isn't a stack pointer or some other special register, return the
+/// register class for the register.  Otherwise, return null.
+static const TargetRegisterClass *
+isAllocatableRegister(unsigned Reg, MachineFunction &MF,
+                      const TargetLowering &TLI, const MRegisterInfo *MRI) {
+  for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
+       E = MRI->regclass_end(); RCI != E; ++RCI) {
+    const TargetRegisterClass *RC = *RCI;
+    // If none of the value types for this register class are valid, we
+    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
+    bool isLegal = false;
+    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
+         I != E; ++I) {
+      if (TLI.isTypeLegal(*I)) {
+        isLegal = true;
+        break;
+      }
+    }
+
+    if (!isLegal) continue;
+
+    // NOTE: This isn't ideal.  In particular, this might allocate the
+    // frame pointer in functions that need it (due to them not being taken
+    // out of allocation, because a variable sized allocation hasn't been seen
+    // yet).  This is a slight code pessimization, but should still work.
+    for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
+         E = RC->allocation_order_end(MF); I != E; ++I)
+      if (*I == Reg)
+        return RC;
+  }
+  // Not in the allocation order of any legal class: refuse to allocate it.
+  return 0;
+}
+
+RegsForValue SelectionDAGLowering::
+GetRegistersForValue(const std::string &ConstrCode,
+ MVT::ValueType VT, bool isOutReg, bool isInReg,
+ std::set<unsigned> &OutputRegs,
std::set<unsigned> &InputRegs) {
+ std::pair<unsigned, const TargetRegisterClass*> PhysReg =
+ TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
+ std::vector<unsigned> Regs;
+
+ unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
+ MVT::ValueType RegVT;
+ MVT::ValueType ValueVT = VT;
+
+ if (PhysReg.first) {
+ if (VT == MVT::Other)
+ ValueVT = *PhysReg.second->vt_begin();
+ RegVT = VT;
+
+ // This is a explicit reference to a physical register.
+ Regs.push_back(PhysReg.first);
+
+ // If this is an expanded reference, add the rest of the regs to Regs.
+ if (NumRegs != 1) {
+ RegVT = *PhysReg.second->vt_begin();
+ TargetRegisterClass::iterator I = PhysReg.second->begin();
+ TargetRegisterClass::iterator E = PhysReg.second->end();
+ for (; *I != PhysReg.first; ++I)
+ assert(I != E && "Didn't find reg!");
+
+ // Already added the first reg.
+ --NumRegs; ++I;
+ for (; NumRegs; --NumRegs, ++I) {
+ assert(I != E && "Ran out of registers to allocate!");
+ Regs.push_back(*I);
+ }
+ }
+ return RegsForValue(Regs, RegVT, ValueVT);
+ }
+
+ // This is a reference to a register class. Allocate NumRegs consecutive,
+ // available, registers from the class.
+ std::vector<unsigned> RegClassRegs =
+ TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);
+
const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
MachineFunction &MF = *CurMBB->getParent();
- for (unsigned i = 0, e = RegChoices.size(); i != e; ++i) {
- unsigned Reg = RegChoices[i];
+ unsigned NumAllocated = 0;
+ for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
+ unsigned Reg = RegClassRegs[i];
// See if this register is available.
- if (isOutReg && OutputRegs.count(Reg)) continue; // Already used.
- if (isInReg && InputRegs.count(Reg)) continue; // Already used.
-
+ if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
+ (isInReg && InputRegs.count(Reg))) { // Already used.
+ // Make sure we find consecutive registers.
+ NumAllocated = 0;
+ continue;
+ }
+
// Check to see if this register is allocatable (i.e. don't give out the
// stack pointer).
- bool Found = false;
- for (MRegisterInfo::regclass_iterator RC = MRI->regclass_begin(),
- E = MRI->regclass_end(); !Found && RC != E; ++RC) {
- // NOTE: This isn't ideal. In particular, this might allocate the
- // frame pointer in functions that need it (due to them not being taken
- // out of allocation, because a variable sized allocation hasn't been seen
- // yet). This is a slight code pessimization, but should still work.
- for (TargetRegisterClass::iterator I = (*RC)->allocation_order_begin(MF),
- E = (*RC)->allocation_order_end(MF); I != E; ++I)
- if (*I == Reg) {
- Found = true;
- break;
- }
+ const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
+ if (!RC) {
+ // Make sure we find consecutive registers.
+ NumAllocated = 0;
+ continue;
}
- if (!Found) continue;
- // Okay, this register is good, return it.
- if (isOutReg) OutputRegs.insert(Reg); // Mark used.
- if (isInReg) InputRegs.insert(Reg); // Mark used.
- return Reg;
+ // Okay, this register is good, we can use it.
+ ++NumAllocated;
+
+    // If we have now allocated enough consecutive registers for this
+    // operand, record them all as used and return the register set.
+ if (NumAllocated == NumRegs) {
+ unsigned RegStart = (i-NumAllocated)+1;
+ unsigned RegEnd = i+1;
+ // Mark all of the allocated registers used.
+ for (unsigned i = RegStart; i != RegEnd; ++i) {
+ unsigned Reg = RegClassRegs[i];
+ Regs.push_back(Reg);
+ if (isOutReg) OutputRegs.insert(Reg); // Mark reg used.
+ if (isInReg) InputRegs.insert(Reg); // Mark reg used.
+ }
+
+ return RegsForValue(Regs, *RC->vt_begin(), VT);
+ }
}
- return 0;
+
+ // Otherwise, we couldn't allocate enough registers for this.
+ return RegsForValue();
}
+
/// visitInlineAsm - Handle a call to an InlineAsm object.
///
void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
bool hasSideEffects = IA->hasSideEffects();
std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
+ std::vector<MVT::ValueType> ConstraintVTs;
/// AsmNodeOperands - A list of pairs. The first element is a register, the
/// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
SDOperand Chain = getRoot();
SDOperand Flag;
- // Loop over all of the inputs, copying the operand values into the
- // appropriate registers and processing the output regs.
- unsigned RetValReg = 0;
- std::vector<std::pair<unsigned, Value*> > IndirectStoresToEmit;
- unsigned OpNum = 1;
- bool FoundOutputConstraint = false;
-
// We fully assign registers here at isel time. This is not optimal, but
// should work. For register classes that correspond to LLVM classes, we
// could let the LLVM RA do its thing, but we currently don't. Do a prepass
// over the constraints, collecting fixed registers that we know we can't use.
std::set<unsigned> OutputRegs, InputRegs;
+ unsigned OpNum = 1;
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
std::string &ConstraintCode = Constraints[i].Codes[0];
- std::vector<unsigned> Regs =
- TLI.getRegForInlineAsmConstraint(ConstraintCode);
- if (Regs.size() != 1) continue; // Not assigned a fixed reg.
- unsigned TheReg = Regs[0];
+ MVT::ValueType OpVT;
+
+ // Compute the value type for each operand and add it to ConstraintVTs.
+ switch (Constraints[i].Type) {
+ case InlineAsm::isOutput:
+ if (!Constraints[i].isIndirectOutput) {
+ assert(I.getType() != Type::VoidTy && "Bad inline asm!");
+ OpVT = TLI.getValueType(I.getType());
+ } else {
+ const Type *OpTy = I.getOperand(OpNum)->getType();
+ OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
+ OpNum++; // Consumes a call operand.
+ }
+ break;
+ case InlineAsm::isInput:
+ OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
+ OpNum++; // Consumes a call operand.
+ break;
+ case InlineAsm::isClobber:
+ OpVT = MVT::Other;
+ break;
+ }
+
+ ConstraintVTs.push_back(OpVT);
+
+ if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
+ continue; // Not assigned a fixed reg.
+
+ // Build a list of regs that this operand uses. This always has a single
+ // element for promoted/expanded operands.
+ RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
+ false, false,
+ OutputRegs, InputRegs);
switch (Constraints[i].Type) {
case InlineAsm::isOutput:
// We can't assign any other output to this register.
- OutputRegs.insert(TheReg);
+ OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
// If this is an early-clobber output, it cannot be assigned to the same
// value as the input reg.
if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
- InputRegs.insert(TheReg);
- break;
- case InlineAsm::isClobber:
- // Clobbered regs cannot be used as inputs or outputs.
- InputRegs.insert(TheReg);
- OutputRegs.insert(TheReg);
+ InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
break;
case InlineAsm::isInput:
// We can't assign any other input to this register.
- InputRegs.insert(TheReg);
+ InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
+ break;
+ case InlineAsm::isClobber:
+ // Clobbered regs cannot be used as inputs or outputs.
+ InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
+ OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
break;
}
}
+ // Loop over all of the inputs, copying the operand values into the
+ // appropriate registers and processing the output regs.
+ RegsForValue RetValRegs;
+ std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
+ OpNum = 1;
+
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
std::string &ConstraintCode = Constraints[i].Codes[0];
+
switch (Constraints[i].Type) {
case InlineAsm::isOutput: {
- // Copy the output from the appropriate register.
- std::vector<unsigned> Regs =
- TLI.getRegForInlineAsmConstraint(ConstraintCode);
-
- // Find a regsister that we can use.
- unsigned DestReg;
- if (Regs.size() == 1)
- DestReg = Regs[0];
- else {
- bool UsesInputRegister = false;
- // If this is an early-clobber output, or if there is an input
- // constraint that matches this, we need to reserve the input register
- // so no other inputs allocate to it.
- if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
- UsesInputRegister = true;
- DestReg = GetAvailableRegister(true, UsesInputRegister,
- Regs, OutputRegs, InputRegs);
+ TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
+ if (ConstraintCode.size() == 1) // not a physreg name.
+ CTy = TLI.getConstraintType(ConstraintCode[0]);
+
+ if (CTy == TargetLowering::C_Memory) {
+ // Memory output.
+ SDOperand InOperandVal = getValue(I.getOperand(OpNum));
+
+ // Check that the operand (the address to store to) isn't a float.
+ if (!MVT::isInteger(InOperandVal.getValueType()))
+ assert(0 && "MATCH FAIL!");
+
+ if (!Constraints[i].isIndirectOutput)
+ assert(0 && "MATCH FAIL!");
+
+ OpNum++; // Consumes a call operand.
+
+ // Extend/truncate to the right pointer type if needed.
+ MVT::ValueType PtrType = TLI.getPointerTy();
+ if (InOperandVal.getValueType() < PtrType)
+ InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
+ else if (InOperandVal.getValueType() > PtrType)
+ InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
+
+ // Add information to the INLINEASM node to know about this output.
+ unsigned ResOpType = 4/*MEM*/ | (1 << 3);
+ AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
+ AsmNodeOperands.push_back(InOperandVal);
+ break;
}
+
+ // Otherwise, this is a register output.
+ assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
+
+ // If this is an early-clobber output, or if there is an input
+ // constraint that matches this, we need to reserve the input register
+ // so no other inputs allocate to it.
+ bool UsesInputRegister = false;
+ if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
+ UsesInputRegister = true;
- assert(DestReg && "Couldn't allocate output reg!");
+ // Copy the output from the appropriate register. Find a register that
+ // we can use.
+ RegsForValue Regs =
+ GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
+ true, UsesInputRegister,
+ OutputRegs, InputRegs);
+ assert(!Regs.Regs.empty() && "Couldn't allocate output reg!");
- const Type *OpTy;
if (!Constraints[i].isIndirectOutput) {
- assert(!FoundOutputConstraint &&
+ assert(RetValRegs.Regs.empty() &&
"Cannot have multiple output constraints yet!");
- FoundOutputConstraint = true;
assert(I.getType() != Type::VoidTy && "Bad inline asm!");
-
- RetValReg = DestReg;
- OpTy = I.getType();
+ RetValRegs = Regs;
} else {
- IndirectStoresToEmit.push_back(std::make_pair(DestReg,
+ IndirectStoresToEmit.push_back(std::make_pair(Regs,
I.getOperand(OpNum)));
- OpTy = I.getOperand(OpNum)->getType();
- OpTy = cast<PointerType>(OpTy)->getElementType();
OpNum++; // Consumes a call operand.
}
// Add information to the INLINEASM node to know that this register is
// set.
- AsmNodeOperands.push_back(DAG.getRegister(DestReg,
- TLI.getValueType(OpTy)));
- AsmNodeOperands.push_back(DAG.getConstant(2, MVT::i32)); // ISDEF
-
+ Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
break;
}
case InlineAsm::isInput: {
- Value *Operand = I.getOperand(OpNum);
- const Type *OpTy = Operand->getType();
+ SDOperand InOperandVal = getValue(I.getOperand(OpNum));
OpNum++; // Consumes a call operand.
-
- unsigned SrcReg;
- SDOperand ResOp;
- unsigned ResOpType;
- SDOperand InOperandVal = getValue(Operand);
if (isdigit(ConstraintCode[0])) { // Matching constraint?
// If this is required to match an output register we have already set,
// just use its register.
unsigned OperandNo = atoi(ConstraintCode.c_str());
- SrcReg = cast<RegisterSDNode>(AsmNodeOperands[OperandNo*2+2])->getReg();
- ResOp = DAG.getRegister(SrcReg, TLI.getValueType(OpTy));
- ResOpType = 1;
- Chain = DAG.getCopyToReg(Chain, SrcReg, InOperandVal, Flag);
- Flag = Chain.getValue(1);
- } else {
- TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
- if (ConstraintCode.size() == 1) // not a physreg name.
- CTy = TLI.getConstraintType(ConstraintCode[0]);
-
- switch (CTy) {
- default: assert(0 && "Unknown constraint type! FAIL!");
- case TargetLowering::C_RegisterClass: {
- // Copy the input into the appropriate register.
- std::vector<unsigned> Regs =
- TLI.getRegForInlineAsmConstraint(ConstraintCode);
- if (Regs.size() == 1)
- SrcReg = Regs[0];
- else
- SrcReg = GetAvailableRegister(false, true, Regs,
- OutputRegs, InputRegs);
- // FIXME: should be match fail.
- assert(SrcReg && "Wasn't able to allocate register!");
- Chain = DAG.getCopyToReg(Chain, SrcReg, InOperandVal, Flag);
- Flag = Chain.getValue(1);
-
- ResOp = DAG.getRegister(SrcReg, TLI.getValueType(OpTy));
- ResOpType = 1;
- break;
+ // Scan until we find the definition we already emitted of this operand.
+ // When we find it, create a RegsForValue operand.
+ unsigned CurOp = 2; // The first operand.
+ for (; OperandNo; --OperandNo) {
+ // Advance to the next operand.
+ unsigned NumOps =
+ cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
+ assert((NumOps & 7) == 2 /*REGDEF*/ &&
+ "Skipped past definitions?");
+ CurOp += (NumOps>>3)+1;
}
- case TargetLowering::C_Other:
- if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0]))
- assert(0 && "MATCH FAIL!");
- ResOp = InOperandVal;
- ResOpType = 3;
- break;
+
+ unsigned NumOps =
+ cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
+ assert((NumOps & 7) == 2 /*REGDEF*/ &&
+ "Skipped past definitions?");
+
+ // Add NumOps>>3 registers to MatchedRegs.
+ RegsForValue MatchedRegs;
+ MatchedRegs.ValueVT = InOperandVal.getValueType();
+ MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType();
+ for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
+ unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
+ MatchedRegs.Regs.push_back(Reg);
}
+
+ // Use the produced MatchedRegs object to
+ MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag);
+ MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
+ break;
+ }
+
+ TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
+ if (ConstraintCode.size() == 1) // not a physreg name.
+ CTy = TLI.getConstraintType(ConstraintCode[0]);
+
+ if (CTy == TargetLowering::C_Other) {
+ if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0]))
+ assert(0 && "MATCH FAIL!");
+
+ // Add information to the INLINEASM node to know about this input.
+ unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
+ AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
+ AsmNodeOperands.push_back(InOperandVal);
+ break;
+ } else if (CTy == TargetLowering::C_Memory) {
+ // Memory input.
+
+ // Check that the operand isn't a float.
+ if (!MVT::isInteger(InOperandVal.getValueType()))
+ assert(0 && "MATCH FAIL!");
+
+ // Extend/truncate to the right pointer type if needed.
+ MVT::ValueType PtrType = TLI.getPointerTy();
+ if (InOperandVal.getValueType() < PtrType)
+ InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
+ else if (InOperandVal.getValueType() > PtrType)
+ InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
+
+ // Add information to the INLINEASM node to know about this input.
+ unsigned ResOpType = 4/*MEM*/ | (1 << 3);
+ AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
+ AsmNodeOperands.push_back(InOperandVal);
+ break;
}
+
+ assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
+
+ // Copy the input into the appropriate registers.
+ RegsForValue InRegs =
+ GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
+ false, true, OutputRegs, InputRegs);
+ // FIXME: should be match fail.
+ assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!");
+
+ InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag);
- // Add information to the INLINEASM node to know about this input.
- AsmNodeOperands.push_back(ResOp);
- AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
+ InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands);
break;
}
- case InlineAsm::isClobber:
- // Nothing to do.
+ case InlineAsm::isClobber: {
+ RegsForValue ClobberedRegs =
+ GetRegistersForValue(ConstraintCode, MVT::Other, false, false,
+ OutputRegs, InputRegs);
+ // Add the clobbered value to the operand list, so that the register
+ // allocator is aware that the physreg got clobbered.
+ if (!ClobberedRegs.Regs.empty())
+ ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands);
break;
}
+ }
}
// Finish up input operands.
// If this asm returns a register value, copy the result from that register
// and set it as the value of the call.
- if (RetValReg) {
- SDOperand Val = DAG.getCopyFromReg(Chain, RetValReg,
- TLI.getValueType(I.getType()), Flag);
- Chain = Val.getValue(1);
- Flag = Val.getValue(2);
- setValue(&I, Val);
- }
+ if (!RetValRegs.Regs.empty())
+ setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag));
std::vector<std::pair<SDOperand, Value*> > StoresToEmit;
// Process indirect outputs, first output all of the flagged copies out of
// physregs.
for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
+ RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
Value *Ptr = IndirectStoresToEmit[i].second;
- const Type *Ty = cast<PointerType>(Ptr->getType())->getElementType();
- SDOperand Val = DAG.getCopyFromReg(Chain, IndirectStoresToEmit[i].first,
- TLI.getValueType(Ty), Flag);
- Chain = Val.getValue(1);
- Flag = Val.getValue(2);
- StoresToEmit.push_back(std::make_pair(Val, Ptr));
+ SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag);
+ StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
}
// Emit the non-flagged stores from the physregs.
SDOperand Value = getMemsetValue(Op2, VT, DAG);
SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, getRoot(),
Value,
- getMemBasePlusOffset(Op1, Offset, DAG, TLI),
- DAG.getSrcValue(I.getOperand(1), Offset));
+ getMemBasePlusOffset(Op1, Offset, DAG, TLI),
+ DAG.getSrcValue(I.getOperand(1), Offset));
OutChains.push_back(Store);
Offset += VTSize;
}
if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(),
Size->getValue(), Align, TLI)) {
unsigned NumMemOps = MemOps.size();
- unsigned SrcOff = 0, DstOff = 0;
+ unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0;
GlobalAddressSDNode *G = NULL;
std::string Str;
+ bool CopyFromStr = false;
if (Op2.getOpcode() == ISD::GlobalAddress)
G = cast<GlobalAddressSDNode>(Op2);
Op2.getOperand(0).getOpcode() == ISD::GlobalAddress &&
Op2.getOperand(1).getOpcode() == ISD::Constant) {
G = cast<GlobalAddressSDNode>(Op2.getOperand(0));
- SrcOff += cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
+ SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
}
if (G) {
GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
- if (GV)
- Str = getStringValue(GV);
+ if (GV) {
+ Str = GV->getStringValue(false);
+ if (!Str.empty()) {
+ CopyFromStr = true;
+ SrcOff += SrcDelta;
+ }
+ }
}
for (unsigned i = 0; i < NumMemOps; i++) {
unsigned VTSize = getSizeInBits(VT) / 8;
SDOperand Value, Chain, Store;
- if (!Str.empty()) {
+ if (CopyFromStr) {
Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
Chain = getRoot();
Store =
SelectionDAG &DAG = SDL.DAG;
if (SrcVT == DestVT) {
return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
+ } else if (SrcVT == MVT::Vector) {
+ // Handle copies from generic vectors to registers.
+ MVT::ValueType PTyElementVT, PTyLegalElementVT;
+ unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
+ PTyElementVT, PTyLegalElementVT);
+
+ // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
+ // MVT::Vector type.
+ Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
+ DAG.getConstant(NE, MVT::i32),
+ DAG.getValueType(PTyElementVT));
+
+ // Loop over all of the elements of the resultant vector,
+ // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
+ // copying them into output registers.
+ std::vector<SDOperand> OutChains;
+ SDOperand Root = SDL.getRoot();
+ for (unsigned i = 0; i != NE; ++i) {
+ SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
+ Op, DAG.getConstant(i, MVT::i32));
+ if (PTyElementVT == PTyLegalElementVT) {
+ // Elements are legal.
+ OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
+ } else if (PTyLegalElementVT > PTyElementVT) {
+ // Elements are promoted.
+ if (MVT::isFloatingPoint(PTyLegalElementVT))
+ Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
+ else
+ Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
+ OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
+ } else {
+ // Elements are expanded.
+ // The src value is expanded into multiple registers.
+ SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
+ Elt, DAG.getConstant(0, MVT::i32));
+ SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
+ Elt, DAG.getConstant(1, MVT::i32));
+ OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
+ OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
+ }
+ }
+ return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
} else if (SrcVT < DestVT) {
// The src value is promoted to the register.
if (MVT::isFloatingPoint(SrcVT))
void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
- FunctionLoweringInfo &FuncInfo) {
+ FunctionLoweringInfo &FuncInfo) {
SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
std::vector<SDOperand> UnorderedChains;
for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
I != E; ++I)
SDL.visit(*I);
-
+
// Ensure that all instructions which are used outside of their defining
// blocks are available as virtual registers.
for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
// Lower the terminator after the copies are emitted.
SDL.visit(*LLVMBB->getTerminator());
+ // Copy over any CaseBlock records that may now exist due to SwitchInst
+ // lowering. SwitchCases is SelectionDAGISel state consumed later by
+ // SelectBasicBlock; stale records from a previous block must not leak in.
+ // NOTE(review): the clear() below is redundant — the copy-assignment on the
+ // next line already replaces the container's contents.
+ SwitchCases.clear();
+ SwitchCases = SDL.SwitchCases;
+
// Make sure the root of the DAG is up-to-date.
DAG.setRoot(SDL.getRoot());
}
-void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
- FunctionLoweringInfo &FuncInfo) {
- SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
- CurDAG = &DAG;
- std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
-
- // First step, lower LLVM code to some DAG. This DAG may use operations and
- // types that are not supported by the target.
- BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
-
+/// CodeGenAndEmitDAG - Given an already-lowered selection DAG, run the
+/// combine/legalize/combine pipeline on it and then instruction-select it
+/// into machine code for the current MachineBasicBlock (BB). Factored out
+/// of SelectBasicBlock so it can also be invoked on the extra DAGs built
+/// for switch lowering.
+void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
// Run the DAG combiner in pre-legalize mode.
DAG.Combine(false);
DEBUG(std::cerr << "Lowered selection DAG:\n");
DEBUG(DAG.dump());
-
+
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
DAG.Legalize();
-
+
DEBUG(std::cerr << "Legalized selection DAG:\n");
DEBUG(DAG.dump());
-
+
// Run the DAG combiner in post-legalize mode.
DAG.Combine(true);
// Third, instruction select all of the operations to machine code, adding the
// code to the MachineBasicBlock.
InstructionSelectBasicBlock(DAG);
-
+
DEBUG(std::cerr << "Selected machine code:\n");
DEBUG(BB->dump());
+}
+/// SelectBasicBlock - Lower one LLVM basic block to a selection DAG, emit it
+/// as machine code via CodeGenAndEmitDAG, then wire up PHI-node operands in
+/// successor blocks, and finally build/emit any extra DAGs required by
+/// SwitchInst lowering (one per CaseBlock record in SwitchCases).
+void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
+ FunctionLoweringInfo &FuncInfo) {
+ std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
+ // Scope the DAG so it is destroyed at the closing brace, before the
+ // PHI-update and switch-lowering work below runs.
+ {
+ SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ CurDAG = &DAG;
+
+ // First step, lower LLVM code to some DAG. This DAG may use operations and
+ // types that are not supported by the target.
+ BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
+
+ // Second step, emit the lowered DAG as machine code.
+ CodeGenAndEmitDAG(DAG);
+ }
+
// Next, now that we know what the last MBB the LLVM BB expanded is, update
// PHI nodes in successors.
- for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
- MachineInstr *PHI = PHINodesToUpdate[i].first;
- assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
- "This is not a machine PHI node that we are updating!");
- PHI->addRegOperand(PHINodesToUpdate[i].second);
- PHI->addMachineBasicBlockOperand(BB);
+ // Common case: no switch lowering happened, so every recorded PHI operand
+ // simply comes from the (single) emitted block BB.
+ if (SwitchCases.empty()) {
+ for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
+ MachineInstr *PHI = PHINodesToUpdate[i].first;
+ assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
+ "This is not a machine PHI node that we are updating!");
+ PHI->addRegOperand(PHINodesToUpdate[i].second);
+ PHI->addMachineBasicBlockOperand(BB);
+ }
+ return;
}
-
- // Finally, add the CFG edges from the last selected MBB to the successor
- // MBBs.
- TerminatorInst *TI = LLVMBB->getTerminator();
- for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
- MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[TI->getSuccessor(i)];
- BB->addSuccessor(Succ0MBB);
+
+ // If we generated any switch lowering information, build and codegen any
+ // additional DAGs necessary. Each CaseBlock gets its own fresh DAG emitted
+ // into the MachineBasicBlock recorded in ThisBB.
+ for(unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
+ SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ CurDAG = &SDAG;
+ SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
+ // Set the current basic block to the mbb we wish to insert the code into
+ BB = SwitchCases[i].ThisBB;
+ SDL.setCurrentBasicBlock(BB);
+ // Emit the code
+ SDL.visitSwitchCase(SwitchCases[i]);
+ SDAG.setRoot(SDL.getRoot());
+ CodeGenAndEmitDAG(SDAG);
+ // Iterate over the phi nodes, if there is a phi node in a successor of this
+ // block (for instance, the default block), then add a pair of operands to
+ // the phi node for this block, as if we were coming from the original
+ // BB before switch expansion.
+ for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
+ MachineInstr *PHI = PHINodesToUpdate[pi].first;
+ MachineBasicBlock *PHIBB = PHI->getParent();
+ assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
+ "This is not a machine PHI node that we are updating!");
+ if (PHIBB == SwitchCases[i].LHSBB || PHIBB == SwitchCases[i].RHSBB) {
+ PHI->addRegOperand(PHINodesToUpdate[pi].second);
+ PHI->addMachineBasicBlockOperand(BB);
+ }
+ }
}
}
SL = createBURRListDAGScheduler(DAG, BB);
break;
case noScheduling:
+ SL = createBFS_DAGScheduler(DAG, BB);
+ break;
case simpleScheduling:
+ SL = createSimpleDAGScheduler(false, DAG, BB);
+ break;
case simpleNoItinScheduling:
- SL = createSimpleDAGScheduler(ISHeuristic, DAG, BB);
+ SL = createSimpleDAGScheduler(true, DAG, BB);
break;
case listSchedulingBURR:
SL = createBURRListDAGScheduler(DAG, BB);
+ break;
+ case listSchedulingTD:
+ SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
+ break;
}
BB = SL->Run();
delete SL;
}
+
+/// CreateTargetHazardRecognizer - Factory for the hazard recognizer handed to
+/// the top-down list scheduler (see the listSchedulingTD case above). This
+/// default returns a plain base HazardRecognizer; presumably targets override
+/// it to supply pipeline-specific hazard detection — confirm in the header.
+HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
+ return new HazardRecognizer();
+}
+
+/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
+/// by tblgen. Others should not call it.
+///
+/// Rewrites the operand list of an INLINEASM node in place: every operand
+/// group is copied through verbatim except memory (MEM) operands, whose
+/// single address value is replaced by whatever operands the target's
+/// address-selection produces.
+void SelectionDAGISel::
+SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) {
+ std::vector<SDOperand> InOps;
+ std::swap(InOps, Ops);
+
+ Ops.push_back(InOps[0]); // input chain.
+ Ops.push_back(InOps[1]); // input asm string.
+
+ const char *AsmStr = cast<ExternalSymbolSDNode>(InOps[1])->getSymbol();
+ // NOTE(review): AsmStr is never used below — candidate for removal.
+ unsigned i = 2, e = InOps.size();
+ if (InOps[e-1].getValueType() == MVT::Flag)
+ --e; // Don't process a flag operand if it is here.
+
+ // Each group starts with a flag word: low 3 bits encode the operand kind
+ // (4 == MEM), and (Flags >> 3) is the number of SDOperands that follow.
+ while (i != e) {
+ unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue();
+ if ((Flags & 7) != 4 /*MEM*/) {
+ // Just skip over this operand, copying the operands verbatim.
+ Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1);
+ i += (Flags >> 3) + 1;
+ } else {
+ assert((Flags >> 3) == 1 && "Memory operand with multiple values?");
+ // Otherwise, this is a memory operand. Ask the target to select it.
+ std::vector<SDOperand> SelOps;
+ if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
+ std::cerr << "Could not match memory address. Inline asm failure!\n";
+ exit(1);
+ }
+
+ // Add this to the output node. Re-encode the flag word with the new
+ // operand count so later consumers can skip the group correctly.
+ Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32));
+ Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
+ i += 2; // consume the flag word plus the single address value.
+ }
+ }
+
+ // Add the flag input back if present.
+ if (e != InOps.size())
+ Ops.push_back(InOps.back());
+}