#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include <map>
#include <set>
#include <iostream>
+#include <algorithm>
using namespace llvm;
#ifndef NDEBUG
ViewSchedDAGs("view-sched-dags", cl::Hidden,
cl::desc("Pop up a window to show sched dags as they are processed"));
#else
-static const bool ViewISelDAGs = 0;
-static const bool ViewSchedDAGs = 0;
+static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
#endif
namespace {
- cl::opt<SchedHeuristics>
+ cl::opt<ScheduleDAG::SchedHeuristics>
ISHeuristic(
"sched",
cl::desc("Choose scheduling style"),
- cl::init(defaultScheduling),
+ cl::init(ScheduleDAG::defaultScheduling),
cl::values(
- clEnumValN(defaultScheduling, "default",
+ clEnumValN(ScheduleDAG::defaultScheduling, "default",
"Target preferred scheduling style"),
- clEnumValN(noScheduling, "none",
+ clEnumValN(ScheduleDAG::noScheduling, "none",
"No scheduling: breadth first sequencing"),
- clEnumValN(simpleScheduling, "simple",
+ clEnumValN(ScheduleDAG::simpleScheduling, "simple",
"Simple two pass scheduling: minimize critical path "
"and maximize processor utilization"),
- clEnumValN(simpleNoItinScheduling, "simple-noitin",
+ clEnumValN(ScheduleDAG::simpleNoItinScheduling, "simple-noitin",
"Simple two pass scheduling: Same as simple "
"except using generic latency"),
- clEnumValN(listSchedulingBURR, "list-burr",
- "Bottom up register reduction list scheduling"),
+ clEnumValN(ScheduleDAG::listSchedulingBURR, "list-burr",
+ "Bottom-up register reduction list scheduling"),
+ clEnumValN(ScheduleDAG::listSchedulingTDRR, "list-tdrr",
+ "Top-down register reduction list scheduling"),
+ clEnumValN(ScheduleDAG::listSchedulingTD, "list-td",
+ "Top-down list scheduler"),
clEnumValEnd));
} // namespace
return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
}
- unsigned CreateRegForValue(const Value *V) {
- MVT::ValueType VT = TLI.getValueType(V->getType());
- // The common case is that we will only create one register for this
- // value. If we have that case, create and return the virtual register.
- unsigned NV = TLI.getNumElements(VT);
- if (NV == 1) {
- // If we are promoting this value, pick the next largest supported type.
- return MakeReg(TLI.getTypeToTransformTo(VT));
- }
-
- // If this value is represented with multiple target registers, make sure
- // to create enough consecutive registers of the right (smaller) type.
- unsigned NT = VT-1; // Find the type to use.
- while (TLI.getNumElements((MVT::ValueType)NT) != 1)
- --NT;
-
- unsigned R = MakeReg((MVT::ValueType)NT);
- for (unsigned i = 1; i != NV; ++i)
- MakeReg((MVT::ValueType)NT);
- return R;
- }
-
+ unsigned CreateRegForValue(const Value *V);
+
unsigned InitializeRegForValue(const Value *V) {
unsigned &R = ValueMap[V];
assert(R == 0 && "Already initialized this value register!");
}
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
-/// PHI nodes or outside of the basic block that defines it.
+/// PHI nodes or outside of the basic block that defines it, or used by a
+/// switch instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
if (isa<PHINode>(I)) return true;
BasicBlock *BB = I->getParent();
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
+ if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
+ isa<SwitchInst>(*UI))
return true;
return false;
}
/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
-/// entry block, return true.
+/// entry block, return true. An argument used by a switch counts as a use
+/// outside the entry block, since the switch may expand into multiple blocks.
static bool isOnlyUsedInEntryBlock(Argument *A) {
BasicBlock *Entry = A->getParent()->begin();
for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != Entry)
+ if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
return false; // Use not in entry block.
return true;
}
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
const Type *Ty = AI->getAllocatedType();
- uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
unsigned Align =
- std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
+ std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
AI->getAlignment());
// If the alignment of the value is smaller than the size of the value,
for (BasicBlock::iterator I = BB->begin();
(PN = dyn_cast<PHINode>(I)); ++I)
if (!PN->use_empty()) {
- unsigned NumElements =
- TLI.getNumElements(TLI.getValueType(PN->getType()));
+ MVT::ValueType VT = TLI.getValueType(PN->getType());
+ unsigned NumElements;
+ if (VT != MVT::Vector)
+ NumElements = TLI.getNumElements(VT);
+ else {
+ MVT::ValueType VT1,VT2;
+ NumElements =
+ TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
+ VT1, VT2);
+ }
unsigned PHIReg = ValueMap[PN];
assert(PHIReg &&"PHI node does not have an assigned virtual register!");
for (unsigned i = 0; i != NumElements; ++i)
}
}
-
+/// CreateRegForValue - Allocate the appropriate number of virtual registers of
+/// the correctly promoted or expanded types. Assign these registers
+/// consecutive vreg numbers and return the first assigned number.
+unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
+ MVT::ValueType VT = TLI.getValueType(V->getType());
+
+  // The number of multiples of registers that we need, e.g., to split up
+  // a <2 x int64> into 4 x i32 registers.
+ unsigned NumVectorRegs = 1;
+
+ // If this is a packed type, figure out what type it will decompose into
+ // and how many of the elements it will use.
+ if (VT == MVT::Vector) {
+ const PackedType *PTy = cast<PackedType>(V->getType());
+ unsigned NumElts = PTy->getNumElements();
+ MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());
+
+ // Divide the input until we get to a supported size. This will always
+ // end with a scalar if the target doesn't support vectors.
+ while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
+ NumElts >>= 1;
+ NumVectorRegs <<= 1;
+ }
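+    // Illustrative walk-through (assuming a target where v2i64 is not legal
+    // and i64 expands to two i32 registers): for a <2 x int64> value the loop
+    // halves NumElts from 2 to 1 and doubles NumVectorRegs to 2, so VT becomes
+    // i64; the expansion code below then creates 2*2 = 4 consecutive i32 vregs.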
+ if (NumElts == 1)
+ VT = EltTy;
+ else
+ VT = getVectorType(EltTy, NumElts);
+ }
+
+ // The common case is that we will only create one register for this
+ // value. If we have that case, create and return the virtual register.
+ unsigned NV = TLI.getNumElements(VT);
+ if (NV == 1) {
+ // If we are promoting this value, pick the next largest supported type.
+ MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
+ unsigned Reg = MakeReg(PromotedType);
+ // If this is a vector of supported or promoted types (e.g. 4 x i16),
+ // create all of the registers.
+ for (unsigned i = 1; i != NumVectorRegs; ++i)
+ MakeReg(PromotedType);
+ return Reg;
+ }
+
+ // If this value is represented with multiple target registers, make sure
+ // to create enough consecutive registers of the right (smaller) type.
+ unsigned NT = VT-1; // Find the type to use.
+ while (TLI.getNumElements((MVT::ValueType)NT) != 1)
+ --NT;
+
+ unsigned R = MakeReg((MVT::ValueType)NT);
+ for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
+ MakeReg((MVT::ValueType)NT);
+ return R;
+}
//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// analysis.
std::vector<SDOperand> PendingLoads;
+ /// Case - A pair of values to record the Value for a switch case, and the
+ /// case's target basic block.
+ typedef std::pair<Constant*, MachineBasicBlock*> Case;
+ typedef std::vector<Case>::iterator CaseItr;
+ typedef std::pair<CaseItr, CaseItr> CaseRange;
+
+  /// CaseRec - A record used when lowering a switch instruction into a binary
+  /// tree of conditional branches.
+ struct CaseRec {
+ CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
+ CaseBB(bb), LT(lt), GE(ge), Range(r) {}
+
+ /// CaseBB - The MBB in which to emit the compare and branch
+ MachineBasicBlock *CaseBB;
+ /// LT, GE - If nonzero, we know the current case value must be less-than or
+ /// greater-than-or-equal-to these Constants.
+ Constant *LT;
+ Constant *GE;
+ /// Range - A pair of iterators representing the range of case values to be
+ /// processed at this point in the binary search tree.
+ CaseRange Range;
+ };
+
+ /// The comparison function for sorting Case values.
+ struct CaseCmp {
+ bool operator () (const Case& C1, const Case& C2) {
+ if (const ConstantUInt* U1 = dyn_cast<const ConstantUInt>(C1.first))
+ return U1->getValue() < cast<const ConstantUInt>(C2.first)->getValue();
+
+ const ConstantSInt* S1 = dyn_cast<const ConstantSInt>(C1.first);
+ return S1->getValue() < cast<const ConstantSInt>(C2.first)->getValue();
+ }
+ };
+
public:
// TLI - This is information that describes the available target features we
// need for lowering. This indicates when operations are unavailable,
// implemented with a libcall, etc.
TargetLowering &TLI;
SelectionDAG &DAG;
- const TargetData &TD;
+ const TargetData *TD;
+ /// SwitchCases - Vector of CaseBlock structures used to communicate
+ /// SwitchInst code generation information.
+ std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
+ SelectionDAGISel::JumpTable JT;
+
/// FuncInfo - Information about the function as a whole.
///
FunctionLoweringInfo &FuncInfo;
SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
FunctionLoweringInfo &funcinfo)
: TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
- FuncInfo(funcinfo) {
+ JT(0,0,0,0), FuncInfo(funcinfo) {
}
/// getRoot - Return the current virtual root of the Selection DAG.
void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
+ SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
+ SDOperand SrcValue, SDOperand Root,
+ bool isVolatile);
SDOperand getIntPtrConstant(uint64_t Val) {
return DAG.getConstant(Val, TLI.getPointerTy());
}
- SDOperand getValue(const Value *V) {
- SDOperand &N = NodeMap[V];
- if (N.Val) return N;
-
- const Type *VTy = V->getType();
- MVT::ValueType VT = TLI.getValueType(VTy);
- if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V)))
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
- visit(CE->getOpcode(), *CE);
- assert(N.Val && "visit didn't populate the ValueMap!");
- return N;
- } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
- return N = DAG.getGlobalAddress(GV, VT);
- } else if (isa<ConstantPointerNull>(C)) {
- return N = DAG.getConstant(0, TLI.getPointerTy());
- } else if (isa<UndefValue>(C)) {
- return N = DAG.getNode(ISD::UNDEF, VT);
- } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
- return N = DAG.getConstantFP(CFP->getValue(), VT);
- } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
- unsigned NumElements = PTy->getNumElements();
- MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
- MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);
-
- // Now that we know the number and type of the elements, push a
- // Constant or ConstantFP node onto the ops list for each element of
- // the packed constant.
- std::vector<SDOperand> Ops;
- if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
- if (MVT::isFloatingPoint(PVT)) {
- for (unsigned i = 0; i != NumElements; ++i) {
- const ConstantFP *El = cast<ConstantFP>(CP->getOperand(i));
- Ops.push_back(DAG.getConstantFP(El->getValue(), PVT));
- }
- } else {
- for (unsigned i = 0; i != NumElements; ++i) {
- const ConstantIntegral *El =
- cast<ConstantIntegral>(CP->getOperand(i));
- Ops.push_back(DAG.getConstant(El->getRawValue(), PVT));
- }
- }
- } else {
- assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
- SDOperand Op;
- if (MVT::isFloatingPoint(PVT))
- Op = DAG.getConstantFP(0, PVT);
- else
- Op = DAG.getConstant(0, PVT);
- Ops.assign(NumElements, Op);
- }
-
- // Handle the case where we have a 1-element vector, in which
- // case we want to immediately turn it into a scalar constant.
- if (Ops.size() == 1) {
- return N = Ops[0];
- } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
- return N = DAG.getNode(ISD::ConstantVec, TVT, Ops);
- } else {
- // If the packed type isn't legal, then create a ConstantVec node with
- // generic Vector type instead.
- return N = DAG.getNode(ISD::ConstantVec, MVT::Vector, Ops);
- }
- } else {
- // Canonicalize all constant ints to be unsigned.
- return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
- }
-
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
- std::map<const AllocaInst*, int>::iterator SI =
- FuncInfo.StaticAllocaMap.find(AI);
- if (SI != FuncInfo.StaticAllocaMap.end())
- return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
- }
-
- std::map<const Value*, unsigned>::const_iterator VMI =
- FuncInfo.ValueMap.find(V);
- assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
-
- unsigned InReg = VMI->second;
-
- // If this type is not legal, make it so now.
- MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
-
- N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
- if (DestVT < VT) {
- // Source must be expanded. This input value is actually coming from the
- // register pair VMI->second and VMI->second+1.
- N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
- DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
- } else {
- if (DestVT > VT) { // Promotion case
- if (MVT::isFloatingPoint(VT))
- N = DAG.getNode(ISD::FP_ROUND, VT, N);
- else
- N = DAG.getNode(ISD::TRUNCATE, VT, N);
- }
- }
-
- return N;
- }
+ SDOperand getValue(const Value *V);
const SDOperand &setValue(const Value *V, SDOperand NewN) {
SDOperand &N = NodeMap[V];
bool OutReg, bool InReg,
std::set<unsigned> &OutputRegs,
std::set<unsigned> &InputRegs);
-
+
// Terminator instructions.
void visitRet(ReturnInst &I);
void visitBr(BranchInst &I);
+ void visitSwitch(SwitchInst &I);
void visitUnreachable(UnreachableInst &I) { /* noop */ }
+  // Helpers for visitSwitch
+ void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
+ void visitJumpTable(SelectionDAGISel::JumpTable &JT);
+
// These all get lowered before this pass.
- void visitExtractElement(ExtractElementInst &I) { assert(0 && "TODO"); }
- void visitInsertElement(InsertElementInst &I) { assert(0 && "TODO"); }
- void visitSwitch(SwitchInst &I) { assert(0 && "TODO"); }
void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }
- //
void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
void visitShift(User &I, unsigned Opcode);
void visitAdd(User &I) {
}
void visitDiv(User &I) {
const Type *Ty = I.getType();
- visitBinary(I, Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV, 0);
+ visitBinary(I,
+ Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV,
+ Ty->isSigned() ? ISD::VSDIV : ISD::VUDIV);
}
void visitRem(User &I) {
const Type *Ty = I.getType();
visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
}
- void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, 0); }
- void visitOr (User &I) { visitBinary(I, ISD::OR, 0, 0); }
- void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, 0); }
+ void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, ISD::VAND); }
+ void visitOr (User &I) { visitBinary(I, ISD::OR, 0, ISD::VOR); }
+ void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, ISD::VXOR); }
void visitShl(User &I) { visitShift(I, ISD::SHL); }
void visitShr(User &I) {
visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }
+ void visitExtractElement(User &I);
+ void visitInsertElement(User &I);
+ void visitShuffleVector(User &I);
+
void visitGetElementPtr(User &I);
void visitCast(User &I);
void visitSelect(User &I);
- //
void visitMalloc(MallocInst &I);
void visitFree(FreeInst &I);
void visitCall(CallInst &I);
void visitInlineAsm(CallInst &I);
const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
+ void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);
void visitVAStart(CallInst &I);
void visitVAArg(VAArgInst &I);
};
} // end namespace llvm
+SDOperand SelectionDAGLowering::getValue(const Value *V) {
+ SDOperand &N = NodeMap[V];
+ if (N.Val) return N;
+
+ const Type *VTy = V->getType();
+ MVT::ValueType VT = TLI.getValueType(VTy);
+ if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ visit(CE->getOpcode(), *CE);
+ assert(N.Val && "visit didn't populate the ValueMap!");
+ return N;
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
+ return N = DAG.getGlobalAddress(GV, VT);
+ } else if (isa<ConstantPointerNull>(C)) {
+ return N = DAG.getConstant(0, TLI.getPointerTy());
+ } else if (isa<UndefValue>(C)) {
+ if (!isa<PackedType>(VTy))
+ return N = DAG.getNode(ISD::UNDEF, VT);
+
+ // Create a VBUILD_VECTOR of undef nodes.
+ const PackedType *PTy = cast<PackedType>(VTy);
+ unsigned NumElements = PTy->getNumElements();
+ MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
+
+ std::vector<SDOperand> Ops;
+ Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));
+
+      // Create a VBUILD_VECTOR node with generic Vector type.
+ Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
+ Ops.push_back(DAG.getValueType(PVT));
+ return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
+ } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+ return N = DAG.getConstantFP(CFP->getValue(), VT);
+ } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
+ unsigned NumElements = PTy->getNumElements();
+ MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
+
+ // Now that we know the number and type of the elements, push a
+ // Constant or ConstantFP node onto the ops list for each element of
+ // the packed constant.
+ std::vector<SDOperand> Ops;
+ if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
+ for (unsigned i = 0; i != NumElements; ++i)
+ Ops.push_back(getValue(CP->getOperand(i)));
+ } else {
+ assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
+ SDOperand Op;
+ if (MVT::isFloatingPoint(PVT))
+ Op = DAG.getConstantFP(0, PVT);
+ else
+ Op = DAG.getConstant(0, PVT);
+ Ops.assign(NumElements, Op);
+ }
+
+ // Create a VBUILD_VECTOR node with generic Vector type.
+ Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
+ Ops.push_back(DAG.getValueType(PVT));
+ return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
+ } else {
+ // Canonicalize all constant ints to be unsigned.
+ return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
+ }
+ }
+
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
+ std::map<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end())
+ return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
+ }
+
+ std::map<const Value*, unsigned>::const_iterator VMI =
+ FuncInfo.ValueMap.find(V);
+ assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
+
+ unsigned InReg = VMI->second;
+
+ // If this type is not legal, make it so now.
+ if (VT != MVT::Vector) {
+ MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
+
+ N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
+ if (DestVT < VT) {
+ // Source must be expanded. This input value is actually coming from the
+ // register pair VMI->second and VMI->second+1.
+ N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
+ DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
+ } else if (DestVT > VT) { // Promotion case
+ if (MVT::isFloatingPoint(VT))
+ N = DAG.getNode(ISD::FP_ROUND, VT, N);
+ else
+ N = DAG.getNode(ISD::TRUNCATE, VT, N);
+ }
+ } else {
+ // Otherwise, if this is a vector, make it available as a generic vector
+ // here.
+ MVT::ValueType PTyElementVT, PTyLegalElementVT;
+ const PackedType *PTy = cast<PackedType>(VTy);
+ unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
+ PTyLegalElementVT);
+
+ // Build a VBUILD_VECTOR with the input registers.
+ std::vector<SDOperand> Ops;
+ if (PTyElementVT == PTyLegalElementVT) {
+ // If the value types are legal, just VBUILD the CopyFromReg nodes.
+ for (unsigned i = 0; i != NE; ++i)
+ Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
+ PTyElementVT));
+ } else if (PTyElementVT < PTyLegalElementVT) {
+      // If the register was promoted, use TRUNCATE or FP_ROUND as appropriate.
+ for (unsigned i = 0; i != NE; ++i) {
+ SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
+ PTyElementVT);
+ if (MVT::isFloatingPoint(PTyElementVT))
+ Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
+ else
+ Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
+ Ops.push_back(Op);
+ }
+ } else {
+ // If the register was expanded, use BUILD_PAIR.
+ assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
+ for (unsigned i = 0; i != NE/2; ++i) {
+ SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
+ PTyElementVT);
+ SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
+ PTyElementVT);
+ Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
+ }
+ }
+
+ Ops.push_back(DAG.getConstant(NE, MVT::i32));
+ Ops.push_back(DAG.getValueType(PTyLegalElementVT));
+ N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
+
+ // Finally, use a VBIT_CONVERT to make this available as the appropriate
+ // vector type.
+ N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
+ DAG.getConstant(PTy->getNumElements(),
+ MVT::i32),
+ DAG.getValueType(TLI.getValueType(PTy->getElementType())));
+ }
+
+ return N;
+}
+
+
void SelectionDAGLowering::visitRet(ReturnInst &I) {
if (I.getNumOperands() == 0) {
DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
void SelectionDAGLowering::visitBr(BranchInst &I) {
// Update machine-CFG edges.
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
+ CurMBB->addSuccessor(Succ0MBB);
// Figure out which block is immediately after the current one.
MachineBasicBlock *NextBlock = 0;
DAG.getBasicBlock(Succ0MBB)));
} else {
MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
+ CurMBB->addSuccessor(Succ1MBB);
SDOperand Cond = getValue(I.getCondition());
if (Succ1MBB == NextBlock) {
SDOperand True = DAG.getConstant(1, Cond.getValueType());
Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
}
- Ops.push_back(Cond);
- Ops.push_back(DAG.getBasicBlock(Succ0MBB));
- Ops.push_back(DAG.getBasicBlock(Succ1MBB));
- DAG.setRoot(DAG.getNode(ISD::BRCONDTWOWAY, MVT::Other, Ops));
+ SDOperand True = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
+ DAG.getBasicBlock(Succ0MBB));
+ DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, True,
+ DAG.getBasicBlock(Succ1MBB)));
+ }
+ }
+}
+
+/// visitSwitchCase - Emits the necessary code to represent a single node in
+/// the binary search tree resulting from lowering a switch instruction.
+void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
+ SDOperand SwitchOp = getValue(CB.SwitchV);
+ SDOperand CaseOp = getValue(CB.CaseC);
+ SDOperand Cond = DAG.getSetCC(MVT::i1, SwitchOp, CaseOp, CB.CC);
+
+ // Set NextBlock to be the MBB immediately after the current one, if any.
+ // This is used to avoid emitting unnecessary branches to the next block.
+ MachineBasicBlock *NextBlock = 0;
+ MachineFunction::iterator BBI = CurMBB;
+ if (++BBI != CurMBB->getParent()->end())
+ NextBlock = BBI;
+
+ // If the lhs block is the next block, invert the condition so that we can
+ // fall through to the lhs instead of the rhs block.
+ if (CB.LHSBB == NextBlock) {
+ std::swap(CB.LHSBB, CB.RHSBB);
+ SDOperand True = DAG.getConstant(1, Cond.getValueType());
+ Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
+ }
+ SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
+ DAG.getBasicBlock(CB.LHSBB));
+ if (CB.RHSBB == NextBlock)
+ DAG.setRoot(BrCond);
+ else
+ DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
+ DAG.getBasicBlock(CB.RHSBB)));
+ // Update successor info
+ CurMBB->addSuccessor(CB.LHSBB);
+ CurMBB->addSuccessor(CB.RHSBB);
+}
+
+/// visitJumpTable - Emits the code needed to branch through the jump table
+/// produced when lowering a switch: compute the table index, load the target
+/// address out of the table, and branch to it.
+void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
+ // FIXME: Need to emit different code for PIC vs. Non-PIC, specifically,
+ // we need to add the address of the jump table to the value loaded, since
+ // the entries in the jump table will be differences rather than absolute
+ // addresses.
+
+ // Emit the code for the jump table
+ MVT::ValueType PTy = TLI.getPointerTy();
+ unsigned PTyBytes = MVT::getSizeInBits(PTy)/8;
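+  // For example, with a 32-bit pointer type PTyBytes is 4, so the address
+  // computed below for table slot i is the jump table base plus 4*i.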
+ SDOperand Copy = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy);
+ SDOperand IDX = DAG.getNode(ISD::MUL, PTy, Copy,
+ DAG.getConstant(PTyBytes, PTy));
+ SDOperand ADD = DAG.getNode(ISD::ADD, PTy, IDX, DAG.getJumpTable(JT.JTI,PTy));
+ SDOperand LD = DAG.getLoad(PTy, Copy.getValue(1), ADD, DAG.getSrcValue(0));
+ DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), LD));
+}
+
+void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
+ // Figure out which block is immediately after the current one.
+ MachineBasicBlock *NextBlock = 0;
+ MachineFunction::iterator BBI = CurMBB;
+ if (++BBI != CurMBB->getParent()->end())
+ NextBlock = BBI;
+
+ // If there is only the default destination, branch to it if it is not the
+ // next basic block. Otherwise, just fall through.
+ if (I.getNumOperands() == 2) {
+ // Update machine-CFG edges.
+ MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[I.getDefaultDest()];
+ // If this is not a fall-through branch, emit the branch.
+ if (DefaultMBB != NextBlock)
+ DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
+ DAG.getBasicBlock(DefaultMBB)));
+ return;
+ }
+
+ // If there are any non-default case statements, create a vector of Cases
+ // representing each one, and sort the vector so that we can efficiently
+ // create a binary search tree from them.
+ std::vector<Case> Cases;
+ for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
+ MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
+ Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
+ }
+ std::sort(Cases.begin(), Cases.end(), CaseCmp());
+
+ // Get the Value to be switched on and default basic blocks, which will be
+ // inserted into CaseBlock records, representing basic blocks in the binary
+ // search tree.
+ Value *SV = I.getOperand(0);
+ MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];
+
+ // Get the MachineFunction which holds the current MBB. This is used during
+ // emission of jump tables, and when inserting any additional MBBs necessary
+ // to represent the switch.
+ MachineFunction *CurMF = CurMBB->getParent();
+ const BasicBlock *LLVMBB = CurMBB->getBasicBlock();
+ Reloc::Model Relocs = TLI.getTargetMachine().getRelocationModel();
+
+  // If the switch has more than 5 blocks, is at least 31.25% dense, and the
+ // target supports indirect branches, then emit a jump table rather than
+ // lowering the switch to a binary tree of conditional branches.
+ // FIXME: Make this work with PIC code
+ if (TLI.isOperationLegal(ISD::BRIND, TLI.getPointerTy()) &&
+ (Relocs == Reloc::Static || Relocs == Reloc::DynamicNoPIC) &&
+ Cases.size() > 5) {
+ uint64_t First = cast<ConstantIntegral>(Cases.front().first)->getRawValue();
+ uint64_t Last = cast<ConstantIntegral>(Cases.back().first)->getRawValue();
+ double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);
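+    // For example, 10 cases covering values in 0..31 give a density of
+    // 10/32 = 0.3125 and take the jump table path below; the same 10 cases
+    // spread over 0..63 (density ~0.156) fall through to the binary tree.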
+
+ if (Density >= 0.3125) {
+ // Create a new basic block to hold the code for loading the address
+ // of the jump table, and jumping to it. Update successor information;
+ // we will either branch to the default case for the switch, or the jump
+ // table.
+ MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB);
+ CurMF->getBasicBlockList().insert(BBI, JumpTableBB);
+ CurMBB->addSuccessor(Default);
+ CurMBB->addSuccessor(JumpTableBB);
+
+      // Subtract the lowest switch case value from the value being switched
+      // on, and conditionally branch to the default MBB if the result is
+      // greater than the difference between the smallest and largest cases.
+ SDOperand SwitchOp = getValue(SV);
+ MVT::ValueType VT = SwitchOp.getValueType();
+ SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
+ DAG.getConstant(First, VT));
+
+      // The SDNode we just created, which holds the value being switched on
+      // minus the smallest case value, needs to be copied to a virtual
+      // register so it can be used as an index into the jump table in a
+      // subsequent basic block. This value may be smaller or larger than the
+      // target's pointer type, and therefore require extension or truncation.
+ if (VT > TLI.getPointerTy())
+ SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
+ else
+ SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);
+ unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
+ SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp);
+
+ // Emit the range check for the jump table, and branch to the default
+ // block for the switch statement if the value being switched on exceeds
+ // the largest case in the switch.
+ SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB,
+ DAG.getConstant(Last-First,VT), ISD::SETUGT);
+ DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
+ DAG.getBasicBlock(Default)));
+
+ // Build a vector of destination BBs, corresponding to each target
+ // of the jump table. If the value of the jump table slot corresponds to
+ // a case statement, push the case's BB onto the vector, otherwise, push
+ // the default BB.
+ std::set<MachineBasicBlock*> UniqueBBs;
+ std::vector<MachineBasicBlock*> DestBBs;
+ uint64_t TEI = First;
+ for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI) {
+ if (cast<ConstantIntegral>(ii->first)->getRawValue() == TEI) {
+ DestBBs.push_back(ii->second);
+ UniqueBBs.insert(ii->second);
+ ++ii;
+ } else {
+ DestBBs.push_back(Default);
+ UniqueBBs.insert(Default);
+ }
+ }
+
+ // Update successor info
+ for (std::set<MachineBasicBlock*>::iterator ii = UniqueBBs.begin(),
+ ee = UniqueBBs.end(); ii != ee; ++ii)
+ JumpTableBB->addSuccessor(*ii);
+
+ // Create a jump table index for this jump table, or return an existing
+ // one.
+ unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
+
+ // Set the jump table information so that we can codegen it as a second
+ // MachineBasicBlock
+ JT.Reg = JumpTableReg;
+ JT.JTI = JTI;
+ JT.MBB = JumpTableBB;
+ JT.Default = Default;
+ return;
+ }
+ }
+
+ // Push the initial CaseRec onto the worklist
+ std::vector<CaseRec> CaseVec;
+ CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
+
+ while (!CaseVec.empty()) {
+ // Grab a record representing a case range to process off the worklist
+ CaseRec CR = CaseVec.back();
+ CaseVec.pop_back();
+
+ // Size is the number of Cases represented by this range. If Size is 1,
+ // then we are processing a leaf of the binary search tree. Otherwise,
+ // we need to pick a pivot, and push left and right ranges onto the
+ // worklist.
+ unsigned Size = CR.Range.second - CR.Range.first;
+
+ if (Size == 1) {
+ // Create a CaseBlock record representing a conditional branch to
+ // the Case's target mbb if the value being switched on SV is equal
+ // to C. Otherwise, branch to default.
+ Constant *C = CR.Range.first->first;
+ MachineBasicBlock *Target = CR.Range.first->second;
+ SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
+ CR.CaseBB);
+ // If the MBB representing the leaf node is the current MBB, then just
+ // call visitSwitchCase to emit the code into the current block.
+ // Otherwise, push the CaseBlock onto the vector to be later processed
+ // by SDISel, and insert the node's MBB before the next MBB.
+ if (CR.CaseBB == CurMBB)
+ visitSwitchCase(CB);
+ else {
+ SwitchCases.push_back(CB);
+ CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
+ }
+ } else {
+      // Split the case range at the pivot.
+ CaseItr Pivot = CR.Range.first + (Size / 2);
+ CaseRange LHSR(CR.Range.first, Pivot);
+ CaseRange RHSR(Pivot, CR.Range.second);
+ Constant *C = Pivot->first;
+ MachineBasicBlock *RHSBB = 0, *LHSBB = 0;
+ // We know that we branch to the LHS if the Value being switched on is
+ // less than the Pivot value, C. We use this to optimize our binary
+ // tree a bit, by recognizing that if SV is greater than or equal to the
+ // LHS's Case Value, and that Case Value is exactly one less than the
+ // Pivot's Value, then we can branch directly to the LHS's Target,
+ // rather than creating a leaf node for it.
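+      // For example, if CR.GE is 4, the LHS range holds only the case value
+      // 4, and the pivot C is 5, then SV >= 4 and SV < 5 imply SV == 4, so we
+      // can branch straight to that case's target block.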
+ if ((LHSR.second - LHSR.first) == 1 &&
+ LHSR.first->first == CR.GE &&
+ cast<ConstantIntegral>(C)->getRawValue() ==
+ (cast<ConstantIntegral>(CR.GE)->getRawValue() + 1ULL)) {
+ LHSBB = LHSR.first->second;
+ } else {
+ LHSBB = new MachineBasicBlock(LLVMBB);
+ CaseVec.push_back(CaseRec(LHSBB,C,CR.GE,LHSR));
+ }
+ // Similar to the optimization above, if the Value being switched on is
+ // known to be less than the Constant CR.LT, and the current Case Value
+ // is CR.LT - 1, then we can branch directly to the target block for
+ // the current Case Value, rather than emitting a RHS leaf node for it.
+ if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
+ cast<ConstantIntegral>(RHSR.first->first)->getRawValue() ==
+ (cast<ConstantIntegral>(CR.LT)->getRawValue() - 1ULL)) {
+ RHSBB = RHSR.first->second;
+ } else {
+ RHSBB = new MachineBasicBlock(LLVMBB);
+ CaseVec.push_back(CaseRec(RHSBB,CR.LT,C,RHSR));
+ }
+      // Create a CaseBlock record representing a conditional branch to
+      // the LHS node if the value being switched on SV is less than C.
+      // Otherwise, branch to RHS.
+ ISD::CondCode CC = C->getType()->isSigned() ? ISD::SETLT : ISD::SETULT;
+ SelectionDAGISel::CaseBlock CB(CC, SV, C, LHSBB, RHSBB, CR.CaseBB);
+ if (CR.CaseBB == CurMBB)
+ visitSwitchCase(CB);
+ else {
+ SwitchCases.push_back(CB);
+ CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
+ }
}
}
}
setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
} else {
const PackedType *PTy = cast<PackedType>(Ty);
- unsigned NumElements = PTy->getNumElements();
- MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
- MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);
-
- // Immediately scalarize packed types containing only one element, so that
- // the Legalize pass does not have to deal with them. Similarly, if the
- // abstract vector is going to turn into one that the target natively
- // supports, generate that type now so that Legalize doesn't have to deal
- // with that either. These steps ensure that Legalize only has to handle
- // vector types in its Expand case.
- unsigned Opc = MVT::isFloatingPoint(PVT) ? FPOp : IntOp;
- if (NumElements == 1) {
- setValue(&I, DAG.getNode(Opc, PVT, Op1, Op2));
- } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
- setValue(&I, DAG.getNode(Opc, TVT, Op1, Op2));
- } else {
- SDOperand Num = DAG.getConstant(NumElements, MVT::i32);
- SDOperand Typ = DAG.getValueType(PVT);
- setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
- }
+ SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
+ SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
+ setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
}
}
SDOperand Cond = getValue(I.getOperand(0));
SDOperand TrueVal = getValue(I.getOperand(1));
SDOperand FalseVal = getValue(I.getOperand(2));
- setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
- TrueVal, FalseVal));
+ if (!isa<PackedType>(I.getType())) {
+ setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
+ TrueVal, FalseVal));
+ } else {
+ setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal,
+ *(TrueVal.Val->op_end()-2),
+ *(TrueVal.Val->op_end()-1)));
+ }
}
void SelectionDAGLowering::visitCast(User &I) {
SDOperand N = getValue(I.getOperand(0));
- MVT::ValueType SrcTy = TLI.getValueType(I.getOperand(0)->getType());
- MVT::ValueType DestTy = TLI.getValueType(I.getType());
-
- if (N.getValueType() == DestTy) {
+ MVT::ValueType SrcVT = N.getValueType();
+ MVT::ValueType DestVT = TLI.getValueType(I.getType());
+
+ if (DestVT == MVT::Vector) {
+ // This is a cast to a vector from something else. This is always a bit
+ // convert. Get information about the input vector.
+ const PackedType *DestTy = cast<PackedType>(I.getType());
+ MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
+ setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
+ DAG.getConstant(DestTy->getNumElements(),MVT::i32),
+ DAG.getValueType(EltVT)));
+ } else if (SrcVT == DestVT) {
setValue(&I, N); // noop cast.
- } else if (DestTy == MVT::i1) {
+ } else if (DestVT == MVT::i1) {
// Cast to bool is a comparison against zero, not truncation to zero.
- SDOperand Zero = isInteger(SrcTy) ? DAG.getConstant(0, N.getValueType()) :
+ SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) :
DAG.getConstantFP(0.0, N.getValueType());
setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
- } else if (isInteger(SrcTy)) {
- if (isInteger(DestTy)) { // Int -> Int cast
- if (DestTy < SrcTy) // Truncating cast?
- setValue(&I, DAG.getNode(ISD::TRUNCATE, DestTy, N));
+ } else if (isInteger(SrcVT)) {
+ if (isInteger(DestVT)) { // Int -> Int cast
+ if (DestVT < SrcVT) // Truncating cast?
+ setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
else if (I.getOperand(0)->getType()->isSigned())
- setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestTy, N));
+ setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
else
- setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestTy, N));
- } else { // Int -> FP cast
+ setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
+ } else if (isFloatingPoint(DestVT)) { // Int -> FP cast
if (I.getOperand(0)->getType()->isSigned())
- setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestTy, N));
+ setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
else
- setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestTy, N));
+ setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
+ } else {
+ assert(0 && "Unknown cast!");
}
- } else {
- assert(isFloatingPoint(SrcTy) && "Unknown value type!");
- if (isFloatingPoint(DestTy)) { // FP -> FP cast
- if (DestTy < SrcTy) // Rounding cast?
- setValue(&I, DAG.getNode(ISD::FP_ROUND, DestTy, N));
+ } else if (isFloatingPoint(SrcVT)) {
+ if (isFloatingPoint(DestVT)) { // FP -> FP cast
+ if (DestVT < SrcVT) // Rounding cast?
+ setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
else
- setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestTy, N));
- } else { // FP -> Int cast.
+ setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
+ } else if (isInteger(DestVT)) { // FP -> Int cast.
if (I.getType()->isSigned())
- setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestTy, N));
+ setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
else
- setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestTy, N));
+ setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
+ } else {
+ assert(0 && "Unknown cast!");
}
+ } else {
+ assert(SrcVT == MVT::Vector && "Unknown cast!");
+ assert(DestVT != MVT::Vector && "Casts to vector already handled!");
+ // This is a cast from a vector to something else. This is always a bit
+ // convert. Get information about the input vector.
+ setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
}
}
+void SelectionDAGLowering::visitInsertElement(User &I) {
+ SDOperand InVec = getValue(I.getOperand(0));
+ SDOperand InVal = getValue(I.getOperand(1));
+ SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
+ getValue(I.getOperand(2)));
+
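+  // The MVT::Vector-typed nodes built by this lowering carry their element
+  // count and element type as their last two operands; read them off the
+  // input vector and propagate them to the new node.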
+ SDOperand Num = *(InVec.Val->op_end()-2);
+ SDOperand Typ = *(InVec.Val->op_end()-1);
+ setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
+ InVec, InVal, InIdx, Num, Typ));
+}
+
+void SelectionDAGLowering::visitExtractElement(User &I) {
+ SDOperand InVec = getValue(I.getOperand(0));
+ SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
+ getValue(I.getOperand(1)));
+ SDOperand Typ = *(InVec.Val->op_end()-1);
+ setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
+ TLI.getValueType(I.getType()), InVec, InIdx));
+}
+
+void SelectionDAGLowering::visitShuffleVector(User &I) {
+ SDOperand V1 = getValue(I.getOperand(0));
+ SDOperand V2 = getValue(I.getOperand(1));
+ SDOperand Mask = getValue(I.getOperand(2));
+
+ SDOperand Num = *(V1.Val->op_end()-2);
+ SDOperand Typ = *(V2.Val->op_end()-1);
+ setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
+ V1, V2, Mask, Num, Typ));
+}
+
+
void SelectionDAGLowering::visitGetElementPtr(User &I) {
SDOperand N = getValue(I.getOperand(0));
const Type *Ty = I.getOperand(0)->getType();
- const Type *UIntPtrTy = TD.getIntPtrType();
+ const Type *UIntPtrTy = TD->getIntPtrType();
for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
unsigned Field = cast<ConstantUInt>(Idx)->getValue();
if (Field) {
// N = N + Offset
- uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
+ uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field];
N = DAG.getNode(ISD::ADD, N.getValueType(), N,
getIntPtrConstant(Offset));
}
uint64_t Offs;
if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
- Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
+ Offs = (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
else
- Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
+ Offs = TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
continue;
}
// N = N + Idx * ElementSize;
- uint64_t ElementSize = TD.getTypeSize(Ty);
+ uint64_t ElementSize = TD->getTypeSize(Ty);
SDOperand IdxN = getValue(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend
return; // getValue will auto-populate this.
const Type *Ty = I.getAllocatedType();
- uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
- unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
+ uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+ unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
I.getAlignment());
SDOperand AllocSize = getValue(I.getArraySize());
CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}
-/// getStringValue - Turn an LLVM constant pointer that eventually points to a
-/// global into a string value. Return an empty string if we can't do it.
-///
-static std::string getStringValue(GlobalVariable *GV, unsigned Offset = 0) {
- if (GV->hasInitializer() && isa<ConstantArray>(GV->getInitializer())) {
- ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
- if (Init->isString()) {
- std::string Result = Init->getAsString();
- if (Offset < Result.size()) {
- // If we are pointing INTO The string, erase the beginning...
- Result.erase(Result.begin(), Result.begin()+Offset);
- return Result;
- }
- }
- }
- return "";
-}
-
void SelectionDAGLowering::visitLoad(LoadInst &I) {
SDOperand Ptr = getValue(I.getOperand(0));
// Do not serialize non-volatile loads against each other.
Root = DAG.getRoot();
}
-
- const Type *Ty = I.getType();
+
+ setValue(&I, getLoadFrom(I.getType(), Ptr, DAG.getSrcValue(I.getOperand(0)),
+ Root, I.isVolatile()));
+}
+
+SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
+ SDOperand SrcValue, SDOperand Root,
+ bool isVolatile) {
SDOperand L;
-
if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
- unsigned NumElements = PTy->getNumElements();
MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
- MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);
-
- // Immediately scalarize packed types containing only one element, so that
- // the Legalize pass does not have to deal with them.
- if (NumElements == 1) {
- L = DAG.getLoad(PVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
- } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
- L = DAG.getLoad(TVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
- } else {
- L = DAG.getVecLoad(NumElements, PVT, Root, Ptr,
- DAG.getSrcValue(I.getOperand(0)));
- }
+ L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, SrcValue);
} else {
- L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr,
- DAG.getSrcValue(I.getOperand(0)));
+ L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SrcValue);
}
- setValue(&I, L);
- if (I.isVolatile())
+ if (isVolatile)
DAG.setRoot(L.getValue(1));
else
PendingLoads.push_back(L.getValue(1));
+
+ return L;
}
DAG.getSrcValue(I.getOperand(1))));
}
+/// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
+/// access memory and has no other side effects at all.
+static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
+#define GET_NO_MEMORY_INTRINSICS
+#include "llvm/Intrinsics.gen"
+#undef GET_NO_MEMORY_INTRINSICS
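+  // The generated fragment above is expected to expand into a switch over
+  // IntrinsicID that returns true for the listed intrinsics; anything not
+  // matched falls through to the return below.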
+ return false;
+}
+
+/// IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't
+/// have any side-effects or if it only reads memory.
+static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) {
+#define GET_SIDE_EFFECT_INFO
+#include "llvm/Intrinsics.gen"
+#undef GET_SIDE_EFFECT_INFO
+ return false;
+}
+
+/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
+/// node.
+void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
+ unsigned Intrinsic) {
+ bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
+ bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic);
+
+ // Build the operand list.
+ std::vector<SDOperand> Ops;
+ if (HasChain) { // If this intrinsic has side-effects, chainify it.
+ if (OnlyLoad) {
+ // We don't need to serialize loads against other loads.
+ Ops.push_back(DAG.getRoot());
+ } else {
+ Ops.push_back(getRoot());
+ }
+ }
+
+ // Add the intrinsic ID as an integer operand.
+ Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
+
+ // Add all operands of the call to the operand list.
+ for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
+ SDOperand Op = getValue(I.getOperand(i));
+
+ // If this is a vector type, force it to the right packed type.
+ if (Op.getValueType() == MVT::Vector) {
+ const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
+ MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());
+
+ MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
+ assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
+ Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
+ }
+
+ assert(TLI.isTypeLegal(Op.getValueType()) &&
+ "Intrinsic uses a non-legal type?");
+ Ops.push_back(Op);
+ }
+
+ std::vector<MVT::ValueType> VTs;
+ if (I.getType() != Type::VoidTy) {
+ MVT::ValueType VT = TLI.getValueType(I.getType());
+ if (VT == MVT::Vector) {
+ const PackedType *DestTy = cast<PackedType>(I.getType());
+ MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
+
+ VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
+ assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
+ }
+
+ assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
+ VTs.push_back(VT);
+ }
+ if (HasChain)
+ VTs.push_back(MVT::Other);
+
+ // Create the node.
+ SDOperand Result;
+ if (!HasChain)
+ Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTs, Ops);
+ else if (I.getType() != Type::VoidTy)
+ Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTs, Ops);
+ else
+ Result = DAG.getNode(ISD::INTRINSIC_VOID, VTs, Ops);
+
+ if (HasChain) {
+ SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1);
+ if (OnlyLoad)
+ PendingLoads.push_back(Chain);
+ else
+ DAG.setRoot(Chain);
+ }
+ if (I.getType() != Type::VoidTy) {
+ if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
+ MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
+ Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
+ DAG.getConstant(PTy->getNumElements(), MVT::i32),
+ DAG.getValueType(EVT));
+ }
+ setValue(&I, Result);
+ }
+}
+
/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
/// we want to emit this as a call to a named external function, return the name
/// otherwise lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
switch (Intrinsic) {
+ default:
+ // By default, turn this into a target intrinsic node.
+ visitTargetIntrinsic(I, Intrinsic);
+ return 0;
case Intrinsic::vastart: visitVAStart(I); return 0;
case Intrinsic::vaend: visitVAEnd(I); return 0;
case Intrinsic::vacopy: visitVACopy(I); return 0;
case Intrinsic::longjmp:
return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
break;
- case Intrinsic::memcpy: visitMemIntrinsic(I, ISD::MEMCPY); return 0;
- case Intrinsic::memset: visitMemIntrinsic(I, ISD::MEMSET); return 0;
- case Intrinsic::memmove: visitMemIntrinsic(I, ISD::MEMMOVE); return 0;
-
- case Intrinsic::readport:
- case Intrinsic::readio: {
- std::vector<MVT::ValueType> VTs;
- VTs.push_back(TLI.getValueType(I.getType()));
- VTs.push_back(MVT::Other);
- std::vector<SDOperand> Ops;
- Ops.push_back(getRoot());
- Ops.push_back(getValue(I.getOperand(1)));
- SDOperand Tmp = DAG.getNode(Intrinsic == Intrinsic::readport ?
- ISD::READPORT : ISD::READIO, VTs, Ops);
-
- setValue(&I, Tmp);
- DAG.setRoot(Tmp.getValue(1));
+ case Intrinsic::memcpy_i32:
+ case Intrinsic::memcpy_i64:
+ visitMemIntrinsic(I, ISD::MEMCPY);
return 0;
- }
- case Intrinsic::writeport:
- case Intrinsic::writeio:
- DAG.setRoot(DAG.getNode(Intrinsic == Intrinsic::writeport ?
- ISD::WRITEPORT : ISD::WRITEIO, MVT::Other,
- getRoot(), getValue(I.getOperand(1)),
- getValue(I.getOperand(2))));
+ case Intrinsic::memset_i32:
+ case Intrinsic::memset_i64:
+ visitMemIntrinsic(I, ISD::MEMSET);
+ return 0;
+ case Intrinsic::memmove_i32:
+ case Intrinsic::memmove_i64:
+ visitMemIntrinsic(I, ISD::MEMMOVE);
return 0;
case Intrinsic::dbg_stoppoint: {
- if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
- return "llvm_debugger_stop";
-
MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
- if (DebugInfo && DebugInfo->Verify(I.getOperand(4))) {
+ DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
+ if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
std::vector<SDOperand> Ops;
- // Input Chain
Ops.push_back(getRoot());
-
- // line number
- Ops.push_back(getValue(I.getOperand(2)));
-
- // column
- Ops.push_back(getValue(I.getOperand(3)));
+ Ops.push_back(getValue(SPI.getLineValue()));
+ Ops.push_back(getValue(SPI.getColumnValue()));
- DebugInfoDesc *DD = DebugInfo->getDescFor(I.getOperand(4));
+ DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
assert(DD && "Not a debug information descriptor");
- CompileUnitDesc *CompileUnit = dyn_cast<CompileUnitDesc>(DD);
- assert(CompileUnit && "Not a compile unit");
+ CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
+
Ops.push_back(DAG.getString(CompileUnit->getFileName()));
Ops.push_back(DAG.getString(CompileUnit->getDirectory()));
- if (Ops.size() == 5) // Found filename/workingdir.
- DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
+ DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
}
-
- setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
+
return 0;
}
- case Intrinsic::dbg_region_start:
- if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
- return "llvm_dbg_region_start";
- if (I.getType() != Type::VoidTy)
- setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
+ case Intrinsic::dbg_region_start: {
+ MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
+ if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
+ std::vector<SDOperand> Ops;
+
+ unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());
+
+ Ops.push_back(getRoot());
+ Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
+
+ DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
+ }
+
return 0;
- case Intrinsic::dbg_region_end:
- if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
- return "llvm_dbg_region_end";
- if (I.getType() != Type::VoidTy)
- setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
+ }
+ case Intrinsic::dbg_region_end: {
+ MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
+ if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
+ std::vector<SDOperand> Ops;
+
+ unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());
+
+ Ops.push_back(getRoot());
+ Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
+
+ DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
+ }
+
return 0;
- case Intrinsic::dbg_func_start:
- if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
- return "llvm_dbg_subprogram";
- if (I.getType() != Type::VoidTy)
- setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
+ }
+ case Intrinsic::dbg_func_start: {
+ MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
+ if (DebugInfo && FSI.getSubprogram() &&
+ DebugInfo->Verify(FSI.getSubprogram())) {
+ std::vector<SDOperand> Ops;
+
+ unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());
+
+ Ops.push_back(getRoot());
+ Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
+
+ DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
+ }
+
return 0;
- case Intrinsic::dbg_declare:
- if (I.getType() != Type::VoidTy)
- setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
+ }
+ case Intrinsic::dbg_declare: {
+ MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
+ if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
+ std::vector<SDOperand> Ops;
+
+ SDOperand AddressOp = getValue(DI.getAddress());
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp)) {
+ DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex());
+ }
+ }
+
return 0;
+ }
case Intrinsic::isunordered_f32:
case Intrinsic::isunordered_f64:
case Intrinsic::prefetch:
// FIXME: Currently discarding prefetches.
return 0;
- default:
- std::cerr << I;
- assert(0 && "This intrinsic is not implemented yet!");
- return 0;
}
}
return;
} else { // Not an LLVM intrinsic.
const std::string &Name = F->getName();
- if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
+ if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) {
+ if (I.getNumOperands() == 3 && // Basic sanity checks.
+ I.getOperand(1)->getType()->isFloatingPoint() &&
+ I.getType() == I.getOperand(1)->getType() &&
+ I.getType() == I.getOperand(2)->getType()) {
+ SDOperand LHS = getValue(I.getOperand(1));
+ SDOperand RHS = getValue(I.getOperand(2));
+ setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
+ LHS, RHS));
+ return;
+ }
+ } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
if (I.getNumOperands() == 2 && // Basic sanity checks.
I.getOperand(1)->getType()->isFloatingPoint() &&
I.getType() == I.getOperand(1)->getType()) {
static const TargetRegisterClass *
isAllocatableRegister(unsigned Reg, MachineFunction &MF,
const TargetLowering &TLI, const MRegisterInfo *MRI) {
+ MVT::ValueType FoundVT = MVT::Other;
+ const TargetRegisterClass *FoundRC = 0;
for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
E = MRI->regclass_end(); RCI != E; ++RCI) {
+ MVT::ValueType ThisVT = MVT::Other;
+
const TargetRegisterClass *RC = *RCI;
// If none of the value types for this register class are valid, we
// can't use it. For example, 64-bit reg classes on 32-bit targets.
- bool isLegal = false;
for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
I != E; ++I) {
if (TLI.isTypeLegal(*I)) {
- isLegal = true;
- break;
+ // If we have already found this register in a different register class,
+ // choose the one with the largest VT specified. For example, on
+ // PowerPC, we favor f64 register classes over f32.
+ if (FoundVT == MVT::Other ||
+ MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) {
+ ThisVT = *I;
+ break;
+ }
}
}
- if (!isLegal) continue;
+ if (ThisVT == MVT::Other) continue;
// NOTE: This isn't ideal. In particular, this might allocate the
// frame pointer in functions that need it (due to them not being taken
// out of allocation, because a variable sized allocation hasn't been seen
// yet). This is a slight code pessimization, but should still work.
for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
E = RC->allocation_order_end(MF); I != E; ++I)
- if (*I == Reg)
- return RC;
+ if (*I == Reg) {
+ // We found a matching register class. Keep looking at others in case
+ // we find one with larger registers that this physreg is also in.
+ FoundRC = RC;
+ FoundVT = ThisVT;
+ break;
+ }
}
- return 0;
+ return FoundRC;
}
RegsForValue SelectionDAGLowering::
assert(I.getType() != Type::VoidTy && "Bad inline asm!");
OpVT = TLI.getValueType(I.getType());
} else {
- Value *CallOperand = I.getOperand(OpNum);
- const Type *OpTy = CallOperand->getType();
+ const Type *OpTy = I.getOperand(OpNum)->getType();
OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
OpNum++; // Consumes a call operand.
}
switch (Constraints[i].Type) {
case InlineAsm::isOutput: {
+ TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
+ if (ConstraintCode.size() == 1) // not a physreg name.
+ CTy = TLI.getConstraintType(ConstraintCode[0]);
+
+ if (CTy == TargetLowering::C_Memory) {
+ // Memory output.
+ SDOperand InOperandVal = getValue(I.getOperand(OpNum));
+
+ // Check that the operand (the address to store to) isn't a float.
+ if (!MVT::isInteger(InOperandVal.getValueType()))
+ assert(0 && "MATCH FAIL!");
+
+ if (!Constraints[i].isIndirectOutput)
+ assert(0 && "MATCH FAIL!");
+
+ OpNum++; // Consumes a call operand.
+
+ // Extend/truncate to the right pointer type if needed.
+ MVT::ValueType PtrType = TLI.getPointerTy();
+ if (InOperandVal.getValueType() < PtrType)
+ InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
+ else if (InOperandVal.getValueType() > PtrType)
+ InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
+
+ // Add information to the INLINEASM node to know about this output.
+ unsigned ResOpType = 4/*MEM*/ | (1 << 3);
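+ // The low 3 bits encode the operand kind (4 = memory); the bits above them
+ // hold the number of following operands (one: the address).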
+ AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
+ AsmNodeOperands.push_back(InOperandVal);
+ break;
+ }
+
+ // Otherwise, this is a register output.
+ assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
+
// If this is an early-clobber output, or if there is an input
// constraint that matches this, we need to reserve the input register
// so no other inputs allocate to it.
assert(I.getType() != Type::VoidTy && "Bad inline asm!");
RetValRegs = Regs;
} else {
- Value *CallOperand = I.getOperand(OpNum);
- IndirectStoresToEmit.push_back(std::make_pair(Regs, CallOperand));
+ IndirectStoresToEmit.push_back(std::make_pair(Regs,
+ I.getOperand(OpNum)));
OpNum++; // Consumes a call operand.
}
break;
}
case InlineAsm::isInput: {
- Value *CallOperand = I.getOperand(OpNum);
+ SDOperand InOperandVal = getValue(I.getOperand(OpNum));
OpNum++; // Consumes a call operand.
-
- SDOperand InOperandVal = getValue(CallOperand);
if (isdigit(ConstraintCode[0])) { // Matching constraint?
// If this is required to match an output register we have already set,
Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
// Scale the source by the type size.
- uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
+ uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
Src = DAG.getNode(ISD::MUL, Src.getValueType(),
Src, getIntPtrConstant(ElementSize));
std::vector<std::pair<SDOperand, const Type*> > Args;
- Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));
+ Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType()));
std::pair<SDOperand,SDOperand> Result =
TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
void SelectionDAGLowering::visitFree(FreeInst &I) {
std::vector<std::pair<SDOperand, const Type*> > Args;
Args.push_back(std::make_pair(getValue(I.getOperand(0)),
- TLI.getTargetData().getIntPtrType()));
+ TLI.getTargetData()->getIntPtrType()));
MVT::ValueType IntPtr = TLI.getPointerTy();
std::pair<SDOperand,SDOperand> Result =
TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
DAG.getSrcValue(I.getOperand(2))));
}
+/// TargetLowering::LowerArguments - This is the default LowerArguments
+/// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
+/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be removed.
+std::vector<SDOperand>
+TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
+ // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
+ std::vector<SDOperand> Ops;
+ Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
+ Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
+
+ // Add one result value for each formal argument.
+ std::vector<MVT::ValueType> RetVals;
+ for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
+ MVT::ValueType VT = getValueType(I->getType());
+
+ switch (getTypeAction(VT)) {
+ default: assert(0 && "Unknown type action!");
+ case Legal:
+ RetVals.push_back(VT);
+ break;
+ case Promote:
+ RetVals.push_back(getTypeToTransformTo(VT));
+ break;
+ case Expand:
+ if (VT != MVT::Vector) {
+ // If this is a large integer, it needs to be broken up into small
+ // integers. Figure out what the destination type is and how many small
+ // integers it turns into.
+ MVT::ValueType NVT = getTypeToTransformTo(VT);
+ unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
+ for (unsigned i = 0; i != NumVals; ++i)
+ RetVals.push_back(NVT);
+ } else {
+ // Otherwise, this is a vector type. We only support legal vectors
+ // right now.
+ unsigned NumElems = cast<PackedType>(I->getType())->getNumElements();
+ const Type *EltTy = cast<PackedType>(I->getType())->getElementType();
+
+ // Figure out if there is a Packed type corresponding to this Vector
+ // type. If so, convert to the packed type.
+ MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
+ if (TVT != MVT::Other && isTypeLegal(TVT)) {
+ RetVals.push_back(TVT);
+ } else {
+ assert(0 && "Don't support illegal by-val vector arguments yet!");
+ }
+ }
+ break;
+ }
+ }
+
+ if (RetVals.size() == 0)
+ RetVals.push_back(MVT::isVoid);
+
+ // Create the node.
+ SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, RetVals, Ops).Val;
+
+ // Set up the return result vector.
+ Ops.clear();
+ unsigned i = 0;
+ for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
+ MVT::ValueType VT = getValueType(I->getType());
+
+ switch (getTypeAction(VT)) {
+ default: assert(0 && "Unknown type action!");
+ case Legal:
+ Ops.push_back(SDOperand(Result, i++));
+ break;
+ case Promote: {
+ SDOperand Op(Result, i++);
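+ // The argument was promoted to a wider register. For integers, assert which
+ // bits are meaningful and truncate back down; for FP, round back down to the
+ // declared type.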
+ if (MVT::isInteger(VT)) {
+ unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
+ : ISD::AssertZext;
+ Op = DAG.getNode(AssertOp, Op.getValueType(), Op, DAG.getValueType(VT));
+ Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
+ } else {
+ assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
+ Op = DAG.getNode(ISD::FP_ROUND, VT, Op);
+ }
+ Ops.push_back(Op);
+ break;
+ }
+ case Expand:
+ if (VT != MVT::Vector) {
+ // If this is a large integer, it needs to be reassembled from small
+ // integers. Figure out what the source elt type is and how many small
+ // integers it is made of.
+ MVT::ValueType NVT = getTypeToTransformTo(VT);
+ unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
+ if (NumVals == 2) {
+ SDOperand Lo = SDOperand(Result, i++);
+ SDOperand Hi = SDOperand(Result, i++);
+
+ if (!isLittleEndian())
+ std::swap(Lo, Hi);
+
+ Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi));
+ } else {
+ // Value scalarized into many values. Unimp for now.
+ assert(0 && "Cannot expand i64 -> i16 yet!");
+ }
+ } else {
+ // Otherwise, this is a vector type. We only support legal vectors
+ // right now.
+ const PackedType *PTy = cast<PackedType>(I->getType());
+ unsigned NumElems = PTy->getNumElements();
+ const Type *EltTy = PTy->getElementType();
+
+ // Figure out if there is a Packed type corresponding to this Vector
+ // type. If so, convert to the packed type.
+ MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
+ if (TVT != MVT::Other && isTypeLegal(TVT)) {
+ SDOperand N = SDOperand(Result, i++);
+ // Handle copies from generic vectors to registers.
+ MVT::ValueType PTyElementVT, PTyLegalElementVT;
+ unsigned NE = getPackedTypeBreakdown(PTy, PTyElementVT,
+ PTyLegalElementVT);
+ // Insert a VBIT_CONVERT of the FORMAL_ARGUMENTS to a
+ // "N x PTyElementVT" MVT::Vector type.
+ N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
+ DAG.getConstant(NE, MVT::i32),
+ DAG.getValueType(PTyElementVT));
+ Ops.push_back(N);
+ } else {
+ assert(0 && "Don't support illegal by-val vector arguments yet!");
+ }
+ }
+ break;
+ }
+ }
+ return Ops;
+}
+
// It is always conservatively correct for llvm.returnaddress and
// llvm.frameaddress to return 0.
std::pair<SDOperand, SDOperand>
if (G) {
GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
if (GV) {
- Str = getStringValue(GV);
+ Str = GV->getStringValue(false);
if (!Str.empty()) {
CopyFromStr = true;
SrcOff += SrcDelta;
}
+/// OptimizeNoopCopyExpression - We have determined that the specified cast
+/// instruction is a noop copy (e.g. it's casting from one pointer type to
+/// another, int->uint, or int->sbyte on PPC).
+///
+/// Return true if any changes are made.
+static bool OptimizeNoopCopyExpression(CastInst *CI) {
+ BasicBlock *DefBB = CI->getParent();
+
+ /// InsertedCasts - Only insert a cast in each block once.
+ std::map<BasicBlock*, CastInst*> InsertedCasts;
+
+ bool MadeChange = false;
+ for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
+ UI != E; ) {
+ Use &TheUse = UI.getUse();
+ Instruction *User = cast<Instruction>(*UI);
+
+ // Figure out which BB this cast is used in. For PHI's this is the
+ // appropriate predecessor block.
+ BasicBlock *UserBB = User->getParent();
+ if (PHINode *PN = dyn_cast<PHINode>(User)) {
+ unsigned OpVal = UI.getOperandNo()/2;
+ UserBB = PN->getIncomingBlock(OpVal);
+ }
+
+ // Preincrement use iterator so we don't invalidate it.
+ ++UI;
+
+ // If this user is in the same block as the cast, don't change the cast.
+ if (UserBB == DefBB) continue;
+
+ // If we have already inserted a cast into this block, use it.
+ CastInst *&InsertedCast = InsertedCasts[UserBB];
+
+ if (!InsertedCast) {
+ BasicBlock::iterator InsertPt = UserBB->begin();
+ while (isa<PHINode>(InsertPt)) ++InsertPt;
+
+ InsertedCast =
+ new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
+ MadeChange = true;
+ }
+
+ // Replace a use of the cast with a use of the new cast.
+ TheUse = InsertedCast;
+ }
+
+ // If we removed all uses, nuke the cast.
+ if (CI->use_empty())
+ CI->eraseFromParent();
+
+ return MadeChange;
+}
+
/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
/// casting to the type of GEPI.
-static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
- Value *Ptr, Value *PtrOffset) {
+static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
+ Instruction *GEPI, Value *Ptr,
+ Value *PtrOffset) {
if (V) return V; // Already computed.
BasicBlock::iterator InsertPt;
// Add the offset, cast it to the right type.
Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
- Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
- return V = Ptr;
+ return V = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
+}
+
+/// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
+/// compute its value. The RepPtr value can be computed with Ptr+PtrOffset. One
+/// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's
+/// block, then ReplaceAllUsesWith'ing everything. However, we would prefer to
+/// sink PtrOffset into user blocks where doing so will likely allow us to fold
+/// the constant add into a load or store instruction. Additionally, if a user
+/// is a pointer-pointer cast, we look through it to find its users.
+static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr,
+ Constant *PtrOffset, BasicBlock *DefBB,
+ GetElementPtrInst *GEPI,
+ std::map<BasicBlock*,Instruction*> &InsertedExprs) {
+ while (!RepPtr->use_empty()) {
+ Instruction *User = cast<Instruction>(RepPtr->use_back());
+
+ // If the user is a Pointer-Pointer cast, recurse.
+ if (isa<CastInst>(User) && isa<PointerType>(User->getType())) {
+ ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
+
+ // Drop the use of RepPtr. The cast is dead. Don't delete it now, else we
+ // could invalidate an iterator.
+ User->setOperand(0, UndefValue::get(RepPtr->getType()));
+ continue;
+ }
+
+ // If this is a load of the pointer, or a store through the pointer, emit
+ // the increment into the load/store block.
+ Instruction *NewVal;
+ if (isa<LoadInst>(User) ||
+ (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) {
+ NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
+ User->getParent(), GEPI,
+ Ptr, PtrOffset);
+ } else {
+ // If this use is not foldable into the addressing mode, use a version
+ // emitted in the GEP block.
+ NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
+ Ptr, PtrOffset);
+ }
+
+ if (GEPI->getType() != RepPtr->getType()) {
+ BasicBlock::iterator IP = NewVal;
+ ++IP;
+ NewVal = new CastInst(NewVal, RepPtr->getType(), "", IP);
+ }
+ User->replaceUsesOfWith(RepPtr, NewVal);
+ }
}
/// defined, the addressing expression of the GEP cannot be folded into loads or
/// stores that use it. In this case, decompose the GEP and move constant
/// indices into blocks that use it.
-static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
- const TargetData &TD) {
+static bool OptimizeGEPExpression(GetElementPtrInst *GEPI,
+ const TargetData *TD) {
// If this GEP is only used inside the block it is defined in, there is no
// need to rewrite it.
bool isUsedOutsideDefBB = false;
break;
}
}
- if (!isUsedOutsideDefBB) return;
+ if (!isUsedOutsideDefBB) return false;
// If this GEP has no non-zero constant indices, there is nothing we can do,
// ignore it.
bool hasConstantIndex = false;
+ bool hasVariableIndex = false;
for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
E = GEPI->op_end(); OI != E; ++OI) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI))
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) {
if (CI->getRawValue()) {
hasConstantIndex = true;
break;
}
+ } else {
+ hasVariableIndex = true;
+ }
}
+
+ // If this is a "GEP X, 0, 0, 0", turn this into a cast.
+ if (!hasConstantIndex && !hasVariableIndex) {
+ Value *NC = new CastInst(GEPI->getOperand(0), GEPI->getType(),
+ GEPI->getName(), GEPI);
+ GEPI->replaceAllUsesWith(NC);
+ GEPI->eraseFromParent();
+ return true;
+ }
+
// If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
- if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return;
+ if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0)))
+ return false;
// Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
// constant offset (which we now know is non-zero) and deal with it later.
uint64_t ConstantOffset = 0;
- const Type *UIntPtrTy = TD.getIntPtrType();
+ const Type *UIntPtrTy = TD->getIntPtrType();
Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
const Type *Ty = GEPI->getOperand(0)->getType();
if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantUInt>(Idx)->getValue();
if (Field)
- ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
+ ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field];
Ty = StTy->getElementType(Field);
} else {
Ty = cast<SequentialType>(Ty)->getElementType();
if (CI->getRawValue() == 0) continue;
if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
- ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
+ ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
else
- ConstantOffset+=TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
+ ConstantOffset+=TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
continue;
}
// Cast Idx to UIntPtrTy if needed.
Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
- uint64_t ElementSize = TD.getTypeSize(Ty);
+ uint64_t ElementSize = TD->getTypeSize(Ty);
// Mask off bits that should not be set.
ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
// block, otherwise we use a canonical version right next to the gep (these
// won't be foldable as addresses, so we might as well share the computation).
- std::map<BasicBlock*,Value*> InsertedExprs;
- while (!GEPI->use_empty()) {
- Instruction *User = cast<Instruction>(GEPI->use_back());
-
- // If this use is not foldable into the addressing mode, use a version
- // emitted in the GEP block.
- Value *NewVal;
- if (!isa<LoadInst>(User) &&
- (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) {
- NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
- Ptr, PtrOffset);
- } else {
- // Otherwise, insert the code in the User's block so it can be folded into
- // any users in that block.
- NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
- User->getParent(), GEPI,
- Ptr, PtrOffset);
- }
- User->replaceUsesOfWith(GEPI, NewVal);
- }
+ std::map<BasicBlock*,Instruction*> InsertedExprs;
+ ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
// Finally, the GEP is dead, remove it.
GEPI->eraseFromParent();
+
+ return true;
}
bool SelectionDAGISel::runOnFunction(Function &Fn) {
// constants, this way the load of the constant into a vreg will not be placed
// into MBBs that are used some other way.
//
- // In this pass we also look for GEP instructions that are used across basic
- // blocks and rewrites them to improve basic-block-at-a-time selection.
+ // In this pass we also look for GEP and cast instructions that are used
+ // across basic blocks and rewrite them to improve basic-block-at-a-time
+ // selection.
+ //
//
+ bool MadeChange = true;
+ while (MadeChange) {
+ MadeChange = false;
for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
PHINode *PN;
BasicBlock::iterator BBI;
if (isa<Constant>(PN->getIncomingValue(i)))
SplitCriticalEdge(PN->getIncomingBlock(i), BB);
- for (BasicBlock::iterator E = BB->end(); BBI != E; )
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++))
- OptimizeGEPExpression(GEPI, TLI.getTargetData());
+ for (BasicBlock::iterator E = BB->end(); BBI != E; ) {
+ Instruction *I = BBI++;
+ if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
+ MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData());
+ } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
+ // If this is a noop copy, sink it into user blocks to reduce the number
+ // of virtual registers that must be created and coalesced.
+ MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
+ MVT::ValueType DstVT = TLI.getValueType(CI->getType());
+
+ // If this is an fp<->int conversion, it is not a noop copy; leave it alone.
+ if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
+ continue;
+
+ // If this is an extension, it will be a zero or sign extension, which
+ // isn't a noop.
+ if (SrcVT < DstVT) continue;
+
+ // If these values will be promoted, find out what they will be promoted
+ // to. This helps us consider truncates on PPC as noop copies when they
+ // are.
+ if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
+ SrcVT = TLI.getTypeToTransformTo(SrcVT);
+ if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
+ DstVT = TLI.getTypeToTransformTo(DstVT);
+
+ // If, after promotion, these are the same types, this is a noop copy.
+ if (SrcVT == DstVT)
+ MadeChange |= OptimizeNoopCopyExpression(CI);
+ }
+ }
+ }
}
FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
SelectionDAG &DAG = SDL.DAG;
if (SrcVT == DestVT) {
return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
+ } else if (SrcVT == MVT::Vector) {
+ // Handle copies from generic vectors to registers.
+ MVT::ValueType PTyElementVT, PTyLegalElementVT;
+ unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
+ PTyElementVT, PTyLegalElementVT);
+
+ // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
+ // MVT::Vector type.
+ Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
+ DAG.getConstant(NE, MVT::i32),
+ DAG.getValueType(PTyElementVT));
+
+ // Loop over all of the elements of the resultant vector,
+ // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
+ // copying them into output registers.
+ std::vector<SDOperand> OutChains;
+ SDOperand Root = SDL.getRoot();
+ for (unsigned i = 0; i != NE; ++i) {
+ SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
+ Op, DAG.getConstant(i, MVT::i32));
+ if (PTyElementVT == PTyLegalElementVT) {
+ // Elements are legal.
+ OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
+ } else if (PTyLegalElementVT > PTyElementVT) {
+ // Elements are promoted.
+ if (MVT::isFloatingPoint(PTyLegalElementVT))
+ Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
+ else
+ Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
+ OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
+ } else {
+ // Elements are expanded.
+ // The src value is expanded into multiple registers.
+ SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
+ Elt, DAG.getConstant(0, MVT::i32));
+ SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
+ Elt, DAG.getConstant(1, MVT::i32));
+ OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
+ OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
+ }
+ }
+ return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
} else if (SrcVT < DestVT) {
// The src value is promoted to the register.
if (MVT::isFloatingPoint(SrcVT))
AI != E; ++AI, ++a)
if (!AI->use_empty()) {
SDL.setValue(AI, Args[a]);
-
+
// If this argument is live outside of the entry block, insert a copy from
// wherever we got it to the vreg that other BBs will reference it as.
if (FuncInfo.ValueMap.count(AI)) {
void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
- FunctionLoweringInfo &FuncInfo) {
+ FunctionLoweringInfo &FuncInfo) {
SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
std::vector<SDOperand> UnorderedChains;
for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
I != E; ++I)
SDL.visit(*I);
-
+
// Ensure that all instructions which are used outside of their defining
// blocks are available as virtual registers.
for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
// Remember that this register needs to be added to the machine PHI node as
// the input for this MBB.
- unsigned NumElements =
- TLI.getNumElements(TLI.getValueType(PN->getType()));
+ MVT::ValueType VT = TLI.getValueType(PN->getType());
+ unsigned NumElements;
+ if (VT != MVT::Vector)
+ NumElements = TLI.getNumElements(VT);
+ else {
+ MVT::ValueType VT1,VT2;
+ NumElements =
+ TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
+ VT1, VT2);
+ }
for (unsigned i = 0, e = NumElements; i != e; ++i)
PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
}
// Lower the terminator after the copies are emitted.
SDL.visit(*LLVMBB->getTerminator());
+ // Copy over any CaseBlock records that may now exist due to SwitchInst
+ // lowering, as well as any jump table information.
+ SwitchCases.clear();
+ SwitchCases = SDL.SwitchCases;
+ JT = SDL.JT;
+
// Make sure the root of the DAG is up-to-date.
DAG.setRoot(SDL.getRoot());
}
-void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
- FunctionLoweringInfo &FuncInfo) {
- SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
- CurDAG = &DAG;
- std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
-
- // First step, lower LLVM code to some DAG. This DAG may use operations and
- // types that are not supported by the target.
- BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
-
+void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
// Run the DAG combiner in pre-legalize mode.
DAG.Combine(false);
DEBUG(std::cerr << "Lowered selection DAG:\n");
DEBUG(DAG.dump());
-
+
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
DAG.Legalize();
-
+
DEBUG(std::cerr << "Legalized selection DAG:\n");
DEBUG(DAG.dump());
-
+
// Run the DAG combiner in post-legalize mode.
DAG.Combine(true);
if (ViewISelDAGs) DAG.viewGraph();
-
+
// Third, instruction select all of the operations to machine code, adding the
// code to the MachineBasicBlock.
InstructionSelectBasicBlock(DAG);
-
+
DEBUG(std::cerr << "Selected machine code:\n");
DEBUG(BB->dump());
+}
+
+void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
+ FunctionLoweringInfo &FuncInfo) {
+ std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
+ {
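+ // Scope the DAG so it is destroyed before any additional DAGs (for switch
+ // or jump table lowering) are built below.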
+ SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ CurDAG = &DAG;
+
+ // First step, lower LLVM code to some DAG. This DAG may use operations and
+ // types that are not supported by the target.
+ BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
+ // Second step, emit the lowered DAG as machine code.
+ CodeGenAndEmitDAG(DAG);
+ }
+
// Next, now that we know which MBB is the last one the LLVM BB expanded into,
// update the PHI nodes in its successors.
- for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
- MachineInstr *PHI = PHINodesToUpdate[i].first;
- assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
- "This is not a machine PHI node that we are updating!");
- PHI->addRegOperand(PHINodesToUpdate[i].second);
- PHI->addMachineBasicBlockOperand(BB);
+ if (SwitchCases.empty() && JT.Reg == 0) {
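+ // Common case: no switch or jump table lowering was needed, so every PHI
+ // update refers to the single emitted MBB.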
+ for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
+ MachineInstr *PHI = PHINodesToUpdate[i].first;
+ assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
+ "This is not a machine PHI node that we are updating!");
+ PHI->addRegOperand(PHINodesToUpdate[i].second);
+ PHI->addMachineBasicBlockOperand(BB);
+ }
+ return;
}
-
- // Finally, add the CFG edges from the last selected MBB to the successor
- // MBBs.
- TerminatorInst *TI = LLVMBB->getTerminator();
- for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
- MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[TI->getSuccessor(i)];
- BB->addSuccessor(Succ0MBB);
+
+ // If the JumpTable record is filled in, then we need to emit a jump table.
+ // Updating the PHI nodes is tricky in this case, since we need to determine
+ // whether each PHI is in a successor of the range check MBB or of the jump
+ // table MBB.
+ if (JT.Reg) {
+ assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
+ SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ CurDAG = &SDAG;
+ SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
+ MachineBasicBlock *RangeBB = BB;
+ // Set the current basic block to the mbb we wish to insert the code into
+ BB = JT.MBB;
+ SDL.setCurrentBasicBlock(BB);
+ // Emit the code
+ SDL.visitJumpTable(JT);
+ SDAG.setRoot(SDL.getRoot());
+ CodeGenAndEmitDAG(SDAG);
+ // Update PHI Nodes
+ for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
+ MachineInstr *PHI = PHINodesToUpdate[pi].first;
+ MachineBasicBlock *PHIBB = PHI->getParent();
+ assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
+ "This is not a machine PHI node that we are updating!");
+ if (PHIBB == JT.Default) {
+ PHI->addRegOperand(PHINodesToUpdate[pi].second);
+ PHI->addMachineBasicBlockOperand(RangeBB);
+ }
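+ // If the PHI's block is a successor of the jump table block itself, also
+ // add the incoming value from that block.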
+ if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) {
+ PHI->addRegOperand(PHINodesToUpdate[pi].second);
+ PHI->addMachineBasicBlockOperand(BB);
+ }
+ }
+ return;
+ }
+
+ // If we generated any switch lowering information, build and codegen any
+ // additional DAGs necessary.
+ for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
+ SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ CurDAG = &SDAG;
+ SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
+ // Set the current basic block to the mbb we wish to insert the code into
+ BB = SwitchCases[i].ThisBB;
+ SDL.setCurrentBasicBlock(BB);
+ // Emit the code
+ SDL.visitSwitchCase(SwitchCases[i]);
+ SDAG.setRoot(SDL.getRoot());
+ CodeGenAndEmitDAG(SDAG);
+ // Iterate over the phi nodes; if there is a phi node in a successor of this
+ // block (for instance, the default block), then add a pair of operands to
+ // that phi node for this block, as if we were coming from the original
+ // BB before switch expansion.
+ for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
+ MachineInstr *PHI = PHINodesToUpdate[pi].first;
+ MachineBasicBlock *PHIBB = PHI->getParent();
+ assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
+ "This is not a machine PHI node that we are updating!");
+ if (PHIBB == SwitchCases[i].LHSBB || PHIBB == SwitchCases[i].RHSBB) {
+ PHI->addRegOperand(PHINodesToUpdate[pi].second);
+ PHI->addMachineBasicBlockOperand(BB);
+ }
+ }
}
}
switch (ISHeuristic) {
default: assert(0 && "Unrecognized scheduling heuristic");
- case defaultScheduling:
+ case ScheduleDAG::defaultScheduling:
if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
- SL = createSimpleDAGScheduler(noScheduling, DAG, BB);
- else /* TargetLowering::SchedulingForRegPressure */
+ SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
+ else {
+ assert(TLI.getSchedulingPreference() ==
+ TargetLowering::SchedulingForRegPressure && "Unknown sched type!");
SL = createBURRListDAGScheduler(DAG, BB);
+ }
break;
- case noScheduling:
- case simpleScheduling:
- case simpleNoItinScheduling:
- SL = createSimpleDAGScheduler(ISHeuristic, DAG, BB);
+ case ScheduleDAG::noScheduling:
+ SL = createBFS_DAGScheduler(DAG, BB);
break;
- case listSchedulingBURR:
+ case ScheduleDAG::simpleScheduling:
+ SL = createSimpleDAGScheduler(false, DAG, BB);
+ break;
+ case ScheduleDAG::simpleNoItinScheduling:
+ SL = createSimpleDAGScheduler(true, DAG, BB);
+ break;
+ case ScheduleDAG::listSchedulingBURR:
SL = createBURRListDAGScheduler(DAG, BB);
+ break;
+ case ScheduleDAG::listSchedulingTDRR:
+ SL = createTDRRListDAGScheduler(DAG, BB);
+ break;
+ case ScheduleDAG::listSchedulingTD:
+ SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
+ break;
}
BB = SL->Run();
delete SL;
}
+HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
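+ // Default implementation: no target-specific hazard detection. Targets can
+ // override this hook to model their pipeline hazards.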
+ return new HazardRecognizer();
+}
+
/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen. Others should not call it.
void SelectionDAGISel::