#endif
namespace {
-static cl::opt<bool>
-NoFoldNodeInFlight(
- "no-isel-fold-inflight",
- cl::Hidden,
- cl::desc("Do not attempt to fold a node even if it is being selected"));
-}
-
-// Scheduling heuristics
-enum SchedHeuristics {
- defaultScheduling, // Let the target specify its preference.
- noScheduling, // No scheduling, emit breadth first sequence.
- simpleScheduling, // Two pass, min. critical path, max. utilization.
- simpleNoItinScheduling, // Same as above exact using generic latency.
- listSchedulingBURR, // Bottom up reg reduction list scheduling.
- listSchedulingTD // Top-down list scheduler.
-};
-
-namespace {
- cl::opt<SchedHeuristics>
+ cl::opt<ScheduleDAG::SchedHeuristics>
ISHeuristic(
"sched",
cl::desc("Choose scheduling style"),
- cl::init(defaultScheduling),
+ cl::init(ScheduleDAG::defaultScheduling),
cl::values(
- clEnumValN(defaultScheduling, "default",
+ clEnumValN(ScheduleDAG::defaultScheduling, "default",
"Target preferred scheduling style"),
- clEnumValN(noScheduling, "none",
+ clEnumValN(ScheduleDAG::noScheduling, "none",
"No scheduling: breadth first sequencing"),
- clEnumValN(simpleScheduling, "simple",
+ clEnumValN(ScheduleDAG::simpleScheduling, "simple",
"Simple two pass scheduling: minimize critical path "
"and maximize processor utilization"),
- clEnumValN(simpleNoItinScheduling, "simple-noitin",
+ clEnumValN(ScheduleDAG::simpleNoItinScheduling, "simple-noitin",
"Simple two pass scheduling: Same as simple "
"except using generic latency"),
- clEnumValN(listSchedulingBURR, "list-burr",
- "Bottom up register reduction list scheduling"),
- clEnumValN(listSchedulingTD, "list-td",
+ clEnumValN(ScheduleDAG::listSchedulingBURR, "list-burr",
+ "Bottom-up register reduction list scheduling"),
+ clEnumValN(ScheduleDAG::listSchedulingTDRR, "list-tdrr",
+ "Top-down register reduction list scheduling"),
+ clEnumValN(ScheduleDAG::listSchedulingTD, "list-td",
"Top-down list scheduler"),
clEnumValEnd));
} // namespace
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
const Type *Ty = AI->getAllocatedType();
- uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
unsigned Align =
- std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
+ std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
AI->getAlignment());
// If the alignment of the value is smaller than the size of the value,
// implemented with a libcall, etc.
TargetLowering &TLI;
SelectionDAG &DAG;
- const TargetData &TD;
+ const TargetData *TD;
/// SwitchCases - Vector of CaseBlock structures used to communicate
/// SwitchInst code generation information.
SDOperand ADD = DAG.getNode(ISD::ADD, PTy, IDX, DAG.getJumpTable(JT.JTI,PTy));
SDOperand LD = DAG.getLoad(PTy, Copy.getValue(1), ADD, DAG.getSrcValue(0));
DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), LD));
-
- // Update successor info
- for (std::set<MachineBasicBlock*>::iterator ii = JT.SuccMBBs.begin(),
- ee = JT.SuccMBBs.end(); ii != ee; ++ii)
- JT.MBB->addSuccessor(*ii);
}
void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
const BasicBlock *LLVMBB = CurMBB->getBasicBlock();
Reloc::Model Relocs = TLI.getTargetMachine().getRelocationModel();
- // If the switch has more than 3 blocks, and is 100% dense, then emit a jump
- // table rather than lowering the switch to a binary tree of conditional
- // branches.
- // FIXME: Make this work with 64 bit targets someday, possibly by always
- // doing differences there so that entries stay 32 bits.
+  // If the switch has more than 5 blocks, is at least 31.25% dense, and the
+ // target supports indirect branches, then emit a jump table rather than
+ // lowering the switch to a binary tree of conditional branches.
// FIXME: Make this work with PIC code
if (TLI.isOperationLegal(ISD::BRIND, TLI.getPointerTy()) &&
- TLI.getPointerTy() == MVT::i32 &&
(Relocs == Reloc::Static || Relocs == Reloc::DynamicNoPIC) &&
- Cases.size() > 3) {
+ Cases.size() > 5) {
uint64_t First = cast<ConstantIntegral>(Cases.front().first)->getRawValue();
uint64_t Last = cast<ConstantIntegral>(Cases.back().first)->getRawValue();
-
- // Determine density
- // FIXME: support sub-100% density
- if (((Last - First) + 1ULL) == (uint64_t)Cases.size()) {
+ double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);
+
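+    // For example, 6 cases spread over the value range [0, 15] give a
+    // density of 6/16 = 0.375, which is above the 0.3125 threshold.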
+ if (Density >= 0.3125) {
// Create a new basic block to hold the code for loading the address
// of the jump table, and jumping to it. Update successor information;
// we will either branch to the default case for the switch, or the jump
DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
DAG.getBasicBlock(Default)));
- // Build a sorted vector of destination BBs, corresponding to each target
- // of the switch.
- // FIXME: need to insert DefaultMBB for each "hole" in the jump table,
- // when we support jump tables with < 100% density.
+ // Build a vector of destination BBs, corresponding to each target
+ // of the jump table. If the value of the jump table slot corresponds to
+    // a case statement, push the case's BB onto the vector; otherwise, push
+ // the default BB.
std::set<MachineBasicBlock*> UniqueBBs;
std::vector<MachineBasicBlock*> DestBBs;
- for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++ii) {
- DestBBs.push_back(ii->second);
- UniqueBBs.insert(ii->second);
+ uint64_t TEI = First;
+ for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI) {
+ if (cast<ConstantIntegral>(ii->first)->getRawValue() == TEI) {
+ DestBBs.push_back(ii->second);
+ UniqueBBs.insert(ii->second);
+ ++ii;
+ } else {
+ DestBBs.push_back(Default);
+ UniqueBBs.insert(Default);
+ }
}
+
+ // Update successor info
+ for (std::set<MachineBasicBlock*>::iterator ii = UniqueBBs.begin(),
+ ee = UniqueBBs.end(); ii != ee; ++ii)
+ JumpTableBB->addSuccessor(*ii);
+
+ // Create a jump table index for this jump table, or return an existing
+ // one.
unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
// Set the jump table information so that we can codegen it as a second
JT.JTI = JTI;
JT.MBB = JumpTableBB;
JT.Default = Default;
- JT.SuccMBBs = UniqueBBs;
return;
}
}
void SelectionDAGLowering::visitGetElementPtr(User &I) {
SDOperand N = getValue(I.getOperand(0));
const Type *Ty = I.getOperand(0)->getType();
- const Type *UIntPtrTy = TD.getIntPtrType();
+ const Type *UIntPtrTy = TD->getIntPtrType();
for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
unsigned Field = cast<ConstantUInt>(Idx)->getValue();
if (Field) {
// N = N + Offset
- uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
+ uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field];
N = DAG.getNode(ISD::ADD, N.getValueType(), N,
getIntPtrConstant(Offset));
}
uint64_t Offs;
if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
- Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
+ Offs = (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
else
- Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
+ Offs = TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
continue;
}
// N = N + Idx * ElementSize;
- uint64_t ElementSize = TD.getTypeSize(Ty);
+ uint64_t ElementSize = TD->getTypeSize(Ty);
SDOperand IdxN = getValue(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend
return; // getValue will auto-populate this.
const Type *Ty = I.getAllocatedType();
- uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
- unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
+ uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+ unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
I.getAlignment());
SDOperand AllocSize = getValue(I.getArraySize());
Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
// Scale the source by the type size.
- uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
+ uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
Src = DAG.getNode(ISD::MUL, Src.getValueType(),
Src, getIntPtrConstant(ElementSize));
std::vector<std::pair<SDOperand, const Type*> > Args;
- Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));
+ Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType()));
std::pair<SDOperand,SDOperand> Result =
TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
void SelectionDAGLowering::visitFree(FreeInst &I) {
std::vector<std::pair<SDOperand, const Type*> > Args;
Args.push_back(std::make_pair(getValue(I.getOperand(0)),
- TLI.getTargetData().getIntPtrType()));
+ TLI.getTargetData()->getIntPtrType()));
MVT::ValueType IntPtr = TLI.getPointerTy();
std::pair<SDOperand,SDOperand> Result =
TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
}
+/// OptimizeNoopCopyExpression - We have determined that the specified cast
+/// instruction is a noop copy (e.g. it's casting from one pointer type to
+/// another, int->uint, or int->sbyte on PPC).
+///
+/// Return true if any changes are made.
+static bool OptimizeNoopCopyExpression(CastInst *CI) {
+ BasicBlock *DefBB = CI->getParent();
+
+ /// InsertedCasts - Only insert a cast in each block once.
+ std::map<BasicBlock*, CastInst*> InsertedCasts;
+
+ bool MadeChange = false;
+ for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
+ UI != E; ) {
+ Use &TheUse = UI.getUse();
+ Instruction *User = cast<Instruction>(*UI);
+
+ // Figure out which BB this cast is used in. For PHI's this is the
+ // appropriate predecessor block.
+ BasicBlock *UserBB = User->getParent();
+ if (PHINode *PN = dyn_cast<PHINode>(User)) {
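+      // PHI operands are stored as (value, incoming-block) pairs, so dividing
+      // the operand number by two gives the index of the incoming edge.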
+ unsigned OpVal = UI.getOperandNo()/2;
+ UserBB = PN->getIncomingBlock(OpVal);
+ }
+
+ // Preincrement use iterator so we don't invalidate it.
+ ++UI;
+
+ // If this user is in the same block as the cast, don't change the cast.
+ if (UserBB == DefBB) continue;
+
+ // If we have already inserted a cast into this block, use it.
+ CastInst *&InsertedCast = InsertedCasts[UserBB];
+
+ if (!InsertedCast) {
+ BasicBlock::iterator InsertPt = UserBB->begin();
+ while (isa<PHINode>(InsertPt)) ++InsertPt;
+
+ InsertedCast =
+ new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
+ MadeChange = true;
+ }
+
+    // Replace a use of the cast with a use of the new cast.
+ TheUse = InsertedCast;
+ }
+
+ // If we removed all uses, nuke the cast.
+ if (CI->use_empty())
+ CI->eraseFromParent();
+
+ return MadeChange;
+}
+
/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
/// casting to the type of GEPI.
-static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
- Value *Ptr, Value *PtrOffset) {
+static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
+ Instruction *GEPI, Value *Ptr,
+ Value *PtrOffset) {
if (V) return V; // Already computed.
BasicBlock::iterator InsertPt;
// Add the offset, cast it to the right type.
Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
- Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
- return V = Ptr;
+ return V = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
+}
+
+/// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
+/// compute its value. The RepPtr value can be computed with Ptr+PtrOffset. One
+/// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's
+/// block, then ReplaceAllUsesWith'ing everything. However, we would prefer to
+/// sink PtrOffset into user blocks where doing so will likely allow us to fold
+/// the constant add into a load or store instruction. Additionally, if a user
+/// is a pointer-pointer cast, we look through it to find its users.
+static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr,
+ Constant *PtrOffset, BasicBlock *DefBB,
+ GetElementPtrInst *GEPI,
+ std::map<BasicBlock*,Instruction*> &InsertedExprs) {
+ while (!RepPtr->use_empty()) {
+ Instruction *User = cast<Instruction>(RepPtr->use_back());
+
+ // If the user is a Pointer-Pointer cast, recurse.
+ if (isa<CastInst>(User) && isa<PointerType>(User->getType())) {
+ ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
+
+ // Drop the use of RepPtr. The cast is dead. Don't delete it now, else we
+ // could invalidate an iterator.
+ User->setOperand(0, UndefValue::get(RepPtr->getType()));
+ continue;
+ }
+
+ // If this is a load of the pointer, or a store through the pointer, emit
+ // the increment into the load/store block.
+ Instruction *NewVal;
+ if (isa<LoadInst>(User) ||
+ (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) {
+ NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
+ User->getParent(), GEPI,
+ Ptr, PtrOffset);
+ } else {
+ // If this use is not foldable into the addressing mode, use a version
+ // emitted in the GEP block.
+ NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
+ Ptr, PtrOffset);
+ }
+
+ if (GEPI->getType() != RepPtr->getType()) {
+ BasicBlock::iterator IP = NewVal;
+ ++IP;
+ NewVal = new CastInst(NewVal, RepPtr->getType(), "", IP);
+ }
+ User->replaceUsesOfWith(RepPtr, NewVal);
+ }
}
/// defined, the addressing expression of the GEP cannot be folded into loads or
/// stores that use it. In this case, decompose the GEP and move constant
/// indices into blocks that use it.
-static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
- const TargetData &TD) {
+static bool OptimizeGEPExpression(GetElementPtrInst *GEPI,
+ const TargetData *TD) {
// If this GEP is only used inside the block it is defined in, there is no
// need to rewrite it.
bool isUsedOutsideDefBB = false;
break;
}
}
- if (!isUsedOutsideDefBB) return;
+ if (!isUsedOutsideDefBB) return false;
// If this GEP has no non-zero constant indices, there is nothing we can do,
// ignore it.
bool hasConstantIndex = false;
+ bool hasVariableIndex = false;
for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
E = GEPI->op_end(); OI != E; ++OI) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI))
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) {
if (CI->getRawValue()) {
hasConstantIndex = true;
break;
}
+ } else {
+ hasVariableIndex = true;
+ }
}
+
+ // If this is a "GEP X, 0, 0, 0", turn this into a cast.
+ if (!hasConstantIndex && !hasVariableIndex) {
+ Value *NC = new CastInst(GEPI->getOperand(0), GEPI->getType(),
+ GEPI->getName(), GEPI);
+ GEPI->replaceAllUsesWith(NC);
+ GEPI->eraseFromParent();
+ return true;
+ }
+
// If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
- if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return;
+ if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0)))
+ return false;
// Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
// constant offset (which we now know is non-zero) and deal with it later.
uint64_t ConstantOffset = 0;
- const Type *UIntPtrTy = TD.getIntPtrType();
+ const Type *UIntPtrTy = TD->getIntPtrType();
Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
const Type *Ty = GEPI->getOperand(0)->getType();
if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantUInt>(Idx)->getValue();
if (Field)
- ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
+ ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field];
Ty = StTy->getElementType(Field);
} else {
Ty = cast<SequentialType>(Ty)->getElementType();
if (CI->getRawValue() == 0) continue;
if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
- ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
+ ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
else
- ConstantOffset+=TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
+ ConstantOffset+=TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
continue;
}
// Cast Idx to UIntPtrTy if needed.
Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
- uint64_t ElementSize = TD.getTypeSize(Ty);
+ uint64_t ElementSize = TD->getTypeSize(Ty);
// Mask off bits that should not be set.
ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
// block, otherwise we use a canonical version right next to the gep (these
// won't be foldable as addresses, so we might as well share the computation).
- std::map<BasicBlock*,Value*> InsertedExprs;
- while (!GEPI->use_empty()) {
- Instruction *User = cast<Instruction>(GEPI->use_back());
-
- // If this use is not foldable into the addressing mode, use a version
- // emitted in the GEP block.
- Value *NewVal;
- if (!isa<LoadInst>(User) &&
- (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) {
- NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
- Ptr, PtrOffset);
- } else {
- // Otherwise, insert the code in the User's block so it can be folded into
- // any users in that block.
- NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
- User->getParent(), GEPI,
- Ptr, PtrOffset);
- }
- User->replaceUsesOfWith(GEPI, NewVal);
- }
+ std::map<BasicBlock*,Instruction*> InsertedExprs;
+ ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
// Finally, the GEP is dead, remove it.
GEPI->eraseFromParent();
+
+ return true;
}
bool SelectionDAGISel::runOnFunction(Function &Fn) {
// constants, this way the load of the constant into a vreg will not be placed
// into MBBs that are used some other way.
//
- // In this pass we also look for GEP instructions that are used across basic
- // blocks and rewrites them to improve basic-block-at-a-time selection.
+ // In this pass we also look for GEP and cast instructions that are used
+ // across basic blocks and rewrite them to improve basic-block-at-a-time
+ // selection.
//
+ bool MadeChange = true;
+ while (MadeChange) {
+ MadeChange = false;
for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
PHINode *PN;
BasicBlock::iterator BBI;
if (isa<Constant>(PN->getIncomingValue(i)))
SplitCriticalEdge(PN->getIncomingBlock(i), BB);
- for (BasicBlock::iterator E = BB->end(); BBI != E; )
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++))
- OptimizeGEPExpression(GEPI, TLI.getTargetData());
+ for (BasicBlock::iterator E = BB->end(); BBI != E; ) {
+ Instruction *I = BBI++;
+ if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
+ MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData());
+ } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
+ // If this is a noop copy, sink it into user blocks to reduce the number
+        // of virtual registers that must be created and coalesced.
+ MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
+ MVT::ValueType DstVT = TLI.getValueType(CI->getType());
+
+        // If this is an fp<->int conversion, it is not a noop copy; skip it.
+ if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
+ continue;
+
+ // If this is an extension, it will be a zero or sign extension, which
+ // isn't a noop.
+ if (SrcVT < DstVT) continue;
+
+ // If these values will be promoted, find out what they will be promoted
+ // to. This helps us consider truncates on PPC as noop copies when they
+ // are.
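+        // For example, an int -> short truncate becomes int -> int once short
+        // is promoted to int, and can then be treated as a simple copy.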
+ if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
+ SrcVT = TLI.getTypeToTransformTo(SrcVT);
+ if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
+ DstVT = TLI.getTypeToTransformTo(DstVT);
+
+ // If, after promotion, these are the same types, this is a noop copy.
+ if (SrcVT == DstVT)
+ MadeChange |= OptimizeNoopCopyExpression(CI);
+ }
+ }
+ }
}
FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
if (ViewISelDAGs) DAG.viewGraph();
- // TEMPORARY.
- FoldNodeInFlight = !NoFoldNodeInFlight;
-
// Third, instruction select all of the operations to machine code, adding the
// code to the MachineBasicBlock.
InstructionSelectBasicBlock(DAG);
MachineBasicBlock *PHIBB = PHI->getParent();
assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
"This is not a machine PHI node that we are updating!");
- if (PHIBB == JT.Default || JT.SuccMBBs.find(PHIBB) != JT.SuccMBBs.end()) {
- PHIBB = (PHIBB == JT.Default) ? RangeBB : BB;
+ if (PHIBB == JT.Default) {
PHI->addRegOperand(PHINodesToUpdate[pi].second);
- PHI->addMachineBasicBlockOperand(PHIBB);
+ PHI->addMachineBasicBlockOperand(RangeBB);
+ }
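+      // If PHIBB is also a successor of BB, the PHI needs an incoming value
+      // for the edge from BB as well.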
+ if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) {
+ PHI->addRegOperand(PHINodesToUpdate[pi].second);
+ PHI->addMachineBasicBlockOperand(BB);
}
}
return;
switch (ISHeuristic) {
default: assert(0 && "Unrecognized scheduling heuristic");
- case defaultScheduling:
+ case ScheduleDAG::defaultScheduling:
if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
else {
SL = createBURRListDAGScheduler(DAG, BB);
}
break;
- case noScheduling:
+ case ScheduleDAG::noScheduling:
SL = createBFS_DAGScheduler(DAG, BB);
break;
- case simpleScheduling:
+ case ScheduleDAG::simpleScheduling:
SL = createSimpleDAGScheduler(false, DAG, BB);
break;
- case simpleNoItinScheduling:
+ case ScheduleDAG::simpleNoItinScheduling:
SL = createSimpleDAGScheduler(true, DAG, BB);
break;
- case listSchedulingBURR:
+ case ScheduleDAG::listSchedulingBURR:
SL = createBURRListDAGScheduler(DAG, BB);
break;
- case listSchedulingTD:
+ case ScheduleDAG::listSchedulingTDRR:
+ SL = createTDRRListDAGScheduler(DAG, BB);
+ break;
+ case ScheduleDAG::listSchedulingTD:
SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
break;
}