1 //===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements the SelectionDAGISel class.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "isel"
15 #include "ScheduleDAGSDNodes.h"
16 #include "SelectionDAGBuilder.h"
17 #include "llvm/CodeGen/FunctionLoweringInfo.h"
18 #include "llvm/CodeGen/SelectionDAGISel.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/DebugInfo.h"
21 #include "llvm/Constants.h"
22 #include "llvm/Function.h"
23 #include "llvm/InlineAsm.h"
24 #include "llvm/Instructions.h"
25 #include "llvm/Intrinsics.h"
26 #include "llvm/IntrinsicInst.h"
27 #include "llvm/LLVMContext.h"
28 #include "llvm/Module.h"
29 #include "llvm/CodeGen/FastISel.h"
30 #include "llvm/CodeGen/GCStrategy.h"
31 #include "llvm/CodeGen/GCMetadata.h"
32 #include "llvm/CodeGen/MachineFrameInfo.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineModuleInfo.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
38 #include "llvm/CodeGen/SchedulerRegistry.h"
39 #include "llvm/CodeGen/SelectionDAG.h"
40 #include "llvm/Target/TargetRegisterInfo.h"
41 #include "llvm/Target/TargetIntrinsicInfo.h"
42 #include "llvm/Target/TargetInstrInfo.h"
43 #include "llvm/Target/TargetLowering.h"
44 #include "llvm/Target/TargetMachine.h"
45 #include "llvm/Target/TargetOptions.h"
46 #include "llvm/Support/Compiler.h"
47 #include "llvm/Support/Debug.h"
48 #include "llvm/Support/ErrorHandling.h"
49 #include "llvm/Support/Timer.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include "llvm/ADT/Statistic.h"
55 STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
56 STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
59 EnableFastISelVerbose("fast-isel-verbose", cl::Hidden,
60 cl::desc("Enable verbose messages in the \"fast\" "
61 "instruction selector"));
63 EnableFastISelAbort("fast-isel-abort", cl::Hidden,
64 cl::desc("Enable abort calls when \"fast\" instruction fails"));
68 ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
69 cl::desc("Pop up a window to show dags before the first "
72 ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
73 cl::desc("Pop up a window to show dags before legalize types"));
75 ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
76 cl::desc("Pop up a window to show dags before legalize"));
78 ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
79 cl::desc("Pop up a window to show dags before the second "
82 ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
83 cl::desc("Pop up a window to show dags before the post legalize types"
84 " dag combine pass"));
86 ViewISelDAGs("view-isel-dags", cl::Hidden,
87 cl::desc("Pop up a window to show isel dags as they are selected"));
89 ViewSchedDAGs("view-sched-dags", cl::Hidden,
90 cl::desc("Pop up a window to show sched dags as they are processed"));
92 ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
93 cl::desc("Pop up a window to show SUnit dags after they are processed"));
95 static const bool ViewDAGCombine1 = false,
96 ViewLegalizeTypesDAGs = false, ViewLegalizeDAGs = false,
97 ViewDAGCombine2 = false,
98 ViewDAGCombineLT = false,
99 ViewISelDAGs = false, ViewSchedDAGs = false,
100 ViewSUnitDAGs = false;
103 //===---------------------------------------------------------------------===//
105 /// RegisterScheduler class - Track the registration of instruction schedulers.
107 //===---------------------------------------------------------------------===//
108 MachinePassRegistry RegisterScheduler::Registry;
110 //===---------------------------------------------------------------------===//
112 /// ISHeuristic command line option for instruction schedulers.
114 //===---------------------------------------------------------------------===//
115 static cl::opt<RegisterScheduler::FunctionPassCtor, false,
116 RegisterPassParser<RegisterScheduler> >
117 ISHeuristic("pre-RA-sched",
118 cl::init(&createDefaultScheduler),
119 cl::desc("Instruction schedulers available (before register"
122 static RegisterScheduler
123 defaultListDAGScheduler("default", "Best scheduler for the target",
124 createDefaultScheduler);
127 //===--------------------------------------------------------------------===//
128 /// createDefaultScheduler - This creates an instruction scheduler appropriate for the target.
130 ScheduleDAGSDNodes* createDefaultScheduler(SelectionDAGISel *IS,
131 CodeGenOpt::Level OptLevel) {
132 const TargetLowering &TLI = IS->getTargetLowering();
134 if (OptLevel == CodeGenOpt::None)
135 return createSourceListDAGScheduler(IS, OptLevel);
136 if (TLI.getSchedulingPreference() == Sched::Latency)
137 return createTDListDAGScheduler(IS, OptLevel);
138 if (TLI.getSchedulingPreference() == Sched::RegPressure)
139 return createBURRListDAGScheduler(IS, OptLevel);
140 if (TLI.getSchedulingPreference() == Sched::Hybrid)
141 return createHybridListDAGScheduler(IS, OptLevel);
142 assert(TLI.getSchedulingPreference() == Sched::ILP &&
143 "Unknown sched type!");
144 return createILPListDAGScheduler(IS, OptLevel);
148 // EmitInstrWithCustomInserter - This method should be implemented by targets
149 // that mark instructions with the 'usesCustomInserter' flag. These
150 // instructions are special in various ways, which require special support to
151 // insert. The specified MachineInstr is created but not inserted into any
152 // basic blocks, and this method is called to expand it into a sequence of
153 // instructions, potentially also creating new basic blocks and control flow.
154 // When new basic blocks are inserted and the edges from MBB to its successors
155 // are modified, the method should insert pairs of <OldSucc, NewSucc> into the
158 TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
159 MachineBasicBlock *MBB) const {
161 dbgs() << "If a target marks an instruction with "
162 "'usesCustomInserter', it must implement "
163 "TargetLowering::EmitInstrWithCustomInserter!";
169 //===----------------------------------------------------------------------===//
170 // SelectionDAGISel code
171 //===----------------------------------------------------------------------===//
173 SelectionDAGISel::SelectionDAGISel(const TargetMachine &tm, CodeGenOpt::Level OL) :
174 MachineFunctionPass(ID), TM(tm), TLI(*tm.getTargetLowering()),
175 FuncInfo(new FunctionLoweringInfo(TLI)),
176 CurDAG(new SelectionDAG(tm)),
177 SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
183 SelectionDAGISel::~SelectionDAGISel() {
189 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
190 AU.addRequired<AliasAnalysis>();
191 AU.addPreserved<AliasAnalysis>();
192 AU.addRequired<GCModuleInfo>();
193 AU.addPreserved<GCModuleInfo>();
194 MachineFunctionPass::getAnalysisUsage(AU);
197 /// FunctionCallsSetJmp - Return true if the function has a call to setjmp or
198 /// other function that gcc recognizes as "returning twice". This is used to
199 /// limit code-gen optimizations on the machine function.
201 /// FIXME: Remove after <rdar://problem/8031714> is fixed.
202 static bool FunctionCallsSetJmp(const Function *F) {
203 const Module *M = F->getParent();
204 static const char *ReturnsTwiceFns[] = {
213 #define NUM_RETURNS_TWICE_FNS sizeof(ReturnsTwiceFns) / sizeof(const char *)
215 for (unsigned I = 0; I < NUM_RETURNS_TWICE_FNS; ++I)
216 if (const Function *Callee = M->getFunction(ReturnsTwiceFns[I])) {
217 if (!Callee->use_empty())
218 for (Value::const_use_iterator
219 I = Callee->use_begin(), E = Callee->use_end();
221 if (const CallInst *CI = dyn_cast<CallInst>(*I))
222 if (CI->getParent()->getParent() == F)
227 #undef NUM_RETURNS_TWICE_FNS
230 bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
231 // Do some sanity-checking on the command-line options.
232 assert((!EnableFastISelVerbose || EnableFastISel) &&
233 "-fast-isel-verbose requires -fast-isel");
234 assert((!EnableFastISelAbort || EnableFastISel) &&
235 "-fast-isel-abort requires -fast-isel");
237 const Function &Fn = *mf.getFunction();
238 const TargetInstrInfo &TII = *TM.getInstrInfo();
239 const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
242 RegInfo = &MF->getRegInfo();
243 AA = &getAnalysis<AliasAnalysis>();
244 GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : 0;
246 DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
249 FuncInfo->set(Fn, *MF);
252 SelectAllBasicBlocks(Fn);
254 // If the first basic block in the function has live ins that need to be
255 // copied into vregs, emit the copies into the top of the block before
256 // emitting the code for the block.
257 MachineBasicBlock *EntryMBB = MF->begin();
258 RegInfo->EmitLiveInCopies(EntryMBB, TRI, TII);
260 DenseMap<unsigned, unsigned> LiveInMap;
261 if (!FuncInfo->ArgDbgValues.empty())
262 for (MachineRegisterInfo::livein_iterator LI = RegInfo->livein_begin(),
263 E = RegInfo->livein_end(); LI != E; ++LI)
265 LiveInMap.insert(std::make_pair(LI->first, LI->second));
267 // Insert DBG_VALUE instructions for function arguments to the entry block.
268 for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
269 MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
270 unsigned Reg = MI->getOperand(0).getReg();
271 if (TargetRegisterInfo::isPhysicalRegister(Reg))
272 EntryMBB->insert(EntryMBB->begin(), MI);
274 MachineInstr *Def = RegInfo->getVRegDef(Reg);
275 MachineBasicBlock::iterator InsertPos = Def;
276 // FIXME: VR def may not be in entry block.
277 Def->getParent()->insert(llvm::next(InsertPos), MI);
280 // If Reg is live-in then update debug info to track its copy in a vreg.
281 DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
282 if (LDI != LiveInMap.end()) {
283 MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
284 MachineBasicBlock::iterator InsertPos = Def;
285 const MDNode *Variable =
286 MI->getOperand(MI->getNumOperands()-1).getMetadata();
287 unsigned Offset = MI->getOperand(1).getImm();
288 // Def is never a terminator here, so it is ok to increment InsertPos.
289 BuildMI(*EntryMBB, ++InsertPos, MI->getDebugLoc(),
290 TII.get(TargetOpcode::DBG_VALUE))
291 .addReg(LDI->second, RegState::Debug)
292 .addImm(Offset).addMetadata(Variable);
296 // Determine if there are any calls in this machine function.
297 MachineFrameInfo *MFI = MF->getFrameInfo();
298 if (!MFI->hasCalls()) {
299 for (MachineFunction::const_iterator
300 I = MF->begin(), E = MF->end(); I != E; ++I) {
301 const MachineBasicBlock *MBB = I;
302 for (MachineBasicBlock::const_iterator
303 II = MBB->begin(), IE = MBB->end(); II != IE; ++II) {
304 const TargetInstrDesc &TID = TM.getInstrInfo()->get(II->getOpcode());
306 // Operand 1 of an inline asm instruction indicates whether the asm
307 // needs stack or not.
308 if ((II->isInlineAsm() && II->getOperand(1).getImm()) ||
309 (TID.isCall() && !TID.isReturn())) {
310 MFI->setHasCalls(true);
318 // Determine if there is a call to setjmp in the machine function.
319 MF->setCallsSetJmp(FunctionCallsSetJmp(&Fn));
321 // Replace forward-declared registers with the registers containing
322 // the desired value.
323 MachineRegisterInfo &MRI = MF->getRegInfo();
324 for (DenseMap<unsigned, unsigned>::iterator
325 I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
327 unsigned From = I->first;
328 unsigned To = I->second;
329 // If To is also scheduled to be replaced, find what its ultimate
332 DenseMap<unsigned, unsigned>::iterator J =
333 FuncInfo->RegFixups.find(To);
338 MRI.replaceRegWith(From, To);
341 // Release function-specific state. SDB and CurDAG are already cleared
349 SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
350 BasicBlock::const_iterator End,
352 // Lower all of the non-terminator instructions. If a call is emitted
353 // as a tail call, cease emitting nodes for this block. Terminators
354 // are handled below.
355 for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I)
358 // Make sure the root of the DAG is up-to-date.
359 CurDAG->setRoot(SDB->getControlRoot());
360 HadTailCall = SDB->HasTailCall;
363 // Final step, emit the lowered DAG as machine code.
367 void SelectionDAGISel::ComputeLiveOutVRegInfo() {
368 SmallPtrSet<SDNode*, 128> VisitedNodes;
369 SmallVector<SDNode*, 128> Worklist;
371 Worklist.push_back(CurDAG->getRoot().getNode());
378 SDNode *N = Worklist.pop_back_val();
380 // If we've already seen this node, ignore it.
381 if (!VisitedNodes.insert(N))
384 // Otherwise, add all chain operands to the worklist.
385 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
386 if (N->getOperand(i).getValueType() == MVT::Other)
387 Worklist.push_back(N->getOperand(i).getNode());
389 // If this is a CopyToReg with a vreg dest, process it.
390 if (N->getOpcode() != ISD::CopyToReg)
393 unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
394 if (!TargetRegisterInfo::isVirtualRegister(DestReg))
397 // Ignore non-scalar or non-integer values.
398 SDValue Src = N->getOperand(2);
399 EVT SrcVT = Src.getValueType();
400 if (!SrcVT.isInteger() || SrcVT.isVector())
403 unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
404 Mask = APInt::getAllOnesValue(SrcVT.getSizeInBits());
405 CurDAG->ComputeMaskedBits(Src, Mask, KnownZero, KnownOne);
407 // Only install this information if it tells us something.
408 if (NumSignBits != 1 || KnownZero != 0 || KnownOne != 0) {
409 DestReg -= TargetRegisterInfo::FirstVirtualRegister;
410 if (DestReg >= FuncInfo->LiveOutRegInfo.size())
411 FuncInfo->LiveOutRegInfo.resize(DestReg+1);
412 FunctionLoweringInfo::LiveOutInfo &LOI =
413 FuncInfo->LiveOutRegInfo[DestReg];
414 LOI.NumSignBits = NumSignBits;
415 LOI.KnownOne = KnownOne;
416 LOI.KnownZero = KnownZero;
418 } while (!Worklist.empty());
421 void SelectionDAGISel::CodeGenAndEmitDAG() {
422 std::string GroupName;
423 if (TimePassesIsEnabled)
424 GroupName = "Instruction Selection and Scheduling";
425 std::string BlockName;
426 if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs ||
427 ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
429 BlockName = MF->getFunction()->getNameStr() + ":" +
430 FuncInfo->MBB->getBasicBlock()->getNameStr();
432 DEBUG(dbgs() << "Initial selection DAG:\n"; CurDAG->dump());
434 if (ViewDAGCombine1) CurDAG->viewGraph("dag-combine1 input for " + BlockName);
436 // Run the DAG combiner in pre-legalize mode.
438 NamedRegionTimer T("DAG Combining 1", GroupName, TimePassesIsEnabled);
439 CurDAG->Combine(Unrestricted, *AA, OptLevel);
442 DEBUG(dbgs() << "Optimized lowered selection DAG:\n"; CurDAG->dump());
444 // Second step, hack on the DAG until it only uses operations and types that
445 // the target supports.
446 if (ViewLegalizeTypesDAGs) CurDAG->viewGraph("legalize-types input for " +
451 NamedRegionTimer T("Type Legalization", GroupName, TimePassesIsEnabled);
452 Changed = CurDAG->LegalizeTypes();
455 DEBUG(dbgs() << "Type-legalized selection DAG:\n"; CurDAG->dump());
458 if (ViewDAGCombineLT)
459 CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
461 // Run the DAG combiner in post-type-legalize mode.
463 NamedRegionTimer T("DAG Combining after legalize types", GroupName,
464 TimePassesIsEnabled);
465 CurDAG->Combine(NoIllegalTypes, *AA, OptLevel);
468 DEBUG(dbgs() << "Optimized type-legalized selection DAG:\n";
473 NamedRegionTimer T("Vector Legalization", GroupName, TimePassesIsEnabled);
474 Changed = CurDAG->LegalizeVectors();
479 NamedRegionTimer T("Type Legalization 2", GroupName, TimePassesIsEnabled);
480 CurDAG->LegalizeTypes();
483 if (ViewDAGCombineLT)
484 CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
486 // Run the DAG combiner in post-type-legalize mode.
488 NamedRegionTimer T("DAG Combining after legalize vectors", GroupName,
489 TimePassesIsEnabled);
490 CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
493 DEBUG(dbgs() << "Optimized vector-legalized selection DAG:\n";
497 if (ViewLegalizeDAGs) CurDAG->viewGraph("legalize input for " + BlockName);
500 NamedRegionTimer T("DAG Legalization", GroupName, TimePassesIsEnabled);
501 CurDAG->Legalize(OptLevel);
504 DEBUG(dbgs() << "Legalized selection DAG:\n"; CurDAG->dump());
506 if (ViewDAGCombine2) CurDAG->viewGraph("dag-combine2 input for " + BlockName);
508 // Run the DAG combiner in post-legalize mode.
510 NamedRegionTimer T("DAG Combining 2", GroupName, TimePassesIsEnabled);
511 CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
514 DEBUG(dbgs() << "Optimized legalized selection DAG:\n"; CurDAG->dump());
516 if (OptLevel != CodeGenOpt::None)
517 ComputeLiveOutVRegInfo();
519 if (ViewISelDAGs) CurDAG->viewGraph("isel input for " + BlockName);
521 // Third, instruction select all of the operations to machine code, adding the
522 // code to the MachineBasicBlock.
524 NamedRegionTimer T("Instruction Selection", GroupName, TimePassesIsEnabled);
525 DoInstructionSelection();
528 DEBUG(dbgs() << "Selected selection DAG:\n"; CurDAG->dump());
530 if (ViewSchedDAGs) CurDAG->viewGraph("scheduler input for " + BlockName);
532 // Schedule machine code.
533 ScheduleDAGSDNodes *Scheduler = CreateScheduler();
535 NamedRegionTimer T("Instruction Scheduling", GroupName,
536 TimePassesIsEnabled);
537 Scheduler->Run(CurDAG, FuncInfo->MBB, FuncInfo->InsertPt);
540 if (ViewSUnitDAGs) Scheduler->viewGraph();
542 // Emit machine code to BB. This can change 'BB' to the last block being
545 NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
547 FuncInfo->MBB = Scheduler->EmitSchedule();
548 FuncInfo->InsertPt = Scheduler->InsertPos;
551 // Free the scheduler state.
553 NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName,
554 TimePassesIsEnabled);
558 // Free the SelectionDAG state, now that we're finished with it.
562 void SelectionDAGISel::DoInstructionSelection() {
563 DEBUG(errs() << "===== Instruction selection begins:\n");
567 // Select target instructions for the DAG.
569 // Number all nodes with a topological order and set DAGSize.
570 DAGSize = CurDAG->AssignTopologicalOrder();
572 // Create a dummy node (which is not added to allnodes), that adds
573 // a reference to the root node, preventing it from being deleted,
574 // and tracking any changes of the root.
575 HandleSDNode Dummy(CurDAG->getRoot());
576 ISelPosition = SelectionDAG::allnodes_iterator(CurDAG->getRoot().getNode());
579 // The AllNodes list is now topological-sorted. Visit the
580 // nodes by starting at the end of the list (the root of the
581 // graph) and preceding back toward the beginning (the entry
583 while (ISelPosition != CurDAG->allnodes_begin()) {
584 SDNode *Node = --ISelPosition;
585 // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
586 // but there are currently some corner cases that it misses. Also, this
587 // makes it theoretically possible to disable the DAGCombiner.
588 if (Node->use_empty())
591 SDNode *ResNode = Select(Node);
593 // FIXME: This is pretty gross. 'Select' should be changed to not return
594 // anything at all and this code should be nuked with a tactical strike.
596 // If node should not be replaced, continue with the next one.
597 if (ResNode == Node || Node->getOpcode() == ISD::DELETED_NODE)
601 ReplaceUses(Node, ResNode);
603 // If after the replacement this node is not used any more,
604 // remove this dead node.
605 if (Node->use_empty()) { // Don't delete EntryToken, etc.
606 ISelUpdater ISU(ISelPosition);
607 CurDAG->RemoveDeadNode(Node, &ISU);
611 CurDAG->setRoot(Dummy.getValue());
614 DEBUG(errs() << "===== Instruction selection ends:\n");
616 PostprocessISelDAG();
619 /// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
620 /// do other setup for EH landing-pad blocks.
621 void SelectionDAGISel::PrepareEHLandingPad() {
622 // Add a label to mark the beginning of the landing pad. Deletion of the
623 // landing pad can thus be detected via the MachineModuleInfo.
624 MCSymbol *Label = MF->getMMI().addLandingPad(FuncInfo->MBB);
626 const TargetInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
627 BuildMI(*FuncInfo->MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
630 // Mark exception register as live in.
631 unsigned Reg = TLI.getExceptionAddressRegister();
632 if (Reg) FuncInfo->MBB->addLiveIn(Reg);
634 // Mark exception selector register as live in.
635 Reg = TLI.getExceptionSelectorRegister();
636 if (Reg) FuncInfo->MBB->addLiveIn(Reg);
638 // FIXME: Hack around an exception handling flaw (PR1508): the personality
639 // function and list of typeids logically belong to the invoke (or, if you
640 // like, the basic block containing the invoke), and need to be associated
641 // with it in the dwarf exception handling tables. Currently however the
642 // information is provided by an intrinsic (eh.selector) that can be moved
643 // to unexpected places by the optimizers: if the unwind edge is critical,
644 // then breaking it can result in the intrinsics being in the successor of
645 // the landing pad, not the landing pad itself. This results
646 // in exceptions not being caught because no typeids are associated with
647 // the invoke. This may not be the only way things can go wrong, but it
648 // is the only way we try to work around for the moment.
649 const BasicBlock *LLVMBB = FuncInfo->MBB->getBasicBlock();
650 const BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator());
652 if (Br && Br->isUnconditional()) { // Critical edge?
653 BasicBlock::const_iterator I, E;
654 for (I = LLVMBB->begin(), E = --LLVMBB->end(); I != E; ++I)
655 if (isa<EHSelectorInst>(I))
659 // No catch info found - try to extract some from the successor.
660 CopyCatchInfo(Br->getSuccessor(0), LLVMBB, &MF->getMMI(), *FuncInfo);
667 bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
669 // Don't try to fold volatile loads. Target has to deal with alignment
671 if (LI->isVolatile()) return false;
673 // Figure out which vreg this is going into.
674 unsigned LoadReg = FastIS->getRegForValue(LI);
675 assert(LoadReg && "Load isn't already assigned a vreg? ");
677 // Check to see what the uses of this vreg are. If it has no uses, or more
678 // than one use (at the machine instr level) then we can't fold it.
679 MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(LoadReg);
680 if (RI == RegInfo->reg_end())
683 // See if there is exactly one use of the vreg. If there are multiple uses,
684 // then the instruction got lowered to multiple machine instructions or the
685 // use of the loaded value ended up being multiple operands of the result, in
686 // either case, we can't fold this.
687 MachineRegisterInfo::reg_iterator PostRI = RI; ++PostRI;
688 if (PostRI != RegInfo->reg_end())
691 assert(RI.getOperand().isUse() &&
692 "The only use of the vreg must be a use, we haven't emitted the def!");
694 // Ask the target to try folding the load.
695 return FastIS->TryToFoldLoad(&*RI, RI.getOperandNo(), LI);
701 void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
702 // Initialize the Fast-ISel state, if needed.
703 FastISel *FastIS = 0;
705 FastIS = TLI.createFastISel(*FuncInfo);
707 // Iterate over all basic blocks in the function.
708 for (Function::const_iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
709 const BasicBlock *LLVMBB = &*I;
710 FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
711 FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
713 BasicBlock::const_iterator const Begin = LLVMBB->getFirstNonPHI();
714 BasicBlock::const_iterator const End = LLVMBB->end();
715 BasicBlock::const_iterator BI = End;
717 FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
719 // Setup an EH landing-pad block.
720 if (FuncInfo->MBB->isLandingPad())
721 PrepareEHLandingPad();
723 // Lower any arguments needed in this block if this is the entry block.
724 if (LLVMBB == &Fn.getEntryBlock())
725 LowerArguments(LLVMBB);
727 // Before doing SelectionDAG ISel, see if FastISel has been requested.
729 FastIS->startNewBlock();
731 // Emit code for any incoming arguments. This must happen before
732 // beginning FastISel on the entry block.
733 if (LLVMBB == &Fn.getEntryBlock()) {
734 CurDAG->setRoot(SDB->getControlRoot());
738 // If we inserted any instructions at the beginning, make a note of
739 // where they are, so we can be sure to emit subsequent instructions
741 if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
742 FastIS->setLastLocalValue(llvm::prior(FuncInfo->InsertPt));
744 FastIS->setLastLocalValue(0);
747 // Do FastISel on as many instructions as possible.
748 for (; BI != Begin; --BI) {
749 const Instruction *Inst = llvm::prior(BI);
751 // If we no longer require this instruction, skip it.
752 if (!Inst->mayWriteToMemory() &&
753 !isa<TerminatorInst>(Inst) &&
754 !isa<DbgInfoIntrinsic>(Inst) &&
755 !FuncInfo->isExportedInst(Inst))
758 // Bottom-up: reset the insert pos at the top, after any local-value
760 FastIS->recomputeInsertPt();
762 // Try to select the instruction with FastISel.
763 if (FastIS->SelectInstruction(Inst)) {
764 // If fast isel succeeded, check to see if there is a single-use
765 // non-volatile load right before the selected instruction, and see if
766 // the load is used by the instruction. If so, try to fold it.
767 const Instruction *BeforeInst = 0;
769 BeforeInst = llvm::prior(llvm::prior(BI));
770 if (BeforeInst && isa<LoadInst>(BeforeInst) &&
771 BeforeInst->hasOneUse() && *BeforeInst->use_begin() == Inst &&
772 TryToFoldFastISelLoad(cast<LoadInst>(BeforeInst), FastIS)) {
773 // If we succeeded, don't re-select the load.
779 // Then handle certain instructions as single-LLVM-Instruction blocks.
780 if (isa<CallInst>(Inst)) {
781 ++NumFastIselFailures;
782 if (EnableFastISelVerbose || EnableFastISelAbort) {
783 dbgs() << "FastISel missed call: ";
787 if (!Inst->getType()->isVoidTy() && !Inst->use_empty()) {
788 unsigned &R = FuncInfo->ValueMap[Inst];
790 R = FuncInfo->CreateRegs(Inst->getType());
793 bool HadTailCall = false;
794 SelectBasicBlock(Inst, BI, HadTailCall);
796 // If the call was emitted as a tail call, we're done with the block.
805 // Otherwise, give up on FastISel for the rest of the block.
806 // For now, be a little lenient about non-branch terminators.
807 if (!isa<TerminatorInst>(Inst) || isa<BranchInst>(Inst)) {
808 ++NumFastIselFailures;
809 if (EnableFastISelVerbose || EnableFastISelAbort) {
810 dbgs() << "FastISel miss: ";
813 if (EnableFastISelAbort)
814 // The "fast" selector couldn't handle something and bailed.
815 // For the purpose of debugging, just abort.
816 llvm_unreachable("FastISel didn't select the entire block");
821 FastIS->recomputeInsertPt();
824 // Run SelectionDAG instruction selection on the remainder of the block
825 // not handled by FastISel. If FastISel is not run, this is the entire
828 SelectBasicBlock(Begin, BI, HadTailCall);
831 FuncInfo->PHINodesToUpdate.clear();
838 SelectionDAGISel::FinishBasicBlock() {
840 DEBUG(dbgs() << "Total amount of phi nodes to update: "
841 << FuncInfo->PHINodesToUpdate.size() << "\n";
842 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
843 dbgs() << "Node " << i << " : ("
844 << FuncInfo->PHINodesToUpdate[i].first
845 << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
847 // Next, now that we know what the last MBB the LLVM BB expanded is, update
848 // PHI nodes in successors.
849 if (SDB->SwitchCases.empty() &&
850 SDB->JTCases.empty() &&
851 SDB->BitTestCases.empty()) {
852 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
853 MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
854 assert(PHI->isPHI() &&
855 "This is not a machine PHI node that we are updating!");
856 if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
859 MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
860 PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
865 for (unsigned i = 0, e = SDB->BitTestCases.size(); i != e; ++i) {
866 // Lower header first, if it wasn't already lowered
867 if (!SDB->BitTestCases[i].Emitted) {
868 // Set the current basic block to the mbb we wish to insert the code into
869 FuncInfo->MBB = SDB->BitTestCases[i].Parent;
870 FuncInfo->InsertPt = FuncInfo->MBB->end();
872 SDB->visitBitTestHeader(SDB->BitTestCases[i], FuncInfo->MBB);
873 CurDAG->setRoot(SDB->getRoot());
878 for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) {
879 // Set the current basic block to the mbb we wish to insert the code into
880 FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB;
881 FuncInfo->InsertPt = FuncInfo->MBB->end();
884 SDB->visitBitTestCase(SDB->BitTestCases[i].Cases[j+1].ThisBB,
885 SDB->BitTestCases[i].Reg,
886 SDB->BitTestCases[i].Cases[j],
889 SDB->visitBitTestCase(SDB->BitTestCases[i].Default,
890 SDB->BitTestCases[i].Reg,
891 SDB->BitTestCases[i].Cases[j],
895 CurDAG->setRoot(SDB->getRoot());
901 for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
903 MachineInstr *PHI = FuncInfo->PHINodesToUpdate[pi].first;
904 MachineBasicBlock *PHIBB = PHI->getParent();
905 assert(PHI->isPHI() &&
906 "This is not a machine PHI node that we are updating!");
907 // This is "default" BB. We have two jumps to it. From "header" BB and
908 // from last "case" BB.
909 if (PHIBB == SDB->BitTestCases[i].Default) {
910 PHI->addOperand(MachineOperand::
911 CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
913 PHI->addOperand(MachineOperand::CreateMBB(SDB->BitTestCases[i].Parent));
914 PHI->addOperand(MachineOperand::
915 CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
917 PHI->addOperand(MachineOperand::CreateMBB(SDB->BitTestCases[i].Cases.
920 // One of "cases" BB.
921 for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size();
923 MachineBasicBlock* cBB = SDB->BitTestCases[i].Cases[j].ThisBB;
924 if (cBB->isSuccessor(PHIBB)) {
925 PHI->addOperand(MachineOperand::
926 CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
928 PHI->addOperand(MachineOperand::CreateMBB(cBB));
933 SDB->BitTestCases.clear();
935 // If the JumpTable record is filled in, then we need to emit a jump table.
936 // Updating the PHI nodes is tricky in this case, since we need to determine
937 // whether the PHI is a successor of the range check MBB or the jump table MBB
938 for (unsigned i = 0, e = SDB->JTCases.size(); i != e; ++i) {
939 // Lower header first, if it wasn't already lowered
940 if (!SDB->JTCases[i].first.Emitted) {
941 // Set the current basic block to the mbb we wish to insert the code into
942 FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
943 FuncInfo->InsertPt = FuncInfo->MBB->end();
945 SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
947 CurDAG->setRoot(SDB->getRoot());
952 // Set the current basic block to the mbb we wish to insert the code into
953 FuncInfo->MBB = SDB->JTCases[i].second.MBB;
954 FuncInfo->InsertPt = FuncInfo->MBB->end();
956 SDB->visitJumpTable(SDB->JTCases[i].second);
957 CurDAG->setRoot(SDB->getRoot());
962 for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
964 MachineInstr *PHI = FuncInfo->PHINodesToUpdate[pi].first;
965 MachineBasicBlock *PHIBB = PHI->getParent();
966 assert(PHI->isPHI() &&
967 "This is not a machine PHI node that we are updating!");
968 // "default" BB. We can go there only from header BB.
969 if (PHIBB == SDB->JTCases[i].second.Default) {
971 (MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
974 (MachineOperand::CreateMBB(SDB->JTCases[i].first.HeaderBB));
976 // JT BB. Just iterate over successors here
977 if (FuncInfo->MBB->isSuccessor(PHIBB)) {
979 (MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
981 PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
985 SDB->JTCases.clear();
987 // If the switch block involved a branch to one of the actual successors, we
988 // need to update PHI nodes in that block.
989 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
990 MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
991 assert(PHI->isPHI() &&
992 "This is not a machine PHI node that we are updating!");
993 if (FuncInfo->MBB->isSuccessor(PHI->getParent())) {
995 MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
996 PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
1000 // If we generated any switch lowering information, build and codegen any
1001 // additional DAGs necessary.
1002 for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
1003 // Set the current basic block to the mbb we wish to insert the code into
1004 MachineBasicBlock *ThisBB = FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
1005 FuncInfo->InsertPt = FuncInfo->MBB->end();
1007 // Determine the unique successors.
1008 SmallVector<MachineBasicBlock *, 2> Succs;
1009 Succs.push_back(SDB->SwitchCases[i].TrueBB);
1010 if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB)
1011 Succs.push_back(SDB->SwitchCases[i].FalseBB);
1013 // Emit the code. Note that this could result in ThisBB being split, so
1014 // we need to check for updates.
1015 SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
1016 CurDAG->setRoot(SDB->getRoot());
1018 CodeGenAndEmitDAG();
1019 ThisBB = FuncInfo->MBB;
1021 // Handle any PHI nodes in successors of this chunk, as if we were coming
1022 // from the original BB before switch expansion. Note that PHI nodes can
1023 // occur multiple times in PHINodesToUpdate. We have to be very careful to
1024 // handle them the right number of times.
1025 for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
1026 FuncInfo->MBB = Succs[i];
1027 FuncInfo->InsertPt = FuncInfo->MBB->end();
1028 // FuncInfo->MBB may have been removed from the CFG if a branch was
1030 if (ThisBB->isSuccessor(FuncInfo->MBB)) {
1031 for (MachineBasicBlock::iterator Phi = FuncInfo->MBB->begin();
1032 Phi != FuncInfo->MBB->end() && Phi->isPHI();
1034 // This value for this PHI node is recorded in PHINodesToUpdate.
1035 for (unsigned pn = 0; ; ++pn) {
1036 assert(pn != FuncInfo->PHINodesToUpdate.size() &&
1037 "Didn't find PHI entry!");
1038 if (FuncInfo->PHINodesToUpdate[pn].first == Phi) {
1039 Phi->addOperand(MachineOperand::
1040 CreateReg(FuncInfo->PHINodesToUpdate[pn].second,
1042 Phi->addOperand(MachineOperand::CreateMBB(ThisBB));
1050 SDB->SwitchCases.clear();
1054 /// Create the scheduler. If a specific scheduler was specified
1055 /// via the SchedulerRegistry, use it, otherwise select the
1056 /// one preferred by the target.
1058 ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
1059 RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault();
// NOTE(review): lines 1060-1062 are elided in this listing; presumably they
// fall back to the target-preferred scheduler ctor when no default is
// registered, then cache it via setDefault below -- confirm in full source.
1063 RegisterScheduler::setDefault(Ctor);
// Instantiate the scheduler bound to this ISel pass and its opt level.
1066 return Ctor(this, OptLevel);
1069 ScheduleHazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
// Default factory: a base ScheduleHazardRecognizer that reports no hazards.
// Presumably targets with scheduling hazards override this -- confirm in the
// SelectionDAGISel header.
1070 return new ScheduleHazardRecognizer();
1073 //===----------------------------------------------------------------------===//
1074 // Helper functions used by the generated instruction selector.
1075 //===----------------------------------------------------------------------===//
1076 // Calls to these methods are generated by tblgen.
1078 /// CheckAndMask - The isel is trying to match something like (and X, 255). If
1079 /// the dag combiner simplified the 255, we still want to match. RHS is the
1080 /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
1081 /// specified in the .td file (e.g. 255).
1082 bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
1083 int64_t DesiredMaskS) const {
1084 const APInt &ActualMask = RHS->getAPIntValue();
1085 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
// NOTE(review): the taken-branch bodies (return statements) after each check
// below are elided in this listing; comments describe only the visible tests.
1087 // If the actual mask exactly matches, success!
1088 if (ActualMask == DesiredMask)
1091 // If the actual AND mask is allowing unallowed bits, this doesn't match.
1092 if (ActualMask.intersects(~DesiredMask))
1095 // Otherwise, the DAG Combiner may have proven that the value coming in is
1096 // either already zero or is not demanded. Check for known zero input bits.
// NeededMask = bits the .td pattern wants cleared that this AND does not
// clear; the match can still succeed if those input bits are known zero.
1097 APInt NeededMask = DesiredMask & ~ActualMask;
1098 if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
1101 // TODO: check to see if missing bits are just not demanded.
1103 // Otherwise, this pattern doesn't match.
1107 /// CheckOrMask - The isel is trying to match something like (or X, 255). If
1108 /// the dag combiner simplified the 255, we still want to match. RHS is the
1109 /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
1110 /// specified in the .td file (e.g. 255).
1111 bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
1112 int64_t DesiredMaskS) const {
1113 const APInt &ActualMask = RHS->getAPIntValue();
1114 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
// NOTE(review): the taken-branch return statements below are elided in this
// listing; comments describe only the visible tests.
1116 // If the actual mask exactly matches, success!
1117 if (ActualMask == DesiredMask)
1120 // If the actual OR mask is allowing unallowed bits, this doesn't match.
1121 if (ActualMask.intersects(~DesiredMask))
1124 // Otherwise, the DAG Combiner may have proven that the value coming in is
1125 // either already zero or is not demanded. Check for known zero input bits.
// NeededMask = bits the pattern wants set that this OR does not set; the
// match can still succeed if those input bits are already known one.
1126 APInt NeededMask = DesiredMask & ~ActualMask;
1128 APInt KnownZero, KnownOne;
1129 CurDAG->ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne);
1131 // If all the missing bits in the or are already known to be set, match!
1132 if ((NeededMask & KnownOne) == NeededMask)
1135 // TODO: check to see if missing bits are just not demanded.
1137 // Otherwise, this pattern doesn't match.
1142 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
1143 /// by tblgen. Others should not call it.
// Rewrites the operand list of an INLINEASM node in place: fixed prefix
// operands are copied through, non-memory operand groups are copied verbatim,
// and each memory operand is handed to the target to select an address.
1144 void SelectionDAGISel::
1145 SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
1146 std::vector<SDValue> InOps;
1147 std::swap(InOps, Ops);
// Copy the fixed header operands that every INLINEASM node carries.
1149 Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
1150 Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
1151 Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
1152 Ops.push_back(InOps[InlineAsm::Op_IsAlignStack]); // 3
1154 unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
1155 if (InOps[e-1].getValueType() == MVT::Flag)
1156 --e; // Don't process a flag operand if it is here.
// NOTE(review): the loop header over [i, e) (original line ~1158) is elided
// in this listing; the body below iterates the remaining operand groups.
1159 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
1160 if (!InlineAsm::isMemKind(Flags)) {
1161 // Just skip over this operand, copying the operands verbatim.
1162 Ops.insert(Ops.end(), InOps.begin()+i,
1163 InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
1164 i += InlineAsm::getNumOperandRegisters(Flags) + 1;
1166 assert(InlineAsm::getNumOperandRegisters(Flags) == 1 &&
1167 "Memory operand with multiple values?");
1168 // Otherwise, this is a memory operand. Ask the target to select it.
1169 std::vector<SDValue> SelOps;
1170 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps))
1171 report_fatal_error("Could not match memory address. Inline asm"
1174 // Add this to the output node.
1176 InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
1177 Ops.push_back(CurDAG->getTargetConstant(NewFlags, MVT::i32));
1178 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
1183 // Add the flag input back if present.
1184 if (e != InOps.size())
1185 Ops.push_back(InOps.back());
1188 /// findFlagUse - Return use of EVT::Flag value produced by the specified
1191 static SDNode *findFlagUse(SDNode *N) {
// The flag result, when present, is always the last value a node produces.
1192 unsigned FlagResNo = N->getNumValues()-1;
1193 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
1194 SDUse &Use = I.getUse();
1195 if (Use.getResNo() == FlagResNo)
1196 return Use.getUser();
// (Elided in this listing: loop close and the no-user-found return,
// presumably NULL -- confirm against the full source.)
1201 /// findNonImmUse - Return true if "Use" is a non-immediate use of "Def".
1202 /// This function recursively traverses up the operand chain, ignoring
// Visited memoizes nodes already scanned without failure so the recursion
// terminates and does not rescan shared subgraphs.
1204 static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
1205 SDNode *Root, SmallPtrSet<SDNode*, 16> &Visited,
1206 bool IgnoreChains) {
1207 // The NodeID's are given uniques ID's where a node ID is guaranteed to be
1208 // greater than all of its (recursive) operands. If we scan to a point where
1209 // 'use' is smaller than the node we're scanning for, then we know we will
1212 // The Use may be -1 (unassigned) if it is a newly allocated node. This can
1213 // happen because we scan down to newly selected nodes in the case of flag
1215 if ((Use->getNodeId() < Def->getNodeId() && Use->getNodeId() != -1))
1218 // Don't revisit nodes if we already scanned it and didn't fail, we know we
1219 // won't fail if we scan it again.
1220 if (!Visited.insert(Use))
1223 for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
1224 // Ignore chain uses, they are validated by HandleMergeInputChains.
1225 if (Use->getOperand(i).getValueType() == MVT::Other && IgnoreChains)
1228 SDNode *N = Use->getOperand(i).getNode();
// NOTE(review): the "N == Def" hit test (original line ~1229) is elided in
// this listing; on a hit, Use being ImmedUse/Root is the allowed immediate
// use, anything else is the non-immediate use we are looking for.
1230 if (Use == ImmedUse || Use == Root)
1231 continue; // We are not looking for immediate use.
1236 // Traverse up the operand chain.
1237 if (findNonImmUse(N, Def, ImmedUse, Root, Visited, IgnoreChains))
1243 /// IsProfitableToFold - Returns true if it's profitable to fold the specific
1244 /// operand node N of U during instruction selection that starts at Root.
1245 bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
1246 SDNode *Root) const {
// At -O0 folding is disabled outright; otherwise fold only single-use
// values, since folding a multi-use value would duplicate its computation
// into every user.
1247 if (OptLevel == CodeGenOpt::None) return false;
1248 return N.hasOneUse();
1251 /// IsLegalToFold - Returns true if the specific operand node N of
1252 /// U can be folded during instruction selection that starts at Root.
1253 bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
1254 CodeGenOpt::Level OptLevel,
1255 bool IgnoreChains) {
1256 if (OptLevel == CodeGenOpt::None) return false;
1258 // If Root use can somehow reach N through a path that that doesn't contain
1259 // U then folding N would create a cycle. e.g. In the following
1260 // diagram, Root can reach N through X. If N is folded into into Root, then
1261 // X is both a predecessor and a successor of U.
1272 // * indicates nodes to be folded together.
1274 // If Root produces a flag, then it gets (even more) interesting. Since it
1275 // will be "glued" together with its flag use in the scheduler, we need to
1276 // check if it might reach N.
1295 // If FU (flag use) indirectly reaches N (the load), and Root folds N
1296 // (call it Fold), then X is a predecessor of FU and a successor of
1297 // Fold. But since Fold and FU are flagged together, this will create
1298 // a cycle in the scheduling graph.
1300 // If the node has flags, walk down the graph to the "lowest" node in the
1302 EVT VT = Root->getValueType(Root->getNumValues()-1);
1303 while (VT == MVT::Flag) {
1304 SDNode *FU = findFlagUse(Root);
// NOTE(review): lines 1305-1307 are elided in this listing; presumably they
// break out when no flag user exists and otherwise advance Root to FU --
// confirm against the full source.
1308 VT = Root->getValueType(Root->getNumValues()-1);
1310 // If our query node has a flag result with a use, we've walked up it. If
1311 // the user (which has already been selected) has a chain or indirectly uses
1312 // the chain, our WalkChainUsers predicate will not consider it. Because of
1313 // this, we cannot ignore chains in this predicate.
1314 IgnoreChains = false;
// Legal to fold iff no path reaches N from Root except through the
// immediate user U (i.e. no non-immediate use exists).
1318 SmallPtrSet<SDNode*, 16> Visited;
1319 return !findNonImmUse(Root, N.getNode(), U, Root, Visited, IgnoreChains);
// Select an INLINEASM node: let the target rewrite its memory operands, then
// rebuild the node with the fixed-up operand list. Result types are the
// chain plus an output flag.
1322 SDNode *SelectionDAGISel::Select_INLINEASM(SDNode *N) {
1323 std::vector<SDValue> Ops(N->op_begin(), N->op_end());
1324 SelectInlineAsmMemoryOperands(Ops);
1326 std::vector<EVT> VTs;
1327 VTs.push_back(MVT::Other);
1328 VTs.push_back(MVT::Flag);
1329 SDValue New = CurDAG->getNode(ISD::INLINEASM, N->getDebugLoc(),
1330 VTs, &Ops[0], Ops.size());
// (Elided in this listing: original line 1331, presumably marking New as
// selected via setNodeId(-1) -- confirm against the full source.)
1332 return New.getNode();
1335 SDNode *SelectionDAGISel::Select_UNDEF(SDNode *N) {
// UNDEF selects trivially: morph it into a target IMPLICIT_DEF of the same
// value type.
1336 return CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF,N->getValueType(0));
1339 /// GetVBR - decode a vbr encoding whose top bit is set.
// Variable-bit-rate decoding: each byte contributes 7 payload bits; a set
// high bit means another byte follows. Advances Idx past the encoding.
1340 ALWAYS_INLINE static uint64_t
1341 GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
1342 assert(Val >= 128 && "Not a VBR");
1343 Val &= 127; // Remove first vbr bit.
// (Elided in this listing: declaration/initialization of Shift and NextBits
// and the do-loop open -- confirm against the full source.)
1348 NextBits = MatcherTable[Idx++];
1349 Val |= (NextBits&127) << Shift;
1351 } while (NextBits & 128);
1357 /// UpdateChainsAndFlags - When a match is complete, this method updates uses of
1358 /// interior flag and chain results to use the new flag and chain results.
1359 void SelectionDAGISel::
1360 UpdateChainsAndFlags(SDNode *NodeToMatch, SDValue InputChain,
1361 const SmallVectorImpl<SDNode*> &ChainNodesMatched,
// NOTE(review): the parameter line declaring InputFlag (original line 1362)
// is elided in this listing; InputFlag is used below.
1363 const SmallVectorImpl<SDNode*> &FlagResultNodesMatched,
1364 bool isMorphNodeTo) {
1365 SmallVector<SDNode*, 4> NowDeadNodes;
// ISU keeps the ISel's topological position iterator valid across RAUW.
1367 ISelUpdater ISU(ISelPosition);
1369 // Now that all the normal results are replaced, we replace the chain and
1370 // flag results if present.
1371 if (!ChainNodesMatched.empty()) {
1372 assert(InputChain.getNode() != 0 &&
1373 "Matched input chains but didn't produce a chain");
1374 // Loop over all of the nodes we matched that produced a chain result.
1375 // Replace all the chain results with the final chain we ended up with.
1376 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
1377 SDNode *ChainNode = ChainNodesMatched[i];
1379 // If this node was already deleted, don't look at it.
1380 if (ChainNode->getOpcode() == ISD::DELETED_NODE)
1383 // Don't replace the results of the root node if we're doing a
1385 if (ChainNode == NodeToMatch && isMorphNodeTo)
// The chain result is the last value, or second-to-last if the node also
// produces a flag.
1388 SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
1389 if (ChainVal.getValueType() == MVT::Flag)
1390 ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
1391 assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
1392 CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain, &ISU);
1394 // If the node became dead and we haven't already seen it, delete it.
1395 if (ChainNode->use_empty() &&
1396 !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
1397 NowDeadNodes.push_back(ChainNode);
1401 // If the result produces a flag, update any flag results in the matched
1402 // pattern with the flag result.
1403 if (InputFlag.getNode() != 0) {
1404 // Handle any interior nodes explicitly marked.
1405 for (unsigned i = 0, e = FlagResultNodesMatched.size(); i != e; ++i) {
1406 SDNode *FRN = FlagResultNodesMatched[i];
1408 // If this node was already deleted, don't look at it.
1409 if (FRN->getOpcode() == ISD::DELETED_NODE)
1412 assert(FRN->getValueType(FRN->getNumValues()-1) == MVT::Flag &&
1413 "Doesn't have a flag result");
1414 CurDAG->ReplaceAllUsesOfValueWith(SDValue(FRN, FRN->getNumValues()-1),
1417 // If the node became dead and we haven't already seen it, delete it.
1418 if (FRN->use_empty() &&
1419 !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), FRN))
1420 NowDeadNodes.push_back(FRN);
1424 if (!NowDeadNodes.empty())
1425 CurDAG->RemoveDeadNodes(NowDeadNodes, &ISU);
1427 DEBUG(errs() << "ISEL: Match complete!\n");
1433 CR_LeadsToInteriorNode
1436 /// WalkChainUsers - Walk down the users of the specified chained node that is
1437 /// part of the pattern we're matching, looking at all of the users we find.
1438 /// This determines whether something is an interior node, whether we have a
1439 /// non-pattern node in between two pattern nodes (which prevent folding because
1440 /// it would induce a cycle) and whether we have a TokenFactor node sandwiched
1441 /// between pattern nodes (in which case the TF becomes part of the pattern).
1443 /// The walk we do here is guaranteed to be small because we quickly get down to
1444 /// already selected nodes "below" us.
// NOTE(review): the return-type line (presumably "static ChainResult",
// original line 1445) is elided in this listing.
1446 WalkChainUsers(SDNode *ChainedNode,
1447 SmallVectorImpl<SDNode*> &ChainedNodesInPattern,
1448 SmallVectorImpl<SDNode*> &InteriorChainedNodes) {
1449 ChainResult Result = CR_Simple;
1451 for (SDNode::use_iterator UI = ChainedNode->use_begin(),
1452 E = ChainedNode->use_end(); UI != E; ++UI) {
1453 // Make sure the use is of the chain, not some other value we produce.
1454 if (UI.getUse().getValueType() != MVT::Other) continue;
// NOTE(review): the declaration of User (presumably "SDNode *User = *UI;",
// original line ~1456) is elided in this listing.
1458 // If we see an already-selected machine node, then we've gone beyond the
1459 // pattern that we're selecting down into the already selected chunk of the
1461 if (User->isMachineOpcode() ||
1462 User->getOpcode() == ISD::HANDLENODE) // Root of the graph.
1465 if (User->getOpcode() == ISD::CopyToReg ||
1466 User->getOpcode() == ISD::CopyFromReg ||
1467 User->getOpcode() == ISD::INLINEASM ||
1468 User->getOpcode() == ISD::EH_LABEL) {
1469 // If their node ID got reset to -1 then they've already been selected.
1470 // Treat them like a MachineOpcode.
1471 if (User->getNodeId() == -1)
1475 // If we have a TokenFactor, we handle it specially.
1476 if (User->getOpcode() != ISD::TokenFactor) {
1477 // If the node isn't a token factor and isn't part of our pattern, then it
1478 // must be a random chained node in between two nodes we're selecting.
1479 // This happens when we have something like:
1484 // Because we structurally match the load/store as a read/modify/write,
1485 // but the call is chained between them. We cannot fold in this case
1486 // because it would induce a cycle in the graph.
1487 if (!std::count(ChainedNodesInPattern.begin(),
1488 ChainedNodesInPattern.end(), User))
1489 return CR_InducesCycle;
1491 // Otherwise we found a node that is part of our pattern. For example in:
1495 // This would happen when we're scanning down from the load and see the
1496 // store as a user. Record that there is a use of ChainedNode that is
1497 // part of the pattern and keep scanning uses.
1498 Result = CR_LeadsToInteriorNode;
1499 InteriorChainedNodes.push_back(User);
1503 // If we found a TokenFactor, there are two cases to consider: first if the
1504 // TokenFactor is just hanging "below" the pattern we're matching (i.e. no
1505 // uses of the TF are in our pattern) we just want to ignore it. Second,
1506 // the TokenFactor can be sandwiched in between two chained nodes, like so:
1512 // | \ DAG's like cheese
1515 // [TokenFactor] [Op]
1522 // In this case, the TokenFactor becomes part of our match and we rewrite it
1523 // as a new TokenFactor.
1525 // To distinguish these two cases, do a recursive walk down the uses.
1526 switch (WalkChainUsers(User, ChainedNodesInPattern, InteriorChainedNodes)) {
1528 // If the uses of the TokenFactor are just already-selected nodes, ignore
1529 // it, it is "below" our pattern.
1531 case CR_InducesCycle:
1532 // If the uses of the TokenFactor lead to nodes that are not part of our
1533 // pattern that are not selected, folding would turn this into a cycle,
1535 return CR_InducesCycle;
1536 case CR_LeadsToInteriorNode:
1537 break; // Otherwise, keep processing.
1540 // Okay, we know we're in the interesting interior case. The TokenFactor
1541 // is now going to be considered part of the pattern so that we rewrite its
1542 // uses (it may have uses that are not part of the pattern) with the
1543 // ultimate chain result of the generated code. We will also add its chain
1544 // inputs as inputs to the ultimate TokenFactor we create.
1545 Result = CR_LeadsToInteriorNode;
1546 ChainedNodesInPattern.push_back(User);
1547 InteriorChainedNodes.push_back(User);
1554 /// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
1555 /// operation for when the pattern matched at least one node with a chains. The
1556 /// input vector contains a list of all of the chained nodes that we match. We
1557 /// must determine if this is a valid thing to cover (i.e. matching it won't
1558 /// induce cycles in the DAG) and if so, creating a TokenFactor node. that will
1559 /// be used as the input node chain for the generated nodes.
// NOTE(review): the return-type line (presumably "static SDValue", original
// line 1560) is elided in this listing. Returns a null SDValue on failure.
1561 HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
1562 SelectionDAG *CurDAG) {
1563 // Walk all of the chained nodes we've matched, recursively scanning down the
1564 // users of the chain result. This adds any TokenFactor nodes that are caught
1565 // in between chained nodes to the chained and interior nodes list.
1566 SmallVector<SDNode*, 3> InteriorChainedNodes;
1567 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
1568 if (WalkChainUsers(ChainNodesMatched[i], ChainNodesMatched,
1569 InteriorChainedNodes) == CR_InducesCycle)
1570 return SDValue(); // Would induce a cycle.
1573 // Okay, we have walked all the matched nodes and collected TokenFactor nodes
1574 // that we are interested in. Form our input TokenFactor node.
1575 SmallVector<SDValue, 3> InputChains;
1576 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
1577 // Add the input chain of this node to the InputChains list (which will be
1578 // the operands of the generated TokenFactor) if it's not an interior node.
1579 SDNode *N = ChainNodesMatched[i];
1580 if (N->getOpcode() != ISD::TokenFactor) {
// Interior nodes are fed by other matched nodes, so their incoming chain is
// already covered and must not become a TokenFactor operand.
1581 if (std::count(InteriorChainedNodes.begin(),InteriorChainedNodes.end(),N))
1584 // Otherwise, add the input chain.
1585 SDValue InChain = ChainNodesMatched[i]->getOperand(0);
1586 assert(InChain.getValueType() == MVT::Other && "Not a chain");
1587 InputChains.push_back(InChain);
1591 // If we have a token factor, we want to add all inputs of the token factor
1592 // that are not part of the pattern we're matching.
1593 for (unsigned op = 0, e = N->getNumOperands(); op != e; ++op) {
1594 if (!std::count(ChainNodesMatched.begin(), ChainNodesMatched.end(),
1595 N->getOperand(op).getNode()))
1596 InputChains.push_back(N->getOperand(op));
// A single input chain needs no TokenFactor; otherwise merge them all.
1601 if (InputChains.size() == 1)
1602 return InputChains[0];
1603 return CurDAG->getNode(ISD::TokenFactor, ChainNodesMatched[0]->getDebugLoc(),
1604 MVT::Other, &InputChains[0], InputChains.size());
1607 /// MorphNode - Handle morphing a node in place for the selector.
// Morphs Node into target opcode TargetOpc with the given result types and
// operands, then re-wires any flag/chain results whose index moved.
1608 SDNode *SelectionDAGISel::
1609 MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
1610 const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo) {
1611 // It is possible we're using MorphNodeTo to replace a node with no
1612 // normal results with one that has a normal result (or we could be
1613 // adding a chain) and the input could have flags and chains as well.
1614 // In this case we need to shift the operands down.
1615 // FIXME: This is a horrible hack and broken in obscure cases, no worse
1616 // than the old isel though.
1617 int OldFlagResultNo = -1, OldChainResultNo = -1;
// Record where the old node's flag and chain results live: flag is last,
// chain is last or second-to-last depending on whether a flag follows it.
1619 unsigned NTMNumResults = Node->getNumValues();
1620 if (Node->getValueType(NTMNumResults-1) == MVT::Flag) {
1621 OldFlagResultNo = NTMNumResults-1;
1622 if (NTMNumResults != 1 &&
1623 Node->getValueType(NTMNumResults-2) == MVT::Other)
1624 OldChainResultNo = NTMNumResults-2;
1625 } else if (Node->getValueType(NTMNumResults-1) == MVT::Other)
1626 OldChainResultNo = NTMNumResults-1;
1628 // Call the underlying SelectionDAG routine to do the transmogrification. Note
1629 // that this deletes operands of the old node that become dead.
1630 SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops, NumOps);
1632 // MorphNodeTo can operate in two ways: if an existing node with the
1633 // specified operands exists, it can just return it. Otherwise, it
1634 // updates the node in place to have the requested operands.
1636 // If we updated the node in place, reset the node ID. To the isel,
1637 // this should be just like a newly allocated machine node.
// NOTE(review): the in-place branch body (original lines ~1638-1640,
// presumably Res->setNodeId(-1) when Res == Node) is elided in this listing.
1641 unsigned ResNumResults = Res->getNumValues();
1642 // Move the flag if needed.
1643 if ((EmitNodeInfo & OPFL_FlagOutput) && OldFlagResultNo != -1 &&
1644 (unsigned)OldFlagResultNo != ResNumResults-1)
1645 CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldFlagResultNo),
1646 SDValue(Res, ResNumResults-1));
1648 if ((EmitNodeInfo & OPFL_FlagOutput) != 0)
1651 // Move the chain reference if needed.
1652 if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
1653 (unsigned)OldChainResultNo != ResNumResults-1)
1654 CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldChainResultNo),
1655 SDValue(Res, ResNumResults-1));
1657 // Otherwise, no replacement happened because the node already exists. Replace
1658 // Uses of the old node with the new one.
1660 CurDAG->ReplaceAllUsesWith(Node, Res);
1665 /// CheckSame - Implements OP_CheckSame.
1666 ALWAYS_INLINE static bool
1667 CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1668 SDValue N, const SmallVectorImpl<SDValue> &RecordedNodes) {
1669 // Accept if it is exactly the same as a previously recorded node.
1670 unsigned RecNo = MatcherTable[MatcherIndex++];
1671 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
1672 return N == RecordedNodes[RecNo];
1675 /// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
// Consumes one table byte (the predicate number) and defers to the
// tblgen-generated predicate on the ISel instance.
1676 ALWAYS_INLINE static bool
1677 CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1678 SelectionDAGISel &SDISel) {
1679 return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
1682 /// CheckNodePredicate - Implements OP_CheckNodePredicate.
// Consumes one table byte (the predicate number) and evaluates it on N.
1683 ALWAYS_INLINE static bool
1684 CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1685 SelectionDAGISel &SDISel, SDNode *N) {
1686 return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
// CheckOpcode - Implements OP_CheckOpcode: read a little-endian 16-bit
// opcode from the table and compare against the node's opcode.
// NOTE(review): the parameter line (presumably "SDNode *N) {", original
// line 1691) is elided in this listing.
1689 ALWAYS_INLINE static bool
1690 CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1692 uint16_t Opc = MatcherTable[MatcherIndex++];
1693 Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
1694 return N->getOpcode() == Opc;
// CheckType - Implements OP_CheckType: compare the table's value type
// against N's type, treating iPTR as the target pointer type.
1697 ALWAYS_INLINE static bool
1698 CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1699 SDValue N, const TargetLowering &TLI) {
1700 MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
1701 if (N.getValueType() == VT) return true;
1703 // Handle the case when VT is iPTR.
1704 return VT == MVT::iPTR && N.getValueType() == TLI.getPointerTy();
// CheckChildType - Implements OP_CheckChildNType: type-check operand
// #ChildNo of N.
// NOTE(review): the ChildNo parameter line (original line 1710) is elided in
// this listing.
1707 ALWAYS_INLINE static bool
1708 CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1709 SDValue N, const TargetLowering &TLI,
1711 if (ChildNo >= N.getNumOperands())
1712 return false; // Match fails if out of range child #.
1713 return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI);
// CheckCondCode - Implements OP_CheckCondCode: compare N's condition code
// against the one encoded in the table.
// NOTE(review): the parameter line (presumably "SDValue N) {", original
// line 1719) is elided in this listing.
1717 ALWAYS_INLINE static bool
1718 CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1720 return cast<CondCodeSDNode>(N)->get() ==
1721 (ISD::CondCode)MatcherTable[MatcherIndex++];
// CheckValueType - Implements OP_CheckValueType: compare a VTSDNode's VT
// against the table's value type, treating iPTR as the pointer type.
1724 ALWAYS_INLINE static bool
1725 CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1726 SDValue N, const TargetLowering &TLI) {
1727 MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
1728 if (cast<VTSDNode>(N)->getVT() == VT)
1731 // Handle the case when VT is iPTR.
1732 return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI.getPointerTy();
// CheckInteger - Implements OP_CheckInteger: decode a (possibly VBR-encoded)
// signed immediate from the table and compare against a ConstantSDNode.
// NOTE(review): the parameter line ("SDValue N) {", original line ~1737) and
// the VBR guard ("if (Val & 128)", line ~1739) are elided in this listing.
1735 ALWAYS_INLINE static bool
1736 CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1738 int64_t Val = MatcherTable[MatcherIndex++];
1740 Val = GetVBR(Val, MatcherTable, MatcherIndex);
1742 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
1743 return C != 0 && C->getSExtValue() == Val;
// CheckAndImm - Implements OP_CheckAndImm: match (and X, imm) even when the
// dag combiner has simplified the immediate (see CheckAndMask).
1746 ALWAYS_INLINE static bool
1747 CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1748 SDValue N, SelectionDAGISel &SDISel) {
1749 int64_t Val = MatcherTable[MatcherIndex++];
// (Elided here: the "if (Val & 128)" VBR guard, original line ~1750.)
1751 Val = GetVBR(Val, MatcherTable, MatcherIndex);
1753 if (N->getOpcode() != ISD::AND) return false;
1755 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
1756 return C != 0 && SDISel.CheckAndMask(N.getOperand(0), C, Val);
// CheckOrImm - Implements OP_CheckOrImm: match (or X, imm) even when the
// dag combiner has simplified the immediate (see CheckOrMask).
1759 ALWAYS_INLINE static bool
1760 CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
1761 SDValue N, SelectionDAGISel &SDISel) {
1762 int64_t Val = MatcherTable[MatcherIndex++];
// (Elided here: the "if (Val & 128)" VBR guard, original line ~1763.)
1764 Val = GetVBR(Val, MatcherTable, MatcherIndex);
1766 if (N->getOpcode() != ISD::OR) return false;
1768 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
1769 return C != 0 && SDISel.CheckOrMask(N.getOperand(0), C, Val);
1772 /// IsPredicateKnownToFail - If we know how and can do so without pushing a
1773 /// scope, evaluate the current node. If the current predicate is known to
1774 /// fail, set Result=true and return anything. If the current predicate is
1775 /// known to pass, set Result=false and return the MatcherIndex to continue
1776 /// with. If the current predicate is unknown, set Result=false and return the
1777 /// MatcherIndex to continue with.
1778 static unsigned IsPredicateKnownToFail(const unsigned char *Table,
1779 unsigned Index, SDValue N,
1780 bool &Result, SelectionDAGISel &SDISel,
1781 SmallVectorImpl<SDValue> &RecordedNodes){
1782 switch (Table[Index++]) {
// NOTE(review): the default: label (original lines ~1783-1784) and the
// per-case "return Index;" lines after each Result assignment are elided in
// this listing.
1785 return Index-1; // Could not evaluate this predicate.
1786 case SelectionDAGISel::OPC_CheckSame:
1787 Result = !::CheckSame(Table, Index, N, RecordedNodes);
1789 case SelectionDAGISel::OPC_CheckPatternPredicate:
1790 Result = !::CheckPatternPredicate(Table, Index, SDISel);
1792 case SelectionDAGISel::OPC_CheckPredicate:
1793 Result = !::CheckNodePredicate(Table, Index, SDISel, N.getNode());
1795 case SelectionDAGISel::OPC_CheckOpcode:
1796 Result = !::CheckOpcode(Table, Index, N.getNode());
1798 case SelectionDAGISel::OPC_CheckType:
1799 Result = !::CheckType(Table, Index, N, SDISel.TLI);
1801 case SelectionDAGISel::OPC_CheckChild0Type:
1802 case SelectionDAGISel::OPC_CheckChild1Type:
1803 case SelectionDAGISel::OPC_CheckChild2Type:
1804 case SelectionDAGISel::OPC_CheckChild3Type:
1805 case SelectionDAGISel::OPC_CheckChild4Type:
1806 case SelectionDAGISel::OPC_CheckChild5Type:
1807 case SelectionDAGISel::OPC_CheckChild6Type:
1808 case SelectionDAGISel::OPC_CheckChild7Type:
// The child index is recovered from the opcode byte just consumed.
1809 Result = !::CheckChildType(Table, Index, N, SDISel.TLI,
1810 Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Type);
1812 case SelectionDAGISel::OPC_CheckCondCode:
1813 Result = !::CheckCondCode(Table, Index, N);
1815 case SelectionDAGISel::OPC_CheckValueType:
1816 Result = !::CheckValueType(Table, Index, N, SDISel.TLI);
1818 case SelectionDAGISel::OPC_CheckInteger:
1819 Result = !::CheckInteger(Table, Index, N);
1821 case SelectionDAGISel::OPC_CheckAndImm:
1822 Result = !::CheckAndImm(Table, Index, N, SDISel);
1824 case SelectionDAGISel::OPC_CheckOrImm:
1825 Result = !::CheckOrImm(Table, Index, N, SDISel);
1833 /// FailIndex - If this match fails, this is the index to continue with.
1836 /// NodeStack - The node stack when the scope was formed.
1837 SmallVector<SDValue, 4> NodeStack;
1839 /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
1840 unsigned NumRecordedNodes;
1842 /// NumMatchedMemRefs - The number of matched memref entries.
1843 unsigned NumMatchedMemRefs;
1845 /// InputChain/InputFlag - The current chain/flag
1846 SDValue InputChain, InputFlag;
1848 /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
1849 bool HasChainNodesMatched, HasFlagResultNodesMatched;
// SelectCodeCommon - Table-driven instruction selector: interprets the
// target-generated MatcherTable bytecode against the DAG rooted at
// NodeToMatch, recording operands, checking predicates/types, and finally
// emitting or morphing machine nodes for a successful match.  Special
// opcodes (EntryToken, INLINEASM, UNDEF, ...) are handled up front before
// the interpreter runs.
//
// NOTE(review): this excerpt is a sampled subset of the original function.
// The leading number on every line is the original file's line number, and
// many intermediate lines (closing braces, 'continue's, some case labels
// and declarations) fall in the sampling gaps.  All visible code lines are
// kept byte-identical; only review comments have been added.
1854 SDNode *SelectionDAGISel::
1855 SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
1856 unsigned TableSize) {
1857 // FIXME: Should these even be selected? Handle these cases in the caller?
1858 switch (NodeToMatch->getOpcode()) {
1861 case ISD::EntryToken: // These nodes remain the same.
1862 case ISD::BasicBlock:
1864 //case ISD::VALUETYPE:
1865 //case ISD::CONDCODE:
1866 case ISD::HANDLENODE:
1867 case ISD::MDNODE_SDNODE:
1868 case ISD::TargetConstant:
1869 case ISD::TargetConstantFP:
1870 case ISD::TargetConstantPool:
1871 case ISD::TargetFrameIndex:
1872 case ISD::TargetExternalSymbol:
1873 case ISD::TargetBlockAddress:
1874 case ISD::TargetJumpTable:
1875 case ISD::TargetGlobalTLSAddress:
1876 case ISD::TargetGlobalAddress:
1877 case ISD::TokenFactor:
1878 case ISD::CopyFromReg:
1879 case ISD::CopyToReg:
// These opcodes are already legal/target form; mark them selected as-is.
1881 NodeToMatch->setNodeId(-1); // Mark selected.
1883 case ISD::AssertSext:
1884 case ISD::AssertZext:
// Asserts are dropped by forwarding all uses to their input operand.
1885 CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, 0),
1886 NodeToMatch->getOperand(0));
1888 case ISD::INLINEASM: return Select_INLINEASM(NodeToMatch);
1889 case ISD::UNDEF: return Select_UNDEF(NodeToMatch);
1892 assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
1894 // Set up the node stack with NodeToMatch as the only node on the stack.
1895 SmallVector<SDValue, 8> NodeStack;
1896 SDValue N = SDValue(NodeToMatch, 0);
1897 NodeStack.push_back(N);
1899 // MatchScopes - Scopes used when matching, if a match failure happens, this
1900 // indicates where to continue checking.
1901 SmallVector<MatchScope, 8> MatchScopes;
1903 // RecordedNodes - This is the set of nodes that have been recorded by the
1905 SmallVector<SDValue, 8> RecordedNodes;
1907 // MatchedMemRefs - This is the set of MemRef's we've seen in the input
1909 SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
1911 // These are the current input chain and flag for use when generating nodes.
1912 // Various Emit operations change these. For example, emitting a copytoreg
1913 // uses and updates these.
1914 SDValue InputChain, InputFlag;
1916 // ChainNodesMatched - If a pattern matches nodes that have input/output
1917 // chains, the OPC_EmitMergeInputChains operation is emitted which indicates
1918 // which ones they are. The result is captured into this list so that we can
1919 // update the chain results when the pattern is complete.
1920 SmallVector<SDNode*, 3> ChainNodesMatched;
1921 SmallVector<SDNode*, 3> FlagResultNodesMatched;
1923 DEBUG(errs() << "ISEL: Starting pattern match on root node: ";
1924 NodeToMatch->dump(CurDAG);
1927 // Determine where to start the interpreter. Normally we start at opcode #0,
1928 // but if the state machine starts with an OPC_SwitchOpcode, then we
1929 // accelerate the first lookup (which is guaranteed to be hot) with the
1930 // OpcodeOffset table.
1931 unsigned MatcherIndex = 0;
1933 if (!OpcodeOffset.empty()) {
1934 // Already computed the OpcodeOffset table, just index into it.
1935 if (N.getOpcode() < OpcodeOffset.size())
1936 MatcherIndex = OpcodeOffset[N.getOpcode()];
1937 DEBUG(errs() << " Initial Opcode index to " << MatcherIndex << "\n");
1939 } else if (MatcherTable[0] == OPC_SwitchOpcode) {
1940 // Otherwise, the table isn't computed, but the state machine does start
1941 // with an OPC_SwitchOpcode instruction. Populate the table now, since this
1942 // is the first time we're selecting an instruction.
// NOTE(review): the loop header walking the switch cases ('while (1) {'
// over Idx) sits in a sampling gap here.
1945 // Get the size of this case.
1946 unsigned CaseSize = MatcherTable[Idx++];
1948 CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
1949 if (CaseSize == 0) break;
1951 // Get the opcode, add the index to the table.
1952 uint16_t Opc = MatcherTable[Idx++];
1953 Opc |= (unsigned short)MatcherTable[Idx++] << 8;
1954 if (Opc >= OpcodeOffset.size())
// Grow with slack so repeated resizes are amortized.
1955 OpcodeOffset.resize((Opc+1)*2);
1956 OpcodeOffset[Opc] = Idx;
1960 // Okay, do the lookup for the first opcode.
1961 if (N.getOpcode() < OpcodeOffset.size())
1962 MatcherIndex = OpcodeOffset[N.getOpcode()];
// NOTE(review): the main interpreter loop header ('while (1) {') is in a
// sampling gap; from here each iteration fetches one matcher opcode.
1966 assert(MatcherIndex < TableSize && "Invalid index");
1968 unsigned CurrentOpcodeIndex = MatcherIndex;
1970 BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++];
// NOTE(review): the 'case OPC_Scope:' label is in a sampling gap; the
// comments below describe its fast-fail optimization.
1973 // Okay, the semantics of this operation are that we should push a scope
1974 // then evaluate the first child. However, pushing a scope only to have
1975 // the first check fail (which then pops it) is inefficient. If we can
1976 // determine immediately that the first check (or first several) will
1977 // immediately fail, don't even bother pushing a scope for them.
1981 unsigned NumToSkip = MatcherTable[MatcherIndex++];
1982 if (NumToSkip & 128)
1983 NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
1984 // Found the end of the scope with no match.
1985 if (NumToSkip == 0) {
1990 FailIndex = MatcherIndex+NumToSkip;
1992 unsigned MatcherIndexOfPredicate = MatcherIndex;
1993 (void)MatcherIndexOfPredicate; // silence warning.
1995 // If we can't evaluate this predicate without pushing a scope (e.g. if
1996 // it is a 'MoveParent') or if the predicate succeeds on this node, we
1997 // push the scope and evaluate the full predicate chain.
1999 MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
2000 Result, *this, RecordedNodes);
2004 DEBUG(errs() << " Skipped scope entry (due to false predicate) at "
2005 << "index " << MatcherIndexOfPredicate
2006 << ", continuing at " << FailIndex << "\n");
2007 ++NumDAGIselRetries;
2009 // Otherwise, we know that this case of the Scope is guaranteed to fail,
2010 // move to the next case.
2011 MatcherIndex = FailIndex;
2014 // If the whole scope failed to match, bail.
2015 if (FailIndex == 0) break;
2017 // Push a MatchScope which indicates where to go if the first child fails
2019 MatchScope NewEntry;
2020 NewEntry.FailIndex = FailIndex;
2021 NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
2022 NewEntry.NumRecordedNodes = RecordedNodes.size();
2023 NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
2024 NewEntry.InputChain = InputChain;
2025 NewEntry.InputFlag = InputFlag;
2026 NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
2027 NewEntry.HasFlagResultNodesMatched = !FlagResultNodesMatched.empty();
2028 MatchScopes.push_back(NewEntry);
2031 case OPC_RecordNode:
2032 // Remember this node, it may end up being an operand in the pattern.
2033 RecordedNodes.push_back(N);
2036 case OPC_RecordChild0: case OPC_RecordChild1:
2037 case OPC_RecordChild2: case OPC_RecordChild3:
2038 case OPC_RecordChild4: case OPC_RecordChild5:
2039 case OPC_RecordChild6: case OPC_RecordChild7: {
// The child number is encoded directly in the opcode value.
2040 unsigned ChildNo = Opcode-OPC_RecordChild0;
2041 if (ChildNo >= N.getNumOperands())
2042 break; // Match fails if out of range child #.
2044 RecordedNodes.push_back(N->getOperand(ChildNo));
2047 case OPC_RecordMemRef:
2048 MatchedMemRefs.push_back(cast<MemSDNode>(N)->getMemOperand());
2051 case OPC_CaptureFlagInput:
2052 // If the current node has an input flag, capture it in InputFlag.
2053 if (N->getNumOperands() != 0 &&
2054 N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag)
2055 InputFlag = N->getOperand(N->getNumOperands()-1);
2058 case OPC_MoveChild: {
2059 unsigned ChildNo = MatcherTable[MatcherIndex++];
2060 if (ChildNo >= N.getNumOperands())
2061 break; // Match fails if out of range child #.
2062 N = N.getOperand(ChildNo);
2063 NodeStack.push_back(N);
2067 case OPC_MoveParent:
2068 // Pop the current node off the NodeStack.
2069 NodeStack.pop_back();
2070 assert(!NodeStack.empty() && "Node stack imbalance!");
2071 N = NodeStack.back();
// NOTE(review): 'case OPC_CheckSame:' label is in a sampling gap above
// this check.
2075 if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
2077 case OPC_CheckPatternPredicate:
2078 if (!::CheckPatternPredicate(MatcherTable, MatcherIndex, *this)) break;
2080 case OPC_CheckPredicate:
2081 if (!::CheckNodePredicate(MatcherTable, MatcherIndex, *this,
2085 case OPC_CheckComplexPat: {
2086 unsigned CPNum = MatcherTable[MatcherIndex++];
2087 unsigned RecNo = MatcherTable[MatcherIndex++];
2088 assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
2089 if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo], CPNum,
2094 case OPC_CheckOpcode:
2095 if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
// NOTE(review): 'case OPC_CheckType:' label is in a sampling gap above.
2099 if (!::CheckType(MatcherTable, MatcherIndex, N, TLI)) break;
2102 case OPC_SwitchOpcode: {
2103 unsigned CurNodeOpcode = N.getOpcode();
2104 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
// NOTE(review): the case-scanning loop header and CaseSize declaration
// fall in a sampling gap here.
2107 // Get the size of this case.
2108 CaseSize = MatcherTable[MatcherIndex++];
2110 CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
2111 if (CaseSize == 0) break;
2113 uint16_t Opc = MatcherTable[MatcherIndex++];
2114 Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
2116 // If the opcode matches, then we will execute this case.
2117 if (CurNodeOpcode == Opc)
2120 // Otherwise, skip over this case.
2121 MatcherIndex += CaseSize;
2124 // If no cases matched, bail out.
2125 if (CaseSize == 0) break;
2127 // Otherwise, execute the case we found.
2128 DEBUG(errs() << " OpcodeSwitch from " << SwitchStart
2129 << " to " << MatcherIndex << "\n");
2133 case OPC_SwitchType: {
2134 MVT::SimpleValueType CurNodeVT = N.getValueType().getSimpleVT().SimpleTy;
2135 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
// NOTE(review): same shape as OPC_SwitchOpcode; the loop header and
// CaseSize declaration are in a sampling gap here too.
2138 // Get the size of this case.
2139 CaseSize = MatcherTable[MatcherIndex++];
2141 CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
2142 if (CaseSize == 0) break;
2144 MVT::SimpleValueType CaseVT =
2145 (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2146 if (CaseVT == MVT::iPTR)
2147 CaseVT = TLI.getPointerTy().SimpleTy;
2149 // If the VT matches, then we will execute this case.
2150 if (CurNodeVT == CaseVT)
2153 // Otherwise, skip over this case.
2154 MatcherIndex += CaseSize;
2157 // If no cases matched, bail out.
2158 if (CaseSize == 0) break;
2160 // Otherwise, execute the case we found.
2161 DEBUG(errs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
2162 << "] from " << SwitchStart << " to " << MatcherIndex<<'\n');
2165 case OPC_CheckChild0Type: case OPC_CheckChild1Type:
2166 case OPC_CheckChild2Type: case OPC_CheckChild3Type:
2167 case OPC_CheckChild4Type: case OPC_CheckChild5Type:
2168 case OPC_CheckChild6Type: case OPC_CheckChild7Type:
2169 if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
2170 Opcode-OPC_CheckChild0Type))
2173 case OPC_CheckCondCode:
2174 if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
2176 case OPC_CheckValueType:
2177 if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI)) break;
2179 case OPC_CheckInteger:
2180 if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
2182 case OPC_CheckAndImm:
2183 if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
2185 case OPC_CheckOrImm:
2186 if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
2189 case OPC_CheckFoldableChainNode: {
2190 assert(NodeStack.size() != 1 && "No parent node");
2191 // Verify that all intermediate nodes between the root and this one have
2193 bool HasMultipleUses = false;
2194 for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i)
2195 if (!NodeStack[i].hasOneUse()) {
2196 HasMultipleUses = true;
2199 if (HasMultipleUses) break;
2201 // Check to see that the target thinks this is profitable to fold and that
2202 // we can fold it without inducing cycles in the graph.
2203 if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
2205 !IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
2206 NodeToMatch, OptLevel,
2207 true/*We validate our own chains*/))
2212 case OPC_EmitInteger: {
2213 MVT::SimpleValueType VT =
2214 (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2215 int64_t Val = MatcherTable[MatcherIndex++];
2217 Val = GetVBR(Val, MatcherTable, MatcherIndex);
2218 RecordedNodes.push_back(CurDAG->getTargetConstant(Val, VT));
2221 case OPC_EmitRegister: {
2222 MVT::SimpleValueType VT =
2223 (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2224 unsigned RegNo = MatcherTable[MatcherIndex++];
2225 RecordedNodes.push_back(CurDAG->getRegister(RegNo, VT));
2229 case OPC_EmitConvertToTarget: {
2230 // Convert from IMM/FPIMM to target version.
2231 unsigned RecNo = MatcherTable[MatcherIndex++];
2232 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2233 SDValue Imm = RecordedNodes[RecNo];
2235 if (Imm->getOpcode() == ISD::Constant) {
2236 int64_t Val = cast<ConstantSDNode>(Imm)->getZExtValue();
2237 Imm = CurDAG->getTargetConstant(Val, Imm.getValueType());
2238 } else if (Imm->getOpcode() == ISD::ConstantFP) {
2239 const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
2240 Imm = CurDAG->getTargetConstantFP(*Val, Imm.getValueType());
2243 RecordedNodes.push_back(Imm);
2247 case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
2248 case OPC_EmitMergeInputChains1_1: { // OPC_EmitMergeInputChains, 1, 1
2249 // These are space-optimized forms of OPC_EmitMergeInputChains.
2250 assert(InputChain.getNode() == 0 &&
2251 "EmitMergeInputChains should be the first chain producing node");
2252 assert(ChainNodesMatched.empty() &&
2253 "Should only have one EmitMergeInputChains per match");
2255 // Read all of the chained nodes.
// The record number (0 or 1) is encoded directly in which opcode this is.
2256 unsigned RecNo = Opcode == OPC_EmitMergeInputChains1_1;
2257 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2258 ChainNodesMatched.push_back(RecordedNodes[RecNo].getNode());
2260 // FIXME: What if other value results of the node have uses not matched
2262 if (ChainNodesMatched.back() != NodeToMatch &&
2263 !RecordedNodes[RecNo].hasOneUse()) {
2264 ChainNodesMatched.clear();
2268 // Merge the input chains if they are not intra-pattern references.
2269 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
2271 if (InputChain.getNode() == 0)
2272 break; // Failed to merge.
2276 case OPC_EmitMergeInputChains: {
2277 assert(InputChain.getNode() == 0 &&
2278 "EmitMergeInputChains should be the first chain producing node");
2279 // This node gets a list of nodes we matched in the input that have
2280 // chains. We want to token factor all of the input chains to these nodes
2281 // together. However, if any of the input chains is actually one of the
2282 // nodes matched in this pattern, then we have an intra-match reference.
2283 // Ignore these because the newly token factored chain should not refer to
2285 unsigned NumChains = MatcherTable[MatcherIndex++];
2286 assert(NumChains != 0 && "Can't TF zero chains");
2288 assert(ChainNodesMatched.empty() &&
2289 "Should only have one EmitMergeInputChains per match");
2291 // Read all of the chained nodes.
2292 for (unsigned i = 0; i != NumChains; ++i) {
2293 unsigned RecNo = MatcherTable[MatcherIndex++];
2294 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2295 ChainNodesMatched.push_back(RecordedNodes[RecNo].getNode());
2297 // FIXME: What if other value results of the node have uses not matched
2299 if (ChainNodesMatched.back() != NodeToMatch &&
2300 !RecordedNodes[RecNo].hasOneUse()) {
2301 ChainNodesMatched.clear();
2306 // If the inner loop broke out, the match fails.
2307 if (ChainNodesMatched.empty())
2310 // Merge the input chains if they are not intra-pattern references.
2311 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
2313 if (InputChain.getNode() == 0)
2314 break; // Failed to merge.
2319 case OPC_EmitCopyToReg: {
2320 unsigned RecNo = MatcherTable[MatcherIndex++];
2321 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2322 unsigned DestPhysReg = MatcherTable[MatcherIndex++];
2324 if (InputChain.getNode() == 0)
2325 InputChain = CurDAG->getEntryNode();
2327 InputChain = CurDAG->getCopyToReg(InputChain, NodeToMatch->getDebugLoc(),
2328 DestPhysReg, RecordedNodes[RecNo],
// The copy produces a flag result; remember it for flag-input consumers.
2331 InputFlag = InputChain.getValue(1);
2335 case OPC_EmitNodeXForm: {
2336 unsigned XFormNo = MatcherTable[MatcherIndex++];
2337 unsigned RecNo = MatcherTable[MatcherIndex++];
2338 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2339 RecordedNodes.push_back(RunSDNodeXForm(RecordedNodes[RecNo], XFormNo));
// NOTE(review): the 'case OPC_EmitNode:' label shares the code below with
// OPC_MorphNodeTo (see the 'Opcode != OPC_MorphNodeTo' test further down);
// that label is in a sampling gap.
2344 case OPC_MorphNodeTo: {
2345 uint16_t TargetOpc = MatcherTable[MatcherIndex++];
2346 TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
2347 unsigned EmitNodeInfo = MatcherTable[MatcherIndex++];
2348 // Get the result VT list.
2349 unsigned NumVTs = MatcherTable[MatcherIndex++];
2350 SmallVector<EVT, 4> VTs;
2351 for (unsigned i = 0; i != NumVTs; ++i) {
2352 MVT::SimpleValueType VT =
2353 (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2354 if (VT == MVT::iPTR) VT = TLI.getPointerTy().SimpleTy;
2358 if (EmitNodeInfo & OPFL_Chain)
2359 VTs.push_back(MVT::Other);
2360 if (EmitNodeInfo & OPFL_FlagOutput)
2361 VTs.push_back(MVT::Flag);
2363 // This is hot code, so optimize the two most common cases of 1 and 2
2366 if (VTs.size() == 1)
2367 VTList = CurDAG->getVTList(VTs[0]);
2368 else if (VTs.size() == 2)
2369 VTList = CurDAG->getVTList(VTs[0], VTs[1]);
2371 VTList = CurDAG->getVTList(VTs.data(), VTs.size());
2373 // Get the operand list.
2374 unsigned NumOps = MatcherTable[MatcherIndex++];
2375 SmallVector<SDValue, 8> Ops;
2376 for (unsigned i = 0; i != NumOps; ++i) {
2377 unsigned RecNo = MatcherTable[MatcherIndex++];
2379 RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
2381 assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
2382 Ops.push_back(RecordedNodes[RecNo]);
2385 // If there are variadic operands to add, handle them now.
2386 if (EmitNodeInfo & OPFL_VariadicInfo) {
2387 // Determine the start index to copy from.
2388 unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
2389 FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
2390 assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
2391 "Invalid variadic node");
2392 // Copy all of the variadic operands, not including a potential flag
2394 for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
2396 SDValue V = NodeToMatch->getOperand(i);
2397 if (V.getValueType() == MVT::Flag) break;
2402 // If this has chain/flag inputs, add them.
2403 if (EmitNodeInfo & OPFL_Chain)
2404 Ops.push_back(InputChain);
2405 if ((EmitNodeInfo & OPFL_FlagInput) && InputFlag.getNode() != 0)
2406 Ops.push_back(InputFlag);
// NOTE(review): the declaration of 'Res' is in a sampling gap here.
2410 if (Opcode != OPC_MorphNodeTo) {
2411 // If this is a normal EmitNode command, just create the new node and
2412 // add the results to the RecordedNodes list.
2413 Res = CurDAG->getMachineNode(TargetOpc, NodeToMatch->getDebugLoc(),
2414 VTList, Ops.data(), Ops.size());
2416 // Add all the non-flag/non-chain results to the RecordedNodes list.
2417 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
2418 if (VTs[i] == MVT::Other || VTs[i] == MVT::Flag) break;
2419 RecordedNodes.push_back(SDValue(Res, i));
2423 Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops.data(), Ops.size(),
2427 // If the node had chain/flag results, update our notion of the current
2429 if (EmitNodeInfo & OPFL_FlagOutput) {
// Flag is always the last result; chain (if present) is second-to-last.
2430 InputFlag = SDValue(Res, VTs.size()-1);
2431 if (EmitNodeInfo & OPFL_Chain)
2432 InputChain = SDValue(Res, VTs.size()-2);
2433 } else if (EmitNodeInfo & OPFL_Chain)
2434 InputChain = SDValue(Res, VTs.size()-1);
2436 // If the OPFL_MemRefs flag is set on this node, slap all of the
2437 // accumulated memrefs onto it.
2439 // FIXME: This is vastly incorrect for patterns with multiple outputs
2440 // instructions that access memory and for ComplexPatterns that match
2442 if (EmitNodeInfo & OPFL_MemRefs) {
2443 MachineSDNode::mmo_iterator MemRefs =
2444 MF->allocateMemRefsArray(MatchedMemRefs.size());
2445 std::copy(MatchedMemRefs.begin(), MatchedMemRefs.end(), MemRefs);
2446 cast<MachineSDNode>(Res)
2447 ->setMemRefs(MemRefs, MemRefs + MatchedMemRefs.size());
// NOTE(review): the opening 'DEBUG(errs() <<' of this debug print is in a
// sampling gap above the next line.
2451 << (Opcode == OPC_MorphNodeTo ? "Morphed" : "Created")
2452 << " node: "; Res->dump(CurDAG); errs() << "\n");
2454 // If this was a MorphNodeTo then we're completely done!
2455 if (Opcode == OPC_MorphNodeTo) {
2456 // Update chain and flag uses.
2457 UpdateChainsAndFlags(NodeToMatch, InputChain, ChainNodesMatched,
2458 InputFlag, FlagResultNodesMatched, true);
2465 case OPC_MarkFlagResults: {
2466 unsigned NumNodes = MatcherTable[MatcherIndex++];
2468 // Read and remember all the flag-result nodes.
2469 for (unsigned i = 0; i != NumNodes; ++i) {
2470 unsigned RecNo = MatcherTable[MatcherIndex++];
2472 RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
2474 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2475 FlagResultNodesMatched.push_back(RecordedNodes[RecNo].getNode());
2480 case OPC_CompleteMatch: {
2481 // The match has been completed, and any new nodes (if any) have been
2482 // created. Patch up references to the matched dag to use the newly
2484 unsigned NumResults = MatcherTable[MatcherIndex++];
2486 for (unsigned i = 0; i != NumResults; ++i) {
2487 unsigned ResSlot = MatcherTable[MatcherIndex++];
2489 ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
2491 assert(ResSlot < RecordedNodes.size() && "Invalid CheckSame");
2492 SDValue Res = RecordedNodes[ResSlot];
2494 assert(i < NodeToMatch->getNumValues() &&
2495 NodeToMatch->getValueType(i) != MVT::Other &&
2496 NodeToMatch->getValueType(i) != MVT::Flag &&
2497 "Invalid number of results to complete!");
2498 assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
2499 NodeToMatch->getValueType(i) == MVT::iPTR ||
2500 Res.getValueType() == MVT::iPTR ||
2501 NodeToMatch->getValueType(i).getSizeInBits() ==
2502 Res.getValueType().getSizeInBits()) &&
2503 "invalid replacement");
2504 CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, i), Res);
2507 // If the root node defines a flag, add it to the flag nodes to update
2509 if (NodeToMatch->getValueType(NodeToMatch->getNumValues()-1) == MVT::Flag)
2510 FlagResultNodesMatched.push_back(NodeToMatch);
2512 // Update chain and flag uses.
2513 UpdateChainsAndFlags(NodeToMatch, InputChain, ChainNodesMatched,
2514 InputFlag, FlagResultNodesMatched, false);
2516 assert(NodeToMatch->use_empty() &&
2517 "Didn't replace all uses of the node?");
2519 // FIXME: We just return here, which interacts correctly with SelectRoot
2520 // above. We should fix this to not return an SDNode* anymore.
// Match-failure recovery: unwind to the most recent MatchScope and resume
// interpretation at its recorded FailIndex.
2525 // If the code reached this point, then the match failed. See if there is
2526 // another child to try in the current 'Scope', otherwise pop it until we
2527 // find a case to check.
2528 DEBUG(errs() << " Match failed at index " << CurrentOpcodeIndex << "\n");
2529 ++NumDAGIselRetries;
2531 if (MatchScopes.empty()) {
2532 CannotYetSelect(NodeToMatch);
2536 // Restore the interpreter state back to the point where the scope was
2538 MatchScope &LastScope = MatchScopes.back();
2539 RecordedNodes.resize(LastScope.NumRecordedNodes);
// NOTE(review): a 'NodeStack.clear();' presumably precedes this append --
// the line is in a sampling gap; verify against the original file.
2541 NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
2542 N = NodeStack.back();
2544 if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
2545 MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
2546 MatcherIndex = LastScope.FailIndex;
2548 DEBUG(errs() << " Continuing at " << MatcherIndex << "\n");
2550 InputChain = LastScope.InputChain;
2551 InputFlag = LastScope.InputFlag;
2552 if (!LastScope.HasChainNodesMatched)
2553 ChainNodesMatched.clear();
2554 if (!LastScope.HasFlagResultNodesMatched)
2555 FlagResultNodesMatched.clear();
2557 // Check to see what the offset is at the new MatcherIndex. If it is zero
2558 // we have reached the end of this scope, otherwise we have another child
2559 // in the current scope to try.
2560 unsigned NumToSkip = MatcherTable[MatcherIndex++];
2561 if (NumToSkip & 128)
2562 NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
2564 // If we have another child in this scope to match, update FailIndex and
2566 if (NumToSkip != 0) {
2567 LastScope.FailIndex = MatcherIndex+NumToSkip;
2571 // End of this scope, pop it and try the next child in the containing
2573 MatchScopes.pop_back();
// CannotYetSelect - Report a fatal "Cannot yet select" error for a node the
// matcher could not handle.  Intrinsic calls get a friendlier message naming
// the intrinsic; everything else gets a full dump of the offending node.
//
// NOTE(review): this excerpt is sampled -- the declarations of 'msg' and
// 'iid', plus several braces and the 'else' between the two branches, are
// in sampling gaps.  All visible lines are kept byte-identical.
2580 void SelectionDAGISel::CannotYetSelect(SDNode *N) {
2582 raw_string_ostream Msg(msg);
2583 Msg << "Cannot yet select: ";
2585 if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
2586 N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
2587 N->getOpcode() != ISD::INTRINSIC_VOID) {
// Non-intrinsic: print the node itself.
2588 N->printrFull(Msg, CurDAG);
// Intrinsic: operand 0 is the chain if its type is MVT::Other, so the
// intrinsic-ID constant operand is at index 0 or 1 accordingly.
2590 bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
2592 cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
2593 if (iid < Intrinsic::num_intrinsics)
2594 Msg << "intrinsic %" << Intrinsic::getName((Intrinsic::ID)iid);
2595 else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
2596 Msg << "target intrinsic %" << TII->getName(iid);
2598 Msg << "unknown intrinsic #" << iid;
// Aborts compilation with the accumulated message; does not return.
2600 report_fatal_error(Msg.str());
// Static pass identification member; per LLVM pass convention its address
// (not its value) serves as the unique ID for this pass.
2603 char SelectionDAGISel::ID = 0;