1 //===---- ScheduleDAG.cpp - Implement the ScheduleDAG class ---------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements a simple two pass scheduler. The first pass attempts to push
11 // backward any lengthy instructions and critical paths. The second pass packs
12 // instructions into semi-optimal time slots.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "pre-RA-sched"
17 #include "llvm/Constants.h"
18 #include "llvm/Type.h"
19 #include "llvm/CodeGen/ScheduleDAG.h"
20 #include "llvm/CodeGen/MachineConstantPool.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/Target/TargetData.h"
25 #include "llvm/Target/TargetMachine.h"
26 #include "llvm/Target/TargetInstrInfo.h"
27 #include "llvm/Target/TargetLowering.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/MathExtras.h"
// Statistic counter: number of instructions the scheduler commuted
// (reported via -stats when commuteInstruction succeeds in EmitNode).
34 STATISTIC(NumCommutes, "Number of instructions commuted");
// Command-line flag gating whether copies of live-in physical registers
// are scheduled. NOTE(review): the cl::opt<...> type and default-value
// lines of this declaration are not visible in this excerpt -- confirm
// against the full file.
38 SchedLiveInCopies("schedule-livein-copies",
39 cl::desc("Schedule copies of livein registers"),
// Construct the per-block scheduler state: record the selection DAG, the
// basic block, and the target machine, then cache the target interfaces
// (instruction info, register info, lowering info) and the function's
// constant pool that the emission routines below use repeatedly.
43 ScheduleDAG::ScheduleDAG(SelectionDAG &dag, MachineBasicBlock *bb,
44 const TargetMachine &tm)
45 : DAG(dag), BB(bb), TM(tm), MRI(BB->getParent()->getRegInfo()) {
46 TII = TM.getInstrInfo();
47 MF = &DAG.getMachineFunction();
48 TRI = TM.getRegisterInfo();
49 TLI = &DAG.getTargetLoweringInfo();
50 ConstPool = BB->getParent()->getConstantPool();
53 /// CheckForPhysRegDependency - Check if the dependency between def and use of
54 /// a specified operand is a physical register dependency. If so, returns the
55 /// register and the cost of copying the register.
56 static void CheckForPhysRegDependency(SDNode *Def, SDNode *Use, unsigned Op,
57 const TargetRegisterInfo *TRI,
58 const TargetInstrInfo *TII,
59 unsigned &PhysReg, int &Cost) {
// Only operand 2 of a CopyToReg (the value being copied in) can carry a
// physical-register dependency. NOTE(review): the early "return;" for the
// non-matching case is not visible in this excerpt -- verify.
60 if (Op != 2 || Use->getOpcode() != ISD::CopyToReg)
// Destination register of the CopyToReg; a virtual register means there is
// no physreg dependency to report.
63 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
64 if (TargetRegisterInfo::isVirtualRegister(Reg))
67 unsigned ResNo = Use->getOperand(2).ResNo;
68 if (Def->isTargetOpcode()) {
69 const TargetInstrDesc &II = TII->get(Def->getTargetOpcode());
// A result index at or past the explicit defs refers to an implicit def.
// If that implicit def is the same physreg the CopyToReg targets, report
// the register along with its register-class copy cost. NOTE(review): the
// assignment to PhysReg appears to be elided between these lines -- verify.
70 if (ResNo >= II.getNumDefs() &&
71 II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
73 const TargetRegisterClass *RC =
74 TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
75 Cost = RC->getCopyCost();
// Clone - Create an additional scheduling instance (SUnit) for Old's node,
// copying its flagged-node list and scheduling properties, and register the
// new unit in SUnitMap under the same node.
80 SUnit *ScheduleDAG::Clone(SUnit *Old) {
81 SUnit *SU = NewSUnit(Old->Node);
82 SU->FlaggedNodes = Old->FlaggedNodes;
// InstanceNo distinguishes the multiple SUnits that map to the same node;
// the current map size gives the next instance index.
83 SU->InstanceNo = SUnitMap[Old->Node].size();
84 SU->Latency = Old->Latency;
85 SU->isTwoAddress = Old->isTwoAddress;
86 SU->isCommutable = Old->isCommutable;
87 SU->hasPhysRegDefs = Old->hasPhysRegDefs;
88 SUnitMap[Old->Node].push_back(SU);
93 /// BuildSchedUnits - Build SUnits from the selection dag we are given as
94 /// input. This SUnit graph is similar to the SelectionDAG, but represents
95 /// flagged-together nodes with a single SUnit.
96 void ScheduleDAG::BuildSchedUnits() {
97 // Reserve entries in the vector for each of the SUnits we are creating. This
98 // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
100 SUnits.reserve(std::distance(DAG.allnodes_begin(), DAG.allnodes_end()));
// Pass 1: create one SUnit per flag-connected group of nodes.
102 for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin(),
103 E = DAG.allnodes_end(); NI != E; ++NI) {
104 if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.
107 // If this node has already been processed, stop now.
108 if (SUnitMap[NI].size()) continue;
110 SUnit *NodeSUnit = NewSUnit(NI);
112 // See if anything is flagged to this node, if so, add them to flagged
113 // nodes. Nodes can have at most one flag input and one flag output. Flags
114 // are required to be the last operand and result of a node.
116 // Scan up, adding flagged preds to FlaggedNodes.
// NOTE(review): the declaration of the cursor N and the "do {" opening this
// loop are not visible in this excerpt -- verify against the full file.
118 if (N->getNumOperands() &&
119 N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
121 N = N->getOperand(N->getNumOperands()-1).Val;
122 NodeSUnit->FlaggedNodes.push_back(N);
123 SUnitMap[N].push_back(NodeSUnit);
124 } while (N->getNumOperands() &&
125 N->getOperand(N->getNumOperands()-1).getValueType()== MVT::Flag);
// Preds were collected bottom-up; reverse so FlaggedNodes is in top-down
// (def-before-use) order.
126 std::reverse(NodeSUnit->FlaggedNodes.begin(),
127 NodeSUnit->FlaggedNodes.end());
130 // Scan down, adding this node and any flagged succs to FlaggedNodes if they
131 // have a user of the flag operand.
133 while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
134 SDOperand FlagVal(N, N->getNumValues()-1);
136 // There are either zero or one users of the Flag result.
137 bool HasFlagUse = false;
138 for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
140 if (FlagVal.isOperandOf(UI->getUser())) {
142 NodeSUnit->FlaggedNodes.push_back(N);
143 SUnitMap[N].push_back(NodeSUnit);
147 if (!HasFlagUse) break;
150 // Now all flagged nodes are in FlaggedNodes and N is the bottom-most node.
// The bottom-most node becomes the SUnit's representative node.
153 SUnitMap[N].push_back(NodeSUnit);
155 ComputeLatency(NodeSUnit);
158 // Pass 2: add the preds, succs, etc.
159 for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
160 SUnit *SU = &SUnits[su];
161 SDNode *MainNode = SU->Node;
// Derive per-unit properties from the target instruction description.
163 if (MainNode->isTargetOpcode()) {
164 unsigned Opc = MainNode->getTargetOpcode();
165 const TargetInstrDesc &TID = TII->get(Opc);
166 for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
167 if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
168 SU->isTwoAddress = true;
172 if (TID.isCommutable())
173 SU->isCommutable = true;
176 // Find all predecessors and successors of the group.
177 // Temporarily add N to make code simpler.
178 SU->FlaggedNodes.push_back(MainNode);
180 for (unsigned n = 0, e = SU->FlaggedNodes.size(); n != e; ++n) {
181 SDNode *N = SU->FlaggedNodes[n];
// Results beyond the explicit defs are implicit physreg defs.
182 if (N->isTargetOpcode() &&
183 TII->get(N->getTargetOpcode()).getImplicitDefs() &&
184 CountResults(N) > TII->get(N->getTargetOpcode()).getNumDefs())
185 SU->hasPhysRegDefs = true;
187 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
188 SDNode *OpN = N->getOperand(i).Val;
189 if (isPassiveNode(OpN)) continue; // Not scheduled.
190 SUnit *OpSU = SUnitMap[OpN].front();
191 assert(OpSU && "Node has no SUnit!");
192 if (OpSU == SU) continue; // In the same group.
194 MVT::ValueType OpVT = N->getOperand(i).getValueType();
195 assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
// A chain (MVT::Other) operand becomes a control dependence.
196 bool isChain = OpVT == MVT::Other;
198 unsigned PhysReg = 0;
200 // Determine if this is a physical register dependency.
201 CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
202 SU->addPred(OpSU, isChain, false, PhysReg, Cost);
206 // Remove MainNode from FlaggedNodes again.
207 SU->FlaggedNodes.pop_back();
// ComputeLatency - Compute SU->Latency from the target's instruction
// itineraries: sum the stage cycle counts of the main node and of every
// node flagged together into this SUnit.
213 void ScheduleDAG::ComputeLatency(SUnit *SU) {
214 const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
216 // Compute the latency for the node. We use the sum of the latencies for
217 // all nodes flagged together into this SUnit.
218 if (InstrItins.isEmpty()) {
219 // No latency information.
223 if (SU->Node->isTargetOpcode()) {
224 unsigned SchedClass =
225 TII->get(SU->Node->getTargetOpcode()).getSchedClass();
226 const InstrStage *S = InstrItins.begin(SchedClass);
227 const InstrStage *E = InstrItins.end(SchedClass);
// NOTE(review): the loop header advancing S toward E appears to be elided
// between these lines -- verify against the full file.
229 SU->Latency += S->Cycles;
231 for (unsigned i = 0, e = SU->FlaggedNodes.size(); i != e; ++i) {
232 SDNode *FNode = SU->FlaggedNodes[i];
233 if (FNode->isTargetOpcode()) {
234 unsigned SchedClass =TII->get(FNode->getTargetOpcode()).getSchedClass();
235 const InstrStage *S = InstrItins.begin(SchedClass);
236 const InstrStage *E = InstrItins.end(SchedClass);
238 SU->Latency += S->Cycles;
244 /// CalculateDepths - compute depths using algorithms for the longest
// path: a Kahn-style topological walk from predecessor-free units, where
// each unit's Depth becomes 1 + max over its predecessors' depths.
246 void ScheduleDAG::CalculateDepths() {
247 unsigned DAGSize = SUnits.size();
// InDegree[i] counts unprocessed predecessors of SUnit i; a unit enters
// WorkList only once all of its predecessors have been processed.
248 std::vector<unsigned> InDegree(DAGSize);
249 std::vector<SUnit*> WorkList;
250 WorkList.reserve(DAGSize);
252 // Initialize the data structures
253 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
254 SUnit *SU = &SUnits[i];
255 int NodeNum = SU->NodeNum;
256 unsigned Degree = SU->Preds.size();
257 InDegree[NodeNum] = Degree;
260 // Is it a node without dependencies?
262 assert(SU->Preds.empty() && "SUnit should have no predecessors");
263 // Collect leaf nodes
264 WorkList.push_back(SU);
268 // Process nodes in the topological order
269 while (!WorkList.empty()) {
270 SUnit *SU = WorkList.back();
272 unsigned &SUDepth = SU->Depth;
274 // Use dynamic programming:
275 // When current node is being processed, all of its dependencies
276 // are already processed.
277 // So, just iterate over all predecessors and take the longest path
278 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
280 unsigned PredDepth = I->Dep->Depth;
281 if (PredDepth+1 > SUDepth) {
282 SUDepth = PredDepth + 1;
286 // Update InDegrees of all nodes depending on current SUnit
// NOTE(review): the loop body rebinds SU to the successor (I->Dep) at an
// elided line before this decrement -- verify against the full file.
287 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
290 if (!--InDegree[SU->NodeNum])
291 // If all dependencies of the node are processed already,
292 // then the longest path for the node can be computed now
293 WorkList.push_back(SU);
298 /// CalculateHeights - compute heights using algorithms for the longest
// path: the mirror image of CalculateDepths -- walk from successor-free
// units and set each unit's Height to 1 + max over its successors' heights.
300 void ScheduleDAG::CalculateHeights() {
301 unsigned DAGSize = SUnits.size();
// Here InDegree counts unprocessed *successors*, since the walk proceeds
// bottom-up.
302 std::vector<unsigned> InDegree(DAGSize);
303 std::vector<SUnit*> WorkList;
304 WorkList.reserve(DAGSize);
306 // Initialize the data structures
307 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
308 SUnit *SU = &SUnits[i];
309 int NodeNum = SU->NodeNum;
310 unsigned Degree = SU->Succs.size();
311 InDegree[NodeNum] = Degree;
314 // Is it a node without dependencies?
316 assert(SU->Succs.empty() && "Something wrong");
317 assert(WorkList.empty() && "Should be empty");
318 // Collect leaf nodes
319 WorkList.push_back(SU);
323 // Process nodes in the topological order
324 while (!WorkList.empty()) {
325 SUnit *SU = WorkList.back();
327 unsigned &SUHeight = SU->Height;
329 // Use dynamic programming:
330 // When current node is being processed, all of its dependencies
331 // are already processed.
332 // So, just iterate over all successors and take the longest path
333 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
335 unsigned SuccHeight = I->Dep->Height;
336 if (SuccHeight+1 > SUHeight) {
337 SUHeight = SuccHeight + 1;
341 // Update InDegrees of all nodes depending on current SUnit
// NOTE(review): as in CalculateDepths, SU is rebound to the predecessor
// (I->Dep) at an elided line before this decrement -- verify.
342 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
345 if (!--InDegree[SU->NodeNum])
346 // If all dependencies of the node are processed already,
347 // then the longest path for the node can be computed now
348 WorkList.push_back(SU);
353 /// CountResults - The results of target nodes have register or immediate
354 /// operands first, then an optional chain, and optional flag operands (which do
355 /// not go into the resulting MachineInstr).
// Returns the number of results that become MachineInstr defs, i.e. the
// node's value count with trailing flag and chain results stripped.
356 unsigned ScheduleDAG::CountResults(SDNode *Node) {
357 unsigned N = Node->getNumValues();
// NOTE(review): the "--N;" decrementing past flag results inside this loop
// is not visible in this excerpt -- verify.
358 while (N && Node->getValueType(N - 1) == MVT::Flag)
360 if (N && Node->getValueType(N - 1) == MVT::Other)
361 --N; // Skip over chain result.
365 /// CountOperands - The inputs to target nodes have any actual inputs first,
366 /// followed by special operands that describe memory references, then an
367 /// optional chain operand, then flag operands. Compute the number of
368 /// actual operands that will go into the resulting MachineInstr.
369 unsigned ScheduleDAG::CountOperands(SDNode *Node) {
// Start past the memory-operand section and strip any trailing
// MemOperandSDNodes; what remains are the real instruction operands.
370 unsigned N = ComputeMemOperandsEnd(Node);
371 while (N && isa<MemOperandSDNode>(Node->getOperand(N - 1).Val))
372 --N; // Ignore MEMOPERAND nodes
376 /// ComputeMemOperandsEnd - Find the index one past the last MemOperandSDNode
// operand: the operand count with trailing flag operands and the optional
// chain operand stripped.
378 unsigned ScheduleDAG::ComputeMemOperandsEnd(SDNode *Node) {
379 unsigned N = Node->getNumOperands();
// NOTE(review): the "--N;" for the flag-stripping loop is not visible in
// this excerpt -- verify.
380 while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
382 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
383 --N; // Ignore chain if it exists.
// getInstrOperandRegClass - Return the register class the instruction
// description II requires for operand Op, consulting the target's pointer
// register class for pointer-like operands. Variadic operands past the
// described count have no class.
387 static const TargetRegisterClass *getInstrOperandRegClass(
388 const TargetRegisterInfo *TRI,
389 const TargetInstrInfo *TII,
390 const TargetInstrDesc &II,
392 if (Op >= II.getNumOperands()) {
// Only variadic instructions may legitimately have extra operands.
// NOTE(review): the "return NULL;" for this branch appears elided -- verify.
393 assert(II.isVariadic() && "Invalid operand # of instruction");
396 if (II.OpInfo[Op].isLookupPtrRegClass())
397 return TII->getPointerRegClass();
398 return TRI->getRegClass(II.OpInfo[Op].RegClass);
// EmitCopyFromReg - Make result ResNo of Node available in a virtual
// register, recording the mapping in VRBaseMap. Reuses an existing vreg
// (the source itself, or a CopyToReg destination) when possible; otherwise
// creates a fresh vreg and emits a register copy.
401 void ScheduleDAG::EmitCopyFromReg(SDNode *Node, unsigned ResNo,
402 unsigned InstanceNo, unsigned SrcReg,
403 DenseMap<SDOperand, unsigned> &VRBaseMap) {
405 if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
406 // Just use the input register directly!
// For re-emitted instances (InstanceNo > 0), drop the stale mapping first.
// NOTE(review): the guard selecting between the erase+insert and plain
// insert paths is elided between these lines -- verify.
408 VRBaseMap.erase(SDOperand(Node, ResNo));
410 bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo),SrcReg));
412 VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo),SrcReg));
414 assert(isNew && "Node emitted out of order - early");
418 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
419 // the CopyToReg'd destination register instead of creating a new vreg.
420 bool MatchReg = true;
421 for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
423 SDNode *Use = UI->getUser();
// A CopyToReg user of exactly this result may donate its destination vreg.
425 if (Use->getOpcode() == ISD::CopyToReg &&
426 Use->getOperand(2).Val == Node &&
427 Use->getOperand(2).ResNo == ResNo) {
428 unsigned DestReg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
429 if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
432 } else if (DestReg != SrcReg)
// Any non-CopyToReg use of a value-typed result means the uses do not all
// read the physical register directly.
435 for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
436 SDOperand Op = Use->getOperand(i);
437 if (Op.Val != Node || Op.ResNo != ResNo)
439 MVT::ValueType VT = Node->getValueType(Op.ResNo);
440 if (VT != MVT::Other && VT != MVT::Flag)
449 const TargetRegisterClass *SrcRC = 0, *DstRC = 0;
450 SrcRC = TRI->getPhysicalRegisterRegClass(SrcReg, Node->getValueType(ResNo));
452 // Figure out the register class to create for the destreg.
454 DstRC = MRI.getRegClass(VRBase);
456 DstRC = TLI->getRegClassFor(Node->getValueType(ResNo));
459 // If all uses are reading from the src physical register and copying the
460 // register is either impossible or very expensive, then don't create a copy.
461 if (MatchReg && SrcRC->getCopyCost() < 0) {
464 // Create the reg, emit the copy.
465 VRBase = MRI.createVirtualRegister(DstRC);
466 TII->copyRegToReg(*BB, BB->end(), VRBase, SrcReg, DstRC, SrcRC);
// Record the result->vreg mapping (re-emitted instances erase first).
470 VRBaseMap.erase(SDOperand(Node, ResNo));
472 bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo), VRBase));
474 VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo), VRBase));
476 assert(isNew && "Node emitted out of order - early");
479 /// getDstOfCopyToRegUse - If the only use of the specified result number of
480 /// node is a CopyToReg, return its destination register. Return 0 otherwise.
481 unsigned ScheduleDAG::getDstOfOnlyCopyToRegUse(SDNode *Node,
482 unsigned ResNo) const {
// Bail out unless the node has exactly one use.
483 if (!Node->hasOneUse())
486 SDNode *Use = Node->use_begin()->getUser();
// The single use must be a CopyToReg of exactly this result number, and
// its destination must be a virtual register to be reusable.
487 if (Use->getOpcode() == ISD::CopyToReg &&
488 Use->getOperand(2).Val == Node &&
489 Use->getOperand(2).ResNo == ResNo) {
490 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
491 if (TargetRegisterInfo::isVirtualRegister(Reg))
// CreateVirtualRegisters - For each explicit def of Node's machine
// instruction, pick a destination virtual register (reusing a CopyToReg
// user's vreg when possible), add it to MI as a def operand, and record the
// result->vreg mapping in VRBaseMap.
497 void ScheduleDAG::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
498 const TargetInstrDesc &II,
499 DenseMap<SDOperand, unsigned> &VRBaseMap) {
500 assert(Node->getTargetOpcode() != TargetInstrInfo::IMPLICIT_DEF &&
501 "IMPLICIT_DEF should have been handled as a special case elsewhere!");
503 for (unsigned i = 0; i < II.getNumDefs(); ++i) {
504 // If the specific node value is only used by a CopyToReg and the dest reg
505 // is a vreg, use the CopyToReg'd destination register instead of creating
508 for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
510 SDNode *Use = UI->getUser();
511 if (Use->getOpcode() == ISD::CopyToReg &&
512 Use->getOperand(2).Val == Node &&
513 Use->getOperand(2).ResNo == i) {
514 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
515 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
// Reuse the CopyToReg destination as this def's register.
517 MI->addOperand(MachineOperand::CreateReg(Reg, true));
523 // Create the result registers for this node and add the result regs to
524 // the machine instruction.
// NOTE(review): the "if (VRBase == 0)" guard around this fallback appears
// to be elided -- verify against the full file.
526 const TargetRegisterClass *RC = getInstrOperandRegClass(TRI, TII, II, i);
527 assert(RC && "Isn't a register operand!");
528 VRBase = MRI.createVirtualRegister(RC);
529 MI->addOperand(MachineOperand::CreateReg(VRBase, true));
533 bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,i), VRBase));
535 VRBaseMap.insert(std::make_pair(SDOperand(Node,i), VRBase));
537 assert(isNew && "Node emitted out of order - early");
541 /// getVR - Return the virtual register corresponding to the specified result
542 /// of the specified node.
543 unsigned ScheduleDAG::getVR(SDOperand Op,
544 DenseMap<SDOperand, unsigned> &VRBaseMap) {
// IMPLICIT_DEF is special-cased: each use gets its own fresh IMPLICIT_DEF
// machine instruction rather than a shared mapping in VRBaseMap.
545 if (Op.isTargetOpcode() &&
546 Op.getTargetOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
547 // Add an IMPLICIT_DEF instruction before every use.
548 unsigned VReg = getDstOfOnlyCopyToRegUse(Op.Val, Op.ResNo);
549 // IMPLICIT_DEF can produce any type of result so its TargetInstrDesc
550 // does not include operand register class info.
// Fall back to the lowering's class for the value type when no CopyToReg
// destination vreg is available.
552 const TargetRegisterClass *RC = TLI->getRegClassFor(Op.getValueType());
553 VReg = MRI.createVirtualRegister(RC);
555 BuildMI(BB, TII->get(TargetInstrInfo::IMPLICIT_DEF), VReg);
// Normal path: the register was recorded when the defining node was emitted.
559 DenseMap<SDOperand, unsigned>::iterator I = VRBaseMap.find(Op);
560 assert(I != VRBaseMap.end() && "Node emitted out of order - late");
565 /// AddOperand - Add the specified operand to the specified machine instr. II
566 /// specifies the instruction information for the node, and IIOpNum is the
567 /// operand number (in the II) that we are adding. IIOpNum and II are used for
// Dispatches on the SDOperand's node kind: register-producing nodes become
// (virtual) register operands, and each constant-like node kind maps to the
// corresponding MachineOperand flavor.
569 void ScheduleDAG::AddOperand(MachineInstr *MI, SDOperand Op,
571 const TargetInstrDesc *II,
572 DenseMap<SDOperand, unsigned> &VRBaseMap) {
573 if (Op.isTargetOpcode()) {
574 // Note that this case is redundant with the final else block, but we
575 // include it because it is the most common and it makes the logic
577 assert(Op.getValueType() != MVT::Other &&
578 Op.getValueType() != MVT::Flag &&
579 "Chain and flag operands should occur at end of operand list!");
580 // Get/emit the operand.
581 unsigned VReg = getVR(Op, VRBaseMap);
582 const TargetInstrDesc &TID = MI->getDesc();
583 bool isOptDef = IIOpNum < TID.getNumOperands() &&
584 TID.OpInfo[IIOpNum].isOptionalDef();
585 MI->addOperand(MachineOperand::CreateReg(VReg, isOptDef));
587 // Verify that it is right.
588 assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
591 // There may be no register class for this operand if it is a variadic
592 // argument (RC will be NULL in this case). In this case, we just assume
593 // the regclass is ok.
594 const TargetRegisterClass *RC =
595 getInstrOperandRegClass(TRI, TII, *II, IIOpNum);
596 assert((RC || II->isVariadic()) && "Expected reg class info!");
597 const TargetRegisterClass *VRC = MRI.getRegClass(VReg);
// A mismatch between the vreg's class and the instruction's expected class
// is a fatal selection error; dump diagnostics before aborting.
598 if (RC && VRC != RC) {
599 cerr << "Register class of operand and regclass of use don't agree!\n";
600 cerr << "Operand = " << IIOpNum << "\n";
601 cerr << "Op->Val = "; Op.Val->dump(&DAG); cerr << "\n";
602 cerr << "MI = "; MI->print(cerr);
603 cerr << "VReg = " << VReg << "\n";
604 cerr << "VReg RegClass size = " << VRC->getSize()
605 << ", align = " << VRC->getAlignment() << "\n";
606 cerr << "Expected RegClass size = " << RC->getSize()
607 << ", align = " << RC->getAlignment() << "\n";
608 cerr << "Fatal error, aborting.\n";
613 } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
614 MI->addOperand(MachineOperand::CreateImm(C->getValue()));
615 } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
616 ConstantFP *CFP = ConstantFP::get(F->getValueAPF());
617 MI->addOperand(MachineOperand::CreateFPImm(CFP));
618 } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
619 MI->addOperand(MachineOperand::CreateReg(R->getReg(), false));
620 } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
621 MI->addOperand(MachineOperand::CreateGA(TGA->getGlobal(),TGA->getOffset()));
622 } else if (BasicBlockSDNode *BB = dyn_cast<BasicBlockSDNode>(Op)) {
623 MI->addOperand(MachineOperand::CreateMBB(BB->getBasicBlock()));
624 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
625 MI->addOperand(MachineOperand::CreateFI(FI->getIndex()));
626 } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
627 MI->addOperand(MachineOperand::CreateJTI(JT->getIndex()));
628 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
// Constant-pool entries need a pool index; compute an explicit alignment
// for the entry first.
629 int Offset = CP->getOffset();
630 unsigned Align = CP->getAlignment();
631 const Type *Type = CP->getType();
632 // MachineConstantPool wants an explicit alignment.
// NOTE(review): the branch choosing between these two alignment
// computations is elided between these lines -- verify.
634 Align = TM.getTargetData()->getPreferredTypeAlignmentShift(Type);
636 // Alignment of vector types. FIXME!
637 Align = TM.getTargetData()->getABITypeSize(Type);
638 Align = Log2_64(Align);
643 if (CP->isMachineConstantPoolEntry())
644 Idx = ConstPool->getConstantPoolIndex(CP->getMachineCPVal(), Align);
646 Idx = ConstPool->getConstantPoolIndex(CP->getConstVal(), Align);
647 MI->addOperand(MachineOperand::CreateCPI(Idx, Offset));
648 } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
649 MI->addOperand(MachineOperand::CreateES(ES->getSymbol()));
// Fallback: any other value-producing node is referenced through its vreg.
651 assert(Op.getValueType() != MVT::Other &&
652 Op.getValueType() != MVT::Flag &&
653 "Chain and flag operands should occur at end of operand list!");
654 unsigned VReg = getVR(Op, VRBaseMap);
655 MI->addOperand(MachineOperand::CreateReg(VReg, false));
657 // Verify that it is right. Note that the reg class of the physreg and the
658 // vreg don't necessarily need to match, but the target copy insertion has
659 // to be able to handle it. This handles things like copies from ST(0) to
660 // an FP vreg on x86.
661 assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
662 if (II && !II->isVariadic()) {
663 assert(getInstrOperandRegClass(TRI, TII, *II, IIOpNum) &&
664 "Don't have operand info for this instruction!");
// AddMemOperand - Attach a machine memory reference descriptor to MI.
670 void ScheduleDAG::AddMemOperand(MachineInstr *MI, const MachineMemOperand &MO) {
671 MI->addMemOperand(MO);
674 // Returns the Register Class of a subregister
675 static const TargetRegisterClass *getSubRegisterRegClass(
676 const TargetRegisterClass *TRC,
678 // Pick the register class of the subregister
// Subregister indices are 1-based, hence the -1 when indexing the
// class's subregclass table.
679 TargetRegisterInfo::regclass_iterator I =
680 TRC->subregclasses_begin() + SubIdx-1;
681 assert(I < TRC->subregclasses_end() &&
682 "Invalid subregister index for register class");
// getSuperregRegisterClass - Find a super-register class of TRC that has
// the given value type and whose SubIdx subregister class is TRC itself;
// asserts if no such class exists.
686 static const TargetRegisterClass *getSuperregRegisterClass(
687 const TargetRegisterClass *TRC,
690 // Pick the register class of the superegister for this type
691 for (TargetRegisterInfo::regclass_iterator I = TRC->superregclasses_begin(),
692 E = TRC->superregclasses_end(); I != E; ++I)
693 if ((*I)->hasType(VT) && getSubRegisterRegClass(*I, SubIdx) == TRC)
695 assert(false && "Couldn't find the register class");
699 /// EmitSubregNode - Generate machine code for subreg nodes.
// Handles EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG, choosing the
// destination vreg (reusing a CopyToReg user's vreg when possible) and
// recording the node's result in VRBaseMap.
701 void ScheduleDAG::EmitSubregNode(SDNode *Node,
702 DenseMap<SDOperand, unsigned> &VRBaseMap) {
704 unsigned Opc = Node->getTargetOpcode();
706 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
707 // the CopyToReg'd destination register instead of creating a new vreg.
708 for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
710 SDNode *Use = UI->getUser();
711 if (Use->getOpcode() == ISD::CopyToReg &&
712 Use->getOperand(2).Val == Node) {
713 unsigned DestReg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
714 if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
721 if (Opc == TargetInstrInfo::EXTRACT_SUBREG) {
722 unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getValue();
724 // Create the extract_subreg machine instruction.
725 MachineInstr *MI = BuildMI(TII->get(TargetInstrInfo::EXTRACT_SUBREG));
727 // Figure out the register class to create for the destreg.
728 unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
729 const TargetRegisterClass *TRC = MRI.getRegClass(VReg);
730 const TargetRegisterClass *SRC = getSubRegisterRegClass(TRC, SubIdx);
// If a destination vreg was donated by a CopyToReg user, it must already
// be in the subregister's class; otherwise create a fresh one.
733 // Grab the destination register
735 const TargetRegisterClass *DRC = MRI.getRegClass(VRBase);
736 assert(SRC && DRC && SRC == DRC &&
737 "Source subregister and destination must have the same class");
741 assert(SRC && "Couldn't find source register class");
742 VRBase = MRI.createVirtualRegister(SRC);
745 // Add def, source, and subreg index
746 MI->addOperand(MachineOperand::CreateReg(VRBase, true));
747 AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap);
748 MI->addOperand(MachineOperand::CreateImm(SubIdx));
750 } else if (Opc == TargetInstrInfo::INSERT_SUBREG ||
751 Opc == TargetInstrInfo::SUBREG_TO_REG) {
// Operands: N0 = super-register (or implicit immediate for SUBREG_TO_REG),
// N1 = value being inserted, N2 = subregister index.
752 SDOperand N0 = Node->getOperand(0);
753 SDOperand N1 = Node->getOperand(1);
754 SDOperand N2 = Node->getOperand(2);
755 unsigned SubReg = getVR(N1, VRBaseMap);
756 unsigned SubIdx = cast<ConstantSDNode>(N2)->getValue();
759 // Figure out the register class to create for the destreg.
760 const TargetRegisterClass *TRC = 0;
762 TRC = MRI.getRegClass(VRBase);
764 TRC = getSuperregRegisterClass(MRI.getRegClass(SubReg), SubIdx,
765 Node->getValueType(0));
766 assert(TRC && "Couldn't determine register class for insert_subreg");
767 VRBase = MRI.createVirtualRegister(TRC); // Create the reg
770 // Create the insert_subreg or subreg_to_reg machine instruction.
771 MachineInstr *MI = BuildMI(TII->get(Opc));
772 MI->addOperand(MachineOperand::CreateReg(VRBase, true));
774 // If creating a subreg_to_reg, then the first input operand
775 // is an implicit value immediate, otherwise it's a register
776 if (Opc == TargetInstrInfo::SUBREG_TO_REG) {
777 const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
778 MI->addOperand(MachineOperand::CreateImm(SD->getValue()));
780 AddOperand(MI, N0, 0, 0, VRBaseMap);
781 // Add the subregister being inserted
782 AddOperand(MI, N1, 0, 0, VRBaseMap);
783 MI->addOperand(MachineOperand::CreateImm(SubIdx));
786 assert(0 && "Node is not insert_subreg, extract_subreg, or subreg_to_reg");
// Record the node's (single) result -> vreg mapping.
789 bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,0), VRBase));
791 VRBaseMap.insert(std::make_pair(SDOperand(Node,0), VRBase));
793 assert(isNew && "Node emitted out of order - early");
796 /// EmitNode - Generate machine code for a node and needed dependencies.
798 void ScheduleDAG::EmitNode(SDNode *Node, unsigned InstanceNo,
799 DenseMap<SDOperand, unsigned> &VRBaseMap) {
800 // If machine instruction
801 if (Node->isTargetOpcode()) {
802 unsigned Opc = Node->getTargetOpcode();
804 // Handle subreg insert/extract specially
805 if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
806 Opc == TargetInstrInfo::INSERT_SUBREG ||
807 Opc == TargetInstrInfo::SUBREG_TO_REG) {
808 EmitSubregNode(Node, VRBaseMap);
812 if (Opc == TargetInstrInfo::IMPLICIT_DEF)
813 // We want a unique VR for each IMPLICIT_DEF use.
// General case: build a MachineInstr from the node's description.
816 const TargetInstrDesc &II = TII->get(Opc);
817 unsigned NumResults = CountResults(Node);
818 unsigned NodeOperands = CountOperands(Node);
819 unsigned MemOperandsEnd = ComputeMemOperandsEnd(Node);
// Results beyond the explicit defs must be covered by implicit physreg defs.
820 bool HasPhysRegOuts = (NumResults > II.getNumDefs()) &&
821 II.getImplicitDefs() != 0;
823 unsigned NumMIOperands = NodeOperands + NumResults;
824 assert((II.getNumOperands() == NumMIOperands ||
825 HasPhysRegOuts || II.isVariadic()) &&
826 "#operands for dag node doesn't match .td file!");
829 // Create the new machine instruction.
830 MachineInstr *MI = BuildMI(II);
832 // Add result register values for things that are defined by this
835 CreateVirtualRegisters(Node, MI, II, VRBaseMap);
837 // Emit all of the actual operands of this instruction, adding them to the
838 // instruction as appropriate.
839 for (unsigned i = 0; i != NodeOperands; ++i)
840 AddOperand(MI, Node->getOperand(i), i+II.getNumDefs(), &II, VRBaseMap);
842 // Emit all of the memory operands of this instruction
843 for (unsigned i = NodeOperands; i != MemOperandsEnd; ++i)
844 AddMemOperand(MI, cast<MemOperandSDNode>(Node->getOperand(i))->MO);
846 // Commute node if it has been determined to be profitable.
847 if (CommuteSet.count(Node)) {
848 MachineInstr *NewMI = TII->commuteInstruction(MI);
850 DOUT << "Sched: COMMUTING FAILED!\n";
852 DOUT << "Sched: COMMUTED TO: " << *NewMI;
// Either insert the instruction via the target's custom hook (which may
// change the current block) or append it here. NOTE(review): the else
// branch appending MI directly is elided after this call -- verify.
861 if (II.usesCustomDAGSchedInsertionHook())
862 // Insert this instruction into the basic block using a target
863 // specific inserter which may return a new basic block.
864 BB = TLI->EmitInstrWithCustomInserter(MI, BB);
868 // Additional results must be a physical register def.
869 if (HasPhysRegOuts) {
870 for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
871 unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
// Only copy implicit-def results that actually have users.
872 if (Node->hasAnyUseOfValue(i))
873 EmitCopyFromReg(Node, i, InstanceNo, Reg, VRBaseMap);
// Non-machine (target-independent) nodes: only a handful of opcodes are
// legal at this point.
877 switch (Node->getOpcode()) {
882 assert(0 && "This target-independent node should have been selected!");
884 case ISD::EntryToken:
885 assert(0 && "EntryToken should have been excluded from the schedule!");
887 case ISD::TokenFactor: // fall thru
892 case ISD::CopyToReg: {
894 SDOperand SrcVal = Node->getOperand(2);
895 if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
896 SrcReg = R->getReg();
898 SrcReg = getVR(SrcVal, VRBaseMap);
900 unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
901 if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
904 const TargetRegisterClass *SrcTRC = 0, *DstTRC = 0;
905 // Get the register classes of the src/dst.
906 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
907 SrcTRC = MRI.getRegClass(SrcReg);
909 SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());
911 if (TargetRegisterInfo::isVirtualRegister(DestReg))
912 DstTRC = MRI.getRegClass(DestReg);
914 DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
915 Node->getOperand(1).getValueType());
916 TII->copyRegToReg(*BB, BB->end(), DestReg, SrcReg, DstTRC, SrcTRC);
919 case ISD::CopyFromReg: {
920 unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
921 EmitCopyFromReg(Node, 0, InstanceNo, SrcReg, VRBaseMap);
924 case ISD::INLINEASM: {
925 unsigned NumOps = Node->getNumOperands();
926 if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
927 --NumOps; // Ignore the flag operand.
929 // Create the inline asm machine instruction.
930 MachineInstr *MI = BuildMI(TII->get(TargetInstrInfo::INLINEASM));
932 // Add the asm string as an external symbol operand.
934 cast<ExternalSymbolSDNode>(Node->getOperand(1))->getSymbol();
935 MI->addOperand(MachineOperand::CreateES(AsmStr));
937 // Add all of the operand registers to the instruction.
// Operand list is (flags, values...) groups; the low bits of each flags
// word select the group kind and the remaining bits give the value count.
938 for (unsigned i = 2; i != NumOps;) {
939 unsigned Flags = cast<ConstantSDNode>(Node->getOperand(i))->getValue();
940 unsigned NumVals = Flags >> 3;
942 MI->addOperand(MachineOperand::CreateImm(Flags));
943 ++i; // Skip the ID value.
946 default: assert(0 && "Bad flags!");
947 case 1: // Use of register.
948 for (; NumVals; --NumVals, ++i) {
949 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
950 MI->addOperand(MachineOperand::CreateReg(Reg, false));
953 case 2: // Def of register.
954 for (; NumVals; --NumVals, ++i) {
955 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
956 MI->addOperand(MachineOperand::CreateReg(Reg, true));
959 case 3: { // Immediate.
960 for (; NumVals; --NumVals, ++i) {
961 if (ConstantSDNode *CS =
962 dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
963 MI->addOperand(MachineOperand::CreateImm(CS->getValue()));
964 } else if (GlobalAddressSDNode *GA =
965 dyn_cast<GlobalAddressSDNode>(Node->getOperand(i))) {
966 MI->addOperand(MachineOperand::CreateGA(GA->getGlobal(),
969 BasicBlockSDNode *BB =cast<BasicBlockSDNode>(Node->getOperand(i));
970 MI->addOperand(MachineOperand::CreateMBB(BB->getBasicBlock()));
975 case 4: // Addressing mode.
976 // The addressing mode has been selected, just add all of the
977 // operands to the machine instruction.
978 for (; NumVals; --NumVals, ++i)
979 AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap);
990 void ScheduleDAG::EmitNoop() {
// Ask the target to insert a no-op at the end of the current basic block.
991   TII->insertNoop(*BB, BB->end());
// EmitCrossRCCopy - Emit a cross-register-class copy for the scheduling unit
// SU. Depending on the edge direction this is either a copy into a physical
// register (destination register found among SU's successors) or a copy out
// of a physical register into a fresh virtual register recorded in VRBaseMap.
// NOTE(review): several intermediate lines (the if/else scaffolding between
// the two copy directions, and the declaration of 'Reg') are missing from
// this chunk of the file — confirm against the full source before editing.
994 void ScheduleDAG::EmitCrossRCCopy(SUnit *SU,
995 DenseMap<SUnit*, unsigned> &VRBaseMap) {
// Walk SU's data predecessors; control (chain) edges carry no value to copy.
996 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
998 if (I->isCtrl) continue; // ignore chain preds
1000 // Copy to physical register.
// The predecessor must already have been emitted and registered a vreg base.
1001 DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->Dep);
1002 assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
1003 // Find the destination physical register.
1005 for (SUnit::const_succ_iterator II = SU->Succs.begin(),
1006 EE = SU->Succs.end(); II != EE; ++II) {
1012 assert(I->Reg && "Unknown physical register!");
// Copy the emitted vreg into the physical register demanded by the succ.
1013 TII->copyRegToReg(*BB, BB->end(), Reg, VRI->second,
1014 SU->CopyDstRC, SU->CopySrcRC);
1016 // Copy from physical register.
1017 assert(I->Reg && "Unknown physical register!");
// Create a new vreg of the copy's destination class to hold the value.
1018 unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
// NOTE(review): lines 1020 and 1022 both appear to insert into VRBaseMap;
// dropped preprocessor/debug-guard lines are likely between them — verify.
1020 bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase));
1022 VRBaseMap.insert(std::make_pair(SU, VRBase));
1024 assert(isNew && "Node emitted out of order - early");
// Copy out of the physical register into the new virtual register.
1025 TII->copyRegToReg(*BB, BB->end(), VRBase, I->Reg,
1026 SU->CopyDstRC, SU->CopySrcRC);
1032 /// EmitLiveInCopy - Emit a copy for a live in physical register. If the
1033 /// physical register has only a single copy use, then coalesce the copy
// Parameters: MBB/InsertPos locate where the copy goes; VirtReg/PhysReg are
// the destination vreg and source live-in physical register; RC is the
// register class used for both sides of the copy; CopyRegMap records, per
// emitted copy instruction, which physical register it defined so later
// copies can be placed without clobbering earlier ones.
// NOTE(review): lines are missing inside this function (e.g. the use-counting
// loop body around line 1044 and the coalescing branch around 1057-1060);
// consult the full source before modifying control flow here.
1035 void ScheduleDAG::EmitLiveInCopy(MachineBasicBlock *MBB,
1036 MachineBasicBlock::iterator &InsertPos,
1037 unsigned VirtReg, unsigned PhysReg,
1038 const TargetRegisterClass *RC,
1039 DenseMap<MachineInstr*, unsigned> &CopyRegMap){
// Count the uses of VirtReg and remember the (single) using instruction.
1040 unsigned NumUses = 0;
1041 MachineInstr *UseMI = NULL;
1042 for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(VirtReg),
1043 UE = MRI.use_end(); UI != UE; ++UI) {
1049 // If the number of uses is not one, or the use is not a move instruction,
1050 // don't coalesce. Also, only coalesce away a virtual register to virtual
1052 bool Coalesced = false;
1053 unsigned SrcReg, DstReg;
// Coalescing candidate: the single use is itself a vreg-to-vreg move.
1055 TII->isMoveInstr(*UseMI, SrcReg, DstReg) &&
1056 TargetRegisterInfo::isVirtualRegister(DstReg)) {
1061 // Now find an ideal location to insert the copy.
// Scan backwards from InsertPos past earlier live-in copies whose
// destination physical registers do not overlap PhysReg.
1062 MachineBasicBlock::iterator Pos = InsertPos;
1063 while (Pos != MBB->begin()) {
1064 MachineInstr *PrevMI = prior(Pos);
1065 DenseMap<MachineInstr*, unsigned>::iterator RI = CopyRegMap.find(PrevMI);
1066 // copyRegToReg might emit multiple instructions to do a copy.
1067 unsigned CopyDstReg = (RI == CopyRegMap.end()) ? 0 : RI->second;
1068 if (CopyDstReg && !TRI->regsOverlap(CopyDstReg, PhysReg))
1069 // This is what the BB looks like right now:
1074 // We want to insert "r1025 = mov r1". Inserting this copy below the
1075 // move to r1024 makes it impossible for that move to be coalesced.
1082 break; // Woot! Found a good location.
// Emit the copy and record which physreg the new instruction defines.
1086 TII->copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
1087 CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
// If we inserted right at the use, advance InsertPos past it.
1089 if (&*InsertPos == UseMI) ++InsertPos;
1094 /// EmitLiveInCopies - If this is the first basic block in the function,
1095 /// and if it has live ins that need to be copied into vregs, emit the
1096 /// copies into the top of the block.
// For each (physreg, vreg) live-in pair of the function, emit a copy at the
// top of MBB via EmitLiveInCopy. CopyRegMap threads placement information
// between successive copies so they do not clobber one another.
1097 void ScheduleDAG::EmitLiveInCopies(MachineBasicBlock *MBB) {
1098 DenseMap<MachineInstr*, unsigned> CopyRegMap;
1099 MachineBasicBlock::iterator InsertPos = MBB->begin();
// livein_iterator yields pairs: LI->first is the physical register,
// LI->second the virtual register it should be copied into.
1100 for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
1101 E = MRI.livein_end(); LI != E; ++LI)
1103 const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
1104 EmitLiveInCopy(MBB, InsertPos, LI->second, LI->first, RC, CopyRegMap);
1108 /// EmitSchedule - Emit the machine code in scheduled order.
// EmitSchedule - Emit machine code for the scheduled Sequence. Live-in
// copies for the entry block are emitted either eagerly up front (when the
// schedule-livein-copies flag is off) or via EmitLiveInCopies afterwards.
// NOTE(review): lines are missing inside this function (e.g. the noop branch
// around 1132-1134 and the copyRegToReg argument list after 1120); consult
// the full source before modifying control flow here.
1109 void ScheduleDAG::EmitSchedule() {
// Only the function's entry block gets live-in copies.
1110 bool isEntryBB = &MF->front() == BB;
1112 if (isEntryBB && !SchedLiveInCopies) {
1113 // If this is the first basic block in the function, and if it has live ins
1114 // that need to be copied into vregs, emit the copies into the top of the
1115 // block before emitting the code for the block.
1116 for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
1117 E = MRI.livein_end(); LI != E; ++LI)
1119 const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
1120 TII->copyRegToReg(*MF->begin(), MF->begin()->end(), LI->second,
1125 // Finally, emit the code for all of the scheduled instructions.
// VRBaseMap: SDOperand -> base vreg for emitted nodes.
// CopyVRBaseMap: SUnit -> vreg for cross-register-class copy units.
1126 DenseMap<SDOperand, unsigned> VRBaseMap;
1127 DenseMap<SUnit*, unsigned> CopyVRBaseMap;
1128 for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
1129 SUnit *SU = Sequence[i];
1131 // Null SUnit* is a noop.
// Emit nodes flagged together with SU before the unit's own node.
1135 for (unsigned j = 0, ee = SU->FlaggedNodes.size(); j != ee; ++j)
1136 EmitNode(SU->FlaggedNodes[j], SU->InstanceNo, VRBaseMap);
// SUnits with no SDNode represent cross-register-class copies.
1138 EmitCrossRCCopy(SU, CopyVRBaseMap);
1140 EmitNode(SU->Node, SU->InstanceNo, VRBaseMap);
// When scheduling of live-in copies was requested, emit them now into the
// entry block instead of the eager path above.
1143 if (isEntryBB && SchedLiveInCopies)
1144 EmitLiveInCopies(MF->begin());
1147 /// dump - dump the schedule.
// Print each scheduled unit in order; null entries in Sequence are noops.
1148 void ScheduleDAG::dumpSchedule() const {
1149 for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
1150 if (SUnit *SU = Sequence[i])
1153 cerr << "**** NOOP ****\n";
1158 /// Run - perform scheduling.
1160 MachineBasicBlock *ScheduleDAG::Run() {
// NOTE(review): the body of Run() is not visible in this chunk of the file;
// see the full source for what it does before relying on its behavior.
1165 /// SUnit - Scheduling unit. It's a wrapper around either a single SDNode or
1166 /// a group of nodes flagged together.
// Print a one-line summary of this scheduling unit: its number, whether it
// is a cross-register-class copy, and any nodes flagged together with it.
// NOTE(review): lines are missing here (e.g. around 1169-1171 and after
// 1177); consult the full source before modifying this function.
1167 void SUnit::dump(const SelectionDAG *G) const {
1168 cerr << "SU(" << NodeNum << "): ";
// Units without an SDNode represent cross-register-class copies.
1172 cerr << "CROSS RC COPY ";
1174 if (FlaggedNodes.size() != 0) {
1175 for (unsigned i = 0, e = FlaggedNodes.size(); i != e; i++) {
1177 FlaggedNodes[i]->dump(G);
1183 void SUnit::dumpAll(const SelectionDAG *G) const {
1186 cerr << " # preds left : " << NumPredsLeft << "\n";
1187 cerr << " # succs left : " << NumSuccsLeft << "\n";
1188 cerr << " Latency : " << Latency << "\n";
1189 cerr << " Depth : " << Depth << "\n";
1190 cerr << " Height : " << Height << "\n";
1192 if (Preds.size() != 0) {
1193 cerr << " Predecessors:\n";
1194 for (SUnit::const_succ_iterator I = Preds.begin(), E = Preds.end();
1200 cerr << I->Dep << " - SU(" << I->Dep->NodeNum << ")";
1206 if (Succs.size() != 0) {
1207 cerr << " Successors:\n";
1208 for (SUnit::const_succ_iterator I = Succs.begin(), E = Succs.end();
1214 cerr << I->Dep << " - SU(" << I->Dep->NodeNum << ")";