1 //===---- ScheduleDAG.cpp - Implement the ScheduleDAG class ---------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements a simple two pass scheduler. The first pass attempts to push
11 // backward any lengthy instructions and critical paths. The second pass packs
12 // instructions into semi-optimal time slots.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "pre-RA-sched"
17 #include "llvm/Constants.h"
18 #include "llvm/Type.h"
19 #include "llvm/CodeGen/ScheduleDAG.h"
20 #include "llvm/CodeGen/MachineConstantPool.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/Target/TargetData.h"
25 #include "llvm/Target/TargetMachine.h"
26 #include "llvm/Target/TargetInstrInfo.h"
27 #include "llvm/Target/TargetLowering.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/MathExtras.h"
34 STATISTIC(NumCommutes, "Number of instructions commuted");
38 SchedLiveInCopies("schedule-livein-copies",
39 cl::desc("Schedule copies of livein registers"),
43 ScheduleDAG::ScheduleDAG(SelectionDAG &dag, MachineBasicBlock *bb,
44 const TargetMachine &tm)
45 : DAG(dag), BB(bb), TM(tm), MRI(BB->getParent()->getRegInfo()) {
46 TII = TM.getInstrInfo();
47 MF = &DAG.getMachineFunction();
48 TRI = TM.getRegisterInfo();
49 TLI = &DAG.getTargetLoweringInfo();
50 ConstPool = BB->getParent()->getConstantPool();
53 /// CheckForPhysRegDependency - Check if the dependency between def and use of
54 /// a specified operand is a physical register dependency. If so, returns the
55 /// register and the cost of copying the register.
56 static void CheckForPhysRegDependency(SDNode *Def, SDNode *Use, unsigned Op,
57 const TargetRegisterInfo *TRI,
58 const TargetInstrInfo *TII,
59 unsigned &PhysReg, int &Cost) {
60 if (Op != 2 || Use->getOpcode() != ISD::CopyToReg)
63 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
64 if (TargetRegisterInfo::isVirtualRegister(Reg))
67 unsigned ResNo = Use->getOperand(2).ResNo;
68 if (Def->isTargetOpcode()) {
69 const TargetInstrDesc &II = TII->get(Def->getTargetOpcode());
70 if (ResNo >= II.getNumDefs() &&
71 II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
73 const TargetRegisterClass *RC =
74 TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
75 Cost = RC->getCopyCost();
80 SUnit *ScheduleDAG::Clone(SUnit *Old) {
81 SUnit *SU = NewSUnit(Old->Node);
82 SU->FlaggedNodes = Old->FlaggedNodes;
83 SU->InstanceNo = SUnitMap[Old->Node].size();
84 SU->Latency = Old->Latency;
85 SU->isTwoAddress = Old->isTwoAddress;
86 SU->isCommutable = Old->isCommutable;
87 SU->hasPhysRegDefs = Old->hasPhysRegDefs;
88 SUnitMap[Old->Node].push_back(SU);
/// BuildSchedUnits - Build SUnits from the selection dag that we are input.
/// This SUnit graph is similar to the SelectionDAG, but represents flagged
/// together nodes with a single SUnit.
///
/// NOTE(review): several statements of this function (loop-body braces, a
/// 'do {' opener, 'continue's, and the declaration of N) are not visible in
/// this chunk; the code below is annotated as-is, not repaired. Confirm
/// against the upstream file.
void ScheduleDAG::BuildSchedUnits() {
  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensure that reallocation of the vector won't happen, so SUnit*'s won't get
  SUnits.reserve(std::distance(DAG.allnodes_begin(), DAG.allnodes_end()));

  // Pass 1: create one SUnit per non-passive node, merging each run of
  // flag-glued nodes into a single SUnit.
  for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin(),
       E = DAG.allnodes_end(); NI != E; ++NI) {
    if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.

    // If this node has already been processed, stop now.
    if (SUnitMap[NI].size()) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node, if so, add them to flagged
    // nodes. Nodes can have at most one flag input and one flag output. Flags
    // are required the be the last operand and result of a node.

    // Scan up, adding flagged preds to FlaggedNodes.
    // NOTE(review): the declaration of N and the 'do {' opener of this
    // do/while appear elided here.
    if (N->getNumOperands() &&
        N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
        N = N->getOperand(N->getNumOperands()-1).Val;
        NodeSUnit->FlaggedNodes.push_back(N);
        SUnitMap[N].push_back(NodeSUnit);
      } while (N->getNumOperands() &&
               N->getOperand(N->getNumOperands()-1).getValueType()== MVT::Flag);
      // Predecessors were collected bottom-up; restore top-down order.
      std::reverse(NodeSUnit->FlaggedNodes.begin(),
                   NodeSUnit->FlaggedNodes.end());

    // Scan down, adding this node and any flagged succs to FlaggedNodes if they
    // have a user of the flag operand.
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDOperand FlagVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
        if (FlagVal.isOperandOf(UI->getUser())) {
          NodeSUnit->FlaggedNodes.push_back(N);
          SUnitMap[N].push_back(NodeSUnit);
      if (!HasFlagUse) break;

    // Now all flagged nodes are in FlaggedNodes and N is the bottom-most node.
    SUnitMap[N].push_back(NodeSUnit);

    ComputeLatency(NodeSUnit);

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->Node;

    // Two-address and commutable properties come from the target's
    // instruction description.
    if (MainNode->isTargetOpcode()) {
      unsigned Opc = MainNode->getTargetOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
      if (TID.isCommutable())
        SU->isCommutable = true;

    // Find all predecessors and successors of the group.
    // Temporarily add N to make code simpler.
    SU->FlaggedNodes.push_back(MainNode);

    for (unsigned n = 0, e = SU->FlaggedNodes.size(); n != e; ++n) {
      SDNode *N = SU->FlaggedNodes[n];
      // A target node with more results than declared defs writes implicit
      // physical registers.
      if (N->isTargetOpcode() &&
          TII->get(N->getTargetOpcode()).getImplicitDefs() &&
          CountResults(N) > TII->get(N->getTargetOpcode()).getNumDefs())
        SU->hasPhysRegDefs = true;

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).Val;
        if (isPassiveNode(OpN)) continue; // Not scheduled.
        SUnit *OpSU = SUnitMap[OpN].front();
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue; // In the same group.

        MVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        // NOTE(review): the declaration of Cost (presumably 'int Cost = 1;')
        // appears elided here — confirm against upstream.
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        SU->addPred(OpSU, isChain, false, PhysReg, Cost);

    // Remove MainNode from FlaggedNodes again.
    SU->FlaggedNodes.pop_back();
/// ComputeLatency - Compute SU's latency from the target's instruction
/// itineraries, summing over all nodes flagged together into this unit.
///
/// NOTE(review): several lines (the no-itinerary early return, and the stage
/// loops between begin()/end()) appear elided from this chunk; annotated
/// as-is, not repaired.
void ScheduleDAG::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  if (InstrItins.isEmpty()) {
    // No latency information.

  if (SU->Node->isTargetOpcode()) {
    unsigned SchedClass =
      TII->get(SU->Node->getTargetOpcode()).getSchedClass();
    const InstrStage *S = InstrItins.begin(SchedClass);
    const InstrStage *E = InstrItins.end(SchedClass);
    // Accumulate cycles for the main node's itinerary stage(s).
    SU->Latency += S->Cycles;

  // Add in the latency of every node glued into this unit.
  for (unsigned i = 0, e = SU->FlaggedNodes.size(); i != e; ++i) {
    SDNode *FNode = SU->FlaggedNodes[i];
    if (FNode->isTargetOpcode()) {
      unsigned SchedClass =TII->get(FNode->getTargetOpcode()).getSchedClass();
      const InstrStage *S = InstrItins.begin(SchedClass);
      const InstrStage *E = InstrItins.end(SchedClass);
      SU->Latency += S->Cycles;
/// CalculateDepths - compute depths using algorithms for the longest
/// paths in the DAG: a worklist-based topological walk where each node's
/// depth is one more than the maximum depth of its predecessors.
///
/// NOTE(review): the zero-degree guard around the leaf-collection code and
/// the rebinding of SU to I->Dep in the successor loop appear elided from
/// this chunk; annotated as-is, not repaired.
void ScheduleDAG::CalculateDepths() {
  unsigned DAGSize = SUnits.size();
  std::vector<unsigned> InDegree(DAGSize);
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  // Initialize the data structures
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    int NodeNum = SU->NodeNum;
    unsigned Degree = SU->Preds.size();
    InDegree[NodeNum] = Degree;

    // Is it a node without dependencies?
    assert(SU->Preds.empty() && "SUnit should have no predecessors");
    // Collect leaf nodes
    WorkList.push_back(SU);

  // Process nodes in the topological order
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    unsigned &SUDepth = SU->Depth;

    // Use dynamic programming:
    // When current node is being processed, all of its dependencies
    // are already processed.
    // So, just iterate over all predecessors and take the longest path
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
      unsigned PredDepth = I->Dep->Depth;
      if (PredDepth+1 > SUDepth) {
        SUDepth = PredDepth + 1;

    // Update InDegrees of all nodes depending on current SUnit
    for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
      if (!--InDegree[SU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the longest path for the node can be computed now
        WorkList.push_back(SU);
/// CalculateHeights - compute heights using algorithms for the longest
/// paths in the DAG: the mirror image of CalculateDepths, walking from
/// bottom (successor-free) nodes upward.
///
/// NOTE(review): the zero-degree guard and the rebinding of SU to I->Dep in
/// the predecessor loop appear elided from this chunk; annotated as-is.
void ScheduleDAG::CalculateHeights() {
  unsigned DAGSize = SUnits.size();
  std::vector<unsigned> InDegree(DAGSize);
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  // Initialize the data structures
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    int NodeNum = SU->NodeNum;
    unsigned Degree = SU->Succs.size();
    InDegree[NodeNum] = Degree;

    // Is it a node without dependencies?
    assert(SU->Succs.empty() && "Something wrong");
    assert(WorkList.empty() && "Should be empty");
    // Collect leaf nodes
    WorkList.push_back(SU);

  // Process nodes in the topological order
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    unsigned &SUHeight = SU->Height;

    // Use dynamic programming:
    // When current node is being processed, all of its dependencies
    // are already processed.
    // So, just iterate over all successors and take the longest path
    for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
      unsigned SuccHeight = I->Dep->Height;
      if (SuccHeight+1 > SUHeight) {
        SUHeight = SuccHeight + 1;

    // Update InDegrees of all nodes depending on current SUnit
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
      if (!--InDegree[SU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the longest path for the node can be computed now
        WorkList.push_back(SU);
353 /// CountResults - The results of target nodes have register or immediate
354 /// operands first, then an optional chain, and optional flag operands (which do
355 /// not go into the resulting MachineInstr).
356 unsigned ScheduleDAG::CountResults(SDNode *Node) {
357 unsigned N = Node->getNumValues();
358 while (N && Node->getValueType(N - 1) == MVT::Flag)
360 if (N && Node->getValueType(N - 1) == MVT::Other)
361 --N; // Skip over chain result.
365 /// CountOperands - The inputs to target nodes have any actual inputs first,
366 /// followed by special operands that describe memory references, then an
367 /// optional chain operand, then flag operands. Compute the number of
368 /// actual operands that will go into the resulting MachineInstr.
369 unsigned ScheduleDAG::CountOperands(SDNode *Node) {
370 unsigned N = ComputeMemOperandsEnd(Node);
371 while (N && isa<MemOperandSDNode>(Node->getOperand(N - 1).Val))
372 --N; // Ignore MEMOPERAND nodes
376 /// ComputeMemOperandsEnd - Find the index one past the last MemOperandSDNode
378 unsigned ScheduleDAG::ComputeMemOperandsEnd(SDNode *Node) {
379 unsigned N = Node->getNumOperands();
380 while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
382 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
383 --N; // Ignore chain if it exists.
387 static const TargetRegisterClass *getInstrOperandRegClass(
388 const TargetRegisterInfo *TRI,
389 const TargetInstrInfo *TII,
390 const TargetInstrDesc &II,
392 if (Op >= II.getNumOperands()) {
393 assert(II.isVariadic() && "Invalid operand # of instruction");
396 if (II.OpInfo[Op].isLookupPtrRegClass())
397 return TII->getPointerRegClass();
398 return TRI->getRegClass(II.OpInfo[Op].RegClass);
/// EmitCopyFromReg - Arrange for the value produced by (Node, ResNo) to live
/// in a virtual register, either by reusing SrcReg/a CopyToReg destination or
/// by emitting a copy into a fresh vreg, and record it in VRBaseMap.
///
/// NOTE(review): the declaration of VRBase, several early returns/braces, and
/// parts of the use-scanning control flow appear elided from this chunk;
/// annotated as-is, not repaired.
void ScheduleDAG::EmitCopyFromReg(SDNode *Node, unsigned ResNo,
                                  unsigned InstanceNo, unsigned SrcReg,
                                  DenseMap<SDOperand, unsigned> &VRBaseMap) {
  if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
    // Just use the input register directly!
    VRBaseMap.erase(SDOperand(Node, ResNo));
    bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo),SrcReg));
    isNew = isNew; // Silence compiler warning.
    assert(isNew && "Node emitted out of order - early");

  // If the node is only used by a CopyToReg and the dest reg is a vreg, use
  // the CopyToReg'd destination register instead of creating a new vreg.
  bool MatchReg = true;
  for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
    SDNode *Use = UI->getUser();
    if (Use->getOpcode() == ISD::CopyToReg &&
        Use->getOperand(2).Val == Node &&
        Use->getOperand(2).ResNo == ResNo) {
      unsigned DestReg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
      } else if (DestReg != SrcReg)
      // Does this use read the result directly (not through a CopyToReg)?
      for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
        SDOperand Op = Use->getOperand(i);
        if (Op.Val != Node || Op.ResNo != ResNo)
        MVT VT = Node->getValueType(Op.ResNo);
        if (VT != MVT::Other && VT != MVT::Flag)

  const TargetRegisterClass *SrcRC = 0, *DstRC = 0;
  SrcRC = TRI->getPhysicalRegisterRegClass(SrcReg, Node->getValueType(ResNo));

  // Figure out the register class to create for the destreg.
  DstRC = MRI.getRegClass(VRBase);
  DstRC = TLI->getRegClassFor(Node->getValueType(ResNo));

  // If all uses are reading from the src physical register and copying the
  // register is either impossible or very expensive, then don't create a copy.
  if (MatchReg && SrcRC->getCopyCost() < 0) {
  // Create the reg, emit the copy.
  VRBase = MRI.createVirtualRegister(DstRC);
  TII->copyRegToReg(*BB, BB->end(), VRBase, SrcReg, DstRC, SrcRC);

  VRBaseMap.erase(SDOperand(Node, ResNo));
  bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo), VRBase));
  isNew = isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
473 /// getDstOfCopyToRegUse - If the only use of the specified result number of
474 /// node is a CopyToReg, return its destination register. Return 0 otherwise.
475 unsigned ScheduleDAG::getDstOfOnlyCopyToRegUse(SDNode *Node,
476 unsigned ResNo) const {
477 if (!Node->hasOneUse())
480 SDNode *Use = Node->use_begin()->getUser();
481 if (Use->getOpcode() == ISD::CopyToReg &&
482 Use->getOperand(2).Val == Node &&
483 Use->getOperand(2).ResNo == ResNo) {
484 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
485 if (TargetRegisterInfo::isVirtualRegister(Reg))
/// CreateVirtualRegisters - Add result-register operands to MI for each of
/// Node's declared defs, reusing a CopyToReg destination vreg where the value
/// is only used that way, and record each vreg in VRBaseMap.
///
/// NOTE(review): the declaration of VRBase, loop braces, and the 'goto'/flow
/// that skips vreg creation when a CopyToReg destination is reused appear
/// elided from this chunk; annotated as-is, not repaired.
void ScheduleDAG::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
                                         const TargetInstrDesc &II,
                                         DenseMap<SDOperand, unsigned> &VRBaseMap) {
  assert(Node->getTargetOpcode() != TargetInstrInfo::IMPLICIT_DEF &&
         "IMPLICIT_DEF should have been handled as a special case elsewhere!");

  for (unsigned i = 0; i < II.getNumDefs(); ++i) {
    // If the specific node value is only used by a CopyToReg and the dest reg
    // is a vreg, use the CopyToReg'd destination register instead of creating
    for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
      SDNode *Use = UI->getUser();
      if (Use->getOpcode() == ISD::CopyToReg &&
          Use->getOperand(2).Val == Node &&
          Use->getOperand(2).ResNo == i) {
        unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
        if (TargetRegisterInfo::isVirtualRegister(Reg)) {
          MI->addOperand(MachineOperand::CreateReg(Reg, true));

    // Create the result registers for this node and add the result regs to
    // the machine instruction.
    const TargetRegisterClass *RC = getInstrOperandRegClass(TRI, TII, II, i);
    assert(RC && "Isn't a register operand!");
    VRBase = MRI.createVirtualRegister(RC);
    MI->addOperand(MachineOperand::CreateReg(VRBase, true));

    bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,i), VRBase));
    isNew = isNew; // Silence compiler warning.
    assert(isNew && "Node emitted out of order - early");
/// getVR - Return the virtual register corresponding to the specified result
/// of the specified node.
///
/// NOTE(review): the guard around creating a fresh vreg (when no CopyToReg
/// destination exists) and both return statements appear elided from this
/// chunk; annotated as-is, not repaired.
unsigned ScheduleDAG::getVR(SDOperand Op,
                            DenseMap<SDOperand, unsigned> &VRBaseMap) {
  if (Op.isTargetOpcode() &&
      Op.getTargetOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    // Add an IMPLICIT_DEF instruction before every use.
    unsigned VReg = getDstOfOnlyCopyToRegUse(Op.Val, Op.ResNo);
    // IMPLICIT_DEF can produce any type of result so its TargetInstrDesc
    // does not include operand register class info.
    const TargetRegisterClass *RC = TLI->getRegClassFor(Op.getValueType());
    VReg = MRI.createVirtualRegister(RC);
    BuildMI(BB, TII->get(TargetInstrInfo::IMPLICIT_DEF), VReg);

  // Normal case: the vreg was recorded when the defining node was emitted.
  DenseMap<SDOperand, unsigned>::iterator I = VRBaseMap.find(Op);
  assert(I != VRBaseMap.end() && "Node emitted out of order - late");
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding. IIOpNum and II are used for
/// register-class checking; II may be null for operands with no class info.
///
/// NOTE(review): the 'unsigned IIOpNum' parameter line, an abort() after the
/// fatal-error diagnostics, and several braces appear elided from this chunk;
/// annotated as-is, not repaired.
void ScheduleDAG::AddOperand(MachineInstr *MI, SDOperand Op,
                             const TargetInstrDesc *II,
                             DenseMap<SDOperand, unsigned> &VRBaseMap) {
  if (Op.isTargetOpcode()) {
    // Note that this case is redundant with the final else block, but we
    // include it because it is the most common and it makes the logic
    assert(Op.getValueType() != MVT::Other &&
           Op.getValueType() != MVT::Flag &&
           "Chain and flag operands should occur at end of operand list!");
    // Get/emit the operand.
    unsigned VReg = getVR(Op, VRBaseMap);
    const TargetInstrDesc &TID = MI->getDesc();
    bool isOptDef = IIOpNum < TID.getNumOperands() &&
                    TID.OpInfo[IIOpNum].isOptionalDef();
    MI->addOperand(MachineOperand::CreateReg(VReg, isOptDef));

    // Verify that it is right.
    assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
    // There may be no register class for this operand if it is a variadic
    // argument (RC will be NULL in this case). In this case, we just assume
    // the regclass is ok.
    const TargetRegisterClass *RC =
      getInstrOperandRegClass(TRI, TII, *II, IIOpNum);
    assert((RC || II->isVariadic()) && "Expected reg class info!");
    const TargetRegisterClass *VRC = MRI.getRegClass(VReg);
    if (RC && VRC != RC) {
      // Diagnostic dump for a register-class mismatch; this is fatal.
      cerr << "Register class of operand and regclass of use don't agree!\n";
      cerr << "Operand = " << IIOpNum << "\n";
      cerr << "Op->Val = "; Op.Val->dump(&DAG); cerr << "\n";
      cerr << "MI = "; MI->print(cerr);
      cerr << "VReg = " << VReg << "\n";
      cerr << "VReg RegClass size = " << VRC->getSize()
           << ", align = " << VRC->getAlignment() << "\n";
      cerr << "Expected RegClass size = " << RC->getSize()
           << ", align = " << RC->getAlignment() << "\n";
      cerr << "Fatal error, aborting.\n";
  } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateImm(C->getValue()));
  } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
    ConstantFP *CFP = ConstantFP::get(F->getValueAPF());
    MI->addOperand(MachineOperand::CreateFPImm(CFP));
  } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateReg(R->getReg(), false));
  } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateGA(TGA->getGlobal(),TGA->getOffset()));
  } else if (BasicBlockSDNode *BB = dyn_cast<BasicBlockSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateMBB(BB->getBasicBlock()));
  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateFI(FI->getIndex()));
  } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateJTI(JT->getIndex()));
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
    int Offset = CP->getOffset();
    unsigned Align = CP->getAlignment();
    const Type *Type = CP->getType();
    // MachineConstantPool wants an explicit alignment.
    Align = TM.getTargetData()->getPreferredTypeAlignmentShift(Type);
    // Alignment of vector types. FIXME!
    Align = TM.getTargetData()->getABITypeSize(Type);
    Align = Log2_64(Align);

    // NOTE(review): declaration of Idx appears elided here.
    if (CP->isMachineConstantPoolEntry())
      Idx = ConstPool->getConstantPoolIndex(CP->getMachineCPVal(), Align);
      Idx = ConstPool->getConstantPoolIndex(CP->getConstVal(), Align);
    MI->addOperand(MachineOperand::CreateCPI(Idx, Offset));
  } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateES(ES->getSymbol()));
    // Final else: an ordinary value operand — look up (or emit) its vreg.
    assert(Op.getValueType() != MVT::Other &&
           Op.getValueType() != MVT::Flag &&
           "Chain and flag operands should occur at end of operand list!");
    unsigned VReg = getVR(Op, VRBaseMap);
    MI->addOperand(MachineOperand::CreateReg(VReg, false));

    // Verify that it is right. Note that the reg class of the physreg and the
    // vreg don't necessarily need to match, but the target copy insertion has
    // to be able to handle it. This handles things like copies from ST(0) to
    // an FP vreg on x86.
    assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
    if (II && !II->isVariadic()) {
      assert(getInstrOperandRegClass(TRI, TII, *II, IIOpNum) &&
             "Don't have operand info for this instruction!");
661 void ScheduleDAG::AddMemOperand(MachineInstr *MI, const MachineMemOperand &MO) {
662 MI->addMemOperand(MO);
665 // Returns the Register Class of a subregister
666 static const TargetRegisterClass *getSubRegisterRegClass(
667 const TargetRegisterClass *TRC,
669 // Pick the register class of the subregister
670 TargetRegisterInfo::regclass_iterator I =
671 TRC->subregclasses_begin() + SubIdx-1;
672 assert(I < TRC->subregclasses_end() &&
673 "Invalid subregister index for register class");
677 static const TargetRegisterClass *getSuperregRegisterClass(
678 const TargetRegisterClass *TRC,
681 // Pick the register class of the superegister for this type
682 for (TargetRegisterInfo::regclass_iterator I = TRC->superregclasses_begin(),
683 E = TRC->superregclasses_end(); I != E; ++I)
684 if ((*I)->hasType(VT) && getSubRegisterRegClass(*I, SubIdx) == TRC)
686 assert(false && "Couldn't find the register class");
/// EmitSubregNode - Generate machine code for subreg nodes.
/// Handles EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG target opcodes,
/// recording the destination vreg in VRBaseMap.
///
/// NOTE(review): the declaration of VRBase, several braces/else keywords, and
/// the BB insertion of the built MachineInstrs appear elided from this chunk;
/// annotated as-is, not repaired.
void ScheduleDAG::EmitSubregNode(SDNode *Node,
                                 DenseMap<SDOperand, unsigned> &VRBaseMap) {
  unsigned Opc = Node->getTargetOpcode();

  // If the node is only used by a CopyToReg and the dest reg is a vreg, use
  // the CopyToReg'd destination register instead of creating a new vreg.
  for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
    SDNode *Use = UI->getUser();
    if (Use->getOpcode() == ISD::CopyToReg &&
        Use->getOperand(2).Val == Node) {
      unsigned DestReg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(DestReg)) {

  if (Opc == TargetInstrInfo::EXTRACT_SUBREG) {
    unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getValue();

    // Create the extract_subreg machine instruction.
    MachineInstr *MI = BuildMI(TII->get(TargetInstrInfo::EXTRACT_SUBREG));

    // Figure out the register class to create for the destreg.
    unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
    const TargetRegisterClass *TRC = MRI.getRegClass(VReg);
    const TargetRegisterClass *SRC = getSubRegisterRegClass(TRC, SubIdx);

    // Grab the destination register
    const TargetRegisterClass *DRC = MRI.getRegClass(VRBase);
    assert(SRC && DRC && SRC == DRC &&
           "Source subregister and destination must have the same class");
    assert(SRC && "Couldn't find source register class");
    VRBase = MRI.createVirtualRegister(SRC);

    // Add def, source, and subreg index
    MI->addOperand(MachineOperand::CreateReg(VRBase, true));
    AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap);
    MI->addOperand(MachineOperand::CreateImm(SubIdx));
  } else if (Opc == TargetInstrInfo::INSERT_SUBREG ||
             Opc == TargetInstrInfo::SUBREG_TO_REG) {
    SDOperand N0 = Node->getOperand(0);
    SDOperand N1 = Node->getOperand(1);
    SDOperand N2 = Node->getOperand(2);
    unsigned SubReg = getVR(N1, VRBaseMap);
    unsigned SubIdx = cast<ConstantSDNode>(N2)->getValue();

    // Figure out the register class to create for the destreg.
    const TargetRegisterClass *TRC = 0;
    TRC = MRI.getRegClass(VRBase);
    TRC = getSuperregRegisterClass(MRI.getRegClass(SubReg), SubIdx,
                                   Node->getValueType(0));
    assert(TRC && "Couldn't determine register class for insert_subreg");
    VRBase = MRI.createVirtualRegister(TRC); // Create the reg

    // Create the insert_subreg or subreg_to_reg machine instruction.
    MachineInstr *MI = BuildMI(TII->get(Opc));
    MI->addOperand(MachineOperand::CreateReg(VRBase, true));

    // If creating a subreg_to_reg, then the first input operand
    // is an implicit value immediate, otherwise it's a register
    if (Opc == TargetInstrInfo::SUBREG_TO_REG) {
      const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
      MI->addOperand(MachineOperand::CreateImm(SD->getValue()));
      AddOperand(MI, N0, 0, 0, VRBaseMap);
    // Add the subregster being inserted
    AddOperand(MI, N1, 0, 0, VRBaseMap);
    MI->addOperand(MachineOperand::CreateImm(SubIdx));
    assert(0 && "Node is not insert_subreg, extract_subreg, or subreg_to_reg");

  bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,0), VRBase));
  isNew = isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
/// EmitNode - Generate machine code for an node and needed dependencies.
/// Target opcodes become MachineInstrs; a handful of target-independent
/// opcodes (CopyToReg, CopyFromReg, INLINEASM, etc.) get special handling.
///
/// NOTE(review): many lines are elided from this chunk (early returns after
/// EmitSubregNode/IMPLICIT_DEF, BB insertion of the built MachineInstr, the
/// 'if (NewMI != NULL)' around the commute result, several case braces and
/// 'break's, declarations of SrcReg/AsmStr, and the inner switch header).
/// Annotated as-is, not repaired.
void ScheduleDAG::EmitNode(SDNode *Node, unsigned InstanceNo,
                           DenseMap<SDOperand, unsigned> &VRBaseMap) {
  // If machine instruction
  if (Node->isTargetOpcode()) {
    unsigned Opc = Node->getTargetOpcode();

    // Handle subreg insert/extract specially
    if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
        Opc == TargetInstrInfo::INSERT_SUBREG ||
        Opc == TargetInstrInfo::SUBREG_TO_REG) {
      EmitSubregNode(Node, VRBaseMap);

    if (Opc == TargetInstrInfo::IMPLICIT_DEF)
      // We want a unique VR for each IMPLICIT_DEF use.

    const TargetInstrDesc &II = TII->get(Opc);
    unsigned NumResults = CountResults(Node);
    unsigned NodeOperands = CountOperands(Node);
    unsigned MemOperandsEnd = ComputeMemOperandsEnd(Node);
    bool HasPhysRegOuts = (NumResults > II.getNumDefs()) &&
                          II.getImplicitDefs() != 0;

    unsigned NumMIOperands = NodeOperands + NumResults;
    assert((II.getNumOperands() == NumMIOperands ||
            HasPhysRegOuts || II.isVariadic()) &&
           "#operands for dag node doesn't match .td file!");

    // Create the new machine instruction.
    MachineInstr *MI = BuildMI(II);

    // Add result register values for things that are defined by this
    CreateVirtualRegisters(Node, MI, II, VRBaseMap);

    // Emit all of the actual operands of this instruction, adding them to the
    // instruction as appropriate.
    for (unsigned i = 0; i != NodeOperands; ++i)
      AddOperand(MI, Node->getOperand(i), i+II.getNumDefs(), &II, VRBaseMap);

    // Emit all of the memory operands of this instruction
    for (unsigned i = NodeOperands; i != MemOperandsEnd; ++i)
      AddMemOperand(MI, cast<MemOperandSDNode>(Node->getOperand(i))->MO);

    // Commute node if it has been determined to be profitable.
    if (CommuteSet.count(Node)) {
      MachineInstr *NewMI = TII->commuteInstruction(MI);
        DOUT << "Sched: COMMUTING FAILED!\n";
        DOUT << "Sched: COMMUTED TO: " << *NewMI;

    if (II.usesCustomDAGSchedInsertionHook())
      // Insert this instruction into the basic block using a target
      // specific inserter which may returns a new basic block.
      BB = TLI->EmitInstrWithCustomInserter(MI, BB);

    // Additional results must be an physical register def.
    if (HasPhysRegOuts) {
      for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
        unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
        if (Node->hasAnyUseOfValue(i))
          EmitCopyFromReg(Node, i, InstanceNo, Reg, VRBaseMap);

  // Target-independent nodes.
  switch (Node->getOpcode()) {
    assert(0 && "This target-independent node should have been selected!");
  case ISD::EntryToken:
    assert(0 && "EntryToken should have been excluded from the schedule!");
  case ISD::TokenFactor: // fall thru
  case ISD::CopyToReg: {
    SDOperand SrcVal = Node->getOperand(2);
    if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
      SrcReg = R->getReg();
      SrcReg = getVR(SrcVal, VRBaseMap);

    unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    if (SrcReg == DestReg) // Coalesced away the copy? Ignore.

    const TargetRegisterClass *SrcTRC = 0, *DstTRC = 0;
    // Get the register classes of the src/dst.
    if (TargetRegisterInfo::isVirtualRegister(SrcReg))
      SrcTRC = MRI.getRegClass(SrcReg);
      SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());
    if (TargetRegisterInfo::isVirtualRegister(DestReg))
      DstTRC = MRI.getRegClass(DestReg);
      DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
                                            Node->getOperand(1).getValueType());
    TII->copyRegToReg(*BB, BB->end(), DestReg, SrcReg, DstTRC, SrcTRC);
  case ISD::CopyFromReg: {
    unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    EmitCopyFromReg(Node, 0, InstanceNo, SrcReg, VRBaseMap);
  case ISD::INLINEASM: {
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
      --NumOps; // Ignore the flag operand.

    // Create the inline asm machine instruction.
    MachineInstr *MI = BuildMI(TII->get(TargetInstrInfo::INLINEASM));

    // Add the asm string as an external symbol operand.
      cast<ExternalSymbolSDNode>(Node->getOperand(1))->getSymbol();
    MI->addOperand(MachineOperand::CreateES(AsmStr));

    // Add all of the operand registers to the instruction.
    for (unsigned i = 2; i != NumOps;) {
      unsigned Flags = cast<ConstantSDNode>(Node->getOperand(i))->getValue();
      unsigned NumVals = Flags >> 3;

      MI->addOperand(MachineOperand::CreateImm(Flags));
      ++i; // Skip the ID value.
      default: assert(0 && "Bad flags!");
      case 1: // Use of register.
        for (; NumVals; --NumVals, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          MI->addOperand(MachineOperand::CreateReg(Reg, false));
      case 2: // Def of register.
        for (; NumVals; --NumVals, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          MI->addOperand(MachineOperand::CreateReg(Reg, true));
      case 3: { // Immediate.
        for (; NumVals; --NumVals, ++i) {
          if (ConstantSDNode *CS =
                dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
            MI->addOperand(MachineOperand::CreateImm(CS->getValue()));
          } else if (GlobalAddressSDNode *GA =
                      dyn_cast<GlobalAddressSDNode>(Node->getOperand(i))) {
            MI->addOperand(MachineOperand::CreateGA(GA->getGlobal(),
            BasicBlockSDNode *BB =cast<BasicBlockSDNode>(Node->getOperand(i));
            MI->addOperand(MachineOperand::CreateMBB(BB->getBasicBlock()));
      case 4: // Addressing mode.
        // The addressing mode has been selected, just add all of the
        // operands to the machine instruction.
        for (; NumVals; --NumVals, ++i)
          AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap);
978 void ScheduleDAG::EmitNoop() {
// Let the target insert its own noop encoding at the end of the current BB.
979 TII->insertNoop(*BB, BB->end());
// EmitCrossRCCopy - Emit cross-register-class copies for the copy SUnit SU.
// For each non-chain data predecessor, either copy the predecessor's emitted
// virtual register (looked up in VRBaseMap) into a physical register, or copy
// the physical register I->Reg into a freshly created virtual register, which
// is then recorded in VRBaseMap for SU's later users.
982 void ScheduleDAG::EmitCrossRCCopy(SUnit *SU,
983 DenseMap<SUnit*, unsigned> &VRBaseMap) {
984 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
986 if (I->isCtrl) continue; // ignore chain preds
988 // Copy to physical register.
989 DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->Dep);
990 assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
991 // Find the destination physical register.
// Scan SU's successors for the physical register they expect. The 'Reg' used
// below is presumably assigned inside this loop on a line not visible here --
// confirm against the full file.
993 for (SUnit::const_succ_iterator II = SU->Succs.begin(),
994 EE = SU->Succs.end(); II != EE; ++II) {
1000 assert(I->Reg && "Unknown physical register!");
// vreg -> physreg copy, appended at the end of BB, using the register
// classes recorded on the copy SUnit.
1001 TII->copyRegToReg(*BB, BB->end(), Reg, VRI->second,
1002 SU->CopyDstRC, SU->CopySrcRC);
1004 // Copy from physical register.
1005 assert(I->Reg && "Unknown physical register!");
1006 unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
// Record the new vreg so later users of SU can find it; a failed insert
// would mean SU was already emitted.
1007 bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase));
1008 isNew = isNew; // Silence compiler warning.
1009 assert(isNew && "Node emitted out of order - early");
// physreg -> vreg copy, appended at the end of BB.
1010 TII->copyRegToReg(*BB, BB->end(), VRBase, I->Reg,
1011 SU->CopyDstRC, SU->CopySrcRC);
1017 /// EmitLiveInCopy - Emit a copy for a live in physical register. If the
1018 /// physical register has only a single copy use, then coalesce the copy
/// away if possible instead of leaving a redundant copy instruction.
1020 void ScheduleDAG::EmitLiveInCopy(MachineBasicBlock *MBB,
1021 MachineBasicBlock::iterator &InsertPos,
1022 unsigned VirtReg, unsigned PhysReg,
1023 const TargetRegisterClass *RC,
1024 DenseMap<MachineInstr*, unsigned> &CopyRegMap){
// Count the uses of VirtReg and remember a use instruction; used below to
// decide whether the copy can be coalesced away.
1025 unsigned NumUses = 0;
1026 MachineInstr *UseMI = NULL;
1027 for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(VirtReg),
1028 UE = MRI.use_end(); UI != UE; ++UI) {
1034 // If the number of uses is not one, or the use is not a move instruction,
1035 // don't coalesce. Also, only coalesce away a virtual register to virtual
// register copy.
1037 bool Coalesced = false;
1038 unsigned SrcReg, DstReg;
// NOTE(review): the head of this condition (presumably a NumUses == 1 check)
// is on a line not visible in this excerpt -- confirm against the full file.
1040 TII->isMoveInstr(*UseMI, SrcReg, DstReg) &&
1041 TargetRegisterInfo::isVirtualRegister(DstReg)) {
1046 // Now find an ideal location to insert the copy.
1047 MachineBasicBlock::iterator Pos = InsertPos;
1048 while (Pos != MBB->begin()) {
1049 MachineInstr *PrevMI = prior(Pos);
1050 DenseMap<MachineInstr*, unsigned>::iterator RI = CopyRegMap.find(PrevMI);
1051 // copyRegToReg might emit multiple instructions to do a copy.
1052 unsigned CopyDstReg = (RI == CopyRegMap.end()) ? 0 : RI->second;
// Walk upward past previously-emitted live-in copies whose destination
// overlaps PhysReg, so those copies remain coalescable.
1053 if (CopyDstReg && !TRI->regsOverlap(CopyDstReg, PhysReg))
1054 // This is what the BB looks like right now:
1059 // We want to insert "r1025 = mov r1". Inserting this copy below the
1060 // move to r1024 makes it impossible for that move to be coalesced.
1067 break; // Woot! Found a good location.
// Emit the physreg -> vreg copy and map the emitted instruction to the
// vreg it defines, for the overlap check above on later calls.
1071 TII->copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
1072 CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
// Keep the caller's insertion point past the use instruction if we landed
// exactly on it.
1074 if (&*InsertPos == UseMI) ++InsertPos;
1079 /// EmitLiveInCopies - If this is the first basic block in the function,
1080 /// and if it has live ins that need to be copied into vregs, emit the
1081 /// copies into the top of the block.
1082 void ScheduleDAG::EmitLiveInCopies(MachineBasicBlock *MBB) {
// Maps each emitted copy instruction to the vreg it defines, so that
// EmitLiveInCopy can order the copies to keep them coalescable.
1083 DenseMap<MachineInstr*, unsigned> CopyRegMap;
1084 MachineBasicBlock::iterator InsertPos = MBB->begin();
// Each livein entry pairs a physical register (LI->first) with the virtual
// register to copy it into (LI->second) -- matching the VirtReg/PhysReg
// argument order of EmitLiveInCopy below.
1085 for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
1086 E = MRI.livein_end(); LI != E; ++LI)
1088 const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
1089 EmitLiveInCopy(MBB, InsertPos, LI->second, LI->first, RC, CopyRegMap);
1093 /// EmitSchedule - Emit the machine code in scheduled order.
1094 void ScheduleDAG::EmitSchedule() {
1095 bool isEntryBB = &MF->front() == BB;
// When the schedule-livein-copies option is off, live-in copies for the
// entry block are emitted eagerly here rather than placed by the scheduler.
1097 if (isEntryBB && !SchedLiveInCopies) {
1098 // If this is the first basic block in the function, and if it has live ins
1099 // that need to be copied into vregs, emit the copies into the top of the
1100 // block before emitting the code for the block.
1101 for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
1102 E = MRI.livein_end(); LI != E; ++LI)
1104 const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
1105 TII->copyRegToReg(*MF->begin(), MF->begin()->end(), LI->second,
1110 // Finally, emit the code for all of the scheduled instructions.
1111 DenseMap<SDOperand, unsigned> VRBaseMap;
1112 DenseMap<SUnit*, unsigned> CopyVRBaseMap;
1113 for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
1114 SUnit *SU = Sequence[i];
1116 // Null SUnit* is a noop.
// Emit any nodes flagged (glued) to SU ahead of SU's own node.
1120 for (unsigned j = 0, ee = SU->FlaggedNodes.size(); j != ee; ++j)
1121 EmitNode(SU->FlaggedNodes[j], SU->InstanceNo, VRBaseMap);
// Cross-register-class copy SUnits are emitted via EmitCrossRCCopy;
// presumably guarded by a null-Node check on a line not visible here.
1123 EmitCrossRCCopy(SU, CopyVRBaseMap);
1125 EmitNode(SU->Node, SU->InstanceNo, VRBaseMap);
// If live-in copies were left to the scheduler, emit them now at the top
// of the entry block.
1128 if (isEntryBB && SchedLiveInCopies)
1129 EmitLiveInCopies(MF->begin());
1132 /// dump - dump the schedule.
1133 void ScheduleDAG::dumpSchedule() const {
1134 for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
// Entries in Sequence may be null; a null SUnit* represents a noop slot.
1135 if (SUnit *SU = Sequence[i])
1138 cerr << "**** NOOP ****\n";
1143 /// Run - perform scheduling.
// NOTE(review): returns a MachineBasicBlock* -- presumably the block the
// schedule was emitted into; the body is not visible in this excerpt, so
// confirm the return semantics against the full file.
1145 MachineBasicBlock *ScheduleDAG::Run() {
1150 /// SUnit - Scheduling unit. It's a wrapper around either a single SDNode or
1151 /// a group of nodes flagged together.
1152 void SUnit::dump(const SelectionDAG *G) const {
1153 cerr << "SU(" << NodeNum << "): ";
1157 cerr << "CROSS RC COPY ";
// Print each flagged (glued) node in the group, if any.
1159 if (FlaggedNodes.size() != 0) {
1160 for (unsigned i = 0, e = FlaggedNodes.size(); i != e; i++) {
1162 FlaggedNodes[i]->dump(G);
1168 void SUnit::dumpAll(const SelectionDAG *G) const {
1171 cerr << " # preds left : " << NumPredsLeft << "\n";
1172 cerr << " # succs left : " << NumSuccsLeft << "\n";
1173 cerr << " Latency : " << Latency << "\n";
1174 cerr << " Depth : " << Depth << "\n";
1175 cerr << " Height : " << Height << "\n";
1177 if (Preds.size() != 0) {
1178 cerr << " Predecessors:\n";
1179 for (SUnit::const_succ_iterator I = Preds.begin(), E = Preds.end();
1185 cerr << I->Dep << " - SU(" << I->Dep->NodeNum << ")";
1191 if (Succs.size() != 0) {
1192 cerr << " Successors:\n";
1193 for (SUnit::const_succ_iterator I = Succs.begin(), E = Succs.end();
1199 cerr << I->Dep << " - SU(" << I->Dep->NodeNum << ")";