1 //===---- ScheduleDAG.cpp - Implement the ScheduleDAG class ---------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements a simple two pass scheduler. The first pass attempts to push
11 // backward any lengthy instructions and critical paths. The second pass packs
12 // instructions into semi-optimal time slots.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "pre-RA-sched"
17 #include "llvm/Constants.h"
18 #include "llvm/Type.h"
19 #include "llvm/CodeGen/ScheduleDAG.h"
20 #include "llvm/CodeGen/MachineConstantPool.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/Target/TargetData.h"
25 #include "llvm/Target/TargetMachine.h"
26 #include "llvm/Target/TargetInstrInfo.h"
27 #include "llvm/Target/TargetLowering.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/MathExtras.h"
// Statistic: counts instructions the scheduler commuted (bumped in EmitNode).
34 STATISTIC(NumCommutes, "Number of instructions commuted");
// Command-line option controlling whether copies of live-in registers are
// scheduled. NOTE(review): the cl::opt<bool> declaration line and the
// initializer are elided in this listing.
38 SchedLiveInCopies("schedule-livein-copies",
39 cl::desc("Schedule copies of livein registers"),
/// ScheduleDAG constructor - Caches frequently used objects (instruction
/// info, machine function, register info, lowering info, and the function's
/// constant pool) from the TargetMachine and SelectionDAG so the scheduling
/// and emission code can reach them without repeated lookups.
43 ScheduleDAG::ScheduleDAG(SelectionDAG &dag, MachineBasicBlock *bb,
44 const TargetMachine &tm)
45 : DAG(dag), BB(bb), TM(tm), MRI(BB->getParent()->getRegInfo()) {
46 TII = TM.getInstrInfo();
47 MF = &DAG.getMachineFunction();
48 TRI = TM.getRegisterInfo();
49 TLI = &DAG.getTargetLoweringInfo();
50 ConstPool = BB->getParent()->getConstantPool();
53 /// CheckForPhysRegDependency - Check if the dependency between def and use of
54 /// a specified operand is a physical register dependency. If so, returns the
55 /// register and the cost of copying the register.
56 static void CheckForPhysRegDependency(SDNode *Def, SDNode *Use, unsigned Op,
57 const TargetRegisterInfo *TRI,
58 const TargetInstrInfo *TII,
59 unsigned &PhysReg, int &Cost) {
// Only a CopyToReg use of operand #2 can produce a physreg dependency.
// NOTE(review): the early-return body is elided in this listing.
60 if (Op != 2 || Use->getOpcode() != ISD::CopyToReg)
63 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
// A copy into a virtual register is not a physical register dependency.
64 if (TargetRegisterInfo::isVirtualRegister(Reg))
67 unsigned ResNo = Use->getOperand(2).ResNo;
68 if (Def->isTargetOpcode()) {
69 const TargetInstrDesc &II = TII->get(Def->getTargetOpcode());
// The dependency is physical only when the copied result is one of the
// defining instruction's implicit physical-register defs matching Reg.
70 if (ResNo >= II.getNumDefs() &&
71 II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
73 const TargetRegisterClass *RC =
74 TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
// Report the copy cost of that physreg's class to the caller.
75 Cost = RC->getCopyCost();
/// Clone - Create a new SUnit for the same node as Old, copying its
/// flagged-node list, latency, and the scheduling-property flags that were
/// computed for the original unit.
80 SUnit *ScheduleDAG::Clone(SUnit *Old) {
81 SUnit *SU = NewSUnit(Old->Node);
82 SU->OrigNode = Old->OrigNode; // Both units refer back to the same origin.
83 SU->FlaggedNodes = Old->FlaggedNodes;
84 SU->Latency = Old->Latency;
85 SU->isTwoAddress = Old->isTwoAddress;
86 SU->isCommutable = Old->isCommutable;
87 SU->hasPhysRegDefs = Old->hasPhysRegDefs;
92 /// BuildSchedUnits - Build SUnits from the selection dag that we are input.
93 /// This SUnit graph is similar to the SelectionDAG, but represents flagged
94 /// together nodes with a single SUnit.
///
/// Pass 1 groups each node with its flag-linked neighbors into one SUnit and
/// computes its latency; pass 2 wires up predecessor edges (data, chain, and
/// physical-register dependencies) between the SUnits.
/// NOTE(review): this listing has lines elided (non-contiguous original line
/// numbers); some loop headers/bodies below are incomplete as shown.
95 void ScheduleDAG::BuildSchedUnits() {
96 // Reserve entries in the vector for each of the SUnits we are creating. This
97 // ensure that reallocation of the vector won't happen, so SUnit*'s won't get
99 SUnits.reserve(DAG.allnodes_size());
101 for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin(),
102 E = DAG.allnodes_end(); NI != E; ++NI) {
103 if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.
106 // If this node has already been processed, stop now.
107 if (SUnitMap.count(NI)) continue;
109 SUnit *NodeSUnit = NewSUnit(NI);
111 // See if anything is flagged to this node, if so, add them to flagged
112 // nodes. Nodes can have at most one flag input and one flag output. Flags
113 // are required to be the last operand and result of a node.
115 // Scan up, adding flagged preds to FlaggedNodes.
117 if (N->getNumOperands() &&
118 N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
// Walk the flag-operand chain upward, folding each producer into this SUnit.
120 N = N->getOperand(N->getNumOperands()-1).Val;
121 NodeSUnit->FlaggedNodes.push_back(N);
122 bool isNew = SUnitMap.insert(std::make_pair(N, NodeSUnit));
124 assert(isNew && "Node already inserted!");
125 } while (N->getNumOperands() &&
126 N->getOperand(N->getNumOperands()-1).getValueType()== MVT::Flag);
// Preds were collected bottom-up; restore top-down order.
127 std::reverse(NodeSUnit->FlaggedNodes.begin(),
128 NodeSUnit->FlaggedNodes.end());
131 // Scan down, adding this node and any flagged succs to FlaggedNodes if they
132 // have a user of the flag operand.
134 while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
135 SDOperand FlagVal(N, N->getNumValues()-1);
137 // There are either zero or one users of the Flag result.
138 bool HasFlagUse = false;
139 for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
141 if (FlagVal.isOperandOf(UI->getUser())) {
143 NodeSUnit->FlaggedNodes.push_back(N);
144 bool isNew = SUnitMap.insert(std::make_pair(N, NodeSUnit));
146 assert(isNew && "Node already inserted!");
150 if (!HasFlagUse) break;
153 // Now all flagged nodes are in FlaggedNodes and N is the bottom-most node.
156 bool isNew = SUnitMap.insert(std::make_pair(N, NodeSUnit));
158 assert(isNew && "Node already inserted!");
160 ComputeLatency(NodeSUnit);
163 // Pass 2: add the preds, succs, etc.
164 for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
165 SUnit *SU = &SUnits[su];
166 SDNode *MainNode = SU->Node;
// Record whether this unit is two-address and/or commutable, from the
// instruction description of the main (bottom-most) node.
168 if (MainNode->isTargetOpcode()) {
169 unsigned Opc = MainNode->getTargetOpcode();
170 const TargetInstrDesc &TID = TII->get(Opc);
171 for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
172 if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
173 SU->isTwoAddress = true;
177 if (TID.isCommutable())
178 SU->isCommutable = true;
181 // Find all predecessors and successors of the group.
182 // Temporarily add N to make code simpler.
183 SU->FlaggedNodes.push_back(MainNode);
185 for (unsigned n = 0, e = SU->FlaggedNodes.size(); n != e; ++n) {
186 SDNode *N = SU->FlaggedNodes[n];
// A node defines physregs if it has implicit defs and more results than
// declared register defs.
187 if (N->isTargetOpcode() &&
188 TII->get(N->getTargetOpcode()).getImplicitDefs() &&
189 CountResults(N) > TII->get(N->getTargetOpcode()).getNumDefs())
190 SU->hasPhysRegDefs = true;
192 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
193 SDNode *OpN = N->getOperand(i).Val;
194 if (isPassiveNode(OpN)) continue; // Not scheduled.
195 SUnit *OpSU = SUnitMap[OpN];
196 assert(OpSU && "Node has no SUnit!");
197 if (OpSU == SU) continue; // In the same group.
199 MVT OpVT = N->getOperand(i).getValueType();
200 assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
201 bool isChain = OpVT == MVT::Other;
203 unsigned PhysReg = 0;
205 // Determine if this is a physical register dependency.
206 CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
207 SU->addPred(OpSU, isChain, false, PhysReg, Cost);
211 // Remove MainNode from FlaggedNodes again.
212 SU->FlaggedNodes.pop_back();
/// ComputeLatency - Set SU->Latency from the target's instruction itinerary:
/// the latencies of the main node and of every node flagged into this SUnit
/// are accumulated. If the target provides no itinerary data, latency
/// information is unavailable (fallback body elided in this listing).
218 void ScheduleDAG::ComputeLatency(SUnit *SU) {
219 const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
221 // Compute the latency for the node. We use the sum of the latencies for
222 // all nodes flagged together into this SUnit.
223 if (InstrItins.isEmpty()) {
224 // No latency information.
228 if (SU->Node->isTargetOpcode()) {
229 unsigned SchedClass =
230 TII->get(SU->Node->getTargetOpcode()).getSchedClass();
// Accumulate cycles from the itinerary stages of this sched class.
// NOTE(review): the stage-iteration loop lines are elided in this listing.
231 const InstrStage *S = InstrItins.begin(SchedClass);
232 const InstrStage *E = InstrItins.end(SchedClass);
234 SU->Latency += S->Cycles;
236 for (unsigned i = 0, e = SU->FlaggedNodes.size(); i != e; ++i) {
237 SDNode *FNode = SU->FlaggedNodes[i];
238 if (FNode->isTargetOpcode()) {
239 unsigned SchedClass =TII->get(FNode->getTargetOpcode()).getSchedClass();
240 const InstrStage *S = InstrItins.begin(SchedClass);
241 const InstrStage *E = InstrItins.end(SchedClass);
243 SU->Latency += S->Cycles;
249 /// CalculateDepths - compute depths using algorithms for the longest
/// paths in the DAG: a Kahn-style topological walk over predecessor edges,
/// where each SUnit's Depth becomes 1 + the maximum Depth of its preds.
251 void ScheduleDAG::CalculateDepths() {
252 unsigned DAGSize = SUnits.size();
// InDegree[i] counts unprocessed predecessors of SUnit #i; a unit enters
// the worklist once all of its predecessors have been processed.
253 std::vector<unsigned> InDegree(DAGSize);
254 std::vector<SUnit*> WorkList;
255 WorkList.reserve(DAGSize);
257 // Initialize the data structures
258 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
259 SUnit *SU = &SUnits[i];
260 int NodeNum = SU->NodeNum;
261 unsigned Degree = SU->Preds.size();
262 InDegree[NodeNum] = Degree;
265 // Is it a node without dependencies?
267 assert(SU->Preds.empty() && "SUnit should have no predecessors");
268 // Collect leaf nodes
269 WorkList.push_back(SU);
273 // Process nodes in the topological order
274 while (!WorkList.empty()) {
275 SUnit *SU = WorkList.back();
277 unsigned &SUDepth = SU->Depth;
279 // Use dynamic programming:
280 // When current node is being processed, all of its dependencies
281 // are already processed.
282 // So, just iterate over all predecessors and take the longest path
283 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
285 unsigned PredDepth = I->Dep->Depth;
286 if (PredDepth+1 > SUDepth) {
287 SUDepth = PredDepth + 1;
291 // Update InDegrees of all nodes depending on current SUnit
292 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
295 if (!--InDegree[SU->NodeNum])
296 // If all dependencies of the node are processed already,
297 // then the longest path for the node can be computed now
298 WorkList.push_back(SU);
303 /// CalculateHeights - compute heights using algorithms for the longest
/// paths in the DAG: the mirror image of CalculateDepths, walking successor
/// edges so each SUnit's Height becomes 1 + the maximum Height of its succs.
305 void ScheduleDAG::CalculateHeights() {
306 unsigned DAGSize = SUnits.size();
// InDegree[i] here counts unprocessed SUCCESSORS of SUnit #i.
307 std::vector<unsigned> InDegree(DAGSize);
308 std::vector<SUnit*> WorkList;
309 WorkList.reserve(DAGSize);
311 // Initialize the data structures
312 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
313 SUnit *SU = &SUnits[i];
314 int NodeNum = SU->NodeNum;
315 unsigned Degree = SU->Succs.size();
316 InDegree[NodeNum] = Degree;
319 // Is it a node without dependencies?
321 assert(SU->Succs.empty() && "Something wrong");
322 assert(WorkList.empty() && "Should be empty");
323 // Collect leaf nodes
324 WorkList.push_back(SU);
328 // Process nodes in the topological order
329 while (!WorkList.empty()) {
330 SUnit *SU = WorkList.back();
332 unsigned &SUHeight = SU->Height;
334 // Use dynamic programming:
335 // When current node is being processed, all of its dependencies
336 // are already processed.
337 // So, just iterate over all successors and take the longest path
338 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
340 unsigned SuccHeight = I->Dep->Height;
341 if (SuccHeight+1 > SUHeight) {
342 SUHeight = SuccHeight + 1;
346 // Update InDegrees of all nodes depending on current SUnit
347 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
350 if (!--InDegree[SU->NodeNum])
351 // If all dependencies of the node are processed already,
352 // then the longest path for the node can be computed now
353 WorkList.push_back(SU);
358 /// CountResults - The results of target nodes have register or immediate
359 /// operands first, then an optional chain, and optional flag operands (which do
360 /// not go into the resulting MachineInstr).
/// Returns the number of real (MachineInstr) results by stripping trailing
/// flag results, then an optional chain result.
361 unsigned ScheduleDAG::CountResults(SDNode *Node) {
362 unsigned N = Node->getNumValues();
// Strip trailing flag results (the decrement line is elided in this listing).
363 while (N && Node->getValueType(N - 1) == MVT::Flag)
365 if (N && Node->getValueType(N - 1) == MVT::Other)
366 --N; // Skip over chain result.
370 /// CountOperands - The inputs to target nodes have any actual inputs first,
371 /// followed by special operands that describe memory references, then an
372 /// optional chain operand, then flag operands. Compute the number of
373 /// actual operands that will go into the resulting MachineInstr.
374 unsigned ScheduleDAG::CountOperands(SDNode *Node) {
// Start past flag/chain operands, then also strip the MemOperand nodes.
375 unsigned N = ComputeMemOperandsEnd(Node);
376 while (N && isa<MemOperandSDNode>(Node->getOperand(N - 1).Val))
377 --N; // Ignore MEMOPERAND nodes
381 /// ComputeMemOperandsEnd - Find the index one past the last MemOperandSDNode
/// operand, i.e. the operand count after stripping trailing flag operands
/// and then an optional chain operand.
383 unsigned ScheduleDAG::ComputeMemOperandsEnd(SDNode *Node) {
384 unsigned N = Node->getNumOperands();
// Strip trailing flag operands (decrement line elided in this listing).
385 while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
387 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
388 --N; // Ignore chain if it exists.
/// getInstrOperandRegClass - Return the register class that operand #Op of
/// the instruction described by II must have. Returns the target's pointer
/// register class for "lookup pointer" operands; variadic operands past the
/// declared operand count have no class (return elided in this listing).
392 static const TargetRegisterClass *getInstrOperandRegClass(
393 const TargetRegisterInfo *TRI,
394 const TargetInstrInfo *TII,
395 const TargetInstrDesc &II,
397 if (Op >= II.getNumOperands()) {
398 assert(II.isVariadic() && "Invalid operand # of instruction");
401 if (II.OpInfo[Op].isLookupPtrRegClass())
402 return TII->getPointerRegClass();
403 return TRI->getRegClass(II.OpInfo[Op].RegClass);
/// EmitCopyFromReg - Record (or materialize) the virtual register that holds
/// result #ResNo of Node, given that the value lives in SrcReg. Virtual
/// source registers are reused directly; physical sources either reuse a
/// CopyToReg destination vreg or get a fresh vreg plus an inserted copy.
/// NOTE(review): this listing has lines elided; several branches below are
/// incomplete as shown (e.g. where VRBase is assigned).
406 void ScheduleDAG::EmitCopyFromReg(SDNode *Node, unsigned ResNo,
407 bool IsClone, unsigned SrcReg,
408 DenseMap<SDOperand, unsigned> &VRBaseMap) {
410 if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
411 // Just use the input register directly!
// Clones may re-emit the same (Node,ResNo); drop any stale entry first.
413 VRBaseMap.erase(SDOperand(Node, ResNo));
414 bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo),SrcReg));
415 isNew = isNew; // Silence compiler warning.
416 assert(isNew && "Node emitted out of order - early");
420 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
421 // the CopyToReg'd destination register instead of creating a new vreg.
422 bool MatchReg = true;
423 for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
425 SDNode *Use = UI->getUser();
427 if (Use->getOpcode() == ISD::CopyToReg &&
428 Use->getOperand(2).Val == Node &&
429 Use->getOperand(2).ResNo == ResNo) {
430 unsigned DestReg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
431 if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
434 } else if (DestReg != SrcReg)
// Any non-copy use of this result means the value must be materialized.
437 for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
438 SDOperand Op = Use->getOperand(i);
439 if (Op.Val != Node || Op.ResNo != ResNo)
441 MVT VT = Node->getValueType(Op.ResNo);
442 if (VT != MVT::Other && VT != MVT::Flag)
451 const TargetRegisterClass *SrcRC = 0, *DstRC = 0;
452 SrcRC = TRI->getPhysicalRegisterRegClass(SrcReg, Node->getValueType(ResNo));
454 // Figure out the register class to create for the destreg.
456 DstRC = MRI.getRegClass(VRBase);
458 DstRC = TLI->getRegClassFor(Node->getValueType(ResNo));
461 // If all uses are reading from the src physical register and copying the
462 // register is either impossible or very expensive, then don't create a copy.
463 if (MatchReg && SrcRC->getCopyCost() < 0) {
466 // Create the reg, emit the copy.
467 VRBase = MRI.createVirtualRegister(DstRC);
468 TII->copyRegToReg(*BB, BB->end(), VRBase, SrcReg, DstRC, SrcRC);
// Clones may re-emit the same (Node,ResNo); drop any stale entry first.
472 VRBaseMap.erase(SDOperand(Node, ResNo));
473 bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo), VRBase));
474 isNew = isNew; // Silence compiler warning.
475 assert(isNew && "Node emitted out of order - early");
478 /// getDstOfCopyToRegUse - If the only use of the specified result number of
479 /// node is a CopyToReg, return its destination register. Return 0 otherwise.
/// Only virtual destination registers qualify; physical destinations and
/// multi-use results return 0 (return lines elided in this listing).
480 unsigned ScheduleDAG::getDstOfOnlyCopyToRegUse(SDNode *Node,
481 unsigned ResNo) const {
482 if (!Node->hasOneUse())
485 SDNode *Use = Node->use_begin()->getUser();
486 if (Use->getOpcode() == ISD::CopyToReg &&
487 Use->getOperand(2).Val == Node &&
488 Use->getOperand(2).ResNo == ResNo) {
489 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
490 if (TargetRegisterInfo::isVirtualRegister(Reg))
/// CreateVirtualRegisters - For each register def of Node's MachineInstr,
/// either reuse the destination vreg of a sole CopyToReg user or create a
/// fresh vreg of the proper class, append it to MI as a def operand, and
/// record the (Node,ResNo) -> vreg mapping in VRBaseMap.
496 void ScheduleDAG::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
497 const TargetInstrDesc &II,
498 DenseMap<SDOperand, unsigned> &VRBaseMap) {
499 assert(Node->getTargetOpcode() != TargetInstrInfo::IMPLICIT_DEF &&
500 "IMPLICIT_DEF should have been handled as a special case elsewhere!");
502 for (unsigned i = 0; i < II.getNumDefs(); ++i) {
503 // If the specific node value is only used by a CopyToReg and the dest reg
504 // is a vreg, use the CopyToReg'd destination register instead of creating
507 for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
509 SDNode *Use = UI->getUser();
510 if (Use->getOpcode() == ISD::CopyToReg &&
511 Use->getOperand(2).Val == Node &&
512 Use->getOperand(2).ResNo == i) {
513 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
514 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
// Reuse the copy's destination vreg as this def's register.
516 MI->addOperand(MachineOperand::CreateReg(Reg, true));
522 // Create the result registers for this node and add the result regs to
523 // the machine instruction.
525 const TargetRegisterClass *RC = getInstrOperandRegClass(TRI, TII, II, i);
526 assert(RC && "Isn't a register operand!");
527 VRBase = MRI.createVirtualRegister(RC);
528 MI->addOperand(MachineOperand::CreateReg(VRBase, true));
531 bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,i), VRBase));
532 isNew = isNew; // Silence compiler warning.
533 assert(isNew && "Node emitted out of order - early");
537 /// getVR - Return the virtual register corresponding to the specified result
538 /// of the specified node.
/// IMPLICIT_DEF is special-cased: a fresh IMPLICIT_DEF instruction is
/// emitted before the use so each use gets its own vreg. All other values
/// must already be present in VRBaseMap.
539 unsigned ScheduleDAG::getVR(SDOperand Op,
540 DenseMap<SDOperand, unsigned> &VRBaseMap) {
541 if (Op.isTargetOpcode() &&
542 Op.getTargetOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
543 // Add an IMPLICIT_DEF instruction before every use.
544 unsigned VReg = getDstOfOnlyCopyToRegUse(Op.Val, Op.ResNo);
545 // IMPLICIT_DEF can produce any type of result so its TargetInstrDesc
546 // does not include operand register class info.
// NOTE(review): the guard around this fallback (VReg == 0 case) is elided
// in this listing.
548 const TargetRegisterClass *RC = TLI->getRegClassFor(Op.getValueType());
549 VReg = MRI.createVirtualRegister(RC);
551 BuildMI(BB, TII->get(TargetInstrInfo::IMPLICIT_DEF), VReg);
555 DenseMap<SDOperand, unsigned>::iterator I = VRBaseMap.find(Op);
556 assert(I != VRBaseMap.end() && "Node emitted out of order - late");
561 /// AddOperand - Add the specified operand to the specified machine instr. II
562 /// specifies the instruction information for the node, and IIOpNum is the
563 /// operand number (in the II) that we are adding. IIOpNum and II are used for
/// register-class verification. Dispatches on the SDOperand kind: values
/// produced by other instructions become vreg operands; constants,
/// registers, globals, basic blocks, frame indices, jump tables, constant
/// pool entries, and external symbols become the corresponding
/// MachineOperand kinds.
565 void ScheduleDAG::AddOperand(MachineInstr *MI, SDOperand Op,
567 const TargetInstrDesc *II,
568 DenseMap<SDOperand, unsigned> &VRBaseMap) {
569 if (Op.isTargetOpcode()) {
570 // Note that this case is redundant with the final else block, but we
571 // include it because it is the most common and it makes the logic
573 assert(Op.getValueType() != MVT::Other &&
574 Op.getValueType() != MVT::Flag &&
575 "Chain and flag operands should occur at end of operand list!");
576 // Get/emit the operand.
577 unsigned VReg = getVR(Op, VRBaseMap);
578 const TargetInstrDesc &TID = MI->getDesc();
579 bool isOptDef = IIOpNum < TID.getNumOperands() &&
580 TID.OpInfo[IIOpNum].isOptionalDef();
581 MI->addOperand(MachineOperand::CreateReg(VReg, isOptDef));
583 // Verify that it is right.
584 assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
587 // There may be no register class for this operand if it is a variadic
588 // argument (RC will be NULL in this case). In this case, we just assume
589 // the regclass is ok.
590 const TargetRegisterClass *RC =
591 getInstrOperandRegClass(TRI, TII, *II, IIOpNum);
592 assert((RC || II->isVariadic()) && "Expected reg class info!");
593 const TargetRegisterClass *VRC = MRI.getRegClass(VReg);
// A vreg whose class disagrees with the instruction description is a
// selection bug; dump full diagnostics before aborting.
594 if (RC && VRC != RC) {
595 cerr << "Register class of operand and regclass of use don't agree!\n";
596 cerr << "Operand = " << IIOpNum << "\n";
597 cerr << "Op->Val = "; Op.Val->dump(&DAG); cerr << "\n";
598 cerr << "MI = "; MI->print(cerr);
599 cerr << "VReg = " << VReg << "\n";
600 cerr << "VReg RegClass size = " << VRC->getSize()
601 << ", align = " << VRC->getAlignment() << "\n";
602 cerr << "Expected RegClass size = " << RC->getSize()
603 << ", align = " << RC->getAlignment() << "\n";
604 cerr << "Fatal error, aborting.\n";
609 } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
610 MI->addOperand(MachineOperand::CreateImm(C->getValue()));
611 } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
612 ConstantFP *CFP = ConstantFP::get(F->getValueAPF());
613 MI->addOperand(MachineOperand::CreateFPImm(CFP));
614 } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
615 MI->addOperand(MachineOperand::CreateReg(R->getReg(), false));
616 } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
617 MI->addOperand(MachineOperand::CreateGA(TGA->getGlobal(),TGA->getOffset()));
618 } else if (BasicBlockSDNode *BB = dyn_cast<BasicBlockSDNode>(Op)) {
619 MI->addOperand(MachineOperand::CreateMBB(BB->getBasicBlock()));
620 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
621 MI->addOperand(MachineOperand::CreateFI(FI->getIndex()));
622 } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
623 MI->addOperand(MachineOperand::CreateJTI(JT->getIndex()));
624 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
625 int Offset = CP->getOffset();
626 unsigned Align = CP->getAlignment();
627 const Type *Type = CP->getType();
628 // MachineConstantPool wants an explicit alignment.
// NOTE(review): the conditionals selecting between these alignment
// computations are elided in this listing.
630 Align = TM.getTargetData()->getPreferredTypeAlignmentShift(Type);
632 // Alignment of vector types. FIXME!
633 Align = TM.getTargetData()->getABITypeSize(Type);
634 Align = Log2_64(Align);
639 if (CP->isMachineConstantPoolEntry())
640 Idx = ConstPool->getConstantPoolIndex(CP->getMachineCPVal(), Align);
642 Idx = ConstPool->getConstantPoolIndex(CP->getConstVal(), Align);
643 MI->addOperand(MachineOperand::CreateCPI(Idx, Offset));
644 } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
645 MI->addOperand(MachineOperand::CreateES(ES->getSymbol()));
647 assert(Op.getValueType() != MVT::Other &&
648 Op.getValueType() != MVT::Flag &&
649 "Chain and flag operands should occur at end of operand list!");
650 unsigned VReg = getVR(Op, VRBaseMap);
651 MI->addOperand(MachineOperand::CreateReg(VReg, false));
653 // Verify that it is right. Note that the reg class of the physreg and the
654 // vreg don't necessarily need to match, but the target copy insertion has
655 // to be able to handle it. This handles things like copies from ST(0) to
656 // an FP vreg on x86.
657 assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
658 if (II && !II->isVariadic()) {
659 assert(getInstrOperandRegClass(TRI, TII, *II, IIOpNum) &&
660 "Don't have operand info for this instruction!");
/// AddMemOperand - Attach the given memory reference descriptor to MI.
666 void ScheduleDAG::AddMemOperand(MachineInstr *MI, const MachineMemOperand &MO) {
667 MI->addMemOperand(MO);
670 // Returns the Register Class of a subregister
// Indexes into TRC's subregister-class list; SubIdx is 1-based, hence the
// -1 adjustment. The dereference/return line is elided in this listing.
671 static const TargetRegisterClass *getSubRegisterRegClass(
672 const TargetRegisterClass *TRC,
674 // Pick the register class of the subregister
675 TargetRegisterInfo::regclass_iterator I =
676 TRC->subregclasses_begin() + SubIdx-1;
677 assert(I < TRC->subregclasses_end() &&
678 "Invalid subregister index for register class");
// Returns the register class of the superregister: the class that has the
// requested value type VT and whose subregister class at SubIdx is TRC.
// Asserts if no such class exists.
682 static const TargetRegisterClass *getSuperregRegisterClass(
683 const TargetRegisterClass *TRC,
686 // Pick the register class of the superregister for this type
687 for (TargetRegisterInfo::regclass_iterator I = TRC->superregclasses_begin(),
688 E = TRC->superregclasses_end(); I != E; ++I)
689 if ((*I)->hasType(VT) && getSubRegisterRegClass(*I, SubIdx) == TRC)
691 assert(false && "Couldn't find the register class");
695 /// EmitSubregNode - Generate machine code for subreg nodes.
/// Handles EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG target opcodes,
/// building the corresponding MachineInstr and recording the result vreg in
/// VRBaseMap. NOTE(review): this listing has lines elided; several branches
/// (e.g. where VRBase is first assigned) are incomplete as shown.
697 void ScheduleDAG::EmitSubregNode(SDNode *Node,
698 DenseMap<SDOperand, unsigned> &VRBaseMap) {
700 unsigned Opc = Node->getTargetOpcode();
702 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
703 // the CopyToReg'd destination register instead of creating a new vreg.
704 for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
706 SDNode *Use = UI->getUser();
707 if (Use->getOpcode() == ISD::CopyToReg &&
708 Use->getOperand(2).Val == Node) {
709 unsigned DestReg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
710 if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
717 if (Opc == TargetInstrInfo::EXTRACT_SUBREG) {
// Operand 1 of EXTRACT_SUBREG is the subregister index constant.
718 unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getValue();
720 // Create the extract_subreg machine instruction.
721 MachineInstr *MI = BuildMI(TII->get(TargetInstrInfo::EXTRACT_SUBREG));
723 // Figure out the register class to create for the destreg.
724 unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
725 const TargetRegisterClass *TRC = MRI.getRegClass(VReg);
726 const TargetRegisterClass *SRC = getSubRegisterRegClass(TRC, SubIdx);
729 // Grab the destination register
731 const TargetRegisterClass *DRC = MRI.getRegClass(VRBase);
732 assert(SRC && DRC && SRC == DRC &&
733 "Source subregister and destination must have the same class");
737 assert(SRC && "Couldn't find source register class");
738 VRBase = MRI.createVirtualRegister(SRC);
741 // Add def, source, and subreg index
742 MI->addOperand(MachineOperand::CreateReg(VRBase, true));
743 AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap);
744 MI->addOperand(MachineOperand::CreateImm(SubIdx));
746 } else if (Opc == TargetInstrInfo::INSERT_SUBREG ||
747 Opc == TargetInstrInfo::SUBREG_TO_REG) {
// Operands: N0 = superreg (or immediate for SUBREG_TO_REG), N1 = value
// being inserted, N2 = subregister index constant.
748 SDOperand N0 = Node->getOperand(0);
749 SDOperand N1 = Node->getOperand(1);
750 SDOperand N2 = Node->getOperand(2);
751 unsigned SubReg = getVR(N1, VRBaseMap);
752 unsigned SubIdx = cast<ConstantSDNode>(N2)->getValue();
755 // Figure out the register class to create for the destreg.
756 const TargetRegisterClass *TRC = 0;
758 TRC = MRI.getRegClass(VRBase);
760 TRC = getSuperregRegisterClass(MRI.getRegClass(SubReg), SubIdx,
761 Node->getValueType(0));
762 assert(TRC && "Couldn't determine register class for insert_subreg");
763 VRBase = MRI.createVirtualRegister(TRC); // Create the reg
766 // Create the insert_subreg or subreg_to_reg machine instruction.
767 MachineInstr *MI = BuildMI(TII->get(Opc));
768 MI->addOperand(MachineOperand::CreateReg(VRBase, true));
770 // If creating a subreg_to_reg, then the first input operand
771 // is an implicit value immediate, otherwise it's a register
772 if (Opc == TargetInstrInfo::SUBREG_TO_REG) {
773 const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
774 MI->addOperand(MachineOperand::CreateImm(SD->getValue()));
776 AddOperand(MI, N0, 0, 0, VRBaseMap);
777 // Add the subregister being inserted
778 AddOperand(MI, N1, 0, 0, VRBaseMap);
779 MI->addOperand(MachineOperand::CreateImm(SubIdx));
782 assert(0 && "Node is not insert_subreg, extract_subreg, or subreg_to_reg");
784 bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,0), VRBase));
785 isNew = isNew; // Silence compiler warning.
786 assert(isNew && "Node emitted out of order - early");
789 /// EmitNode - Generate machine code for an node and needed dependencies.
/// Target opcodes become a MachineInstr built from the TargetInstrDesc
/// (subreg nodes and IMPLICIT_DEF are special-cased); the remaining
/// target-independent opcodes (CopyToReg, CopyFromReg, INLINEASM, etc.) are
/// expanded by hand in the switch below.
/// NOTE(review): this listing has lines elided; some branches/cases are
/// incomplete as shown.
791 void ScheduleDAG::EmitNode(SDNode *Node, bool IsClone,
792 DenseMap<SDOperand, unsigned> &VRBaseMap) {
793 // If machine instruction
794 if (Node->isTargetOpcode()) {
795 unsigned Opc = Node->getTargetOpcode();
797 // Handle subreg insert/extract specially
798 if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
799 Opc == TargetInstrInfo::INSERT_SUBREG ||
800 Opc == TargetInstrInfo::SUBREG_TO_REG) {
801 EmitSubregNode(Node, VRBaseMap);
805 if (Opc == TargetInstrInfo::IMPLICIT_DEF)
806 // We want a unique VR for each IMPLICIT_DEF use.
// Split the node's values/operands into real results, real operands, and
// memory-reference operands, per CountResults/CountOperands above.
809 const TargetInstrDesc &II = TII->get(Opc);
810 unsigned NumResults = CountResults(Node);
811 unsigned NodeOperands = CountOperands(Node);
812 unsigned MemOperandsEnd = ComputeMemOperandsEnd(Node);
813 bool HasPhysRegOuts = (NumResults > II.getNumDefs()) &&
814 II.getImplicitDefs() != 0;
816 unsigned NumMIOperands = NodeOperands + NumResults;
817 assert((II.getNumOperands() == NumMIOperands ||
818 HasPhysRegOuts || II.isVariadic()) &&
819 "#operands for dag node doesn't match .td file!");
822 // Create the new machine instruction.
823 MachineInstr *MI = BuildMI(II);
825 // Add result register values for things that are defined by this
828 CreateVirtualRegisters(Node, MI, II, VRBaseMap);
830 // Emit all of the actual operands of this instruction, adding them to the
831 // instruction as appropriate.
832 for (unsigned i = 0; i != NodeOperands; ++i)
833 AddOperand(MI, Node->getOperand(i), i+II.getNumDefs(), &II, VRBaseMap);
835 // Emit all of the memory operands of this instruction
836 for (unsigned i = NodeOperands; i != MemOperandsEnd; ++i)
837 AddMemOperand(MI, cast<MemOperandSDNode>(Node->getOperand(i))->MO);
839 // Commute node if it has been determined to be profitable.
840 if (CommuteSet.count(Node)) {
841 MachineInstr *NewMI = TII->commuteInstruction(MI);
843 DOUT << "Sched: COMMUTING FAILED!\n";
845 DOUT << "Sched: COMMUTED TO: " << *NewMI;
// Targets may need to insert some instructions (e.g. with control flow)
// themselves; the hook may replace BB with a new block.
854 if (II.usesCustomDAGSchedInsertionHook())
855 // Insert this instruction into the basic block using a target
856 // specific inserter which may returns a new basic block.
857 BB = TLI->EmitInstrWithCustomInserter(MI, BB);
861 // Additional results must be an physical register def.
862 if (HasPhysRegOuts) {
863 for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
864 unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
865 if (Node->hasAnyUseOfValue(i))
866 EmitCopyFromReg(Node, i, IsClone, Reg, VRBaseMap);
870 switch (Node->getOpcode()) {
875 assert(0 && "This target-independent node should have been selected!");
877 case ISD::EntryToken:
878 assert(0 && "EntryToken should have been excluded from the schedule!");
880 case ISD::TokenFactor: // fall thru
885 case ISD::CopyToReg: {
887 SDOperand SrcVal = Node->getOperand(2);
888 if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
889 SrcReg = R->getReg();
891 SrcReg = getVR(SrcVal, VRBaseMap);
893 unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
894 if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
897 const TargetRegisterClass *SrcTRC = 0, *DstTRC = 0;
898 // Get the register classes of the src/dst.
899 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
900 SrcTRC = MRI.getRegClass(SrcReg);
902 SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());
904 if (TargetRegisterInfo::isVirtualRegister(DestReg))
905 DstTRC = MRI.getRegClass(DestReg);
907 DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
908 Node->getOperand(1).getValueType());
909 TII->copyRegToReg(*BB, BB->end(), DestReg, SrcReg, DstTRC, SrcTRC);
912 case ISD::CopyFromReg: {
913 unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
914 EmitCopyFromReg(Node, 0, IsClone, SrcReg, VRBaseMap);
917 case ISD::INLINEASM: {
918 unsigned NumOps = Node->getNumOperands();
919 if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
920 --NumOps; // Ignore the flag operand.
922 // Create the inline asm machine instruction.
923 MachineInstr *MI = BuildMI(TII->get(TargetInstrInfo::INLINEASM));
925 // Add the asm string as an external symbol operand.
927 cast<ExternalSymbolSDNode>(Node->getOperand(1))->getSymbol();
928 MI->addOperand(MachineOperand::CreateES(AsmStr));
930 // Add all of the operand registers to the instruction.
// Each group starts with a flags word: low 3 bits select the operand
// kind, the remaining bits give the number of values in the group.
931 for (unsigned i = 2; i != NumOps;) {
932 unsigned Flags = cast<ConstantSDNode>(Node->getOperand(i))->getValue();
933 unsigned NumVals = Flags >> 3;
935 MI->addOperand(MachineOperand::CreateImm(Flags));
936 ++i; // Skip the ID value.
939 default: assert(0 && "Bad flags!");
940 case 1: // Use of register.
941 for (; NumVals; --NumVals, ++i) {
942 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
943 MI->addOperand(MachineOperand::CreateReg(Reg, false));
946 case 2: // Def of register.
947 for (; NumVals; --NumVals, ++i) {
948 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
949 MI->addOperand(MachineOperand::CreateReg(Reg, true));
952 case 3: { // Immediate.
953 for (; NumVals; --NumVals, ++i) {
954 if (ConstantSDNode *CS =
955 dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
956 MI->addOperand(MachineOperand::CreateImm(CS->getValue()));
957 } else if (GlobalAddressSDNode *GA =
958 dyn_cast<GlobalAddressSDNode>(Node->getOperand(i))) {
959 MI->addOperand(MachineOperand::CreateGA(GA->getGlobal(),
962 BasicBlockSDNode *BB =cast<BasicBlockSDNode>(Node->getOperand(i));
963 MI->addOperand(MachineOperand::CreateMBB(BB->getBasicBlock()));
968 case 4: // Addressing mode.
969 // The addressing mode has been selected, just add all of the
970 // operands to the machine instruction.
971 for (; NumVals; --NumVals, ++i)
972 AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap);
/// EmitNoop - Emit a target-specific noop at the end of the current basic
/// block (used for null entries in the schedule sequence).
/// NOTE(review): the closing brace is elided from this excerpt.
983 void ScheduleDAG::EmitNoop() {
984 TII->insertNoop(*BB, BB->end());
/// EmitCrossRCCopy - Emit the cross-register-class copy represented by a
/// copy SUnit: either vreg -> physreg (feeding a successor's physical-reg
/// requirement) or physreg -> vreg (recording the new vreg in VRBaseMap).
/// NOTE(review): this excerpt is non-contiguous — the pred-loop increment,
/// the if/else selecting the two directions, the code that sets Reg from
/// the successor scan, and several braces are elided.
987 void ScheduleDAG::EmitCrossRCCopy(SUnit *SU,
988 DenseMap<SUnit*, unsigned> &VRBaseMap) {
// Walk data predecessors; each non-chain pred drives one copy.
989 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
991 if (I->isCtrl) continue; // ignore chain preds
993 // Copy to physical register.
// The predecessor must already have been emitted and assigned a vreg.
994 DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->Dep);
995 assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
996 // Find the destination physical register.
998 for (SUnit::const_succ_iterator II = SU->Succs.begin(),
999 EE = SU->Succs.end(); II != EE; ++II) {
// NOTE(review): Reg is assigned in elided lines (1000-1004), presumably
// from the successor edge scanned above — confirm against the full file.
1005 assert(I->Reg && "Unknown physical register!");
1006 TII->copyRegToReg(*BB, BB->end(), Reg, VRI->second,
1007 SU->CopyDstRC, SU->CopySrcRC);
1009 // Copy from physical register.
1010 assert(I->Reg && "Unknown physical register!");
// Create a fresh vreg of the copy's destination class and record it so
// later users of this SUnit can find it.
1011 unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
1012 bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase));
1013 isNew = isNew; // Silence compiler warning.
1014 assert(isNew && "Node emitted out of order - early");
1015 TII->copyRegToReg(*BB, BB->end(), VRBase, I->Reg,
1016 SU->CopyDstRC, SU->CopySrcRC);
1022 /// EmitLiveInCopy - Emit a copy for a live in physical register. If the
1023 /// physical register has only a single copy use, then coalesced the copy
/// NOTE(review): the continuation of this doc comment (line 1024) and a
/// number of body lines are elided — this excerpt's line numbering is
/// non-contiguous. Do not edit without the full file.
1025 void ScheduleDAG::EmitLiveInCopy(MachineBasicBlock *MBB,
1026 MachineBasicBlock::iterator &InsertPos,
1027 unsigned VirtReg, unsigned PhysReg,
1028 const TargetRegisterClass *RC,
1029 DenseMap<MachineInstr*, unsigned> &CopyRegMap){
// Count the uses of VirtReg and remember a use instruction (the loop body
// updating NumUses/UseMI is partly elided, lines 1034-1038).
1030 unsigned NumUses = 0;
1031 MachineInstr *UseMI = NULL;
1032 for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(VirtReg),
1033 UE = MRI.use_end(); UI != UE; ++UI) {
1039 // If the number of uses is not one, or the use is not a move instruction,
1040 // don't coalesce. Also, only coalesce away a virtual register to virtual
1042 bool Coalesced = false;
1043 unsigned SrcReg, DstReg;
// NOTE(review): the condition's first clause (line 1044, presumably the
// NumUses check) is elided here.
1045 TII->isMoveInstr(*UseMI, SrcReg, DstReg) &&
1046 TargetRegisterInfo::isVirtualRegister(DstReg)) {
1051 // Now find an ideal location to insert the copy.
// Scan backwards from InsertPos past copies we already emitted, stopping
// where inserting this copy cannot block coalescing of an earlier copy.
1052 MachineBasicBlock::iterator Pos = InsertPos;
1053 while (Pos != MBB->begin()) {
1054 MachineInstr *PrevMI = prior(Pos);
1055 DenseMap<MachineInstr*, unsigned>::iterator RI = CopyRegMap.find(PrevMI);
1056 // copyRegToReg might emit multiple instructions to do a copy.
1057 unsigned CopyDstReg = (RI == CopyRegMap.end()) ? 0 : RI->second;
1058 if (CopyDstReg && !TRI->regsOverlap(CopyDstReg, PhysReg))
1059 // This is what the BB looks like right now:
1064 // We want to insert "r1025 = mov r1". Inserting this copy below the
1065 // move to r1024 makes it impossible for that move to be coalesced.
1072 break; // Woot! Found a good location.
// Emit the copy and remember which physreg-copy produced which vreg so
// later calls can order their insertions around it.
1076 TII->copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
1077 CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
// Keep InsertPos past the use we may be about to coalesce away.
1079 if (&*InsertPos == UseMI) ++InsertPos;
1084 /// EmitLiveInCopies - If this is the first basic block in the function,
1085 /// and if it has live ins that need to be copied into vregs, emit the
1086 /// copies into the top of the block.
1087 void ScheduleDAG::EmitLiveInCopies(MachineBasicBlock *MBB) {
// CopyRegMap tracks which already-emitted copy instruction defines which
// vreg, letting EmitLiveInCopy order insertions to preserve coalescing.
1088 DenseMap<MachineInstr*, unsigned> CopyRegMap;
1089 MachineBasicBlock::iterator InsertPos = MBB->begin();
// livein pairs are (physical reg, virtual reg): LI->first is the physreg,
// LI->second the vreg, as the EmitLiveInCopy call below shows.
1090 for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
1091 E = MRI.livein_end(); LI != E; ++LI)
// NOTE(review): a guard line (1092) is elided from this excerpt.
1093 const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
1094 EmitLiveInCopy(MBB, InsertPos, LI->second, LI->first, RC, CopyRegMap);
1098 /// EmitSchedule - Emit the machine code in scheduled order.
1099 void ScheduleDAG::EmitSchedule() {
1100 bool isEntryBB = &MF->front() == BB;
// Without the SchedLiveInCopies option, live-in copies are emitted
// eagerly here; with it, the coalescing-aware path at the bottom runs.
1102 if (isEntryBB && !SchedLiveInCopies) {
1103 // If this is the first basic block in the function, and if it has live ins
1104 // that need to be copied into vregs, emit the copies into the top of the
1105 // block before emitting the code for the block.
1106 for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
1107 E = MRI.livein_end(); LI != E; ++LI)
1109 const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
// NOTE(review): the trailing arguments of this call (line 1111+) are
// elided from this excerpt.
1110 TII->copyRegToReg(*MF->begin(), MF->begin()->end(), LI->second,
1115 // Finally, emit the code for all of the scheduled instructions.
1116 DenseMap<SDOperand, unsigned> VRBaseMap;
1117 DenseMap<SUnit*, unsigned> CopyVRBaseMap;
1118 for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
1119 SUnit *SU = Sequence[i];
1121 // Null SUnit* is a noop.
// Emit flagged (glued) nodes first, then the cross-RC copy or the main
// node; IsClone is true when this SUnit is a clone of its OrigNode.
1125 for (unsigned j = 0, ee = SU->FlaggedNodes.size(); j != ee; ++j)
1126 EmitNode(SU->FlaggedNodes[j], SU->OrigNode != SU, VRBaseMap);
1128 EmitCrossRCCopy(SU, CopyVRBaseMap);
1130 EmitNode(SU->Node, SU->OrigNode != SU, VRBaseMap);
1133 if (isEntryBB && SchedLiveInCopies)
1134 EmitLiveInCopies(MF->begin());
1137 /// dump - dump the schedule.
// Prints each scheduled SUnit; null entries in Sequence are noops.
// NOTE(review): the SU->dump(...) branch (lines 1141-1142) is elided from
// this excerpt.
1138 void ScheduleDAG::dumpSchedule() const {
1139 for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
1140 if (SUnit *SU = Sequence[i])
1143 cerr << "**** NOOP ****\n";
1148 /// Run - perform scheduling.
// NOTE(review): the entire body (lines 1151-1154) is elided from this
// excerpt — only the signature is visible. Returns the MachineBasicBlock
// that emission targeted; do not document further without the full file.
1150 MachineBasicBlock *ScheduleDAG::Run() {
1155 /// SUnit - Scheduling unit. It's an wrapper around either a single SDNode or
1156 /// a group of nodes flagged together.
// One-line dump of this scheduling unit: its number, its kind, and the
// nodes it wraps. NOTE(review): several branches (lines 1159-1161, 1163,
// 1166, closing braces) are elided from this excerpt.
1157 void SUnit::dump(const SelectionDAG *G) const {
1158 cerr << "SU(" << NodeNum << "): ";
1162 cerr << "CROSS RC COPY ";
// Also print every node glued into this unit.
1164 if (FlaggedNodes.size() != 0) {
1165 for (unsigned i = 0, e = FlaggedNodes.size(); i != e; i++) {
1167 FlaggedNodes[i]->dump(G);
// Verbose dump: the one-line summary plus counters, latency/depth/height,
// and the full predecessor and successor edge lists.
// NOTE(review): this definition runs past the end of this excerpt and has
// interior gaps (e.g. lines 1174-1175, 1185-1189 are elided).
1173 void SUnit::dumpAll(const SelectionDAG *G) const {
1176 cerr << " # preds left : " << NumPredsLeft << "\n";
1177 cerr << " # succs left : " << NumSuccsLeft << "\n";
1178 cerr << " Latency : " << Latency << "\n";
1179 cerr << " Depth : " << Depth << "\n";
1180 cerr << " Height : " << Height << "\n";
1182 if (Preds.size() != 0) {
1183 cerr << " Predecessors:\n";
// NOTE(review): const_succ_iterator is used to walk Preds here, whereas
// EmitCrossRCCopy uses const_pred_iterator for the same container —
// likely both typedefs name the same iterator type, but verify and
// prefer const_pred_iterator for consistency.
1184 for (SUnit::const_succ_iterator I = Preds.begin(), E = Preds.end();
1190 cerr << I->Dep << " - SU(" << I->Dep->NodeNum << ")";
1196 if (Succs.size() != 0) {
1197 cerr << " Successors:\n";
1198 for (SUnit::const_succ_iterator I = Succs.begin(), E = Succs.end();
1204 cerr << I->Dep << " - SU(" << I->Dep->NodeNum << ")";