1 //===---- ScheduleDAG.cpp - Implement the ScheduleDAG class ---------------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by James M. Laskey and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements a simple two pass scheduler. The first pass attempts to push
11 // backward any lengthy instructions and critical paths. The second pass packs
12 // instructions into semi-optimal time slots.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "sched"
17 #include "llvm/CodeGen/ScheduleDAG.h"
18 #include "llvm/CodeGen/MachineConstantPool.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/SSARegMap.h"
21 #include "llvm/Target/TargetData.h"
22 #include "llvm/Target/TargetMachine.h"
23 #include "llvm/Target/TargetInstrInfo.h"
24 #include "llvm/Target/TargetLowering.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/MathExtras.h"
31 /// BuildSchedUnits - Build SUnits from the selection dag that we are input.
32 /// This SUnit graph is similar to the SelectionDAG, but represents flagged
33 /// together nodes with a single SUnit.
// NOTE(review): this listing's pasted line numbers are not contiguous — interior
// source lines (statements, braces, else-arms) have been dropped.  The comments
// below describe only what the visible code shows; statements that the control
// flow clearly requires but that are absent are flagged as truncation.
34 void ScheduleDAG::BuildSchedUnits() {
35 // Reserve entries in the vector for each of the SUnits we are creating. This
36 // ensure that reallocation of the vector won't happen, so SUnit*'s won't get
// (truncation: the tail of the sentence above — presumably "...invalidated" —
// is missing from the listing.)
38 SUnits.reserve(std::distance(DAG.allnodes_begin(), DAG.allnodes_end()));
// Per-function instruction itineraries: used below to compute SUnit latencies.
40 const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
// Pass 1: walk every node of the SelectionDAG and cluster flag-glued nodes
// into a single SUnit, computing each SUnit's latency.
42 for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin(),
43 E = DAG.allnodes_end(); NI != E; ++NI) {
44 if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.
// (truncation: the `continue;` that must follow this guard is missing.)
47 // If this node has already been processed, stop now.
48 if (SUnitMap[NI]) continue;
50 SUnit *NodeSUnit = NewSUnit(NI);
52 // See if anything is flagged to this node, if so, add them to flagged
53 // nodes. Nodes can have at most one flag input and one flag output. Flags
54 // are required the be the last operand and result of a node.
56 // Scan up, adding flagged preds to FlaggedNodes.
// (truncation: the declaration initializing N — presumably `SDNode *N = NI;` —
// is missing before this loop.)
58 while (N->getNumOperands() &&
59 N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
60 N = N->getOperand(N->getNumOperands()-1).Val;
61 NodeSUnit->FlaggedNodes.push_back(N);
62 SUnitMap[N] = NodeSUnit;
// (truncation: loop-closing brace and the re-initialization of N for the
// downward scan are missing here.)
65 // Scan down, adding this node and any flagged succs to FlaggedNodes if they
66 // have a user of the flag operand.
68 while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
69 SDOperand FlagVal(N, N->getNumValues()-1);
71 // There are either zero or one users of the Flag result.
72 bool HasFlagUse = false;
73 for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
// (truncation: the for-loop's condition/increment line is missing.)
75 if (FlagVal.isOperand(*UI)) {
// (truncation: the advance of N to the flag user and HasFlagUse = true are
// missing from the listing.)
77 NodeSUnit->FlaggedNodes.push_back(N);
78 SUnitMap[N] = NodeSUnit;
82 if (!HasFlagUse) break;
85 // Now all flagged nodes are in FlaggedNodes and N is the bottom-most node.
// (Map the bottom-most node and record it as the SUnit's representative node;
// the listing drops the `NodeSUnit->Node = N;` style assignment expected here.)
88 SUnitMap[N] = NodeSUnit;
90 // Compute the latency for the node. We use the sum of the latencies for
91 // all nodes flagged together into this SUnit.
92 if (InstrItins.isEmpty()) {
93 // No latency information.
94 NodeSUnit->Latency = 1;
// (truncation: the `} else {` separating the two latency strategies is
// missing; the branch below sums per-stage cycles from the itineraries.)
96 NodeSUnit->Latency = 0;
97 if (N->isTargetOpcode()) {
98 unsigned SchedClass = TII->getSchedClass(N->getTargetOpcode());
99 InstrStage *S = InstrItins.begin(SchedClass);
100 InstrStage *E = InstrItins.end(SchedClass);
// (truncation: the `for (; S != E; ++S)` stage loop header is missing.)
102 NodeSUnit->Latency += S->Cycles;
// Also accumulate the latency of every node glued into this SUnit.
104 for (unsigned i = 0, e = NodeSUnit->FlaggedNodes.size(); i != e; ++i) {
105 SDNode *FNode = NodeSUnit->FlaggedNodes[i];
106 if (FNode->isTargetOpcode()) {
107 unsigned SchedClass = TII->getSchedClass(FNode->getTargetOpcode());
108 InstrStage *S = InstrItins.begin(SchedClass);
109 InstrStage *E = InstrItins.end(SchedClass);
// (truncation: the stage loop header is missing here as well.)
111 NodeSUnit->Latency += S->Cycles;
117 // Pass 2: add the preds, succs, etc.
// For every SUnit built in pass 1, record target-instruction properties and
// wire up data/chain predecessor and successor edges.
118 for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
119 SUnit *SU = &SUnits[su];
120 SDNode *MainNode = SU->Node;
122 if (MainNode->isTargetOpcode()) {
123 unsigned Opc = MainNode->getTargetOpcode();
124 if (TII->isTwoAddrInstr(Opc))
125 SU->isTwoAddress = true;
126 if (TII->isCommutableInstr(Opc))
127 SU->isCommutable = true;
130 // Find all predecessors and successors of the group.
131 // Temporarily add N to make code simpler.
132 SU->FlaggedNodes.push_back(MainNode);
134 for (unsigned n = 0, e = SU->FlaggedNodes.size(); n != e; ++n) {
135 SDNode *N = SU->FlaggedNodes[n];
137 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
138 SDNode *OpN = N->getOperand(i).Val;
139 if (isPassiveNode(OpN)) continue; // Not scheduled.
140 SUnit *OpSU = SUnitMap[OpN];
141 assert(OpSU && "Node has no SUnit!");
142 if (OpSU == SU) continue; // In the same group.
// Classify the edge: MVT::Other operands are chain (ordering) dependencies,
// everything else is a value dependency.
144 MVT::ValueType OpVT = N->getOperand(i).getValueType();
145 assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
146 bool isChain = OpVT == MVT::Other;
// Preds/Succs are sets, so insert() deduplicates; only bump the pending
// counters when a new edge is actually added.
148 if (SU->Preds.insert(std::make_pair(OpSU, isChain)).second) {
// (truncation: the non-chain `SU->NumPredsLeft++;` arm is missing.)
153 SU->NumChainPredsLeft++;
156 if (OpSU->Succs.insert(std::make_pair(SU, isChain)).second) {
159 OpSU->NumSuccsLeft++;
// (truncation: the `else`-style split between value and chain successor
// counters is missing around these two increments.)
161 OpSU->NumChainSuccsLeft++;
167 // Remove MainNode from FlaggedNodes again.
168 SU->FlaggedNodes.pop_back();
174 static void CalculateDepths(SUnit *SU, unsigned Depth) {
175 if (SU->Depth == 0 || Depth > SU->Depth) {
177 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Succs.begin(),
178 E = SU->Succs.end(); I != E; ++I)
179 CalculateDepths(I->first, Depth+1);
183 void ScheduleDAG::CalculateDepths() {
184 SUnit *Entry = SUnitMap[DAG.getEntryNode().Val];
185 ::CalculateDepths(Entry, 0U);
186 for (unsigned i = 0, e = SUnits.size(); i != e; ++i)
187 if (SUnits[i].Preds.size() == 0 && &SUnits[i] != Entry) {
188 ::CalculateDepths(&SUnits[i], 0U);
192 static void CalculateHeights(SUnit *SU, unsigned Height) {
193 if (SU->Height == 0 || Height > SU->Height) {
195 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
196 E = SU->Preds.end(); I != E; ++I)
197 CalculateHeights(I->first, Height+1);
200 void ScheduleDAG::CalculateHeights() {
201 SUnit *Root = SUnitMap[DAG.getRoot().Val];
202 ::CalculateHeights(Root, 0U);
205 /// CountResults - The results of target nodes have register or immediate
206 /// operands first, then an optional chain, and optional flag operands (which do
207 /// not go into the machine instrs.)
208 static unsigned CountResults(SDNode *Node) {
209 unsigned N = Node->getNumValues();
210 while (N && Node->getValueType(N - 1) == MVT::Flag)
212 if (N && Node->getValueType(N - 1) == MVT::Other)
213 --N; // Skip over chain result.
217 /// CountOperands The inputs to target nodes have any actual inputs first,
218 /// followed by an optional chain operand, then flag operands. Compute the
219 /// number of actual operands that will go into the machine instr.
220 static unsigned CountOperands(SDNode *Node) {
221 unsigned N = Node->getNumOperands();
222 while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
224 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
225 --N; // Ignore chain if it exists.
229 static unsigned CreateVirtualRegisters(MachineInstr *MI,
232 const TargetInstrInfo *TII,
233 const TargetInstrDescriptor &II) {
234 // Create the result registers for this node and add the result regs to
235 // the machine instruction.
237 RegMap->createVirtualRegister(TII->getInstrOperandRegClass(&II, 0));
238 MI->addRegOperand(ResultReg, MachineOperand::Def);
239 for (unsigned i = 1; i != NumResults; ++i) {
240 const TargetRegisterClass *RC = TII->getInstrOperandRegClass(&II, i);
241 assert(RC && "Isn't a register operand!");
242 MI->addRegOperand(RegMap->createVirtualRegister(RC), MachineOperand::Def);
247 /// getVR - Return the virtual register corresponding to the specified result
248 /// of the specified node.
249 static unsigned getVR(SDOperand Op, std::map<SDNode*, unsigned> &VRBaseMap) {
250 std::map<SDNode*, unsigned>::iterator I = VRBaseMap.find(Op.Val);
251 assert(I != VRBaseMap.end() && "Node emitted out of order - late");
252 return I->second + Op.ResNo;
256 /// AddOperand - Add the specified operand to the specified machine instr. II
257 /// specifies the instruction information for the node, and IIOpNum is the
258 /// operand number (in the II) that we are adding. IIOpNum and II are used for
// (truncation: the tail of this doc sentence — presumably about assertion
// checking — and the IIOpNum parameter line of the signature are missing;
// the pasted line numbers below are not contiguous.)
260 void ScheduleDAG::AddOperand(MachineInstr *MI, SDOperand Op,
262 const TargetInstrDescriptor *II,
263 std::map<SDNode*, unsigned> &VRBaseMap) {
// Dispatch on the kind of SDOperand: already-selected target node results are
// vreg uses; each leaf node type maps to its MachineOperand flavor.
264 if (Op.isTargetOpcode()) {
265 // Note that this case is redundant with the final else block, but we
266 // include it because it is the most common and it makes the logic
// (truncation: end of the sentence — "...simpler here" — is missing.)
268 assert(Op.getValueType() != MVT::Other &&
269 Op.getValueType() != MVT::Flag &&
270 "Chain and flag operands should occur at end of operand list!");
272 // Get/emit the operand.
273 unsigned VReg = getVR(Op, VRBaseMap);
274 MI->addRegOperand(VReg, MachineOperand::Use);
276 // Verify that it is right.
277 assert(MRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
// (truncation: this register-class check is presumably under an `if (II)`
// guard dropped from the listing — TODO confirm.)
279 const TargetRegisterClass *RC = TII->getInstrOperandRegClass(II, IIOpNum);
280 assert(RC && "Don't have operand info for this instruction!");
281 assert(RegMap->getRegClass(VReg) == RC &&
282 "Register class of operand and regclass of use don't agree!");
284 } else if (ConstantSDNode *C =
285 dyn_cast<ConstantSDNode>(Op)) {
286 MI->addImmOperand(C->getValue());
287 } else if (RegisterSDNode*R =
288 dyn_cast<RegisterSDNode>(Op)) {
289 MI->addRegOperand(R->getReg(), MachineOperand::Use);
290 } else if (GlobalAddressSDNode *TGA =
291 dyn_cast<GlobalAddressSDNode>(Op)) {
292 MI->addGlobalAddressOperand(TGA->getGlobal(), TGA->getOffset());
293 } else if (BasicBlockSDNode *BB =
294 dyn_cast<BasicBlockSDNode>(Op)) {
295 MI->addMachineBasicBlockOperand(BB->getBasicBlock());
296 } else if (FrameIndexSDNode *FI =
297 dyn_cast<FrameIndexSDNode>(Op)) {
298 MI->addFrameIndexOperand(FI->getIndex());
299 } else if (JumpTableSDNode *JT =
300 dyn_cast<JumpTableSDNode>(Op)) {
301 MI->addJumpTableIndexOperand(JT->getIndex());
302 } else if (ConstantPoolSDNode *CP =
303 dyn_cast<ConstantPoolSDNode>(Op)) {
304 int Offset = CP->getOffset();
305 unsigned Align = CP->getAlignment();
306 // MachineConstantPool wants an explicit alignment.
// (truncation: the `if (Align == 0)` guard and the else-if chain connecting
// the three alignment computations below are missing from the listing.)
308 if (CP->get()->getType() == Type::DoubleTy)
309 Align = 3; // always 8-byte align doubles.
311 Align = TM.getTargetData()
312 ->getTypeAlignmentShift(CP->get()->getType());
314 // Alignment of packed types. FIXME!
315 Align = TM.getTargetData()->getTypeSize(CP->get()->getType());
316 Align = Log2_64(Align);
// Ask the function's constant pool for (or to create) this constant's slot.
321 unsigned Idx = ConstPool->getConstantPoolIndex(CP->get(), Align);
322 MI->addConstantPoolIndexOperand(Idx, Offset);
323 } else if (ExternalSymbolSDNode *ES =
324 dyn_cast<ExternalSymbolSDNode>(Op)) {
325 MI->addExternalSymbolOperand(ES->getSymbol());
// (truncation: the `} else {` opening the fall-through vreg case is missing;
// the code below duplicates the target-opcode case at the top.)
327 assert(Op.getValueType() != MVT::Other &&
328 Op.getValueType() != MVT::Flag &&
329 "Chain and flag operands should occur at end of operand list!");
330 unsigned VReg = getVR(Op, VRBaseMap);
331 MI->addRegOperand(VReg, MachineOperand::Use);
333 // Verify that it is right.
334 assert(MRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
336 const TargetRegisterClass *RC = TII->getInstrOperandRegClass(II, IIOpNum);
337 assert(RC && "Don't have operand info for this instruction!");
338 assert(RegMap->getRegClass(VReg) == RC &&
339 "Register class of operand and regclass of use don't agree!");
346 /// EmitNode - Generate machine code for an node and needed dependencies.
// Emits one SDNode as a MachineInstr (or handles the special pseudo nodes) and
// records the node's first result vreg in VRBaseMap for later getVR lookups.
// NOTE(review): the pasted line numbers are not contiguous — several loop
// headers, braces, breaks and else-arms are missing; comments flag the gaps.
348 void ScheduleDAG::EmitNode(SDNode *Node,
349 std::map<SDNode*, unsigned> &VRBaseMap) {
350 unsigned VRBase = 0; // First virtual register for node
352 // If machine instruction
353 if (Node->isTargetOpcode()) {
354 unsigned Opc = Node->getTargetOpcode();
355 const TargetInstrDescriptor &II = TII->get(Opc);
357 unsigned NumResults = CountResults(Node);
358 unsigned NodeOperands = CountOperands(Node);
359 unsigned NumMIOperands = NodeOperands + NumResults;
// Sanity-check the operand count against the .td-generated descriptor,
// except for variable-operand instructions.
361 assert((unsigned(II.numOperands) == NumMIOperands ||
362 (II.Flags & M_VARIABLE_OPS)) &&
363 "#operands for dag node doesn't match .td file!");
366 // Create the new machine instruction.
367 MachineInstr *MI = new MachineInstr(Opc, NumMIOperands);
369 // Add result register values for things that are defined by this
// (truncation: end of sentence — "...instruction" — is missing.)
372 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
373 // the CopyToReg'd destination register instead of creating a new vreg.
374 if (NumResults == 1) {
375 for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
// (truncation: the for-loop condition/increment line and the `SDNode *Use =
// *UI;` initialization are missing.)
378 if (Use->getOpcode() == ISD::CopyToReg &&
379 Use->getOperand(2).Val == Node) {
380 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
381 if (MRegisterInfo::isVirtualRegister(Reg)) {
// (truncation: the `VRBase = Reg;` recording and a `break;` plus closing
// braces are missing around this def-operand addition.)
383 MI->addRegOperand(Reg, MachineOperand::Def);
390 // Otherwise, create new virtual registers.
391 if (NumResults && VRBase == 0)
392 VRBase = CreateVirtualRegisters(MI, NumResults, RegMap, TII, II);
394 // Emit all of the actual operands of this instruction, adding them to the
395 // instruction as appropriate.
396 for (unsigned i = 0; i != NodeOperands; ++i)
397 AddOperand(MI, Node->getOperand(i), i+NumResults, &II, VRBaseMap);
399 // Commute node if it has been determined to be profitable.
400 if (CommuteSet.count(Node)) {
401 MachineInstr *NewMI = TII->commuteInstruction(MI);
// (truncation: the null-check branch on NewMI and the replacement of MI by
// NewMI on success are missing between these debug statements.)
403 DEBUG(std::cerr << "Sched: COMMUTING FAILED!\n");
405 DEBUG(std::cerr << "Sched: COMMUTED TO: " << *NewMI);
413 // Now that we have emitted all operands, emit this instruction itself.
414 if ((II.Flags & M_USES_CUSTOM_DAG_SCHED_INSERTION) == 0) {
415 BB->insert(BB->end(), MI);
// (truncation: the `} else {` is missing before the custom-insertion path.)
417 // Insert this instruction into the end of the basic block, potentially
418 // taking some custom action.
419 BB = DAG.getTargetLoweringInfo().InsertAtEndOfBasicBlock(MI, BB);
// Non-target (pseudo) nodes: handled case by case.
// (truncation: the `} else {` before this switch is missing.)
422 switch (Node->getOpcode()) {
// (truncation: the `default:` label preceding this assert is missing.)
425 assert(0 && "This target-independent node should have been selected!");
426 case ISD::EntryToken: // fall thru
427 case ISD::TokenFactor:
// Scheduling-only markers: emit nothing.  (truncation: `break;` missing.)
429 case ISD::CopyToReg: {
430 unsigned InReg = getVR(Node->getOperand(2), VRBaseMap);
431 unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
432 if (InReg != DestReg) // Coalesced away the copy?
433 MRI->copyRegToReg(*BB, BB->end(), DestReg, InReg,
434 RegMap->getRegClass(InReg));
// (truncation: `break;` and closing brace of this case are missing.)
437 case ISD::CopyFromReg: {
438 unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
439 if (MRegisterInfo::isVirtualRegister(SrcReg)) {
440 VRBase = SrcReg; // Just use the input register directly!
// (truncation: `break;` after the vreg shortcut is missing.)
444 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
445 // the CopyToReg'd destination register instead of creating a new vreg.
446 for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
// (truncation: the for-loop condition/increment line and the `SDNode *Use =
// *UI;` initialization are missing.)
449 if (Use->getOpcode() == ISD::CopyToReg &&
450 Use->getOperand(2).Val == Node) {
451 unsigned DestReg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
452 if (MRegisterInfo::isVirtualRegister(DestReg)) {
// (truncation: the `VRBase = DestReg; break;` body and closing braces of
// this search loop are missing.)
459 // Figure out the register class to create for the destreg.
460 const TargetRegisterClass *TRC = 0;
// (truncation: the `if (VRBase)` guard before reusing VRBase's class is
// missing.)
462 TRC = RegMap->getRegClass(VRBase);
// (truncation: the `} else {` arm is missing before the physreg search.)
465 // Pick the register class of the right type that contains this physreg.
466 for (MRegisterInfo::regclass_iterator I = MRI->regclass_begin(),
467 E = MRI->regclass_end(); I != E; ++I)
468 if ((*I)->hasType(Node->getValueType(0)) &&
469 (*I)->contains(SrcReg)) {
// (truncation: the `TRC = *I; break;` body of this search is missing.)
473 assert(TRC && "Couldn't find register class for reg copy!");
475 // Create the reg, emit the copy.
476 VRBase = RegMap->createVirtualRegister(TRC);
478 MRI->copyRegToReg(*BB, BB->end(), VRBase, SrcReg, TRC);
// (truncation: `break;` and closing brace of this case are missing.)
481 case ISD::INLINEASM: {
482 unsigned NumOps = Node->getNumOperands();
483 if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
484 --NumOps; // Ignore the flag operand.
486 // Create the inline asm machine instruction.
// (truncation: the `MachineInstr *MI =` left-hand side of this allocation
// is missing.)
488 new MachineInstr(BB, TargetInstrInfo::INLINEASM, (NumOps-2)/2+1);
490 // Add the asm string as an external symbol operand.
// (truncation: the `const char *AsmStr =` left-hand side is missing.)
492 cast<ExternalSymbolSDNode>(Node->getOperand(1))->getSymbol();
493 MI->addExternalSymbolOperand(AsmStr);
495 // Add all of the operand registers to the instruction.
// Operands come in (flags, value...) groups: the low 3 bits of Flags encode
// the group kind, the remaining bits the value count — see the switch below.
496 for (unsigned i = 2; i != NumOps;) {
497 unsigned Flags = cast<ConstantSDNode>(Node->getOperand(i))->getValue();
498 unsigned NumVals = Flags >> 3;
500 MI->addImmOperand(Flags);
501 ++i; // Skip the ID value.
// (truncation: the `switch (Flags & 7) {` header is missing before these
// case labels.)
504 default: assert(0 && "Bad flags!");
505 case 1: // Use of register.
506 for (; NumVals; --NumVals, ++i) {
507 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
508 MI->addRegOperand(Reg, MachineOperand::Use);
// (truncation: closing brace and `break;` are missing.)
511 case 2: // Def of register.
512 for (; NumVals; --NumVals, ++i) {
513 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
514 MI->addRegOperand(Reg, MachineOperand::Def);
// (truncation: closing brace and `break;` are missing.)
517 case 3: { // Immediate.
518 assert(NumVals == 1 && "Unknown immediate value!");
519 uint64_t Val = cast<ConstantSDNode>(Node->getOperand(i))->getValue();
520 MI->addImmOperand(Val);
// (truncation: `++i; break;` and the case's closing brace are missing.)
524 case 4: // Addressing mode.
525 // The addressing mode has been selected, just add all of the
526 // operands to the machine instruction.
527 for (; NumVals; --NumVals, ++i)
528 AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap);
// (truncation: `break;` plus the closing braces of the switch, loop, case
// and outer else are missing down to the epilogue below.)
537 assert(!VRBaseMap.count(Node) && "Node emitted out of order - early");
538 VRBaseMap[Node] = VRBase;
541 void ScheduleDAG::EmitNoop() {
542 TII->insertNoop(*BB, BB->end());
545 /// EmitSchedule - Emit the machine code in scheduled order.
// Two phases: (1) for the function's first block, materialize copies of the
// live-in physregs into their vregs; (2) emit every node of every SUnit in
// Sequence order, treating null Sequence entries as noops.
// NOTE(review): the pasted line numbers are not contiguous — closing braces
// and the noop-emitting else-arm are missing from this listing.
546 void ScheduleDAG::EmitSchedule() {
547 // If this is the first basic block in the function, and if it has live ins
548 // that need to be copied into vregs, emit the copies into the top of the
549 // block before emitting the code for the block.
550 MachineFunction &MF = DAG.getMachineFunction();
551 if (&MF.front() == BB && MF.livein_begin() != MF.livein_end()) {
552 for (MachineFunction::livein_iterator LI = MF.livein_begin(),
553 E = MF.livein_end(); LI != E; ++LI)
// (truncation: a guard line — presumably `if (LI->second)` — is missing
// before this copy.)
555 MRI->copyRegToReg(*MF.begin(), MF.begin()->end(), LI->second,
556 LI->first, RegMap->getRegClass(LI->second));
560 // Finally, emit the code for all of the scheduled instructions.
561 std::map<SDNode*, unsigned> VRBaseMap;
562 for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
563 if (SUnit *SU = Sequence[i]) {
// Emit glued (flagged) nodes first so their results exist before SU->Node.
564 for (unsigned j = 0, ee = SU->FlaggedNodes.size(); j != ee; j++)
565 EmitNode(SU->FlaggedNodes[j], VRBaseMap);
566 EmitNode(SU->Node, VRBaseMap);
// (truncation: the `} else {` arm that calls EmitNoop() for a null entry is
// missing; only its comment survives below.)
568 // Null SUnit* is a noop.
574 /// dump - dump the schedule.
575 void ScheduleDAG::dumpSchedule() const {
576 for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
577 if (SUnit *SU = Sequence[i])
580 std::cerr << "**** NOOP ****\n";
585 /// Run - perform scheduling.
587 MachineBasicBlock *ScheduleDAG::Run() {
588 TII = TM.getInstrInfo();
589 MRI = TM.getRegisterInfo();
590 RegMap = BB->getParent()->getSSARegMap();
591 ConstPool = BB->getParent()->getConstantPool();
597 /// SUnit - Scheduling unit. It's an wrapper around either a single SDNode or
598 /// a group of nodes flagged together.
// Prints a one-SUnit summary ("SU(n): ...") to std::cerr, including any
// flag-glued nodes.  NOTE(review): the pasted line numbers are not contiguous
// — the dump of the main node, loop indentation output, and the closing
// braces are missing from this listing.
599 void SUnit::dump(const SelectionDAG *G) const {
600 std::cerr << "SU(" << NodeNum << "): ";
// (truncation: the `Node->dump(G)` call and newline output expected here are
// missing.)
603 if (FlaggedNodes.size() != 0) {
604 for (unsigned i = 0, e = FlaggedNodes.size(); i != e; i++) {
// (truncation: the indentation output preceding each flagged node is
// missing.)
606 FlaggedNodes[i]->dump(G);
612 void SUnit::dumpAll(const SelectionDAG *G) const {
615 std::cerr << " # preds left : " << NumPredsLeft << "\n";
616 std::cerr << " # succs left : " << NumSuccsLeft << "\n";
617 std::cerr << " # chain preds left : " << NumChainPredsLeft << "\n";
618 std::cerr << " # chain succs left : " << NumChainSuccsLeft << "\n";
619 std::cerr << " Latency : " << Latency << "\n";
620 std::cerr << " Depth : " << Depth << "\n";
621 std::cerr << " Height : " << Height << "\n";
623 if (Preds.size() != 0) {
624 std::cerr << " Predecessors:\n";
625 for (std::set<std::pair<SUnit*,bool> >::const_iterator I = Preds.begin(),
626 E = Preds.end(); I != E; ++I) {
630 std::cerr << " val ";
634 if (Succs.size() != 0) {
635 std::cerr << " Successors:\n";
636 for (std::set<std::pair<SUnit*, bool> >::const_iterator I = Succs.begin(),
637 E = Succs.end(); I != E; ++I) {
641 std::cerr << " val ";