1 //===- DataStructure.cpp - Implement the core data structure analysis -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the core data structure functionality.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Analysis/DataStructure/DSGraphTraits.h"
15 #include "llvm/Function.h"
16 #include "llvm/GlobalVariable.h"
17 #include "llvm/Instructions.h"
18 #include "llvm/DerivedTypes.h"
19 #include "llvm/Target/TargetData.h"
20 #include "llvm/Assembly/Writer.h"
21 #include "llvm/Support/CommandLine.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/ADT/DepthFirstIterator.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/Support/Timer.h"
// Pass-wide configuration, statistics counters, and timing support for DSA.
// NOTE(review): the stale per-line numbers are non-contiguous, so some lines
// (likely an #ifdef/#else pair selecting between the two TIME_REGION
// definitions) appear to be elided from this copy — confirm against upstream.
30 #define COLLAPSE_ARRAYS_AGGRESSIVELY 0
// Counters reported under the "dsa" statistic group (-stats output).
33 Statistic<> NumFolds ("dsa", "Number of nodes completely folded");
34 Statistic<> NumCallNodesMerged("dsa", "Number of call nodes merged");
35 Statistic<> NumNodeAllocated ("dsa", "Number of nodes allocated");
36 Statistic<> NumDNE ("dsa", "Number of nodes removed by reachability");
37 Statistic<> NumTrivialDNE ("dsa", "Number of nodes trivially removed");
38 Statistic<> NumTrivialGlobalDNE("dsa", "Number of globals trivially removed");
// Enabled variant: TIME_REGION declares a NamedRegionTimer that times the
// enclosing scope.
42 #define TIME_REGION(VARNAME, DESC) \
43 NamedRegionTimer VARNAME(DESC)
// Disabled variant: TIME_REGION expands to nothing.
45 #define TIME_REGION(VARNAME, DESC)
50 /// isForwarding - Return true if this NodeHandle is forwarding to another
/// node, i.e. the pointed-to DSNode has been merged away and now redirects
/// through its ForwardNH.  A null handle is never forwarding.
52 bool DSNodeHandle::isForwarding() const {
53 return N && N->isForwarding();
// NOTE(review): the closing brace appears to be elided from this copy.
/// HandleForwarding - Out-of-line slow path for getNode(): chase the
/// forwarding link of the referenced node, updating this handle's node and
/// Offset to the forward target, and drop the referrer count on the old node
/// (severing the forwarding link when the last referrer goes away).
/// NOTE(review): several body lines and the closing braces appear to be
/// elided from this copy — confirm against upstream before editing logic.
56 DSNode *DSNodeHandle::HandleForwarding() const {
57 assert(N->isForwarding() && "Can only be invoked if forwarding!");
59 // Handle node forwarding here!
60 DSNode *Next = N->ForwardNH.getNode(); // Cause recursive shrinkage
61 Offset += N->ForwardNH.getOffset();
63 if (--N->NumReferrers == 0) {
64 // Removing the last referrer to the node, sever the forwarding link
// If the forward target has shrunk below our offset, it must have been
// collapsed to a single cell; the offset is clamped accordingly.
70 if (N->Size <= Offset) {
71 assert(N->Size <= 1 && "Forwarded to shrunk but not collapsed node?");
77 //===----------------------------------------------------------------------===//
78 // DSNode Implementation
79 //===----------------------------------------------------------------------===//
/// DSNode - Construct a fresh node, optionally seeding it with type
/// information for T at offset 0 and registering it with graph G.  The node
/// starts with no referrers, zero size, and void type until mergeTypeInfo
/// runs.
81 DSNode::DSNode(const Type *T, DSGraph *G)
82 : NumReferrers(0), Size(0), ParentGraph(G), Ty(Type::VoidTy), NodeType(0) {
83 // Add the type entry if it is specified...
84 if (T) mergeTypeInfo(T, 0);
85 if (G) G->addNode(this);
89 // DSNode copy constructor... do not copy over the referrers list!
// Copies size, type, and flags from N into graph G.  When NullLinks is set
// the links are resized but left null (the caller fills them in later, e.g.
// during reachability cloning).
90 DSNode::DSNode(const DSNode &N, DSGraph *G, bool NullLinks)
91 : NumReferrers(0), Size(N.Size), ParentGraph(G),
92 Ty(N.Ty), NodeType(N.NodeType) {
97 Links.resize(N.Links.size()); // Create the appropriate number of null links
// NOTE(review): the non-NullLinks copy path appears to be elided from this
// copy of the source.
102 /// getTargetData - Get the target data object used to construct this node.
/// Delegates to the parent graph; requires ParentGraph to be non-null.
104 const TargetData &DSNode::getTargetData() const {
105 return ParentGraph->getTargetData();
/// assertOK - Sanity-check this node's invariants: a void-typed node must
/// either be empty or be a collapsed (Array) cell, the node must belong to a
/// graph, and every global recorded on the node must map back to this node
/// in the parent graph's scalar map.
108 void DSNode::assertOK() const {
// Reads as: Ty != void  ||  (Ty == void && (Size == 0 || Array)) — the &&
// binds tighter than ||, which matches the intended invariant.
109 assert((Ty != Type::VoidTy ||
110 Ty == Type::VoidTy && (Size == 0 ||
111 (NodeType & DSNode::Array))) &&
114 assert(ParentGraph && "Node has no parent?");
115 const DSScalarMap &SM = ParentGraph->getScalarMap();
116 for (unsigned i = 0, e = Globals.size(); i != e; ++i) {
117 assert(SM.count(Globals[i]));
118 assert(SM.find(Globals[i])->second.getNode() == this);
122 /// forwardNode - Mark this node as being obsolete, and all references to it
123 /// should be forwarded to the specified node and offset.
/// After this call the node is dead: it points at To via ForwardNH and is
/// unlinked from the parent graph's node list.
125 void DSNode::forwardNode(DSNode *To, unsigned Offset) {
126 assert(this != To && "Cannot forward a node to itself!");
127 assert(ForwardNH.isNull() && "Already forwarding from this node!");
// Collapsed (size <= 1) targets have a single cell, so any offset maps to 0.
128 if (To->Size <= 1) Offset = 0;
129 assert((Offset < To->Size || (Offset == To->Size && Offset == 0)) &&
130 "Forwarded offset is wrong!");
131 ForwardNH.setTo(To, Offset);
136 // Remove this node from the parent graph's Nodes list.
137 ParentGraph->unlinkNode(this);
141 // addGlobal - Add an entry for a global value to the Globals list. This also
142 // marks the node with the 'G' flag if it does not already have it.
// The Globals vector is kept sorted and duplicate-free so that merges can use
// sorted-range algorithms (see MergeSortedVectors).
144 void DSNode::addGlobal(GlobalValue *GV) {
145 // Keep the list sorted.
146 std::vector<GlobalValue*>::iterator I =
147 std::lower_bound(Globals.begin(), Globals.end(), GV);
149 if (I == Globals.end() || *I != GV) {
150 //assert(GV->getType()->getElementType() == Ty);
151 Globals.insert(I, GV);
152 NodeType |= GlobalNode;
156 /// foldNodeCompletely - If we determine that this node has some funny
157 /// behavior happening to it that we cannot represent, we fold it down to a
158 /// single, completely pessimistic, node. This node is represented as a
159 /// single byte with a single TypeEntry of "void".
161 void DSNode::foldNodeCompletely() {
162 if (isNodeCompletelyFolded()) return; // If this node is already folded...
166 // If this node has a size that is <= 1, we don't need to create a forwarding
// node: just mark this node itself as a collapsed array cell.
168 if (getSize() <= 1) {
169 NodeType |= DSNode::Array;
172 assert(Links.size() <= 1 && "Size is 1, but has more links?");
175 // Create the node we are going to forward to. This is required because
176 // some referrers may have an offset that is > 0. By forcing them to
177 // forward, the forwarder has the opportunity to correct the offset.
178 DSNode *DestNode = new DSNode(0, ParentGraph);
179 DestNode->NodeType = NodeType|DSNode::Array;
180 DestNode->Ty = Type::VoidTy;
// Transfer the globals list wholesale; swap avoids a copy.
182 DestNode->Globals.swap(Globals);
184 // Start forwarding to the destination node...
185 forwardNode(DestNode, 0);
187 if (!Links.empty()) {
188 DestNode->Links.reserve(1);
190 DSNodeHandle NH(DestNode);
191 DestNode->Links.push_back(Links[0]);
193 // If we have links, merge all of our outgoing links together...
// Iterate through NH because merging may collapse/forward DestNode itself.
194 for (unsigned i = Links.size()-1; i != 0; --i)
195 NH.getNode()->Links[0].mergeWith(Links[i]);
198 DestNode->Links.resize(1);
203 /// isNodeCompletelyFolded - Return true if this node has been completely
204 /// folded down to something that can never be expanded, effectively losing
205 /// all of the field sensitivity that may be present in the node.
/// The canonical collapsed representation is: size 1, void type, Array flag.
207 bool DSNode::isNodeCompletelyFolded() const {
208 return getSize() == 1 && Ty == Type::VoidTy && isArray();
212 /// TypeElementWalker Class - Used for implementation of physical subtyping...
/// Walks a type depth-first, visiting each first-class leaf element together
/// with its byte offset from the start of the outermost type.  Lives in an
/// anonymous namespace (see the closing marker below); NOTE(review): the
/// opening `namespace {` line and several member lines appear elided here.
214 class TypeElementWalker {
// StackState - One frame of the walk: an aggregate type, its absolute byte
// offset, and the index of the child currently being visited.
219 StackState(const Type *T, unsigned Off = 0)
220 : Ty(T), Offset(Off), Idx(0) {}
223 std::vector<StackState> Stack;
224 const TargetData &TD;
226 TypeElementWalker(const Type *T, const TargetData &td) : TD(td) {
// Accessors for the current leaf element of the walk.
231 bool isDone() const { return Stack.empty(); }
232 const Type *getCurrentType() const { return Stack.back().Ty; }
233 unsigned getCurrentOffset() const { return Stack.back().Offset; }
235 void StepToNextType() {
236 PopStackAndAdvance();
241 /// PopStackAndAdvance - Pop the current element off of the stack and
242 /// advance the underlying element to the next contained member.
243 void PopStackAndAdvance() {
244 assert(!Stack.empty() && "Cannot pop an empty stack!");
246 while (!Stack.empty()) {
247 StackState &SS = Stack.back();
248 if (const StructType *ST = dyn_cast<StructType>(SS.Ty)) {
250 if (SS.Idx != ST->getNumElements()) {
251 const StructLayout *SL = TD.getStructLayout(ST);
// Advance the offset by the distance between consecutive members.
253 unsigned(SL->MemberOffsets[SS.Idx]-SL->MemberOffsets[SS.Idx-1]);
256 Stack.pop_back(); // At the end of the structure
258 const ArrayType *AT = cast<ArrayType>(SS.Ty);
260 if (SS.Idx != AT->getNumElements()) {
261 SS.Offset += unsigned(TD.getTypeSize(AT->getElementType()));
264 Stack.pop_back(); // At the end of the array
269 /// StepToLeaf - Used by physical subtyping to move to the first leaf node
270 /// on the type stack.
// NOTE(review): the `void StepToLeaf() {` signature line appears elided here.
272 if (Stack.empty()) return;
273 while (!Stack.empty() && !Stack.back().Ty->isFirstClassType()) {
274 StackState &SS = Stack.back();
275 if (const StructType *ST = dyn_cast<StructType>(SS.Ty)) {
// Empty aggregates contribute no leaves; skip past them entirely.
276 if (ST->getNumElements() == 0) {
278 PopStackAndAdvance();
280 // Step into the structure...
281 assert(SS.Idx < ST->getNumElements());
282 const StructLayout *SL = TD.getStructLayout(ST);
283 Stack.push_back(StackState(ST->getElementType(SS.Idx),
284 SS.Offset+unsigned(SL->MemberOffsets[SS.Idx])));
287 const ArrayType *AT = cast<ArrayType>(SS.Ty);
288 if (AT->getNumElements() == 0) {
290 PopStackAndAdvance();
292 // Step into the array...
293 assert(SS.Idx < AT->getNumElements());
294 Stack.push_back(StackState(AT->getElementType(),
296 unsigned(TD.getTypeSize(AT->getElementType()))));
302 } // end anonymous namespace
304 /// ElementTypesAreCompatible - Check to see if the specified types are
305 /// "physically" compatible. If so, return true, else return false. We only
306 /// have to check the fields in T1: T2 may be larger than T1. If AllowLargerT1
307 /// is true, then we also allow a larger T1.
/// Compatibility means: walking both types leaf-by-leaf, corresponding
/// leaves sit at identical byte offsets and are identical or losslessly
/// convertible.
309 static bool ElementTypesAreCompatible(const Type *T1, const Type *T2,
310 bool AllowLargerT1, const TargetData &TD){
311 TypeElementWalker T1W(T1, TD), T2W(T2, TD);
313 while (!T1W.isDone() && !T2W.isDone()) {
314 if (T1W.getCurrentOffset() != T2W.getCurrentOffset())
// These locals deliberately shadow the parameters for the current leaf pair.
317 const Type *T1 = T1W.getCurrentType();
318 const Type *T2 = T2W.getCurrentType();
319 if (T1 != T2 && !T1->isLosslesslyConvertibleTo(T2))
322 T1W.StepToNextType();
323 T2W.StepToNextType();
// T1 must be exhausted (or larger-T1 explicitly permitted) for success.
326 return AllowLargerT1 || T1W.isDone();
330 /// mergeTypeInfo - This method merges the specified type into the current node
331 /// at the specified offset. This may update the current node's type record if
332 /// this gives more information to the node, it may do nothing to the node if
333 /// this information is already known, or it may merge the node completely (and
334 /// return true) if the information is incompatible with what is already known.
336 /// This method returns true if the node is completely folded, otherwise false.
/// If FoldIfIncompatible is false the node is left unfolded even on conflict.
338 bool DSNode::mergeTypeInfo(const Type *NewTy, unsigned Offset,
339 bool FoldIfIncompatible) {
340 const TargetData &TD = getTargetData();
341 // Check to make sure the Size member is up-to-date. Size can be one of the
343 // Size = 0, Ty = Void: Nothing is known about this node.
344 // Size = 0, Ty = FnTy: FunctionPtr doesn't have a size, so we use zero
345 // Size = 1, Ty = Void, Array = 1: The node is collapsed
346 // Otherwise, sizeof(Ty) = Size
348 assert(((Size == 0 && Ty == Type::VoidTy && !isArray()) ||
// NOTE(review): this disjunct is duplicated two lines below — one of the two
// clauses is likely a transcription artifact or should differ; confirm
// against upstream.
349 (Size == 0 && !Ty->isSized() && !isArray()) ||
350 (Size == 1 && Ty == Type::VoidTy && isArray()) ||
351 (Size == 0 && !Ty->isSized() && !isArray()) ||
352 (TD.getTypeSize(Ty) == Size)) &&
353 "Size member of DSNode doesn't match the type structure!");
354 assert(NewTy != Type::VoidTy && "Cannot merge void type into DSNode!");
356 if (Offset == 0 && NewTy == Ty)
357 return false; // This should be a common case, handle it efficiently
359 // Return true immediately if the node is completely folded.
360 if (isNodeCompletelyFolded()) return true;
362 // If this is an array type, eliminate the outside arrays because they won't
363 // be used anyway. This greatly reduces the size of large static arrays used
364 // as global variables, for example.
366 bool WillBeArray = false;
367 while (const ArrayType *AT = dyn_cast<ArrayType>(NewTy)) {
368 // FIXME: we might want to keep small arrays, but must be careful about
369 // things like: [2 x [10000 x int*]]
370 NewTy = AT->getElementType();
374 // Figure out how big the new type we're merging in is...
375 unsigned NewTySize = NewTy->isSized() ? (unsigned)TD.getTypeSize(NewTy) : 0;
377 // Otherwise check to see if we can fold this type into the current node. If
378 // we can't, we fold the node completely, if we can, we potentially update our
// internal state.
381 if (Ty == Type::VoidTy) {
382 // If this is the first type that this node has seen, just accept it without
// question — there is nothing to be incompatible with yet.
384 assert(Offset == 0 && !isArray() &&
385 "Cannot have an offset into a void node!");
388 if (WillBeArray) NodeType |= Array;
391 // Calculate the number of outgoing links from this node.
// One link per pointer-sized cell, rounding up.
392 Links.resize((Size+DS::PointerSize-1) >> DS::PointerShift);
396 // Handle node expansion case here...
397 if (Offset+NewTySize > Size) {
398 // It is illegal to grow this node if we have treated it as an array of
// objects (growing would change the element stride) — collapse instead.
401 if (FoldIfIncompatible) foldNodeCompletely();
405 if (Offset) { // We could handle this case, but we don't for now...
406 std::cerr << "UNIMP: Trying to merge a growth type into "
407 << "offset != 0: Collapsing!\n";
408 if (FoldIfIncompatible) foldNodeCompletely();
412 // Okay, the situation is nice and simple, we are trying to merge a type in
413 // at offset 0 that is bigger than our current type. Implement this by
414 // switching to the new type and then merge in the smaller one, which should
415 // hit the other code path here. If the other code path decides it's not
416 // ok, it will collapse the node as appropriate.
418 const Type *OldTy = Ty;
421 if (WillBeArray) NodeType |= Array;
424 // Must grow links to be the appropriate size...
425 Links.resize((Size+DS::PointerSize-1) >> DS::PointerShift);
427 // Merge in the old type now... which is guaranteed to be smaller than the
// new type we just installed, so it takes the offset-within-type path below.
429 return mergeTypeInfo(OldTy, 0);
432 assert(Offset <= Size &&
433 "Cannot merge something into a part of our type that doesn't exist!");
435 // Find the section of Ty that NewTy overlaps with... first we find the
436 // type that starts at offset Offset.
// O tracks the absolute byte offset at which SubType begins.
439 const Type *SubType = Ty;
441 assert(Offset-O < TD.getTypeSize(SubType) && "Offset out of range!");
443 switch (SubType->getTypeID()) {
444 case Type::StructTyID: {
445 const StructType *STy = cast<StructType>(SubType);
446 const StructLayout &SL = *TD.getStructLayout(STy);
// Linear scan for the member containing Offset-O.
448 unsigned i = 0, e = SL.MemberOffsets.size();
449 for (; i+1 < e && SL.MemberOffsets[i+1] <= Offset-O; ++i)
452 // The offset we are looking for must be in the i'th element...
453 SubType = STy->getElementType(i);
454 O += (unsigned)SL.MemberOffsets[i];
457 case Type::ArrayTyID: {
458 SubType = cast<ArrayType>(SubType)->getElementType();
459 unsigned ElSize = (unsigned)TD.getTypeSize(SubType);
460 unsigned Remainder = (Offset-O) % ElSize;
461 O = Offset-Remainder;
// Default case: offset lands inside a scalar — incompatible, so collapse.
465 if (FoldIfIncompatible) foldNodeCompletely();
470 assert(O == Offset && "Could not achieve the correct offset!");
472 // If we found our type exactly, early exit
473 if (SubType == NewTy) return false;
475 // Differing function types don't require us to merge. They are not values
// anyway — function pointers are treated uniformly.
477 if (isa<FunctionType>(SubType) &&
478 isa<FunctionType>(NewTy)) return false;
480 unsigned SubTypeSize = SubType->isSized() ?
481 (unsigned)TD.getTypeSize(SubType) : 0;
483 // Ok, we are getting desperate now. Check for physical subtyping, where we
484 // just require each element in the node to be compatible.
// The < 256 bounds keep the element-walk cost bounded.
485 if (NewTySize <= SubTypeSize && NewTySize && NewTySize < 256 &&
486 SubTypeSize && SubTypeSize < 256 &&
487 ElementTypesAreCompatible(NewTy, SubType, !isArray(), TD))
490 // Okay, so we found the leader type at the offset requested. Search the list
491 // of types that starts at this offset. If SubType is currently an array or
492 // structure, the type desired may actually be the first element of the
// aggregate; descend through first elements until we hit NewTy or a scalar.
495 unsigned PadSize = SubTypeSize; // Size, including pad memory which is ignored
496 while (SubType != NewTy) {
497 const Type *NextSubType = 0;
498 unsigned NextSubTypeSize = 0;
499 unsigned NextPadSize = 0;
500 switch (SubType->getTypeID()) {
501 case Type::StructTyID: {
502 const StructType *STy = cast<StructType>(SubType);
503 const StructLayout &SL = *TD.getStructLayout(STy);
504 if (SL.MemberOffsets.size() > 1)
505 NextPadSize = (unsigned)SL.MemberOffsets[1];
507 NextPadSize = SubTypeSize;
508 NextSubType = STy->getElementType(0);
509 NextSubTypeSize = (unsigned)TD.getTypeSize(NextSubType);
512 case Type::ArrayTyID:
513 NextSubType = cast<ArrayType>(SubType)->getElementType();
514 NextSubTypeSize = (unsigned)TD.getTypeSize(NextSubType);
515 NextPadSize = NextSubTypeSize;
521 if (NextSubType == 0)
522 break; // In the default case, break out of the loop
524 if (NextPadSize < NewTySize)
525 break; // Don't allow shrinking to a smaller type than NewTySize
526 SubType = NextSubType;
527 SubTypeSize = NextSubTypeSize;
528 PadSize = NextPadSize;
531 // If we found the type exactly, return it...
532 if (SubType == NewTy)
535 // Check to see if we have a compatible, but different type...
536 if (NewTySize == SubTypeSize) {
537 // Check to see if this type is obviously convertible... int -> uint f.e.
538 if (NewTy->isLosslesslyConvertibleTo(SubType))
541 // Check to see if we have a pointer & integer mismatch going on here,
542 // loading a pointer as a long, for example.
// NOTE(review): && binds tighter than ||, so this reads as
// (int && ptr) || (int && ptr), which is the intended pairing; consider
// parenthesizing to silence compiler precedence warnings.
544 if (SubType->isInteger() && isa<PointerType>(NewTy) ||
545 NewTy->isInteger() && isa<PointerType>(SubType))
547 } else if (NewTySize > SubTypeSize && NewTySize <= PadSize) {
548 // We are accessing the field, plus some structure padding. Ignore the
549 // structure padding.
// Incompatible: report the conflict under DEBUG and fold if allowed.
554 if (getParentGraph()->getReturnNodes().size())
555 M = getParentGraph()->getReturnNodes().begin()->first->getParent();
556 DEBUG(std::cerr << "MergeTypeInfo Folding OrigTy: ";
557 WriteTypeSymbolic(std::cerr, Ty, M) << "\n due to:";
558 WriteTypeSymbolic(std::cerr, NewTy, M) << " @ " << Offset << "!\n"
560 WriteTypeSymbolic(std::cerr, SubType, M) << "\n\n");
562 if (FoldIfIncompatible) foldNodeCompletely();
568 /// addEdgeTo - Add an edge from the current node to the specified node. This
569 /// can cause merging of nodes in the graph.
/// If a link already exists at Offset the target handles are merged;
/// otherwise the link is simply installed.  Null handles are ignored.
571 void DSNode::addEdgeTo(unsigned Offset, const DSNodeHandle &NH) {
572 if (NH.isNull()) return; // Nothing to do
574 DSNodeHandle &ExistingEdge = getLink(Offset);
575 if (!ExistingEdge.isNull()) {
576 // Merge the two nodes...
577 ExistingEdge.mergeWith(NH);
578 } else { // No merging to perform...
579 setLink(Offset, NH); // Just force a link in there...
584 /// MergeSortedVectors - Efficiently merge a vector into another vector where
585 /// duplicates are not allowed and both are sorted. This assumes that 'T's are
586 /// efficiently copyable and have sane comparison semantics.
/// On return Dest is the sorted, duplicate-free union of Dest and Src.
588 static void MergeSortedVectors(std::vector<GlobalValue*> &Dest,
589 const std::vector<GlobalValue*> &Src) {
590 // By far, the most common cases will be the simple ones. In these cases,
591 // avoid having to allocate a temporary vector...
593 if (Src.empty()) { // Nothing to merge in...
595 } else if (Dest.empty()) { // Just copy the result in...
597 } else if (Src.size() == 1) { // Insert a single element...
598 const GlobalValue *V = Src[0];
599 std::vector<GlobalValue*>::iterator I =
600 std::lower_bound(Dest.begin(), Dest.end(), V);
601 if (I == Dest.end() || *I != Src[0]) // If not already contained...
602 Dest.insert(I, Src[0]);
603 } else if (Dest.size() == 1) {
// Symmetric single-element case: adopt Src, then re-insert the old element.
604 GlobalValue *Tmp = Dest[0]; // Save value in temporary...
605 Dest = Src; // Copy over list...
606 std::vector<GlobalValue*>::iterator I =
607 std::lower_bound(Dest.begin(), Dest.end(), Tmp);
608 if (I == Dest.end() || *I != Tmp) // If not already contained...
// General case: classic merge of two sorted ranges plus unique.
612 // Make a copy to the side of Dest...
613 std::vector<GlobalValue*> Old(Dest);
615 // Make space for all of the type entries now...
616 Dest.resize(Dest.size()+Src.size());
618 // Merge the two sorted ranges together... into Dest.
619 std::merge(Old.begin(), Old.end(), Src.begin(), Src.end(), Dest.begin());
621 // Now erase any duplicate entries that may have accumulated into the
622 // vectors (because they were in both of the input sets)
623 Dest.erase(std::unique(Dest.begin(), Dest.end()), Dest.end());
/// mergeGlobals - Merge the sorted globals list RHS into this node's sorted
/// Globals list, keeping it duplicate-free.
627 void DSNode::mergeGlobals(const std::vector<GlobalValue*> &RHS) {
628 MergeSortedVectors(Globals, RHS);
631 // MergeNodes - Helper function for DSNode::mergeWith().
632 // This function does the hard work of merging two nodes, CurNodeH
633 // and NH after filtering out trivial cases and making sure that
634 // CurNodeH.offset >= NH.offset.
637 // Since merging may cause either node to go away, we must always
638 // use the node-handles to refer to the nodes. These node handles are
639 // automatically updated during merging, so will always provide access
640 // to the correct node after a merge.
642 void DSNode::MergeNodes(DSNodeHandle& CurNodeH, DSNodeHandle& NH) {
643 assert(CurNodeH.getOffset() >= NH.getOffset() &&
644 "This should have been enforced in the caller.");
645 assert(CurNodeH.getNode()->getParentGraph()==NH.getNode()->getParentGraph() &&
646 "Cannot merge two nodes that are not in the same graph!");
648 // Now we know that Offset >= NH.Offset, so convert it so our "Offset" (with
649 // respect to NH.Offset) is now zero. NOffset is the distance from the base
650 // of our object that N starts from.
652 unsigned NOffset = CurNodeH.getOffset()-NH.getOffset();
653 unsigned NSize = NH.getNode()->getSize();
655 // If the two nodes are of different size, and the smaller node has the array
656 // bit set, collapse!
657 if (NSize != CurNodeH.getNode()->getSize()) {
// This aggressive collapsing is compiled out by default (see the
// COLLAPSE_ARRAYS_AGGRESSIVELY knob at the top of the file).
658 #if COLLAPSE_ARRAYS_AGGRESSIVELY
659 if (NSize < CurNodeH.getNode()->getSize()) {
660 if (NH.getNode()->isArray())
661 NH.getNode()->foldNodeCompletely();
662 } else if (CurNodeH.getNode()->isArray()) {
663 NH.getNode()->foldNodeCompletely();
668 // Merge the type entries of the two nodes together...
669 if (NH.getNode()->Ty != Type::VoidTy)
670 CurNodeH.getNode()->mergeTypeInfo(NH.getNode()->Ty, NOffset);
671 assert(!CurNodeH.getNode()->isDeadNode());
673 // If we are merging a node with a completely folded node, then both nodes are
674 // now completely folded.
676 if (CurNodeH.getNode()->isNodeCompletelyFolded()) {
677 if (!NH.getNode()->isNodeCompletelyFolded()) {
678 NH.getNode()->foldNodeCompletely();
679 assert(NH.getNode() && NH.getOffset() == 0 &&
680 "folding did not make offset 0?");
681 NOffset = NH.getOffset();
682 NSize = NH.getNode()->getSize();
683 assert(NOffset == 0 && NSize == 1);
685 } else if (NH.getNode()->isNodeCompletelyFolded()) {
686 CurNodeH.getNode()->foldNodeCompletely();
687 assert(CurNodeH.getNode() && CurNodeH.getOffset() == 0 &&
688 "folding did not make offset 0?");
689 NSize = NH.getNode()->getSize();
690 NOffset = NH.getOffset();
691 assert(NOffset == 0 && NSize == 1);
// Folding above may have merged the two handles onto the same node already.
694 DSNode *N = NH.getNode();
695 if (CurNodeH.getNode() == N || N == 0) return;
696 assert(!CurNodeH.getNode()->isDeadNode());
698 // Merge the NodeType information.
699 CurNodeH.getNode()->NodeType |= N->NodeType;
701 // Start forwarding to the new node!
702 N->forwardNode(CurNodeH.getNode(), NOffset);
703 assert(!CurNodeH.getNode()->isDeadNode());
705 // Make all of the outgoing links of N now be outgoing links of CurNodeH.
707 for (unsigned i = 0; i < N->getNumLinks(); ++i) {
708 DSNodeHandle &Link = N->getLink(i << DS::PointerShift);
709 if (Link.getNode()) {
710 // Compute the offset into the current node at which to
711 // merge this link. In the common case, this is a linear
712 // relation to the offset in the original node (with
713 // wrapping), but if the current node gets collapsed due to
714 // recursive merging, we must make sure to merge in all remaining
715 // links at offset zero.
716 unsigned MergeOffset = 0;
717 DSNode *CN = CurNodeH.getNode();
719 MergeOffset = ((i << DS::PointerShift)+NOffset) % CN->getSize();
720 CN->addEdgeTo(MergeOffset, Link);
724 // Now that there are no outgoing edges, all of the Links are dead.
727 // Merge the globals list...
728 if (!N->Globals.empty()) {
729 CurNodeH.getNode()->mergeGlobals(N->Globals);
731 // Delete the globals from the old node...
// swap with an empty temporary actually releases the vector's storage.
732 std::vector<GlobalValue*>().swap(N->Globals);
737 /// mergeWith - Merge this node and the specified node, moving all links to and
738 /// from the argument node into the current node, deleting the node argument.
739 /// Offset indicates what offset the specified node is to be merged into the
// current node at.
742 /// The specified node may be a null pointer (in which case, we update it to
743 /// point to this node).
745 void DSNode::mergeWith(const DSNodeHandle &NH, unsigned Offset) {
746 DSNode *N = NH.getNode();
// Trivial self-merge at the same offset: nothing to do.
747 if (N == this && NH.getOffset() == Offset)
750 // If the RHS is a null node, make it point to this node!
752 NH.mergeWith(DSNodeHandle(this, Offset));
756 assert(!N->isDeadNode() && !isDeadNode());
757 assert(!hasNoReferrers() && "Should not try to fold a useless node!");
760 // We cannot merge two pieces of the same node together, collapse the node
// completely instead (field sensitivity is lost).
762 DEBUG(std::cerr << "Attempting to merge two chunks of"
763 << " the same node together!\n");
764 foldNodeCompletely();
768 // If both nodes are not at offset 0, make sure that we are merging the node
769 // at an later offset into the node with the zero offset.
771 if (Offset < NH.getOffset()) {
772 N->mergeWith(DSNodeHandle(this, Offset), NH.getOffset());
774 } else if (Offset == NH.getOffset() && getSize() < N->getSize()) {
775 // If the offsets are the same, merge the smaller node into the bigger node
776 N->mergeWith(DSNodeHandle(this, Offset), NH.getOffset());
780 // Ok, now we can merge the two nodes. Use a static helper that works with
781 // two node handles, since "this" may get merged away at intermediate steps.
782 DSNodeHandle CurNodeH(this, Offset);
783 DSNodeHandle NHCopy(NH);
784 DSNode::MergeNodes(CurNodeH, NHCopy);
788 //===----------------------------------------------------------------------===//
789 // ReachabilityCloner Implementation
790 //===----------------------------------------------------------------------===//
/// getClonedNH - Return the handle in the destination graph corresponding to
/// SrcNH in the source graph, cloning the source node (and, recursively, the
/// nodes it points to) into Dest on first use.  NodeMap caches the
/// source-node -> dest-handle mapping so each source node is cloned once.
792 DSNodeHandle ReachabilityCloner::getClonedNH(const DSNodeHandle &SrcNH) {
793 if (SrcNH.isNull()) return DSNodeHandle();
794 const DSNode *SN = SrcNH.getNode();
796 DSNodeHandle &NH = NodeMap[SN];
797 if (!NH.isNull()) { // Node already mapped?
798 DSNode *NHN = NH.getNode();
799 return DSNodeHandle(NHN, NH.getOffset()+SrcNH.getOffset());
802 // If SrcNH has globals and the destination graph has one of the same globals,
803 // merge this node with the destination node, which is much more efficient.
804 if (SN->global_begin() != SN->global_end()) {
805 DSScalarMap &DestSM = Dest.getScalarMap();
806 for (DSNode::global_iterator I = SN->global_begin(), E = SN->global_end();
808 GlobalValue *GV = *I;
809 DSScalarMap::iterator GI = DestSM.find(GV);
810 if (GI != DestSM.end() && !GI->second.isNull()) {
811 // We found one, use merge instead!
812 merge(GI->second, Src.getNodeForValue(GV));
// merge() populates NodeMap[SN]; NH references that entry, so it is now set.
813 assert(!NH.isNull() && "Didn't merge node!");
814 DSNode *NHN = NH.getNode();
815 return DSNodeHandle(NHN, NH.getOffset()+SrcNH.getOffset());
// Otherwise clone the node shell (links nulled) and mask off unwanted flags.
820 DSNode *DN = new DSNode(*SN, &Dest, true /* Null out all links */);
821 DN->maskNodeTypes(BitsToKeep);
824 // Next, recursively clone all outgoing links as necessary. Note that
825 // adding these links can cause the node to collapse itself at any time, and
826 // the current node may be merged with arbitrary other nodes. For this
827 // reason, we must always go through NH.
829 for (unsigned i = 0, e = SN->getNumLinks(); i != e; ++i) {
830 const DSNodeHandle &SrcEdge = SN->getLink(i << DS::PointerShift);
831 if (!SrcEdge.isNull()) {
832 const DSNodeHandle &DestEdge = getClonedNH(SrcEdge);
833 // Compute the offset into the current node at which to
834 // merge this link. In the common case, this is a linear
835 // relation to the offset in the original node (with
836 // wrapping), but if the current node gets collapsed due to
837 // recursive merging, we must make sure to merge in all remaining
838 // links at offset zero.
839 unsigned MergeOffset = 0;
840 DSNode *CN = NH.getNode();
841 if (CN->getSize() != 1)
842 MergeOffset = ((i << DS::PointerShift)+NH.getOffset()) % CN->getSize();
843 CN->addEdgeTo(MergeOffset, DestEdge);
847 // If this node contains any globals, make sure they end up in the scalar
848 // map with the correct offset.
849 for (DSNode::global_iterator I = SN->global_begin(), E = SN->global_end();
851 GlobalValue *GV = *I;
852 const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
853 DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
854 assert(DestGNH.getNode() == NH.getNode() &&"Global mapping inconsistent");
855 Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
856 DestGNH.getOffset()+SrcGNH.getOffset()));
858 if (CloneFlags & DSGraph::UpdateInlinedGlobals)
859 Dest.getInlinedGlobals().insert(GV);
861 NH.getNode()->mergeGlobals(SN->getGlobals());
863 return DSNodeHandle(NH.getNode(), NH.getOffset()+SrcNH.getOffset());
/// merge - Merge the source-graph node SrcNH into the destination-graph node
/// NH, cloning SrcNH into Dest on demand.  Where possible the merge is done
/// in-place on the existing destination node; when the destination offset is
/// smaller than the source offset, a temporary clone is allocated instead.
/// NOTE(review): this function continues beyond the end of this file view.
866 void ReachabilityCloner::merge(const DSNodeHandle &NH,
867 const DSNodeHandle &SrcNH) {
868 if (SrcNH.isNull()) return; // Noop
870 // If there is no destination node, just clone the source and assign the
871 // destination node to be it.
872 NH.mergeWith(getClonedNH(SrcNH));
876 // Okay, at this point, we know that we have both a destination and a source
877 // node that need to be merged. Check to see if the source node has already
// been cloned; if so, just merge with the existing clone.
879 const DSNode *SN = SrcNH.getNode();
880 DSNodeHandle &SCNH = NodeMap[SN]; // SourceClonedNodeHandle
881 if (!SCNH.isNull()) { // Node already cloned?
882 DSNode *SCNHN = SCNH.getNode();
883 NH.mergeWith(DSNodeHandle(SCNHN,
884 SCNH.getOffset()+SrcNH.getOffset()));
885 return; // Nothing to do!
888 // Okay, so the source node has not already been cloned. Instead of creating
889 // a new DSNode, only to merge it into the one we already have, try to perform
890 // the merge in-place. The only case we cannot handle here is when the offset
891 // into the existing node is less than the offset into the virtual node we are
892 // merging in. In this case, we have to extend the existing node, which
893 // requires an allocation anyway.
894 DSNode *DN = NH.getNode(); // Make sure the Offset is up-to-date
895 if (NH.getOffset() >= SrcNH.getOffset()) {
896 if (!DN->isNodeCompletelyFolded()) {
897 // Make sure the destination node is folded if the source node is folded.
898 if (SN->isNodeCompletelyFolded()) {
899 DN->foldNodeCompletely();
901 } else if (SN->getSize() != DN->getSize()) {
902 // If the two nodes are of different size, and the smaller node has the
903 // array bit set, collapse!
904 #if COLLAPSE_ARRAYS_AGGRESSIVELY
905 if (SN->getSize() < DN->getSize()) {
907 DN->foldNodeCompletely();
910 } else if (DN->isArray()) {
911 DN->foldNodeCompletely();
917 // Merge the type entries of the two nodes together...
918 if (SN->getType() != Type::VoidTy && !DN->isNodeCompletelyFolded()) {
919 DN->mergeTypeInfo(SN->getType(), NH.getOffset()-SrcNH.getOffset());
924 assert(!DN->isDeadNode());
926 // Merge the NodeType information.
927 DN->mergeNodeFlags(SN->getNodeFlags() & BitsToKeep);
929 // Before we start merging outgoing links and updating the scalar map, make
930 // sure it is known that this is the representative node for the src node.
931 SCNH = DSNodeHandle(DN, NH.getOffset()-SrcNH.getOffset());
933 // If the source node contains any globals, make sure they end up in the
934 // scalar map with the correct offset.
935 if (SN->global_begin() != SN->global_end()) {
936 // Update the globals in the destination node itself.
937 DN->mergeGlobals(SN->getGlobals());
939 // Update the scalar map for the graph we are merging the source node
// from, keeping dest-graph scalar entries consistent with the clone map.
941 for (DSNode::global_iterator I = SN->global_begin(), E = SN->global_end();
943 GlobalValue *GV = *I;
944 const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
945 DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
946 assert(DestGNH.getNode()==NH.getNode() &&"Global mapping inconsistent");
947 Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
948 DestGNH.getOffset()+SrcGNH.getOffset()));
950 if (CloneFlags & DSGraph::UpdateInlinedGlobals)
951 Dest.getInlinedGlobals().insert(GV);
953 NH.getNode()->mergeGlobals(SN->getGlobals());
// Fallback path: destination offset is smaller than the source offset.
956 // We cannot handle this case without allocating a temporary node. Fall
957 // back on being simple.
958 DSNode *NewDN = new DSNode(*SN, &Dest, true /* Null out all links */);
959 NewDN->maskNodeTypes(BitsToKeep);
961 unsigned NHOffset = NH.getOffset();
962 NH.mergeWith(DSNodeHandle(NewDN, SrcNH.getOffset()));
964 assert(NH.getNode() &&
965 (NH.getOffset() > NHOffset ||
966 (NH.getOffset() == 0 && NH.getNode()->isNodeCompletelyFolded())) &&
967 "Merging did not adjust the offset!");
969 // Before we start merging outgoing links and updating the scalar map, make
970 // sure it is known that this is the representative node for the src node.
971 SCNH = DSNodeHandle(NH.getNode(), NH.getOffset()-SrcNH.getOffset());
973 // If the source node contained any globals, make sure to create entries
974 // in the scalar map for them!
975 for (DSNode::global_iterator I = SN->global_begin(), E = SN->global_end();
977 GlobalValue *GV = *I;
978 const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
979 DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
980 assert(DestGNH.getNode()==NH.getNode() &&"Global mapping inconsistent");
981 assert(SrcGNH.getNode() == SN && "Global mapping inconsistent");
982 Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
983 DestGNH.getOffset()+SrcGNH.getOffset()));
985 if (CloneFlags & DSGraph::UpdateInlinedGlobals)
986 Dest.getInlinedGlobals().insert(GV);
991 // Next, recursively merge all outgoing links as necessary. Note that
992 // adding these links can cause the destination node to collapse itself at
993 // any time, and the current node may be merged with arbitrary other nodes.
994 // For this reason, we must always go through NH.
996 for (unsigned i = 0, e = SN->getNumLinks(); i != e; ++i) {
997 const DSNodeHandle &SrcEdge = SN->getLink(i << DS::PointerShift);
998 if (!SrcEdge.isNull()) {
999 // Compute the offset into the current node at which to
1000 // merge this link. In the common case, this is a linear
1001 // relation to the offset in the original node (with
1002 // wrapping), but if the current node gets collapsed due to
1003 // recursive merging, we must make sure to merge in all remaining
1004 // links at offset zero.
1005 DSNode *CN = SCNH.getNode();
1006 unsigned MergeOffset =
1007 ((i << DS::PointerShift)+SCNH.getOffset()) % CN->getSize();
1009 DSNodeHandle Tmp = CN->getLink(MergeOffset);
1010 if (!Tmp.isNull()) {
1011 // Perform the recursive merging. Make sure to create a temporary NH,
1012 // because the Link can disappear in the process of recursive merging.
1013 merge(Tmp, SrcEdge);
1015 Tmp.mergeWith(getClonedNH(SrcEdge));
1016 // Merging this could cause all kinds of recursive things to happen,
1017 // culminating in the current node being eliminated. Since this is
1018 // possible, make sure to reaquire the link from 'CN'.
1020 unsigned MergeOffset = 0;
1021 CN = SCNH.getNode();
1022 MergeOffset = ((i << DS::PointerShift)+SCNH.getOffset()) %CN->getSize();
1023 CN->getLink(MergeOffset).mergeWith(Tmp);
1029 /// mergeCallSite - Merge the nodes reachable from the specified src call
1030 /// site into the nodes reachable from DestCS.
1031 void ReachabilityCloner::mergeCallSite(const DSCallSite &DestCS,
1032 const DSCallSite &SrcCS) {
1033 merge(DestCS.getRetVal(), SrcCS.getRetVal());
1034 unsigned MinArgs = DestCS.getNumPtrArgs();
1035 if (SrcCS.getNumPtrArgs() < MinArgs) MinArgs = SrcCS.getNumPtrArgs();
1037 for (unsigned a = 0; a != MinArgs; ++a)
1038 merge(DestCS.getPtrArg(a), SrcCS.getPtrArg(a));
1042 //===----------------------------------------------------------------------===//
1043 // DSCallSite Implementation
1044 //===----------------------------------------------------------------------===//
1046 // Define here to avoid including iOther.h and BasicBlock.h in DSGraph.h
1047 Function &DSCallSite::getCaller() const {
1048 return *Site.getInstruction()->getParent()->getParent();
/// InitNH - Initialize the node handle NH with a clone of Src, produced by
/// the given ReachabilityCloner into its destination graph.
void DSCallSite::InitNH(DSNodeHandle &NH, const DSNodeHandle &Src,
                        ReachabilityCloner &RC) {
  // getClonedNH clones Src's node (and whatever it reaches) on demand.
  NH = RC.getClonedNH(Src);
1056 //===----------------------------------------------------------------------===//
1057 // DSGraph Implementation
1058 //===----------------------------------------------------------------------===//
1060 /// getFunctionNames - Return a space separated list of the name of the
1061 /// functions in this graph (if any)
1062 std::string DSGraph::getFunctionNames() const {
1063 switch (getReturnNodes().size()) {
1064 case 0: return "Globals graph";
1065 case 1: return getReturnNodes().begin()->first->getName();
1068 for (DSGraph::ReturnNodesTy::const_iterator I = getReturnNodes().begin();
1069 I != getReturnNodes().end(); ++I)
1070 Return += I->first->getName() + " ";
1071 Return.erase(Return.end()-1, Return.end()); // Remove last space character
// DSGraph copy constructor - Deep-copy the specified graph into this one.
// The new graph is not attached to any globals graph (GlobalsGraph == 0) and
// shares G's TargetData.
DSGraph::DSGraph(const DSGraph &G) : GlobalsGraph(0), TD(G.TD) {
  PrintAuxCalls = false;
  // Clone all of G's nodes, scalar map entries, and return nodes into this
  // graph.  NOTE(review): NodeMap here appears to be a locally-declared
  // temporary recording old-node -> new-node correspondences -- its
  // declaration is not visible in this excerpt; confirm.
  cloneInto(G, ScalarMap, ReturnNodes, NodeMap);
// DSGraph copy constructor (with node map) - As the plain copy constructor,
// but additionally returns the old-node -> new-node mapping produced during
// cloning to the caller through NodeMap.
DSGraph::DSGraph(const DSGraph &G, NodeMapTy &NodeMap)
  : GlobalsGraph(0), TD(G.TD) {
  PrintAuxCalls = false;
  cloneInto(G, ScalarMap, ReturnNodes, NodeMap);
// ~DSGraph - Release all nodes owned by this graph.  Containers holding node
// handles are cleared first so node referrer counts drop before the nodes
// themselves are destroyed.
DSGraph::~DSGraph() {
  FunctionCalls.clear();
  AuxFunctionCalls.clear();
  InlinedGlobals.clear();
  ReturnNodes.clear();
  // Drop all intra-node references, so that assertions don't fail...
  for (node_iterator NI = node_begin(), E = node_end(); NI != E; ++NI)
    (*NI)->dropAllReferences();
  // Free all of the nodes.
1104 // dump - Allow inspection of graph in a debugger.
1105 void DSGraph::dump() const { print(std::cerr); }
1108 /// remapLinks - Change all of the Links in the current node according to the
1109 /// specified mapping.
1111 void DSNode::remapLinks(DSGraph::NodeMapTy &OldNodeMap) {
1112 for (unsigned i = 0, e = Links.size(); i != e; ++i)
1113 if (DSNode *N = Links[i].getNode()) {
1114 DSGraph::NodeMapTy::const_iterator ONMI = OldNodeMap.find(N);
1115 if (ONMI != OldNodeMap.end()) {
1116 DSNode *ONMIN = ONMI->second.getNode();
1117 Links[i].setTo(ONMIN, Links[i].getOffset()+ONMI->second.getOffset());
1122 /// updateFromGlobalGraph - This function rematerializes global nodes and
1123 /// nodes reachable from them from the globals graph into the current graph.
1124 /// It uses the vector InlinedGlobals to avoid cloning and merging globals that
1125 /// are already up-to-date in the current graph. In practice, in the TD pass,
1126 /// this is likely to be a large fraction of the live global nodes in each
1127 /// function (since most live nodes are likely to have been brought up-to-date
1128 /// in at _some_ caller or callee).
1130 void DSGraph::updateFromGlobalGraph() {
1131 TIME_REGION(X, "updateFromGlobalGraph");
1132 ReachabilityCloner RC(*this, *GlobalsGraph, 0);
1134 // Clone the non-up-to-date global nodes into this graph.
1135 for (DSScalarMap::global_iterator I = getScalarMap().global_begin(),
1136 E = getScalarMap().global_end(); I != E; ++I)
1137 if (InlinedGlobals.count(*I) == 0) { // GNode is not up-to-date
1138 DSScalarMap::iterator It = GlobalsGraph->ScalarMap.find(*I);
1139 if (It != GlobalsGraph->ScalarMap.end())
1140 RC.merge(getNodeForValue(*I), It->second);
/// cloneInto - Clone the specified DSGraph into the current graph.  The
/// translated ScalarMap for the old function is filled into the OldValMap
/// member, and the translated ReturnNodes map is returned into ReturnNodes.
/// The CloneFlags member controls various aspects of the cloning process.
void DSGraph::cloneInto(const DSGraph &G, DSScalarMap &OldValMap,
                        ReturnNodesTy &OldReturnNodes, NodeMapTy &OldNodeMap,
                        unsigned CloneFlags) {
  TIME_REGION(X, "cloneInto");
  assert(OldNodeMap.empty() && "Returned OldNodeMap should be empty!");
  assert(&G != this && "Cannot clone graph into itself!");
  // Remove alloca or mod/ref bits as specified...
  unsigned BitsToClear = ((CloneFlags & StripAllocaBit)? DSNode::AllocaNode : 0)
    | ((CloneFlags & StripModRefBits)? (DSNode::Modified | DSNode::Read) : 0)
    | ((CloneFlags & StripIncompleteBit)? DSNode::Incomplete : 0);
  BitsToClear |= DSNode::DEAD;  // Clear dead flag...
  // First pass: copy every node of G into this graph, recording the
  // old-node -> new-node correspondence in OldNodeMap.  The copied nodes'
  // links still point into G at this point; they are remapped below.
  for (node_iterator I = G.node_begin(), E = G.node_end(); I != E; ++I) {
    assert(!(*I)->isForwarding() &&
           "Forward nodes shouldn't be in node list!");
    DSNode *New = new DSNode(**I, this);
    New->maskNodeTypes(~BitsToClear);
    OldNodeMap[*I] = New;
  Timer::addPeakMemoryMeasurement();
  // Rewrite the links in the new nodes to point into the current graph now.
  // Note that we don't loop over the node's list to do this.  The problem is
  // that remaping links can cause recursive merging to happen, which means
  // that node_iterator's can get easily invalidated!  Because of this, we
  // loop over the OldNodeMap, which contains all of the new nodes as the
  // .second element of the map elements.  Also note that if we remap a node
  // more than once, we won't break anything.
  for (NodeMapTy::iterator I = OldNodeMap.begin(), E = OldNodeMap.end();
    I->second.getNode()->remapLinks(OldNodeMap);
  // Copy the scalar map... merging all of the global nodes...
  for (DSScalarMap::const_iterator I = G.ScalarMap.begin(),
         E = G.ScalarMap.end(); I != E; ++I) {
    // MappedNode is the clone (in this graph) of the node I's value mapped to.
    DSNodeHandle &MappedNode = OldNodeMap[I->second.getNode()];
    DSNodeHandle &H = OldValMap[I->first];
    DSNode *MappedNodeN = MappedNode.getNode();
    H.mergeWith(DSNodeHandle(MappedNodeN,
                             I->second.getOffset()+MappedNode.getOffset()));
    // If this is a global, add the global to this fn or merge if already exists
    if (GlobalValue* GV = dyn_cast<GlobalValue>(I->first)) {
      ScalarMap[GV].mergeWith(H);
      if (CloneFlags & DSGraph::UpdateInlinedGlobals)
        InlinedGlobals.insert(GV);
  if (!(CloneFlags & DontCloneCallNodes)) {
    // Copy the function calls list.
    for (fc_iterator I = G.fc_begin(), E = G.fc_end(); I != E; ++I)
      FunctionCalls.push_back(DSCallSite(*I, OldNodeMap));
  if (!(CloneFlags & DontCloneAuxCallNodes)) {
    // Copy the auxiliary function calls list.
    for (afc_iterator I = G.afc_begin(), E = G.afc_end(); I != E; ++I)
      AuxFunctionCalls.push_back(DSCallSite(*I, OldNodeMap));
  // Map the return node pointers over...
  for (ReturnNodesTy::const_iterator I = G.getReturnNodes().begin(),
         E = G.getReturnNodes().end(); I != E; ++I) {
    const DSNodeHandle &Ret = I->second;
    DSNodeHandle &MappedRet = OldNodeMap[Ret.getNode()];
    DSNode *MappedRetN = MappedRet.getNode();
    OldReturnNodes.insert(std::make_pair(I->first,
                                         DSNodeHandle(MappedRetN,
                                                      MappedRet.getOffset()+Ret.getOffset())));
1227 static bool PathExistsToClonedNode(const DSNode *N, ReachabilityCloner &RC) {
1229 for (df_iterator<const DSNode*> I = df_begin(N), E = df_end(N); I != E; ++I)
1230 if (RC.hasClonedNode(*I))
1235 static bool PathExistsToClonedNode(const DSCallSite &CS,
1236 ReachabilityCloner &RC) {
1237 if (PathExistsToClonedNode(CS.getRetVal().getNode(), RC))
1239 for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i)
1240 if (PathExistsToClonedNode(CS.getPtrArg(i).getNode(), RC))
/// mergeInGraph - The method is used for merging graphs together.  If the
/// argument graph is not *this, it makes a clone of the specified graph, then
/// merges the nodes specified in the call site with the formal arguments in the
void DSGraph::mergeInGraph(const DSCallSite &CS, Function &F,
                           const DSGraph &Graph, unsigned CloneFlags) {
  TIME_REGION(X, "mergeInGraph");
  // Fastpath for a noop inline.
  if (CS.getNumPtrArgs() == 0 && CS.getRetVal().isNull())
  // If this is not a recursive call, clone the graph into this graph...
  if (&Graph != this) {
    // Clone the callee's graph into the current graph, keeping track of where
    // scalars in the old graph _used_ to point, and of the new nodes matching
    // nodes of the old graph.
    ReachabilityCloner RC(*this, Graph, CloneFlags);
    // Set up argument bindings
    Function::aiterator AI = F.abegin();
    for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i, ++AI) {
      // Advance the argument iterator to the first pointer argument...
      while (AI != F.aend() && !isPointerType(AI->getType())) {
#ifndef NDEBUG  // FIXME: We should merge vararg arguments!
        if (AI == F.aend() && !F.getFunctionType()->isVarArg())
          std::cerr << "Bad call to Function: " << F.getName() << "\n";
      if (AI == F.aend()) break;
      // Add the link from the argument scalar to the provided value.
      RC.merge(CS.getPtrArg(i), Graph.getNodeForValue(AI));
    // Map the return node pointer over.
    if (!CS.getRetVal().isNull())
      RC.merge(CS.getRetVal(), Graph.getReturnNodeFor(F));
    // If requested, copy all of the calls.
    if (!(CloneFlags & DontCloneCallNodes)) {
      // Copy the function calls list.
      for (fc_iterator I = Graph.fc_begin(), E = Graph.fc_end(); I != E; ++I)
        FunctionCalls.push_back(DSCallSite(*I, RC));
    // If the user has us copying aux calls (the normal case), set up a data
    // structure to keep track of which ones we've copied over.
    std::set<const DSCallSite*> CopiedAuxCall;
    // Clone over all globals that appear in the caller and callee graphs.
    hash_set<GlobalVariable*> NonCopiedGlobals;
    for (DSScalarMap::global_iterator GI = Graph.getScalarMap().global_begin(),
           E = Graph.getScalarMap().global_end(); GI != E; ++GI)
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(*GI))
        if (ScalarMap.count(GV))
          RC.merge(ScalarMap[GV], Graph.getNodeForValue(GV));
          NonCopiedGlobals.insert(GV);
    // If the global does not appear in the callers graph we generally don't
    // want to copy the node.  However, if there is a path from the node global
    // node to a node that we did copy in the graph, we *must* copy it to
    // maintain the connection information.  Every time we decide to include a
    // new global, this might make other globals live, so we must iterate
    bool MadeChange = true;
    while (MadeChange) {
      for (hash_set<GlobalVariable*>::iterator I = NonCopiedGlobals.begin();
           I != NonCopiedGlobals.end();) {
        DSNode *GlobalNode = Graph.getNodeForValue(*I).getNode();
        if (RC.hasClonedNode(GlobalNode)) {
          // Already cloned it, remove from set.
          NonCopiedGlobals.erase(I++);
        } else if (PathExistsToClonedNode(GlobalNode, RC)) {
          // Reachable from a cloned node: force-clone it to keep connectivity.
          RC.getClonedNH(Graph.getNodeForValue(*I));
          NonCopiedGlobals.erase(I++);
    // If requested, copy any aux calls that can reach copied nodes.
    if (!(CloneFlags & DontCloneAuxCallNodes)) {
      for (afc_iterator I = Graph.afc_begin(), E = Graph.afc_end(); I!=E; ++I)
        if (CopiedAuxCall.insert(&*I).second &&
            PathExistsToClonedNode(*I, RC)) {
          AuxFunctionCalls.push_back(DSCallSite(*I, RC));
  // NOTE(review): the code below appears to be the in-place path taken when
  // Graph == *this (recursive call) -- it merges formals directly without a
  // cloner; confirm against the full file.
  DSNodeHandle RetVal = getReturnNodeFor(F);
  // Merge the return value with the return value of the context...
  RetVal.mergeWith(CS.getRetVal());
  // Resolve all of the function arguments...
  Function::aiterator AI = F.abegin();
  for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i, ++AI) {
    // Advance the argument iterator to the first pointer argument...
    while (AI != F.aend() && !isPointerType(AI->getType())) {
#ifndef NDEBUG  // FIXME: We should merge varargs arguments!!
      if (AI == F.aend() && !F.getFunctionType()->isVarArg())
        std::cerr << "Bad call to Function: " << F.getName() << "\n";
    if (AI == F.aend()) break;
    // Add the link from the argument scalar to the provided value
    DSNodeHandle &NH = getNodeForValue(AI);
    assert(!NH.isNull() && "Pointer argument without scalarmap entry?");
    NH.mergeWith(CS.getPtrArg(i));
1371 /// getCallSiteForArguments - Get the arguments and return value bindings for
1372 /// the specified function in the current graph.
1374 DSCallSite DSGraph::getCallSiteForArguments(Function &F) const {
1375 std::vector<DSNodeHandle> Args;
1377 for (Function::aiterator I = F.abegin(), E = F.aend(); I != E; ++I)
1378 if (isPointerType(I->getType()))
1379 Args.push_back(getNodeForValue(I));
1381 return DSCallSite(CallSite(), getReturnNodeFor(F), &F, Args);
1384 /// getDSCallSiteForCallSite - Given an LLVM CallSite object that is live in
1385 /// the context of this graph, return the DSCallSite for it.
1386 DSCallSite DSGraph::getDSCallSiteForCallSite(CallSite CS) const {
1387 DSNodeHandle RetVal;
1388 Instruction *I = CS.getInstruction();
1389 if (isPointerType(I->getType()))
1390 RetVal = getNodeForValue(I);
1392 std::vector<DSNodeHandle> Args;
1393 Args.reserve(CS.arg_end()-CS.arg_begin());
1395 // Calculate the arguments vector...
1396 for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E; ++I)
1397 if (isPointerType((*I)->getType()))
1398 Args.push_back(getNodeForValue(*I));
1400 // Add a new function call entry...
1401 if (Function *F = CS.getCalledFunction())
1402 return DSCallSite(CS, RetVal, F, Args);
1404 return DSCallSite(CS, RetVal,
1405 getNodeForValue(CS.getCalledValue()).getNode(), Args);
1410 // markIncompleteNodes - Mark the specified node as having contents that are not
1411 // known with the current analysis we have performed. Because a node makes all
1412 // of the nodes it can reach incomplete if the node itself is incomplete, we
1413 // must recursively traverse the data structure graph, marking all reachable
1414 // nodes as incomplete.
1416 static void markIncompleteNode(DSNode *N) {
1417 // Stop recursion if no node, or if node already marked...
1418 if (N == 0 || N->isIncomplete()) return;
1420 // Actually mark the node
1421 N->setIncompleteMarker();
1423 // Recursively process children...
1424 for (unsigned i = 0, e = N->getSize(); i < e; i += DS::PointerSize)
1425 if (DSNode *DSN = N->getLink(i).getNode())
1426 markIncompleteNode(DSN);
1429 static void markIncomplete(DSCallSite &Call) {
1430 // Then the return value is certainly incomplete!
1431 markIncompleteNode(Call.getRetVal().getNode());
1433 // All objects pointed to by function arguments are incomplete!
1434 for (unsigned i = 0, e = Call.getNumPtrArgs(); i != e; ++i)
1435 markIncompleteNode(Call.getPtrArg(i).getNode());
// markIncompleteNodes - Traverse the graph, identifying nodes that may be
// modified by other functions that have not been resolved yet.  This marks
// nodes that are reachable through three sources of "unknownness":
//   Global Variables, Function Calls, and Incoming Arguments
// For any node that may have unknown components (because something outside the
// scope of current analysis may have modified it), the 'Incomplete' flag is
// added to the NodeType.
void DSGraph::markIncompleteNodes(unsigned Flags) {
  // Mark any incoming arguments as incomplete.
  if (Flags & DSGraph::MarkFormalArgs)
    for (ReturnNodesTy::iterator FI = ReturnNodes.begin(), E =ReturnNodes.end();
      Function &F = *FI->first;
      // "main" is special-cased: its formals are not marked.  NOTE(review):
      // presumably because all of main's callers are known; confirm.
      if (F.getName() != "main")
        for (Function::aiterator I = F.abegin(), E = F.aend(); I != E; ++I)
          if (isPointerType(I->getType()))
            markIncompleteNode(getNodeForValue(I).getNode());
  // Mark stuff passed into functions calls as being incomplete.
  if (!shouldPrintAuxCalls())
    for (std::list<DSCallSite>::iterator I = FunctionCalls.begin(),
           E = FunctionCalls.end(); I != E; ++I)
    for (std::list<DSCallSite>::iterator I = AuxFunctionCalls.begin(),
           E = AuxFunctionCalls.end(); I != E; ++I)
  // Mark all global nodes as incomplete...
  if ((Flags & DSGraph::IgnoreGlobals) == 0)
    for (DSScalarMap::global_iterator I = ScalarMap.global_begin(),
           E = ScalarMap.global_end(); I != E; ++I)
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(*I))
        // Constant globals with initializers are fully known; everything else
        // may be modified externally.
        if (!GV->isConstant() || !GV->hasInitializer())
          markIncompleteNode(ScalarMap[GV].getNode());
1479 static inline void killIfUselessEdge(DSNodeHandle &Edge) {
1480 if (DSNode *N = Edge.getNode()) // Is there an edge?
1481 if (N->getNumReferrers() == 1) // Does it point to a lonely node?
1482 // No interesting info?
1483 if ((N->getNodeFlags() & ~DSNode::Incomplete) == 0 &&
1484 N->getType() == Type::VoidTy && !N->isNodeCompletelyFolded())
1485 Edge.setTo(0, 0); // Kill the edge!
1488 static inline bool nodeContainsExternalFunction(const DSNode *N) {
1489 const std::vector<GlobalValue*> &Globals = N->getGlobals();
1490 for (unsigned i = 0, e = Globals.size(); i != e; ++i)
1491 if (Globals[i]->isExternal() && isa<Function>(Globals[i]))
/// removeIdenticalCalls - Scan the call list, killing useless edges, merging
/// repeated calls to the same callee when safe, and finally deleting exact
/// duplicate call sites.  Updates the NumCallNodesMerged statistic.
static void removeIdenticalCalls(std::list<DSCallSite> &Calls) {
  // Remove trivially identical function calls
  Calls.sort();  // Sort by callee as primary key!
  // Scan the call list cleaning it up as necessary...
  DSNode *LastCalleeNode = 0;
  Function *LastCalleeFunc = 0;
  unsigned NumDuplicateCalls = 0;
  bool LastCalleeContainsExternalFunction = false;
  unsigned NumDeleted = 0;
  for (std::list<DSCallSite>::iterator I = Calls.begin(), E = Calls.end();
    DSCallSite &CS = *I;
    // OldIt remembers this call's position; I already points at the next one.
    std::list<DSCallSite>::iterator OldIt = I++;
    // If the Callee is a useless edge, this must be an unreachable call site,
    if (CS.isIndirectCall() && CS.getCalleeNode()->getNumReferrers() == 1 &&
        CS.getCalleeNode()->isComplete() &&
        CS.getCalleeNode()->getGlobals().empty()) {  // No useful info?
      std::cerr << "WARNING: Useless call site found.\n";
    // If the return value or any arguments point to a void node with no
    // information at all in it, and the call node is the only node to point
    // to it, remove the edge to the node (killing the node).
    killIfUselessEdge(CS.getRetVal());
    for (unsigned a = 0, e = CS.getNumPtrArgs(); a != e; ++a)
      killIfUselessEdge(CS.getPtrArg(a));
    // If this call site calls the same function as the last call site, and if
    // the function pointer contains an external function, this node will
    // never be resolved.  Merge the arguments of the call node because no
    // information will be lost.
    if ((CS.isDirectCall() && CS.getCalleeFunc() == LastCalleeFunc) ||
        (CS.isIndirectCall() && CS.getCalleeNode() == LastCalleeNode)) {
      ++NumDuplicateCalls;
      if (NumDuplicateCalls == 1) {
        // Compute the "external callee" property once per run of duplicates.
        LastCalleeContainsExternalFunction =
          nodeContainsExternalFunction(LastCalleeNode);
        LastCalleeContainsExternalFunction = LastCalleeFunc->isExternal();
      // It is not clear why, but enabling this code makes DSA really
      // sensitive to node forwarding.  Basically, with this enabled, DSA
      // performs different number of inlinings based on which nodes are
      // forwarding or not.  This is clearly a problem, so this code is
      // disabled until this can be resolved.
      if (LastCalleeContainsExternalFunction
          // This should be more than enough context sensitivity!
          // FIXME: Evaluate how many times this is tripped!
          NumDuplicateCalls > 20
        std::list<DSCallSite>::iterator PrevIt = OldIt;
        PrevIt->mergeWith(CS);
        // No need to keep this call anymore.
    if (CS.isDirectCall()) {
      LastCalleeFunc = CS.getCalleeFunc();
      LastCalleeNode = CS.getCalleeNode();
    NumDuplicateCalls = 0;
    if (I != Calls.end() && CS == *I) {
  // Resort now that we simplified things.
  // Now that we are in sorted order, eliminate duplicates.
  std::list<DSCallSite>::iterator I = Calls.begin(), E = Calls.end();
    std::list<DSCallSite>::iterator OldIt = I++;
    // If this call site is now the same as the previous one, we can delete it
  //Calls.erase(std::unique(Calls.begin(), Calls.end()), Calls.end());
  // Track the number of call nodes merged away...
  NumCallNodesMerged += NumDeleted;
  DEBUG(if (NumDeleted)
          std::cerr << "Merged " << NumDeleted << " call nodes.\n";);
// removeTriviallyDeadNodes - After the graph has been constructed, this method
// removes all unreachable nodes that are created because they got merged with
// other nodes in the graph.  These nodes will all be trivially unreachable, so
// we don't have to perform any non-trivial analysis here.
void DSGraph::removeTriviallyDeadNodes() {
  TIME_REGION(X, "removeTriviallyDeadNodes");
  /// NOTE: This code is disabled.  This slows down DSA on 177.mesa
  // Loop over all of the nodes in the graph, calling getNode on each field.
  // This will cause all nodes to update their forwarding edges, causing
  // forwarded nodes to be delete-able.
  { TIME_REGION(X, "removeTriviallyDeadNodes:node_iterate");
  for (node_iterator NI = node_begin(), E = node_end(); NI != E; ++NI) {
    for (unsigned l = 0, e = N->getNumLinks(); l != e; ++l)
      N->getLink(l*N->getPointerSize()).getNode();
  // NOTE: This code is disabled.  Though it should, in theory, allow us to
  // remove more nodes down below, the scan of the scalar map is incredibly
  // expensive for certain programs (with large SCCs).  In the future, if we can
  // make the scalar map scan more efficient, then we can reenable this.
  { TIME_REGION(X, "removeTriviallyDeadNodes:scalarmap");
  // Likewise, forward any edges from the scalar nodes.  While we are at it,
  // clean house a bit.
  for (DSScalarMap::iterator I = ScalarMap.begin(),E = ScalarMap.end();I != E;){
    I->second.getNode();
  // The globals graph is the one with no parent GlobalsGraph pointer.
  bool isGlobalsGraph = !GlobalsGraph;
  for (NodeListTy::iterator NI = Nodes.begin(), E = Nodes.end(); NI != E; ) {
    // Do not remove *any* global nodes in the globals graph.
    // This is a special case because such nodes may not have I, M, R flags set.
    if (Node.isGlobalNode() && isGlobalsGraph) {
    if (Node.isComplete() && !Node.isModified() && !Node.isRead()) {
      // This is a useless node if it has no mod/ref info (checked above),
      // outgoing edges (which it cannot, as it is not modified in this
      // context), and it has no incoming edges.  If it is a global node it may
      // have all of these properties and still have incoming edges, due to the
      // scalar map, so we check those now.
      if (Node.getNumReferrers() == Node.getGlobals().size()) {
        const std::vector<GlobalValue*> &Globals = Node.getGlobals();
        // Loop through and make sure all of the globals are referring directly
        for (unsigned j = 0, e = Globals.size(); j != e; ++j) {
          DSNode *N = getNodeForValue(Globals[j]).getNode();
          assert(N == &Node && "ScalarMap doesn't match globals list!");
        // Make sure NumReferrers still agrees, if so, the node is truly dead.
        if (Node.getNumReferrers() == Globals.size()) {
          for (unsigned j = 0, e = Globals.size(); j != e; ++j)
            ScalarMap.erase(Globals[j]);
          Node.makeNodeDead();
          ++NumTrivialGlobalDNE;
    if (Node.getNodeFlags() == 0 && Node.hasNoReferrers()) {
      // This node is dead!
      NI = Nodes.erase(NI);  // Erase & remove from node list.
  removeIdenticalCalls(FunctionCalls);
  removeIdenticalCalls(AuxFunctionCalls);
1713 /// markReachableNodes - This method recursively traverses the specified
1714 /// DSNodes, marking any nodes which are reachable. All reachable nodes it adds
1715 /// to the set, which allows it to only traverse visited nodes once.
1717 void DSNode::markReachableNodes(hash_set<const DSNode*> &ReachableNodes) const {
1718 if (this == 0) return;
1719 assert(getForwardNode() == 0 && "Cannot mark a forwarded node!");
1720 if (ReachableNodes.insert(this).second) // Is newly reachable?
1721 for (unsigned i = 0, e = getSize(); i < e; i += DS::PointerSize)
1722 getLink(i).getNode()->markReachableNodes(ReachableNodes);
1725 void DSCallSite::markReachableNodes(hash_set<const DSNode*> &Nodes) const {
1726 getRetVal().getNode()->markReachableNodes(Nodes);
1727 if (isIndirectCall()) getCalleeNode()->markReachableNodes(Nodes);
1729 for (unsigned i = 0, e = getNumPtrArgs(); i != e; ++i)
1730 getPtrArg(i).getNode()->markReachableNodes(Nodes);
1733 // CanReachAliveNodes - Simple graph walker that recursively traverses the graph
1734 // looking for a node that is marked alive. If an alive node is found, return
1735 // true, otherwise return false. If an alive node is reachable, this node is
1736 // marked as alive...
1738 static bool CanReachAliveNodes(DSNode *N, hash_set<const DSNode*> &Alive,
1739 hash_set<const DSNode*> &Visited,
1740 bool IgnoreGlobals) {
1741 if (N == 0) return false;
1742 assert(N->getForwardNode() == 0 && "Cannot mark a forwarded node!");
1744 // If this is a global node, it will end up in the globals graph anyway, so we
1745 // don't need to worry about it.
1746 if (IgnoreGlobals && N->isGlobalNode()) return false;
1748 // If we know that this node is alive, return so!
1749 if (Alive.count(N)) return true;
1751 // Otherwise, we don't think the node is alive yet, check for infinite
1753 if (Visited.count(N)) return false; // Found a cycle
1754 Visited.insert(N); // No recursion, insert into Visited...
1756 for (unsigned i = 0, e = N->getSize(); i < e; i += DS::PointerSize)
1757 if (CanReachAliveNodes(N->getLink(i).getNode(), Alive, Visited,
1759 N->markReachableNodes(Alive);
1765 // CallSiteUsesAliveArgs - Return true if the specified call site can reach any
1768 static bool CallSiteUsesAliveArgs(const DSCallSite &CS,
1769 hash_set<const DSNode*> &Alive,
1770 hash_set<const DSNode*> &Visited,
1771 bool IgnoreGlobals) {
1772 if (CanReachAliveNodes(CS.getRetVal().getNode(), Alive, Visited,
1775 if (CS.isIndirectCall() &&
1776 CanReachAliveNodes(CS.getCalleeNode(), Alive, Visited, IgnoreGlobals))
1778 for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i)
1779 if (CanReachAliveNodes(CS.getPtrArg(i).getNode(), Alive, Visited,
1785 // removeDeadNodes - Use a more powerful reachability analysis to eliminate
1786 // subgraphs that are unreachable. This often occurs because the data
1787 // structure doesn't "escape" into it's caller, and thus should be eliminated
1788 // from the caller's graph entirely. This is only appropriate to use when
1791 void DSGraph::removeDeadNodes(unsigned Flags) {
1792 DEBUG(AssertGraphOK(); if (GlobalsGraph) GlobalsGraph->AssertGraphOK());
1794 // Reduce the amount of work we have to do... remove dummy nodes left over by
1796 removeTriviallyDeadNodes();
1798 TIME_REGION(X, "removeDeadNodes");
1800 // FIXME: Merge non-trivially identical call nodes...
1802 // Alive - a set that holds all nodes found to be reachable/alive.
1803 hash_set<const DSNode*> Alive;
1804 std::vector<std::pair<Value*, DSNode*> > GlobalNodes;
1806 // Copy and merge all information about globals to the GlobalsGraph if this is
1807 // not a final pass (where unreachable globals are removed).
1809 // Strip all alloca bits since the current function is only for the BU pass.
1810 // Strip all incomplete bits since they are short-lived properties and they
1811 // will be correctly computed when rematerializing nodes into the functions.
1813 ReachabilityCloner GGCloner(*GlobalsGraph, *this, DSGraph::StripAllocaBit |
1814 DSGraph::StripIncompleteBit);
1816 // Mark all nodes reachable by (non-global) scalar nodes as alive...
1817 { TIME_REGION(Y, "removeDeadNodes:scalarscan");
1818 for (DSScalarMap::iterator I = ScalarMap.begin(), E = ScalarMap.end(); I !=E;)
1819 if (isa<GlobalValue>(I->first)) { // Keep track of global nodes
1820 assert(!I->second.isNull() && "Null global node?");
1821 assert(I->second.getNode()->isGlobalNode() && "Should be a global node!");
1822 GlobalNodes.push_back(std::make_pair(I->first, I->second.getNode()));
1824 // Make sure that all globals are cloned over as roots.
1825 if (!(Flags & DSGraph::RemoveUnreachableGlobals)) {
1826 DSGraph::ScalarMapTy::iterator SMI =
1827 GlobalsGraph->getScalarMap().find(I->first);
1828 if (SMI != GlobalsGraph->getScalarMap().end())
1829 GGCloner.merge(SMI->second, I->second);
1831 GGCloner.getClonedNH(I->second);
1835 DSNode *N = I->second.getNode();
1837 // Check to see if this is a worthless node generated for non-pointer
1838 // values, such as integers. Consider an addition of long types: A+B.
1839 // Assuming we can track all uses of the value in this context, and it is
1840 // NOT used as a pointer, we can delete the node. We will be able to
1841 // detect this situation if the node pointed to ONLY has Unknown bit set
1842 // in the node. In this case, the node is not incomplete, does not point
1843 // to any other nodes (no mod/ref bits set), and is therefore
1844 // uninteresting for data structure analysis. If we run across one of
1845 // these, prune the scalar pointing to it.
1847 if (N->getNodeFlags() == DSNode::UnknownNode && !isa<Argument>(I->first))
1848 ScalarMap.erase(I++);
1851 N->markReachableNodes(Alive);
1857 // The return values are alive as well.
1858 for (ReturnNodesTy::iterator I = ReturnNodes.begin(), E = ReturnNodes.end();
1860 I->second.getNode()->markReachableNodes(Alive);
1862 // Mark any nodes reachable by primary calls as alive...
1863 for (fc_iterator I = fc_begin(), E = fc_end(); I != E; ++I)
1864 I->markReachableNodes(Alive);
1867 // Now find globals and aux call nodes that are already live or reach a live
1868 // value (which makes them live in turn), and continue till no more are found.
1871 hash_set<const DSNode*> Visited;
1872 hash_set<const DSCallSite*> AuxFCallsAlive;
1875 // If any global node points to a non-global that is "alive", the global is
1876 // "alive" as well... Remove it from the GlobalNodes list so we only have
1877 // unreachable globals in the list.
1880 if (!(Flags & DSGraph::RemoveUnreachableGlobals))
1881 for (unsigned i = 0; i != GlobalNodes.size(); ++i)
1882 if (CanReachAliveNodes(GlobalNodes[i].second, Alive, Visited,
1883 Flags & DSGraph::RemoveUnreachableGlobals)) {
1884 std::swap(GlobalNodes[i--], GlobalNodes.back()); // Move to end to...
1885 GlobalNodes.pop_back(); // erase efficiently
1889 // Mark only unresolvable call nodes for moving to the GlobalsGraph since
1890 // call nodes that get resolved will be difficult to remove from that graph.
1891 // The final unresolved call nodes must be handled specially at the end of
1892 // the BU pass (i.e., in main or other roots of the call graph).
1893 for (afc_iterator CI = afc_begin(), E = afc_end(); CI != E; ++CI)
1894 if (AuxFCallsAlive.insert(&*CI).second &&
1895 (CI->isIndirectCall()
1896 || CallSiteUsesAliveArgs(*CI, Alive, Visited,
1897 Flags & DSGraph::RemoveUnreachableGlobals))) {
1898 CI->markReachableNodes(Alive);
1903 // Move dead aux function calls to the end of the list
1904 unsigned CurIdx = 0;
1905 for (std::list<DSCallSite>::iterator CI = AuxFunctionCalls.begin(),
1906 E = AuxFunctionCalls.end(); CI != E; )
1907 if (AuxFCallsAlive.count(&*CI))
1910 // Copy and merge global nodes and dead aux call nodes into the
1911 // GlobalsGraph, and all nodes reachable from those nodes. Update their
1912 // target pointers using the GGCloner.
1914 if (!(Flags & DSGraph::RemoveUnreachableGlobals))
1915 GlobalsGraph->AuxFunctionCalls.push_back(DSCallSite(*CI, GGCloner));
1917 AuxFunctionCalls.erase(CI++);
1920 // We are finally done with the GGCloner so we can destroy it.
1923 // At this point, any nodes which are visited, but not alive, are nodes
1924 // which can be removed. Loop over all nodes, eliminating completely
1925 // unreachable nodes.
1927 std::vector<DSNode*> DeadNodes;
1928 DeadNodes.reserve(Nodes.size());
1929 for (NodeListTy::iterator NI = Nodes.begin(), E = Nodes.end(); NI != E;) {
1931 assert(!N->isForwarding() && "Forwarded node in nodes list?");
1933 if (!Alive.count(N)) {
1935 assert(!N->isForwarding() && "Cannot remove a forwarding node!");
1936 DeadNodes.push_back(N);
1937 N->dropAllReferences();
1942 // Remove all unreachable globals from the ScalarMap.
1943 // If flag RemoveUnreachableGlobals is set, GlobalNodes has only dead nodes.
1944 // In either case, the dead nodes will not be in the set Alive.
1945 for (unsigned i = 0, e = GlobalNodes.size(); i != e; ++i)
1946 if (!Alive.count(GlobalNodes[i].second))
1947 ScalarMap.erase(GlobalNodes[i].first);
1949 assert((Flags & DSGraph::RemoveUnreachableGlobals) && "non-dead global");
1951 // Delete all dead nodes now since their referrer counts are zero.
1952 for (unsigned i = 0, e = DeadNodes.size(); i != e; ++i)
1953 delete DeadNodes[i];
1955 DEBUG(AssertGraphOK(); GlobalsGraph->AssertGraphOK());
1958 void DSGraph::AssertNodeContainsGlobal(const DSNode *N, GlobalValue *GV) const {
1959 assert(std::find(N->getGlobals().begin(), N->getGlobals().end(), GV) !=
1960 N->getGlobals().end() && "Global value not in node!");
1963 void DSGraph::AssertCallSiteInGraph(const DSCallSite &CS) const {
1964 if (CS.isIndirectCall()) {
1965 AssertNodeInGraph(CS.getCalleeNode());
1967 if (CS.getNumPtrArgs() && CS.getCalleeNode() == CS.getPtrArg(0).getNode() &&
1968 CS.getCalleeNode() && CS.getCalleeNode()->getGlobals().empty())
1969 std::cerr << "WARNING: WIERD CALL SITE FOUND!\n";
1972 AssertNodeInGraph(CS.getRetVal().getNode());
1973 for (unsigned j = 0, e = CS.getNumPtrArgs(); j != e; ++j)
1974 AssertNodeInGraph(CS.getPtrArg(j).getNode());
1977 void DSGraph::AssertCallNodesInGraph() const {
1978 for (fc_iterator I = fc_begin(), E = fc_end(); I != E; ++I)
1979 AssertCallSiteInGraph(*I);
1981 void DSGraph::AssertAuxCallNodesInGraph() const {
1982 for (afc_iterator I = afc_begin(), E = afc_end(); I != E; ++I)
1983 AssertCallSiteInGraph(*I);
// AssertGraphOK - Consistency check over the whole graph: per-node checks,
// scalar-map entries, call sites, and function pointer arguments.
// NOTE(review): several lines of this function appear to have been dropped by
// extraction (the body of the first loop, closing braces, and part of a loop
// header below) -- verify against the original file before relying on this text.
1986 void DSGraph::AssertGraphOK() const {
// Walk every node in the graph.
// NOTE(review): the loop body is missing here in this view -- presumably a
// per-node sanity check; confirm against the original source.
1987 for (node_iterator NI = node_begin(), E = node_end(); NI != E; ++NI)
// Every scalar-map entry must point at a non-null node that is in this graph.
1990 for (ScalarMapTy::const_iterator I = ScalarMap.begin(),
1991 E = ScalarMap.end(); I != E; ++I) {
1992 assert(!I->second.isNull() && "Null node in scalarmap!");
1993 AssertNodeInGraph(I->second.getNode());
// A global value's node must be marked global and must list that global.
1994 if (GlobalValue *GV = dyn_cast<GlobalValue>(I->first)) {
1995 assert(I->second.getNode()->isGlobalNode() &&
1996 "Global points to node, but node isn't global?");
1997 AssertNodeContainsGlobal(I->second.getNode(), GV);
// Both the primary and auxiliary call-site lists must reference only nodes
// in this graph.
2000 AssertCallNodesInGraph();
2001 AssertAuxCallNodesInGraph();
2003 // Check that all pointer arguments to any functions in this graph have
// NOTE(review): this comment's continuation and the loop-header line
// "RI != E; ++RI) {" appear to be missing from this view.
2005 for (ReturnNodesTy::const_iterator RI = ReturnNodes.begin(),
2006 E = ReturnNodes.end();
2008 Function &F = *RI->first;
// Every pointer-typed formal argument must have a scalar-map entry.
2009 for (Function::aiterator AI = F.abegin(); AI != F.aend(); ++AI)
2010 if (isPointerType(AI->getType()))
2011 assert(!getNodeForValue(AI).isNull() &&
2012 "Pointer argument must be in the scalar map!");
2016 /// computeNodeMapping - Given roots in two different DSGraphs, traverse the
2017 /// nodes reachable from the two graphs, computing the mapping of nodes from the
2018 /// first to the second graph. This mapping may be many-to-one (i.e. the first
2019 /// graph may have multiple nodes representing one node in the second graph),
2020 /// but it will not work if there is a one-to-many or many-to-many mapping.
2022 void DSGraph::computeNodeMapping(const DSNodeHandle &NH1,
2023 const DSNodeHandle &NH2, NodeMapTy &NodeMap,
2024 bool StrictChecking) {
2025 DSNode *N1 = NH1.getNode(), *N2 = NH2.getNode();
2026 if (N1 == 0 || N2 == 0) return;
2028 DSNodeHandle &Entry = NodeMap[N1];
2029 if (!Entry.isNull()) {
2030 // Termination of recursion!
2031 if (StrictChecking) {
2032 assert(Entry.getNode() == N2 && "Inconsistent mapping detected!");
2033 assert((Entry.getOffset() == (NH2.getOffset()-NH1.getOffset()) ||
2034 Entry.getNode()->isNodeCompletelyFolded()) &&
2035 "Inconsistent mapping detected!");
2040 Entry.setTo(N2, NH2.getOffset()-NH1.getOffset());
2042 // Loop over all of the fields that N1 and N2 have in common, recursively
2043 // mapping the edges together now.
2044 int N2Idx = NH2.getOffset()-NH1.getOffset();
2045 unsigned N2Size = N2->getSize();
2046 for (unsigned i = 0, e = N1->getSize(); i < e; i += DS::PointerSize)
2047 if (unsigned(N2Idx)+i < N2Size)
2048 computeNodeMapping(N1->getLink(i), N2->getLink(N2Idx+i), NodeMap);
2050 computeNodeMapping(N1->getLink(i),
2051 N2->getLink(unsigned(N2Idx+i) % N2Size), NodeMap);