1 //===- DataStructure.cpp - Implement the core data structure analysis -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the core data structure functionality.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Analysis/DataStructure/DSGraphTraits.h"
15 #include "llvm/Constants.h"
16 #include "llvm/Function.h"
17 #include "llvm/GlobalVariable.h"
18 #include "llvm/Instructions.h"
19 #include "llvm/DerivedTypes.h"
20 #include "llvm/Target/TargetData.h"
21 #include "llvm/Assembly/Writer.h"
22 #include "llvm/Support/CommandLine.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/ADT/DepthFirstIterator.h"
25 #include "llvm/ADT/STLExtras.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Support/Timer.h"
// Build-time knob: when non-zero, nodes of differing sizes are folded more
// aggressively during merging (see the #if COLLAPSE_ARRAYS_AGGRESSIVELY
// regions in MergeNodes and ReachabilityCloner::merge below).
#define COLLAPSE_ARRAYS_AGGRESSIVELY 0

// Statistics reported under the "dsa" group by -stats.
Statistic<> NumFolds          ("dsa", "Number of nodes completely folded");
Statistic<> NumCallNodesMerged("dsa", "Number of call nodes merged");
Statistic<> NumNodeAllocated  ("dsa", "Number of nodes allocated");
Statistic<> NumDNE            ("dsa", "Number of nodes removed by reachability");
Statistic<> NumTrivialDNE     ("dsa", "Number of nodes trivially removed");
Statistic<> NumTrivialGlobalDNE("dsa", "Number of globals trivially removed");

// TIME_REGION introduces a NamedRegionTimer to time the enclosing scope.
// NOTE(review): the two definitions below are presumably separated by an
// #ifndef NDEBUG / #else / #endif guard not visible in this excerpt; as
// written here, the second #define silently redefines the first to a no-op.
#define TIME_REGION(VARNAME, DESC) \
   NamedRegionTimer VARNAME(DESC)
#define TIME_REGION(VARNAME, DESC)
/// isForwarding - Return true if this NodeHandle is forwarding to another
/// node, i.e. the node it refers to has been merged away and replaced.
bool DSNodeHandle::isForwarding() const {
  // A null handle never forwards.
  return N && N->isForwarding();
/// HandleForwarding - Out-of-line slow path used when this handle's node is
/// forwarding: follow the forwarding link, folding its offset into this
/// handle's offset, and drop our reference to the obsolete node.
DSNode *DSNodeHandle::HandleForwarding() const {
  assert(N->isForwarding() && "Can only be invoked if forwarding!");

  // Handle node forwarding here!
  DSNode *Next = N->ForwardNH.getNode();  // Cause recursive shrinkage
  Offset += N->ForwardNH.getOffset();

  if (--N->NumReferrers == 0) {
    // Removing the last referrer to the node, sever the forwarding link
  if (N->Size <= Offset) {
    assert(N->Size <= 1 && "Forwarded to shrunk but not collapsed node?");
//===----------------------------------------------------------------------===//
// DSNode Implementation
//===----------------------------------------------------------------------===//

/// DSNode constructor - Create a node with the specified type merged in (or
/// an untyped, empty node if T is null), registering it with graph G if given.
DSNode::DSNode(const Type *T, DSGraph *G)
  : NumReferrers(0), Size(0), ParentGraph(G), Ty(Type::VoidTy), NodeType(0) {
  // Add the type entry if it is specified...
  if (T) mergeTypeInfo(T, 0);
  if (G) G->addNode(this);
// DSNode copy constructor... do not copy over the referrers list!
// If NullLinks is set, the copy gets the same number of outgoing links as N,
// but they are left null rather than copied.
DSNode::DSNode(const DSNode &N, DSGraph *G, bool NullLinks)
  : NumReferrers(0), Size(N.Size), ParentGraph(G),
    Ty(N.Ty), NodeType(N.NodeType) {
    Links.resize(N.Links.size()); // Create the appropriate number of null links
/// getTargetData - Get the target data object used to construct this node.
const TargetData &DSNode::getTargetData() const {
  // The target data is owned by the graph that owns this node.
  return ParentGraph->getTargetData();
/// assertOK - Check this node's invariants: a void-typed node must either be
/// empty or be a collapsed (Array) node, the node must belong to a graph, and
/// every global recorded on this node must map back to this node in the
/// parent graph's scalar map.
void DSNode::assertOK() const {
  // NOTE(review): '&&' binds tighter than '||' in the expression below, so
  // the 'Ty == Type::VoidTy' conjunct is redundant (it can only be reached
  // when the first disjunct is false).  Harmless, but it is the classic
  // -Wparentheses pattern.
  assert((Ty != Type::VoidTy ||
          Ty == Type::VoidTy && (Size == 0 ||
                                 (NodeType & DSNode::Array))) &&
  assert(ParentGraph && "Node has no parent?");
  const DSScalarMap &SM = ParentGraph->getScalarMap();
  for (unsigned i = 0, e = Globals.size(); i != e; ++i) {
    // Each global must be known to the scalar map, and must map to this node.
    assert(SM.global_count(Globals[i]));
    assert(SM.find(Globals[i])->second.getNode() == this);
/// forwardNode - Mark this node as being obsolete, and all references to it
/// should be forwarded to the specified node and offset.
void DSNode::forwardNode(DSNode *To, unsigned Offset) {
  assert(this != To && "Cannot forward a node to itself!");
  assert(ForwardNH.isNull() && "Already forwarding from this node!");
  // A collapsed target (size <= 1) has a single cell: all offsets map to 0.
  if (To->Size <= 1) Offset = 0;
  assert((Offset < To->Size || (Offset == To->Size && Offset == 0)) &&
         "Forwarded offset is wrong!");
  ForwardNH.setTo(To, Offset);

  // Remove this node from the parent graph's Nodes list.
  ParentGraph->unlinkNode(this);
// addGlobal - Add an entry for a global value to the Globals list.  This also
// marks the node with the 'G' flag if it does not already have it.
void DSNode::addGlobal(GlobalValue *GV) {
  // First, check to make sure this is the leader if the global is in an
  // equivalence class.
  GV = getParentGraph()->getScalarMap().getLeaderForGlobal(GV);

  // Keep the list sorted.
  std::vector<GlobalValue*>::iterator I =
    std::lower_bound(Globals.begin(), Globals.end(), GV);

  // Insert only if not already present; lower_bound gave the insertion slot.
  if (I == Globals.end() || *I != GV) {
    Globals.insert(I, GV);
    NodeType |= GlobalNode;
// removeGlobal - Remove the specified global that is explicitly in the globals
// list.  It is an error to remove a global that is not present.
void DSNode::removeGlobal(GlobalValue *GV) {
  // Globals is kept sorted (see addGlobal), so binary-search for the entry.
  std::vector<GlobalValue*>::iterator I =
    std::lower_bound(Globals.begin(), Globals.end(), GV);
  assert(I != Globals.end() && *I == GV && "Global not in node!");
/// foldNodeCompletely - If we determine that this node has some funny
/// behavior happening to it that we cannot represent, we fold it down to a
/// single, completely pessimistic, node.  This node is represented as a
/// single byte with a single TypeEntry of "void".
void DSNode::foldNodeCompletely() {
  if (isNodeCompletelyFolded()) return;  // If this node is already folded...

  // If this node has a size that is <= 1, we don't need to create a forwarding
  // node: collapse it in place by setting the Array bit.
  if (getSize() <= 1) {
    NodeType |= DSNode::Array;
    assert(Links.size() <= 1 && "Size is 1, but has more links?");
  // Create the node we are going to forward to.  This is required because
  // some referrers may have an offset that is > 0.  By forcing them to
  // forward, the forwarder has the opportunity to correct the offset.
  DSNode *DestNode = new DSNode(0, ParentGraph);
  DestNode->NodeType = NodeType|DSNode::Array;
  DestNode->Ty = Type::VoidTy;
  // The folded node takes over ownership of the globals list.
  DestNode->Globals.swap(Globals);

  // Start forwarding to the destination node...
  forwardNode(DestNode, 0);

  if (!Links.empty()) {
    DestNode->Links.reserve(1);

    DSNodeHandle NH(DestNode);
    DestNode->Links.push_back(Links[0]);

    // If we have links, merge all of our outgoing links together...
    // Go through the handle NH (not DestNode directly) because merging may
    // forward/replace DestNode; handles track that automatically.
    for (unsigned i = Links.size()-1; i != 0; --i)
      NH.getNode()->Links[0].mergeWith(Links[i]);

    DestNode->Links.resize(1);
/// isNodeCompletelyFolded - Return true if this node has been completely
/// folded down to something that can never be expanded, effectively losing
/// all of the field sensitivity that may be present in the node.
bool DSNode::isNodeCompletelyFolded() const {
  // The folded representation is exactly: one byte, void type, Array bit set
  // (this is what foldNodeCompletely produces).
  return getSize() == 1 && Ty == Type::VoidTy && isArray();
/// addFullGlobalsList - Compute the full set of global values that are
/// represented by this node.  Unlike getGlobalsList(), this requires fair
/// amount of work to compute, so don't treat this method call as free.
void DSNode::addFullGlobalsList(std::vector<GlobalValue*> &List) const {
  if (globals_begin() == globals_end()) return;

  EquivalenceClasses<GlobalValue*> &EC = getParentGraph()->getGlobalECs();

  // Expand each recorded global to all members of its equivalence class (the
  // node itself only stores class leaders; see addGlobal).
  for (globals_iterator I = globals_begin(), E = globals_end(); I != E; ++I) {
    EquivalenceClasses<GlobalValue*>::iterator ECI = EC.findValue(*I);
      List.insert(List.end(), EC.member_begin(ECI), EC.member_end());
/// addFullFunctionList - Identical to addFullGlobalsList, but only return the
/// functions in the full list.
void DSNode::addFullFunctionList(std::vector<Function*> &List) const {
  if (globals_begin() == globals_end()) return;

  EquivalenceClasses<GlobalValue*> &EC = getParentGraph()->getGlobalECs();

  for (globals_iterator I = globals_begin(), E = globals_end(); I != E; ++I) {
    EquivalenceClasses<GlobalValue*>::iterator ECI = EC.findValue(*I);
    if (ECI == EC.end()) {
      // Global is not in any equivalence class: test it directly.
      if (Function *F = dyn_cast<Function>(*I))
      // Otherwise walk every member of the class, keeping only the Functions.
      for (EquivalenceClasses<GlobalValue*>::member_iterator MI =
             EC.member_begin(ECI), E = EC.member_end(); MI != E; ++MI)
        if (Function *F = dyn_cast<Function>(*MI))
/// TypeElementWalker Class - Used for implementation of physical subtyping...
/// Performs a depth-first walk over the leaf (first-class) fields of a type,
/// tracking the byte offset of each leaf via the TargetData layout info.
class TypeElementWalker {
    // StackState - One frame of the DFS: the type being walked, its byte
    // offset from the start of the outermost type, and the index of the next
    // contained element to visit.
    StackState(const Type *T, unsigned Off = 0)
      : Ty(T), Offset(Off), Idx(0) {}
  std::vector<StackState> Stack;
  const TargetData &TD;   // Layout information for offset computation.
  TypeElementWalker(const Type *T, const TargetData &td) : TD(td) {
  // isDone - True when every leaf field has been visited.
  bool isDone() const { return Stack.empty(); }
  const Type *getCurrentType() const { return Stack.back().Ty; }
  unsigned getCurrentOffset() const { return Stack.back().Offset; }

  void StepToNextType() {
    PopStackAndAdvance();

  /// PopStackAndAdvance - Pop the current element off of the stack and
  /// advance the underlying element to the next contained member.
  void PopStackAndAdvance() {
    assert(!Stack.empty() && "Cannot pop an empty stack!");
    while (!Stack.empty()) {
      StackState &SS = Stack.back();
      if (const StructType *ST = dyn_cast<StructType>(SS.Ty)) {
        if (SS.Idx != ST->getNumElements()) {
          // Advance the offset by the distance between adjacent members.
          const StructLayout *SL = TD.getStructLayout(ST);
            unsigned(SL->MemberOffsets[SS.Idx]-SL->MemberOffsets[SS.Idx-1]);
        Stack.pop_back();  // At the end of the structure
        const ArrayType *AT = cast<ArrayType>(SS.Ty);
        if (SS.Idx != AT->getNumElements()) {
          // Each array element advances the offset by the element size.
          SS.Offset += unsigned(TD.getTypeSize(AT->getElementType()));
        Stack.pop_back();  // At the end of the array

  /// StepToLeaf - Used by physical subtyping to move to the first leaf node
  /// on the type stack.
    if (Stack.empty()) return;
    while (!Stack.empty() && !Stack.back().Ty->isFirstClassType()) {
      StackState &SS = Stack.back();
      if (const StructType *ST = dyn_cast<StructType>(SS.Ty)) {
        if (ST->getNumElements() == 0) {
          // Empty struct has no leaves: pop and move on.
          PopStackAndAdvance();
          // Step into the structure...
          assert(SS.Idx < ST->getNumElements());
          const StructLayout *SL = TD.getStructLayout(ST);
          Stack.push_back(StackState(ST->getElementType(SS.Idx),
                          SS.Offset+unsigned(SL->MemberOffsets[SS.Idx])));
        const ArrayType *AT = cast<ArrayType>(SS.Ty);
        if (AT->getNumElements() == 0) {
          // Zero-length array has no leaves: pop and move on.
          PopStackAndAdvance();
          // Step into the array...
          assert(SS.Idx < AT->getNumElements());
          Stack.push_back(StackState(AT->getElementType(),
                          unsigned(TD.getTypeSize(AT->getElementType()))));
} // end anonymous namespace
/// ElementTypesAreCompatible - Check to see if the specified types are
/// "physically" compatible.  If so, return true, else return false.  We only
/// have to check the fields in T1: T2 may be larger than T1.  If AllowLargerT1
/// is true, then we also allow a larger T1.
static bool ElementTypesAreCompatible(const Type *T1, const Type *T2,
                                      bool AllowLargerT1, const TargetData &TD){
  TypeElementWalker T1W(T1, TD), T2W(T2, TD);

  // Walk the leaf fields of both types in lock-step: each pair of leaves must
  // sit at the same byte offset and be losslessly convertible.
  while (!T1W.isDone() && !T2W.isDone()) {
    if (T1W.getCurrentOffset() != T2W.getCurrentOffset())

    // NOTE: these locals intentionally shadow the T1/T2 parameters for the
    // remainder of the loop body.
    const Type *T1 = T1W.getCurrentType();
    const Type *T2 = T2W.getCurrentType();
    if (T1 != T2 && !T1->isLosslesslyConvertibleTo(T2))

    T1W.StepToNextType();
    T2W.StepToNextType();

  // Compatible if all of T1's fields were consumed (or a larger T1 is OK).
  return AllowLargerT1 || T1W.isDone();
/// mergeTypeInfo - This method merges the specified type into the current node
/// at the specified offset.  This may update the current node's type record if
/// this gives more information to the node, it may do nothing to the node if
/// this information is already known, or it may merge the node completely (and
/// return true) if the information is incompatible with what is already known.
/// This method returns true if the node is completely folded, otherwise false.
bool DSNode::mergeTypeInfo(const Type *NewTy, unsigned Offset,
                           bool FoldIfIncompatible) {
  const TargetData &TD = getTargetData();
  // Check to make sure the Size member is up-to-date.  Size can be one of the
  //  Size = 0, Ty = Void: Nothing is known about this node.
  //  Size = 0, Ty = FnTy: FunctionPtr doesn't have a size, so we use zero
  //  Size = 1, Ty = Void, Array = 1: The node is collapsed
  //  Otherwise, sizeof(Ty) = Size
  // NOTE(review): the second and fourth disjuncts of this assert are
  // identical duplicates of each other ((Size == 0 && !Ty->isSized() &&
  // !isArray())); one of them is redundant and was probably meant to cover a
  // different case.
  assert(((Size == 0 && Ty == Type::VoidTy && !isArray()) ||
          (Size == 0 && !Ty->isSized() && !isArray()) ||
          (Size == 1 && Ty == Type::VoidTy && isArray()) ||
          (Size == 0 && !Ty->isSized() && !isArray()) ||
          (TD.getTypeSize(Ty) == Size)) &&
         "Size member of DSNode doesn't match the type structure!");
  assert(NewTy != Type::VoidTy && "Cannot merge void type into DSNode!");

  if (Offset == 0 && NewTy == Ty)
    return false;  // This should be a common case, handle it efficiently

  // Return true immediately if the node is completely folded.
  if (isNodeCompletelyFolded()) return true;

  // If this is an array type, eliminate the outside arrays because they won't
  // be used anyway.  This greatly reduces the size of large static arrays used
  // as global variables, for example.
  bool WillBeArray = false;
  while (const ArrayType *AT = dyn_cast<ArrayType>(NewTy)) {
    // FIXME: we might want to keep small arrays, but must be careful about
    // things like: [2 x [10000 x int*]]
    NewTy = AT->getElementType();

  // Figure out how big the new type we're merging in is...
  unsigned NewTySize = NewTy->isSized() ? (unsigned)TD.getTypeSize(NewTy) : 0;

  // Otherwise check to see if we can fold this type into the current node.  If
  // we can't, we fold the node completely, if we can, we potentially update our
  if (Ty == Type::VoidTy) {
    // If this is the first type that this node has seen, just accept it without
    assert(Offset == 0 && !isArray() &&
           "Cannot have an offset into a void node!");

    // If this node would have to have an unreasonable number of fields, just
    // collapse it.  This can occur for fortran common blocks, which have stupid
    // things like { [100000000 x double], [1000000 x double] }.
    unsigned NumFields = (NewTySize+DS::PointerSize-1) >> DS::PointerShift;
    if (NumFields > 64) {
      foldNodeCompletely();
    if (WillBeArray) NodeType |= Array;

    // Calculate the number of outgoing links from this node.
    Links.resize(NumFields);

  // Handle node expansion case here...
  if (Offset+NewTySize > Size) {
    // It is illegal to grow this node if we have treated it as an array of
    if (FoldIfIncompatible) foldNodeCompletely();

    if (Offset) {  // We could handle this case, but we don't for now...
      std::cerr << "UNIMP: Trying to merge a growth type into "
                << "offset != 0: Collapsing!\n";
      if (FoldIfIncompatible) foldNodeCompletely();

    // Okay, the situation is nice and simple, we are trying to merge a type in
    // at offset 0 that is bigger than our current type.  Implement this by
    // switching to the new type and then merge in the smaller one, which should
    // hit the other code path here.  If the other code path decides it's not
    // ok, it will collapse the node as appropriate.

    // If this node would have to have an unreasonable number of fields, just
    // collapse it.  This can occur for fortran common blocks, which have stupid
    // things like { [100000000 x double], [1000000 x double] }.
    unsigned NumFields = (NewTySize+DS::PointerSize-1) >> DS::PointerShift;
    if (NumFields > 64) {
      foldNodeCompletely();

    const Type *OldTy = Ty;
    if (WillBeArray) NodeType |= Array;

    // Must grow links to be the appropriate size...
    Links.resize(NumFields);

    // Merge in the old type now... which is guaranteed to be smaller than the
    return mergeTypeInfo(OldTy, 0);

  assert(Offset <= Size &&
         "Cannot merge something into a part of our type that doesn't exist!");

  // Find the section of Ty that NewTy overlaps with... first we find the
  // type that starts at offset Offset.
  const Type *SubType = Ty;
    assert(Offset-O < TD.getTypeSize(SubType) && "Offset out of range!");
    switch (SubType->getTypeID()) {
    case Type::StructTyID: {
      const StructType *STy = cast<StructType>(SubType);
      const StructLayout &SL = *TD.getStructLayout(STy);
      unsigned i = SL.getElementContainingOffset(Offset-O);

      // The offset we are looking for must be in the i'th element...
      SubType = STy->getElementType(i);
      O += (unsigned)SL.MemberOffsets[i];
    case Type::ArrayTyID: {
      // Step into the array: round the offset down to an element boundary.
      SubType = cast<ArrayType>(SubType)->getElementType();
      unsigned ElSize = (unsigned)TD.getTypeSize(SubType);
      unsigned Remainder = (Offset-O) % ElSize;
      O = Offset-Remainder;
      // Default case: first-class type with an interior offset - incompatible.
      if (FoldIfIncompatible) foldNodeCompletely();

  assert(O == Offset && "Could not achieve the correct offset!");

  // If we found our type exactly, early exit
  if (SubType == NewTy) return false;

  // Differing function types don't require us to merge.  They are not values
  if (isa<FunctionType>(SubType) &&
      isa<FunctionType>(NewTy)) return false;

  unsigned SubTypeSize = SubType->isSized() ?
    (unsigned)TD.getTypeSize(SubType) : 0;

  // Ok, we are getting desperate now.  Check for physical subtyping, where we
  // just require each element in the node to be compatible.
  // The < 256 bounds keep the leaf-by-leaf walk from getting expensive.
  if (NewTySize <= SubTypeSize && NewTySize && NewTySize < 256 &&
      SubTypeSize && SubTypeSize < 256 &&
      ElementTypesAreCompatible(NewTy, SubType, !isArray(), TD))

  // Okay, so we found the leader type at the offset requested.  Search the list
  // of types that starts at this offset.  If SubType is currently an array or
  // structure, the type desired may actually be the first element of the
  unsigned PadSize = SubTypeSize; // Size, including pad memory which is ignored
  while (SubType != NewTy) {
    const Type *NextSubType = 0;
    unsigned NextSubTypeSize = 0;
    unsigned NextPadSize = 0;
    switch (SubType->getTypeID()) {
    case Type::StructTyID: {
      const StructType *STy = cast<StructType>(SubType);
      const StructLayout &SL = *TD.getStructLayout(STy);
      // The pad size of element 0 runs up to the start of element 1 (or to
      // the end of the struct if it has a single member).
      if (SL.MemberOffsets.size() > 1)
        NextPadSize = (unsigned)SL.MemberOffsets[1];
        NextPadSize = SubTypeSize;
      NextSubType = STy->getElementType(0);
      NextSubTypeSize = (unsigned)TD.getTypeSize(NextSubType);
    case Type::ArrayTyID:
      NextSubType = cast<ArrayType>(SubType)->getElementType();
      NextSubTypeSize = (unsigned)TD.getTypeSize(NextSubType);
      NextPadSize = NextSubTypeSize;

    if (NextSubType == 0)
      break;  // In the default case, break out of the loop
    if (NextPadSize < NewTySize)
      break;  // Don't allow shrinking to a smaller type than NewTySize
    SubType = NextSubType;
    SubTypeSize = NextSubTypeSize;
    PadSize = NextPadSize;

  // If we found the type exactly, return it...
  if (SubType == NewTy)

  // Check to see if we have a compatible, but different type...
  if (NewTySize == SubTypeSize) {
    // Check to see if this type is obviously convertible... int -> uint f.e.
    if (NewTy->isLosslesslyConvertibleTo(SubType))

    // Check to see if we have a pointer & integer mismatch going on here,
    // loading a pointer as a long, for example.
    // NOTE(review): '&&' binds tighter than '||' here; the grouping happens
    // to be the intended one, but parentheses would silence -Wparentheses.
    if (SubType->isInteger() && isa<PointerType>(NewTy) ||
        NewTy->isInteger() && isa<PointerType>(SubType))
  } else if (NewTySize > SubTypeSize && NewTySize <= PadSize) {
    // We are accessing the field, plus some structure padding.  Ignore the
    // structure padding.

  // Incompatible: emit a debug diagnostic (with a module for symbolic type
  // names when one is available) before folding.
  if (getParentGraph()->retnodes_begin() != getParentGraph()->retnodes_end())
    M = getParentGraph()->retnodes_begin()->first->getParent();
  DEBUG(std::cerr << "MergeTypeInfo Folding OrigTy: ";
        WriteTypeSymbolic(std::cerr, Ty, M) << "\n due to:";
        WriteTypeSymbolic(std::cerr, NewTy, M) << " @ " << Offset << "!\n"
        WriteTypeSymbolic(std::cerr, SubType, M) << "\n\n");

  if (FoldIfIncompatible) foldNodeCompletely();
/// addEdgeTo - Add an edge from the current node to the specified node.  This
/// can cause merging of nodes in the graph.
void DSNode::addEdgeTo(unsigned Offset, const DSNodeHandle &NH) {
  if (NH.isNull()) return;       // Nothing to do

  DSNodeHandle &ExistingEdge = getLink(Offset);
  if (!ExistingEdge.isNull()) {
    // Merge the two nodes...
    ExistingEdge.mergeWith(NH);
  } else {                             // No merging to perform...
    setLink(Offset, NH);               // Just force a link in there...
/// MergeSortedVectors - Efficiently merge a vector into another vector where
/// duplicates are not allowed and both are sorted.  This assumes that 'T's are
/// efficiently copyable and have sane comparison semantics.
static void MergeSortedVectors(std::vector<GlobalValue*> &Dest,
                               const std::vector<GlobalValue*> &Src) {
  // By far, the most common cases will be the simple ones.  In these cases,
  // avoid having to allocate a temporary vector...
  if (Src.empty()) {             // Nothing to merge in...
  } else if (Dest.empty()) {     // Just copy the result in...
  } else if (Src.size() == 1) {  // Insert a single element...
    const GlobalValue *V = Src[0];
    std::vector<GlobalValue*>::iterator I =
      std::lower_bound(Dest.begin(), Dest.end(), V);
    if (I == Dest.end() || *I != Src[0])  // If not already contained...
      Dest.insert(I, Src[0]);
  } else if (Dest.size() == 1) {
    GlobalValue *Tmp = Dest[0];           // Save value in temporary...
    Dest = Src;                           // Copy over list...
    std::vector<GlobalValue*>::iterator I =
      std::lower_bound(Dest.begin(), Dest.end(), Tmp);
    if (I == Dest.end() || *I != Tmp)     // If not already contained...

    // General case: both vectors have multiple elements.
    // Make a copy to the side of Dest...
    std::vector<GlobalValue*> Old(Dest);

    // Make space for all of the type entries now...
    Dest.resize(Dest.size()+Src.size());

    // Merge the two sorted ranges together... into Dest.
    std::merge(Old.begin(), Old.end(), Src.begin(), Src.end(), Dest.begin());

    // Now erase any duplicate entries that may have accumulated into the
    // vectors (because they were in both of the input sets)
    Dest.erase(std::unique(Dest.begin(), Dest.end()), Dest.end());
// mergeGlobals - Merge the given sorted globals list into this node's sorted
// Globals list, dropping duplicates.
void DSNode::mergeGlobals(const std::vector<GlobalValue*> &RHS) {
  MergeSortedVectors(Globals, RHS);
// MergeNodes - Helper function for DSNode::mergeWith().
// This function does the hard work of merging two nodes, CurNodeH
// and NH after filtering out trivial cases and making sure that
// CurNodeH.offset >= NH.offset.
//
// Since merging may cause either node to go away, we must always
// use the node-handles to refer to the nodes.  These node handles are
// automatically updated during merging, so will always provide access
// to the correct node after a merge.
void DSNode::MergeNodes(DSNodeHandle& CurNodeH, DSNodeHandle& NH) {
  assert(CurNodeH.getOffset() >= NH.getOffset() &&
         "This should have been enforced in the caller.");
  assert(CurNodeH.getNode()->getParentGraph()==NH.getNode()->getParentGraph() &&
         "Cannot merge two nodes that are not in the same graph!");

  // Now we know that Offset >= NH.Offset, so convert it so our "Offset" (with
  // respect to NH.Offset) is now zero.  NOffset is the distance from the base
  // of our object that N starts from.
  unsigned NOffset = CurNodeH.getOffset()-NH.getOffset();
  unsigned NSize = NH.getNode()->getSize();

  // If the two nodes are of different size, and the smaller node has the array
  // bit set, collapse!
  if (NSize != CurNodeH.getNode()->getSize()) {
#if COLLAPSE_ARRAYS_AGGRESSIVELY
    if (NSize < CurNodeH.getNode()->getSize()) {
      if (NH.getNode()->isArray())
        NH.getNode()->foldNodeCompletely();
    } else if (CurNodeH.getNode()->isArray()) {
      NH.getNode()->foldNodeCompletely();

  // Merge the type entries of the two nodes together...
  if (NH.getNode()->Ty != Type::VoidTy)
    CurNodeH.getNode()->mergeTypeInfo(NH.getNode()->Ty, NOffset);
  assert(!CurNodeH.getNode()->isDeadNode());

  // If we are merging a node with a completely folded node, then both nodes are
  // now completely folded.
  if (CurNodeH.getNode()->isNodeCompletelyFolded()) {
    if (!NH.getNode()->isNodeCompletelyFolded()) {
      NH.getNode()->foldNodeCompletely();
      assert(NH.getNode() && NH.getOffset() == 0 &&
             "folding did not make offset 0?");
      // Folding collapsed the node to a single byte at offset 0.
      NOffset = NH.getOffset();
      NSize = NH.getNode()->getSize();
      assert(NOffset == 0 && NSize == 1);
  } else if (NH.getNode()->isNodeCompletelyFolded()) {
    CurNodeH.getNode()->foldNodeCompletely();
    assert(CurNodeH.getNode() && CurNodeH.getOffset() == 0 &&
           "folding did not make offset 0?");
    NSize = NH.getNode()->getSize();
    NOffset = NH.getOffset();
    assert(NOffset == 0 && NSize == 1);

  // If the folding above already unified the two nodes (or N vanished),
  // there is nothing left to merge.
  DSNode *N = NH.getNode();
  if (CurNodeH.getNode() == N || N == 0) return;
  assert(!CurNodeH.getNode()->isDeadNode());

  // Merge the NodeType information.
  CurNodeH.getNode()->NodeType |= N->NodeType;

  // Start forwarding to the new node!
  N->forwardNode(CurNodeH.getNode(), NOffset);
  assert(!CurNodeH.getNode()->isDeadNode());

  // Make all of the outgoing links of N now be outgoing links of CurNodeH.
  for (unsigned i = 0; i < N->getNumLinks(); ++i) {
    DSNodeHandle &Link = N->getLink(i << DS::PointerShift);
    if (Link.getNode()) {
      // Compute the offset into the current node at which to
      // merge this link.  In the common case, this is a linear
      // relation to the offset in the original node (with
      // wrapping), but if the current node gets collapsed due to
      // recursive merging, we must make sure to merge in all remaining
      // links at offset zero.
      unsigned MergeOffset = 0;
      DSNode *CN = CurNodeH.getNode();
        MergeOffset = ((i << DS::PointerShift)+NOffset) % CN->getSize();
      CN->addEdgeTo(MergeOffset, Link);

  // Now that there are no outgoing edges, all of the Links are dead.

  // Merge the globals list...
  if (!N->Globals.empty()) {
    CurNodeH.getNode()->mergeGlobals(N->Globals);

    // Delete the globals from the old node...
    // (swap with an empty temporary actually releases the capacity).
    std::vector<GlobalValue*>().swap(N->Globals);
/// mergeWith - Merge this node and the specified node, moving all links to and
/// from the argument node into the current node, deleting the node argument.
/// Offset indicates what offset the specified node is to be merged into the
/// current node.
///
/// The specified node may be a null pointer (in which case, we update it to
/// point to this node).
void DSNode::mergeWith(const DSNodeHandle &NH, unsigned Offset) {
  DSNode *N = NH.getNode();
  // Merging a node into itself at the same offset is a no-op.
  if (N == this && NH.getOffset() == Offset)

  // If the RHS is a null node, make it point to this node!
    NH.mergeWith(DSNodeHandle(this, Offset));

  assert(!N->isDeadNode() && !isDeadNode());
  assert(!hasNoReferrers() && "Should not try to fold a useless node!");

  // We cannot merge two pieces of the same node together, collapse the node
  // completely.
    DEBUG(std::cerr << "Attempting to merge two chunks of"
                    << " the same node together!\n");
    foldNodeCompletely();

  // If both nodes are not at offset 0, make sure that we are merging the node
  // at an later offset into the node with the zero offset.
  if (Offset < NH.getOffset()) {
    // Swap the roles so the zero/lower-offset node is the merge target.
    N->mergeWith(DSNodeHandle(this, Offset), NH.getOffset());
  } else if (Offset == NH.getOffset() && getSize() < N->getSize()) {
    // If the offsets are the same, merge the smaller node into the bigger node
    N->mergeWith(DSNodeHandle(this, Offset), NH.getOffset());

  // Ok, now we can merge the two nodes.  Use a static helper that works with
  // two node handles, since "this" may get merged away at intermediate steps.
  DSNodeHandle CurNodeH(this, Offset);
  DSNodeHandle NHCopy(NH);
  DSNode::MergeNodes(CurNodeH, NHCopy);
//===----------------------------------------------------------------------===//
// ReachabilityCloner Implementation
//===----------------------------------------------------------------------===//

/// getClonedNH - Return the node handle in the destination graph that
/// corresponds to SrcNH in the source graph, cloning the source node (and,
/// recursively, everything it points to) into Dest on demand.
DSNodeHandle ReachabilityCloner::getClonedNH(const DSNodeHandle &SrcNH) {
  if (SrcNH.isNull()) return DSNodeHandle();
  const DSNode *SN = SrcNH.getNode();

  DSNodeHandle &NH = NodeMap[SN];
  if (!NH.isNull()) {  // Node already mapped?
    DSNode *NHN = NH.getNode();
    // Compose the mapped node's offset with the handle's own offset.
    return DSNodeHandle(NHN, NH.getOffset()+SrcNH.getOffset());

  // If SrcNH has globals and the destination graph has one of the same globals,
  // merge this node with the destination node, which is much more efficient.
  if (SN->globals_begin() != SN->globals_end()) {
    DSScalarMap &DestSM = Dest.getScalarMap();
    for (DSNode::globals_iterator I = SN->globals_begin(),E = SN->globals_end();
      GlobalValue *GV = *I;
      DSScalarMap::iterator GI = DestSM.find(GV);
      if (GI != DestSM.end() && !GI->second.isNull()) {
        // We found one, use merge instead!
        merge(GI->second, Src.getNodeForValue(GV));
        // merge() populates NodeMap[SN], so NH is valid now.
        assert(!NH.isNull() && "Didn't merge node!");
        DSNode *NHN = NH.getNode();
        return DSNodeHandle(NHN, NH.getOffset()+SrcNH.getOffset());

  // Clone the node with null links, keeping only the node-type bits the
  // cloner was configured to preserve.
  DSNode *DN = new DSNode(*SN, &Dest, true /* Null out all links */);
  DN->maskNodeTypes(BitsToKeep);

  // Next, recursively clone all outgoing links as necessary.  Note that
  // adding these links can cause the node to collapse itself at any time, and
  // the current node may be merged with arbitrary other nodes.  For this
  // reason, we must always go through NH.
  for (unsigned i = 0, e = SN->getNumLinks(); i != e; ++i) {
    const DSNodeHandle &SrcEdge = SN->getLink(i << DS::PointerShift);
    if (!SrcEdge.isNull()) {
      const DSNodeHandle &DestEdge = getClonedNH(SrcEdge);
      // Compute the offset into the current node at which to
      // merge this link.  In the common case, this is a linear
      // relation to the offset in the original node (with
      // wrapping), but if the current node gets collapsed due to
      // recursive merging, we must make sure to merge in all remaining
      // links at offset zero.
      unsigned MergeOffset = 0;
      DSNode *CN = NH.getNode();
      if (CN->getSize() != 1)
        MergeOffset = ((i << DS::PointerShift)+NH.getOffset()) % CN->getSize();
      CN->addEdgeTo(MergeOffset, DestEdge);

  // If this node contains any globals, make sure they end up in the scalar
  // map with the correct offset.
  for (DSNode::globals_iterator I = SN->globals_begin(), E = SN->globals_end();
    GlobalValue *GV = *I;
    const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
    DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
    assert(DestGNH.getNode() == NH.getNode() &&"Global mapping inconsistent");
    Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
                                       DestGNH.getOffset()+SrcGNH.getOffset()));
  NH.getNode()->mergeGlobals(SN->getGlobalsList());

  return DSNodeHandle(NH.getNode(), NH.getOffset()+SrcNH.getOffset());
// merge - Merge the subgraph reachable from SrcNH (in the source graph) into
// the destination node handle NH, cloning source nodes on demand via the
// NodeMap.  Where possible the merge is performed in-place on NH's node to
// avoid allocating a temporary DSNode.
// NOTE(review): several lines (closing braces / else-arms) are elided in this
// listing; control-flow comments below describe only what is visible.
931 void ReachabilityCloner::merge(const DSNodeHandle &NH,
932 const DSNodeHandle &SrcNH) {
933 if (SrcNH.isNull()) return; // Noop
935 // If there is no destination node, just clone the source and assign the
936 // destination node to be it.
937 NH.mergeWith(getClonedNH(SrcNH));
941 // Okay, at this point, we know that we have both a destination and a source
942 // node that need to be merged. Check to see if the source node has already
944 const DSNode *SN = SrcNH.getNode();
945 DSNodeHandle &SCNH = NodeMap[SN]; // SourceClonedNodeHandle
946 if (!SCNH.isNull()) { // Node already cloned?
947 DSNode *SCNHN = SCNH.getNode();
// Merge against the existing clone, translating the source offset into the
// clone's coordinate space.
948 NH.mergeWith(DSNodeHandle(SCNHN,
949 SCNH.getOffset()+SrcNH.getOffset()));
950 return; // Nothing to do!
953 // Okay, so the source node has not already been cloned. Instead of creating
954 // a new DSNode, only to merge it into the one we already have, try to perform
955 // the merge in-place. The only case we cannot handle here is when the offset
956 // into the existing node is less than the offset into the virtual node we are
957 // merging in. In this case, we have to extend the existing node, which
958 // requires an allocation anyway.
959 DSNode *DN = NH.getNode(); // Make sure the Offset is up-to-date
960 if (NH.getOffset() >= SrcNH.getOffset()) {
961 if (!DN->isNodeCompletelyFolded()) {
962 // Make sure the destination node is folded if the source node is folded.
963 if (SN->isNodeCompletelyFolded()) {
964 DN->foldNodeCompletely();
966 } else if (SN->getSize() != DN->getSize()) {
967 // If the two nodes are of different size, and the smaller node has the
968 // array bit set, collapse!
969 #if COLLAPSE_ARRAYS_AGGRESSIVELY
970 if (SN->getSize() < DN->getSize()) {
972 DN->foldNodeCompletely();
975 } else if (DN->isArray()) {
976 DN->foldNodeCompletely();
982 // Merge the type entries of the two nodes together...
983 if (SN->getType() != Type::VoidTy && !DN->isNodeCompletelyFolded()) {
984 DN->mergeTypeInfo(SN->getType(), NH.getOffset()-SrcNH.getOffset());
989 assert(!DN->isDeadNode());
991 // Merge the NodeType information.
992 DN->mergeNodeFlags(SN->getNodeFlags() & BitsToKeep);
994 // Before we start merging outgoing links and updating the scalar map, make
995 // sure it is known that this is the representative node for the src node.
996 SCNH = DSNodeHandle(DN, NH.getOffset()-SrcNH.getOffset());
998 // If the source node contains any globals, make sure they end up in the
999 // scalar map with the correct offset.
1000 if (SN->globals_begin() != SN->globals_end()) {
1001 // Update the globals in the destination node itself.
1002 DN->mergeGlobals(SN->getGlobalsList());
1004 // Update the scalar map for the graph we are merging the source node
1006 for (DSNode::globals_iterator I = SN->globals_begin(),
1007 E = SN->globals_end(); I != E; ++I) {
1008 GlobalValue *GV = *I;
1009 const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
1010 DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
1011 assert(DestGNH.getNode()==NH.getNode() &&"Global mapping inconsistent");
1012 Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
1013 DestGNH.getOffset()+SrcGNH.getOffset()));
1015 NH.getNode()->mergeGlobals(SN->getGlobalsList());
1018 // We cannot handle this case without allocating a temporary node. Fall
1019 // back on being simple.
1020 DSNode *NewDN = new DSNode(*SN, &Dest, true /* Null out all links */);
1021 NewDN->maskNodeTypes(BitsToKeep);
1023 unsigned NHOffset = NH.getOffset();
1024 NH.mergeWith(DSNodeHandle(NewDN, SrcNH.getOffset()));
1026 assert(NH.getNode() &&
1027 (NH.getOffset() > NHOffset ||
1028 (NH.getOffset() == 0 && NH.getNode()->isNodeCompletelyFolded())) &&
1029 "Merging did not adjust the offset!");
1031 // Before we start merging outgoing links and updating the scalar map, make
1032 // sure it is known that this is the representative node for the src node.
1033 SCNH = DSNodeHandle(NH.getNode(), NH.getOffset()-SrcNH.getOffset());
1035 // If the source node contained any globals, make sure to create entries
1036 // in the scalar map for them!
1037 for (DSNode::globals_iterator I = SN->globals_begin(),
1038 E = SN->globals_end(); I != E; ++I) {
1039 GlobalValue *GV = *I;
1040 const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
1041 DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
1042 assert(DestGNH.getNode()==NH.getNode() &&"Global mapping inconsistent");
1043 assert(SrcGNH.getNode() == SN && "Global mapping inconsistent");
1044 Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
1045 DestGNH.getOffset()+SrcGNH.getOffset()));
1050 // Next, recursively merge all outgoing links as necessary. Note that
1051 // adding these links can cause the destination node to collapse itself at
1052 // any time, and the current node may be merged with arbitrary other nodes.
1053 // For this reason, we must always go through NH.
1055 for (unsigned i = 0, e = SN->getNumLinks(); i != e; ++i) {
1056 const DSNodeHandle &SrcEdge = SN->getLink(i << DS::PointerShift);
1057 if (!SrcEdge.isNull()) {
1058 // Compute the offset into the current node at which to
1059 // merge this link. In the common case, this is a linear
1060 // relation to the offset in the original node (with
1061 // wrapping), but if the current node gets collapsed due to
1062 // recursive merging, we must make sure to merge in all remaining
1063 // links at offset zero.
1064 DSNode *CN = SCNH.getNode();
1065 unsigned MergeOffset =
1066 ((i << DS::PointerShift)+SCNH.getOffset()) % CN->getSize();
// Copy the link into a temporary handle: recursive merging below may
// invalidate the reference returned by getLink().
1068 DSNodeHandle Tmp = CN->getLink(MergeOffset);
1069 if (!Tmp.isNull()) {
1070 // Perform the recursive merging. Make sure to create a temporary NH,
1071 // because the Link can disappear in the process of recursive merging.
1072 merge(Tmp, SrcEdge);
1074 Tmp.mergeWith(getClonedNH(SrcEdge));
1075 // Merging this could cause all kinds of recursive things to happen,
1076 // culminating in the current node being eliminated. Since this is
1077 // possible, make sure to reaquire the link from 'CN'.
1079 unsigned MergeOffset = 0;
1080 CN = SCNH.getNode();
1081 MergeOffset = ((i << DS::PointerShift)+SCNH.getOffset()) %CN->getSize();
1082 CN->getLink(MergeOffset).mergeWith(Tmp);
1088 /// mergeCallSite - Merge the nodes reachable from the specified src call
1089 /// site into the nodes reachable from DestCS.
1090 void ReachabilityCloner::mergeCallSite(DSCallSite &DestCS,
1091 const DSCallSite &SrcCS) {
// Merge the return-value handles first.
1092 merge(DestCS.getRetVal(), SrcCS.getRetVal());
// Merge pairwise over the common prefix of pointer arguments.
1093 unsigned MinArgs = DestCS.getNumPtrArgs();
1094 if (SrcCS.getNumPtrArgs() < MinArgs) MinArgs = SrcCS.getNumPtrArgs();
1096 for (unsigned a = 0; a != MinArgs; ++a)
1097 merge(DestCS.getPtrArg(a), SrcCS.getPtrArg(a));
// Any extra source arguments have no destination counterpart: clone them
// into the destination graph and append them to DestCS.
1099 for (unsigned a = MinArgs, e = SrcCS.getNumPtrArgs(); a != e; ++a)
1100 DestCS.addPtrArg(getClonedNH(SrcCS.getPtrArg(a)));
1104 //===----------------------------------------------------------------------===//
1105 // DSCallSite Implementation
1106 //===----------------------------------------------------------------------===//
1108 // Define here to avoid including iOther.h and BasicBlock.h in DSGraph.h
// getCaller - Return the function containing the call/invoke instruction of
// this call site (instruction -> basic block -> function).
1109 Function &DSCallSite::getCaller() const {
1110 return *Site.getInstruction()->getParent()->getParent();
// InitNH - Initialize NH to the clone of Src produced by the supplied
// ReachabilityCloner (helper used when copying call sites across graphs).
1113 void DSCallSite::InitNH(DSNodeHandle &NH, const DSNodeHandle &Src,
1114 ReachabilityCloner &RC) {
1115 NH = RC.getClonedNH(Src);
1118 //===----------------------------------------------------------------------===//
1119 // DSGraph Implementation
1120 //===----------------------------------------------------------------------===//
1122 /// getFunctionNames - Return a space separated list of the name of the
1123 /// functions in this graph (if any)
1124 std::string DSGraph::getFunctionNames() const {
1125 switch (getReturnNodes().size()) {
1126 case 0: return "Globals graph";
1127 case 1: return retnodes_begin()->first->getName();
// Multiple functions: concatenate all names separated by spaces.
// NOTE(review): the default-case prologue (declaration of 'Return') is not
// visible in this listing — confirm against the full source.
1130 for (DSGraph::retnodes_iterator I = retnodes_begin();
1131 I != retnodes_end(); ++I)
1132 Return += I->first->getName() + " ";
1133 Return.erase(Return.end()-1, Return.end()); // Remove last space character
// Copy-construct a DSGraph from G, honoring CloneFlags.  The node
// correspondence map is kept in a local NodeMap and discarded.
1139 DSGraph::DSGraph(const DSGraph &G, EquivalenceClasses<GlobalValue*> &ECs,
1140 unsigned CloneFlags)
1141 : GlobalsGraph(0), ScalarMap(ECs), TD(G.TD) {
1142 PrintAuxCalls = false;
1144 cloneInto(G, ScalarMap, ReturnNodes, NodeMap, CloneFlags);
// Copy-construct a DSGraph from G, returning the old-node -> new-node
// correspondence to the caller through NodeMap (default clone flags).
1147 DSGraph::DSGraph(const DSGraph &G, NodeMapTy &NodeMap,
1148 EquivalenceClasses<GlobalValue*> &ECs)
1149 : GlobalsGraph(0), ScalarMap(ECs), TD(G.TD) {
1150 PrintAuxCalls = false;
1151 cloneInto(G, ScalarMap, ReturnNodes, NodeMap);
// Destructor: clear all call-site and return-node containers, then break
// inter-node links before the nodes themselves are destroyed so that DSNode
// referrer-count assertions do not fire.
1154 DSGraph::~DSGraph() {
1155 FunctionCalls.clear();
1156 AuxFunctionCalls.clear();
1158 ReturnNodes.clear();
1160 // Drop all intra-node references, so that assertions don't fail...
1161 for (node_iterator NI = node_begin(), E = node_end(); NI != E; ++NI)
1162 NI->dropAllReferences();
1164 // Free all of the nodes.
1168 // dump - Allow inspection of graph in a debugger.
1169 void DSGraph::dump() const { print(std::cerr); }
1172 /// remapLinks - Change all of the Links in the current node according to the
1173 /// specified mapping.
1175 void DSNode::remapLinks(DSGraph::NodeMapTy &OldNodeMap) {
1176 for (unsigned i = 0, e = Links.size(); i != e; ++i)
1177 if (DSNode *N = Links[i].getNode()) {
// Only remap links whose target appears in the map; links to nodes that
// were not cloned are left untouched.
1178 DSGraph::NodeMapTy::const_iterator ONMI = OldNodeMap.find(N);
1179 if (ONMI != OldNodeMap.end()) {
1180 DSNode *ONMIN = ONMI->second.getNode();
// Retarget the link, accumulating the mapping's offset into the link's.
1181 Links[i].setTo(ONMIN, Links[i].getOffset()+ONMI->second.getOffset());
1186 /// addObjectToGraph - This method can be used to add global, stack, and heap
1187 /// objects to the graph. This can be used when updating DSGraphs due to the
1188 /// introduction of new temporary objects. The new object is not pointed to
1189 /// and does not point to any other objects in the graph.
1190 DSNode *DSGraph::addObjectToGraph(Value *Ptr, bool UseDeclaredType) {
1191 assert(isa<PointerType>(Ptr->getType()) && "Ptr is not a pointer!");
1192 const Type *Ty = cast<PointerType>(Ptr->getType())->getElementType();
// Give the node Ptr's pointee type when requested; otherwise leave untyped.
1193 DSNode *N = new DSNode(UseDeclaredType ? Ty : 0, this);
1194 assert(ScalarMap[Ptr].isNull() && "Object already in this graph!");
// Classify the memory object by the kind of value that produced it.
1197 if (GlobalValue *GV = dyn_cast<GlobalValue>(Ptr)) {
1199 } else if (MallocInst *MI = dyn_cast<MallocInst>(Ptr)) {
1200 N->setHeapNodeMarker();
1201 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Ptr)) {
1202 N->setAllocaNodeMarker();
1204 assert(0 && "Illegal memory object input!");
1210 /// cloneInto - Clone the specified DSGraph into the current graph. The
1211 /// translated ScalarMap for the old function is filled into the OldValMap
1212 /// member, and the translated ReturnNodes map is returned into ReturnNodes.
1214 /// The CloneFlags member controls various aspects of the cloning process.
1216 void DSGraph::cloneInto(const DSGraph &G, DSScalarMap &OldValMap,
1217 ReturnNodesTy &OldReturnNodes, NodeMapTy &OldNodeMap,
1218 unsigned CloneFlags) {
1219 TIME_REGION(X, "cloneInto");
1220 assert(OldNodeMap.empty() && "Returned OldNodeMap should be empty!");
1221 assert(&G != this && "Cannot clone graph into itself!");
1223 // Remove alloca or mod/ref bits as specified...
1224 unsigned BitsToClear = ((CloneFlags & StripAllocaBit)? DSNode::AllocaNode : 0)
1225 | ((CloneFlags & StripModRefBits)? (DSNode::Modified | DSNode::Read) : 0)
1226 | ((CloneFlags & StripIncompleteBit)? DSNode::Incomplete : 0);
1227 BitsToClear |= DSNode::DEAD; // Clear dead flag...
// First pass: duplicate every node of G into this graph, recording the
// old -> new correspondence in OldNodeMap.  Links still point into G.
1229 for (node_const_iterator I = G.node_begin(), E = G.node_end(); I != E; ++I) {
1230 assert(!I->isForwarding() &&
1231 "Forward nodes shouldn't be in node list!");
1232 DSNode *New = new DSNode(*I, this);
1233 New->maskNodeTypes(~BitsToClear);
1234 OldNodeMap[I] = New;
1238 Timer::addPeakMemoryMeasurement();
1241 // Rewrite the links in the new nodes to point into the current graph now.
1242 // Note that we don't loop over the node's list to do this. The problem is
1243 // that remaping links can cause recursive merging to happen, which means
1244 // that node_iterator's can get easily invalidated! Because of this, we
1245 // loop over the OldNodeMap, which contains all of the new nodes as the
1246 // .second element of the map elements. Also note that if we remap a node
1247 // more than once, we won't break anything.
1248 for (NodeMapTy::iterator I = OldNodeMap.begin(), E = OldNodeMap.end();
1250 I->second.getNode()->remapLinks(OldNodeMap);
1252 // Copy the scalar map... merging all of the global nodes...
1253 for (DSScalarMap::const_iterator I = G.ScalarMap.begin(),
1254 E = G.ScalarMap.end(); I != E; ++I) {
1255 DSNodeHandle &MappedNode = OldNodeMap[I->second.getNode()];
1256 DSNodeHandle &H = OldValMap[I->first];
1257 DSNode *MappedNodeN = MappedNode.getNode();
1258 H.mergeWith(DSNodeHandle(MappedNodeN,
1259 I->second.getOffset()+MappedNode.getOffset()));
1261 // If this is a global, add the global to this fn or merge if already exists
1262 if (GlobalValue* GV = dyn_cast<GlobalValue>(I->first))
1263 ScalarMap[GV].mergeWith(H);
// Copy the regular and auxiliary call lists unless the caller opted out.
1266 if (!(CloneFlags & DontCloneCallNodes)) {
1267 // Copy the function calls list.
1268 for (fc_iterator I = G.fc_begin(), E = G.fc_end(); I != E; ++I)
1269 FunctionCalls.push_back(DSCallSite(*I, OldNodeMap));
1272 if (!(CloneFlags & DontCloneAuxCallNodes)) {
1273 // Copy the auxiliary function calls list.
1274 for (afc_iterator I = G.afc_begin(), E = G.afc_end(); I != E; ++I)
1275 AuxFunctionCalls.push_back(DSCallSite(*I, OldNodeMap));
1278 // Map the return node pointers over...
1279 for (retnodes_iterator I = G.retnodes_begin(),
1280 E = G.retnodes_end(); I != E; ++I) {
1281 const DSNodeHandle &Ret = I->second;
1282 DSNodeHandle &MappedRet = OldNodeMap[Ret.getNode()];
1283 DSNode *MappedRetN = MappedRet.getNode();
1284 OldReturnNodes.insert(std::make_pair(I->first,
1285 DSNodeHandle(MappedRetN,
1286 MappedRet.getOffset()+Ret.getOffset())));
// PathExistsToClonedNode - Return whether any node reachable from N (via a
// depth-first walk of its out-edges) has already been cloned by RC.
// NOTE(review): the return statements are elided in this listing.
1290 static bool PathExistsToClonedNode(const DSNode *N, ReachabilityCloner &RC) {
1292 for (df_iterator<const DSNode*> I = df_begin(N), E = df_end(N); I != E; ++I)
1293 if (RC.hasClonedNode(*I))
// PathExistsToClonedNode (call-site overload) - Return whether the call
// site's return value or any pointer argument can reach a cloned node.
// NOTE(review): the return statements are elided in this listing.
1298 static bool PathExistsToClonedNode(const DSCallSite &CS,
1299 ReachabilityCloner &RC) {
1300 if (PathExistsToClonedNode(CS.getRetVal().getNode(), RC))
1302 for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i)
1303 if (PathExistsToClonedNode(CS.getPtrArg(i).getNode(), RC))
1308 /// getFunctionArgumentsForCall - Given a function that is currently in this
1309 /// graph, return the DSNodeHandles that correspond to the pointer-compatible
1310 /// function arguments. The vector is filled in with the return value (or
1311 /// null if it is not pointer compatible), followed by all of the
1312 /// pointer-compatible arguments.
1313 void DSGraph::getFunctionArgumentsForCall(Function *F,
1314 std::vector<DSNodeHandle> &Args) const {
// Slot 0 is always the return-node handle for F.
1315 Args.push_back(getReturnNodeFor(*F));
1316 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; ++AI)
1317 if (isPointerType(AI->getType())) {
1318 Args.push_back(getNodeForValue(AI));
1319 assert(!Args.back().isNull() && "Pointer argument w/o scalarmap entry!?");
1323 /// mergeInCallFromOtherGraph - This graph merges in the minimal number of
1324 /// nodes from G2 into 'this' graph, merging the bindings specified by the
1325 /// call site (in this graph) with the bindings specified by the vector in G2.
1326 /// The two DSGraphs must be different.
1328 void DSGraph::mergeInGraph(const DSCallSite &CS,
1329 std::vector<DSNodeHandle> &Args,
1330 const DSGraph &Graph, unsigned CloneFlags) {
1331 TIME_REGION(X, "mergeInGraph");
1333 // If this is not a recursive call, clone the graph into this graph...
1334 if (&Graph != this) {
1335 // Clone the callee's graph into the current graph, keeping track of where
1336 // scalars in the old graph _used_ to point, and of the new nodes matching
1337 // nodes of the old graph.
1338 ReachabilityCloner RC(*this, Graph, CloneFlags);
1340 // Map the return node pointer over.
1341 if (!CS.getRetVal().isNull())
1342 RC.merge(CS.getRetVal(), Args[0]);
1344 // Map over all of the arguments.
1345 for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i) {
// Stop when the callee has no more formal argument bindings to merge with.
1346 if (i == Args.size()-1)
1349 // Add the link from the argument scalar to the provided value.
// Args[0] is the return value, so actual i maps to Args[i+1].
1350 RC.merge(CS.getPtrArg(i), Args[i+1]);
1353 // If requested, copy all of the calls.
1354 if (!(CloneFlags & DontCloneCallNodes)) {
1355 // Copy the function calls list.
1356 for (fc_iterator I = Graph.fc_begin(), E = Graph.fc_end(); I != E; ++I)
1357 FunctionCalls.push_back(DSCallSite(*I, RC));
1360 // If the user has us copying aux calls (the normal case), set up a data
1361 // structure to keep track of which ones we've copied over.
1362 std::set<const DSCallSite*> CopiedAuxCall;
1364 // Clone over all globals that appear in the caller and callee graphs.
1365 hash_set<GlobalVariable*> NonCopiedGlobals;
1366 for (DSScalarMap::global_iterator GI = Graph.getScalarMap().global_begin(),
1367 E = Graph.getScalarMap().global_end(); GI != E; ++GI)
1368 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(*GI))
1369 if (ScalarMap.count(GV))
// Global is visible in both graphs: merge the two bindings.
1370 RC.merge(ScalarMap[GV], Graph.getNodeForValue(GV));
// Otherwise defer the decision: it may still need copying (below).
1372 NonCopiedGlobals.insert(GV);
1374 // If the global does not appear in the callers graph we generally don't
1375 // want to copy the node. However, if there is a path from the node global
1376 // node to a node that we did copy in the graph, we *must* copy it to
1377 // maintain the connection information. Every time we decide to include a
1378 // new global, this might make other globals live, so we must iterate
1380 bool MadeChange = true;
1381 while (MadeChange) {
1383 for (hash_set<GlobalVariable*>::iterator I = NonCopiedGlobals.begin();
1384 I != NonCopiedGlobals.end();) {
1385 DSNode *GlobalNode = Graph.getNodeForValue(*I).getNode();
1386 if (RC.hasClonedNode(GlobalNode)) {
1387 // Already cloned it, remove from set.
1388 NonCopiedGlobals.erase(I++);
1390 } else if (PathExistsToClonedNode(GlobalNode, RC)) {
1391 RC.getClonedNH(Graph.getNodeForValue(*I));
1392 NonCopiedGlobals.erase(I++);
1399 // If requested, copy any aux calls that can reach copied nodes.
1400 if (!(CloneFlags & DontCloneAuxCallNodes)) {
1401 for (afc_iterator I = Graph.afc_begin(), E = Graph.afc_end(); I!=E; ++I)
1402 if (CopiedAuxCall.insert(&*I).second &&
1403 PathExistsToClonedNode(*I, RC)) {
1404 AuxFunctionCalls.push_back(DSCallSite(*I, RC));
// Recursive (self) call: no cloning needed, merge bindings directly.
1411 // Merge the return value with the return value of the context.
1412 Args[0].mergeWith(CS.getRetVal());
1414 // Resolve all of the function arguments.
1415 for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i) {
1416 if (i == Args.size()-1)
1419 // Add the link from the argument scalar to the provided value.
1420 Args[i+1].mergeWith(CS.getPtrArg(i));
1427 /// mergeInGraph - The method is used for merging graphs together. If the
1428 /// argument graph is not *this, it makes a clone of the specified graph, then
1429 /// merges the nodes specified in the call site with the formal arguments in the
1432 void DSGraph::mergeInGraph(const DSCallSite &CS, Function &F,
1433 const DSGraph &Graph, unsigned CloneFlags) {
1434 // Set up argument bindings.
1435 std::vector<DSNodeHandle> Args;
1436 Graph.getFunctionArgumentsForCall(&F, Args);
// Delegate to the binding-vector form of mergeInGraph.
1438 mergeInGraph(CS, Args, Graph, CloneFlags);
1441 /// getCallSiteForArguments - Get the arguments and return value bindings for
1442 /// the specified function in the current graph.
1444 DSCallSite DSGraph::getCallSiteForArguments(Function &F) const {
1445 std::vector<DSNodeHandle> Args;
// Collect the node handle for every pointer-compatible formal argument.
1447 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
1448 if (isPointerType(I->getType()))
1449 Args.push_back(getNodeForValue(I));
// Synthesize a call site with an empty LLVM CallSite, F's return node, and
// the collected argument bindings.
1451 return DSCallSite(CallSite(), getReturnNodeFor(F), &F, Args);
1454 /// getDSCallSiteForCallSite - Given an LLVM CallSite object that is live in
1455 /// the context of this graph, return the DSCallSite for it.
1456 DSCallSite DSGraph::getDSCallSiteForCallSite(CallSite CS) const {
1457 DSNodeHandle RetVal;
1458 Instruction *I = CS.getInstruction();
// Only pointer-returning calls have a meaningful return-value node.
1459 if (isPointerType(I->getType()))
1460 RetVal = getNodeForValue(I);
1462 std::vector<DSNodeHandle> Args;
1463 Args.reserve(CS.arg_end()-CS.arg_begin());
1465 // Calculate the arguments vector...
1466 for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E; ++I)
1467 if (isPointerType((*I)->getType()))
// Null-pointer constants get an empty handle rather than a graph node.
1468 if (isa<ConstantPointerNull>(*I))
1469 Args.push_back(DSNodeHandle());
1471 Args.push_back(getNodeForValue(*I));
1473 // Add a new function call entry...
1474 if (Function *F = CS.getCalledFunction())
1475 return DSCallSite(CS, RetVal, F, Args);
// Indirect call: record the callee's node instead of a Function*.
1477 return DSCallSite(CS, RetVal,
1478 getNodeForValue(CS.getCalledValue()).getNode(), Args);
1483 // markIncompleteNodes - Mark the specified node as having contents that are not
1484 // known with the current analysis we have performed. Because a node makes all
1485 // of the nodes it can reach incomplete if the node itself is incomplete, we
1486 // must recursively traverse the data structure graph, marking all reachable
1487 // nodes as incomplete.
1489 static void markIncompleteNode(DSNode *N) {
1490 // Stop recursion if no node, or if node already marked...
1491 if (N == 0 || N->isIncomplete()) return;
1493 // Actually mark the node
1494 N->setIncompleteMarker();
1496 // Recursively process children...
1497 for (DSNode::edge_iterator I = N->edge_begin(),E = N->edge_end(); I != E; ++I)
1498 if (DSNode *DSN = I->getNode())
1499 markIncompleteNode(DSN);
// markIncomplete - Mark everything reachable from a call site (return value
// and all pointer arguments) as incomplete.
1502 static void markIncomplete(DSCallSite &Call) {
1503 // Then the return value is certainly incomplete!
1504 markIncompleteNode(Call.getRetVal().getNode());
1506 // All objects pointed to by function arguments are incomplete!
1507 for (unsigned i = 0, e = Call.getNumPtrArgs(); i != e; ++i)
1508 markIncompleteNode(Call.getPtrArg(i).getNode());
1511 // markIncompleteNodes - Traverse the graph, identifying nodes that may be
1512 // modified by other functions that have not been resolved yet. This marks
1513 // nodes that are reachable through three sources of "unknownness":
1515 // Global Variables, Function Calls, and Incoming Arguments
1517 // For any node that may have unknown components (because something outside the
1518 // scope of current analysis may have modified it), the 'Incomplete' flag is
1519 // added to the NodeType.
1521 void DSGraph::markIncompleteNodes(unsigned Flags) {
1522 // Mark any incoming arguments as incomplete.
1523 if (Flags & DSGraph::MarkFormalArgs)
1524 for (ReturnNodesTy::iterator FI = ReturnNodes.begin(), E =ReturnNodes.end();
1526 Function &F = *FI->first;
// Each pointer-typed formal, and the function's return node, may be
// modified by unanalyzed callers.
1527 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
1528 if (isPointerType(I->getType()))
1529 markIncompleteNode(getNodeForValue(I).getNode());
1530 markIncompleteNode(FI->second.getNode());
1533 // Mark stuff passed into functions calls as being incomplete.
1534 if (!shouldPrintAuxCalls())
1535 for (std::list<DSCallSite>::iterator I = FunctionCalls.begin(),
1536 E = FunctionCalls.end(); I != E; ++I)
1539 for (std::list<DSCallSite>::iterator I = AuxFunctionCalls.begin(),
1540 E = AuxFunctionCalls.end(); I != E; ++I)
1543 // Mark all global nodes as incomplete.
1544 for (DSScalarMap::global_iterator I = ScalarMap.global_begin(),
1545 E = ScalarMap.global_end(); I != E; ++I)
1546 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(*I))
1547 if (!GV->hasInitializer() || // Always mark external globals incomp.
1548 (!GV->isConstant() && (Flags & DSGraph::IgnoreGlobals) == 0))
1549 markIncompleteNode(ScalarMap[GV].getNode());
// killIfUselessEdge - If Edge points at a node that carries no information
// (no interesting flags, void type, not folded) and has no other referrers,
// clear the edge so the dead node can be reclaimed.
1552 static inline void killIfUselessEdge(DSNodeHandle &Edge) {
1553 if (DSNode *N = Edge.getNode()) // Is there an edge?
1554 if (N->getNumReferrers() == 1) // Does it point to a lonely node?
1555 // No interesting info?
1556 if ((N->getNodeFlags() & ~DSNode::Incomplete) == 0 &&
1557 N->getType() == Type::VoidTy && !N->isNodeCompletelyFolded())
1558 Edge.setTo(0, 0); // Kill the edge!
// nodeContainsExternalFunction - Return whether any global attached to N is
// an externally-defined function.
// NOTE(review): the return statements are elided in this listing.
1562 static inline bool nodeContainsExternalFunction(const DSNode *N) {
1563 const std::vector<GlobalValue*> &Globals = N->getGlobals();
1564 for (unsigned i = 0, e = Globals.size(); i != e; ++i)
1565 if (Globals[i]->isExternal() && isa<Function>(Globals[i]))
// removeIdenticalCalls - Sort the call list and eliminate call sites that
// are duplicates (or that have become useless), counting the removals in
// NumCallNodesMerged.
1571 static void removeIdenticalCalls(std::list<DSCallSite> &Calls) {
1572 // Remove trivially identical function calls
1573 Calls.sort(); // Sort by callee as primary key!
1575 // Scan the call list cleaning it up as necessary...
1576 DSNode *LastCalleeNode = 0;
1577 Function *LastCalleeFunc = 0;
1578 unsigned NumDuplicateCalls = 0;
1579 bool LastCalleeContainsExternalFunction = false;
1581 unsigned NumDeleted = 0;
1582 for (std::list<DSCallSite>::iterator I = Calls.begin(), E = Calls.end();
1584 DSCallSite &CS = *I;
// Advance before any erase so the loop iterator stays valid.
1585 std::list<DSCallSite>::iterator OldIt = I++;
1587 // If the Callee is a useless edge, this must be an unreachable call site,
1589 if (CS.isIndirectCall() && CS.getCalleeNode()->getNumReferrers() == 1 &&
1590 CS.getCalleeNode()->isComplete() &&
1591 CS.getCalleeNode()->getGlobalsList().empty()) { // No useful info?
1593 std::cerr << "WARNING: Useless call site found.\n";
1600 // If the return value or any arguments point to a void node with no
1601 // information at all in it, and the call node is the only node to point
1602 // to it, remove the edge to the node (killing the node).
1604 killIfUselessEdge(CS.getRetVal());
1605 for (unsigned a = 0, e = CS.getNumPtrArgs(); a != e; ++a)
1606 killIfUselessEdge(CS.getPtrArg(a));
1609 // If this call site calls the same function as the last call site, and if
1610 // the function pointer contains an external function, this node will
1611 // never be resolved. Merge the arguments of the call node because no
1612 // information will be lost.
1614 if ((CS.isDirectCall() && CS.getCalleeFunc() == LastCalleeFunc) ||
1615 (CS.isIndirectCall() && CS.getCalleeNode() == LastCalleeNode)) {
1616 ++NumDuplicateCalls;
1617 if (NumDuplicateCalls == 1) {
// Lazily compute the "callee is external" property on first duplicate.
1619 LastCalleeContainsExternalFunction =
1620 nodeContainsExternalFunction(LastCalleeNode);
1622 LastCalleeContainsExternalFunction = LastCalleeFunc->isExternal();
1625 // It is not clear why, but enabling this code makes DSA really
1626 // sensitive to node forwarding. Basically, with this enabled, DSA
1627 // performs different number of inlinings based on which nodes are
1628 // forwarding or not. This is clearly a problem, so this code is
1629 // disabled until this can be resolved.
1631 if (LastCalleeContainsExternalFunction
1634 // This should be more than enough context sensitivity!
1635 // FIXME: Evaluate how many times this is tripped!
1636 NumDuplicateCalls > 20
1640 std::list<DSCallSite>::iterator PrevIt = OldIt;
// Fold this call site into the previous identical one.
1642 PrevIt->mergeWith(CS);
1644 // No need to keep this call anymore.
// Different callee than the previous call site: reset duplicate tracking.
1651 if (CS.isDirectCall()) {
1652 LastCalleeFunc = CS.getCalleeFunc();
1655 LastCalleeNode = CS.getCalleeNode();
1658 NumDuplicateCalls = 0;
1662 if (I != Calls.end() && CS == *I) {
1669 // Resort now that we simplified things.
1672 // Now that we are in sorted order, eliminate duplicates.
1673 std::list<DSCallSite>::iterator CI = Calls.begin(), CE = Calls.end();
1676 std::list<DSCallSite>::iterator OldIt = CI++;
1677 if (CI == CE) break;
1679 // If this call site is now the same as the previous one, we can delete it
1681 if (*OldIt == *CI) {
1688 //Calls.erase(std::unique(Calls.begin(), Calls.end()), Calls.end());
1690 // Track the number of call nodes merged away...
1691 NumCallNodesMerged += NumDeleted;
1693 DEBUG(if (NumDeleted)
1694 std::cerr << "Merged " << NumDeleted << " call nodes.\n";);
1698 // removeTriviallyDeadNodes - After the graph has been constructed, this method
1699 // removes all unreachable nodes that are created because they got merged with
1700 // other nodes in the graph. These nodes will all be trivially unreachable, so
1701 // we don't have to perform any non-trivial analysis here.
1703 void DSGraph::removeTriviallyDeadNodes() {
1704 TIME_REGION(X, "removeTriviallyDeadNodes");
1707 /// NOTE: This code is disabled. This slows down DSA on 177.mesa
1710 // Loop over all of the nodes in the graph, calling getNode on each field.
1711 // This will cause all nodes to update their forwarding edges, causing
1712 // forwarded nodes to be delete-able.
1713 { TIME_REGION(X, "removeTriviallyDeadNodes:node_iterate");
1714 for (node_iterator NI = node_begin(), E = node_end(); NI != E; ++NI) {
1716 for (unsigned l = 0, e = N.getNumLinks(); l != e; ++l)
// getNode() collapses forwarding chains as a side effect.
1717 N.getLink(l*N.getPointerSize()).getNode();
1721 // NOTE: This code is disabled. Though it should, in theory, allow us to
1722 // remove more nodes down below, the scan of the scalar map is incredibly
1723 // expensive for certain programs (with large SCCs). In the future, if we can
1724 // make the scalar map scan more efficient, then we can reenable this.
1725 { TIME_REGION(X, "removeTriviallyDeadNodes:scalarmap");
1727 // Likewise, forward any edges from the scalar nodes. While we are at it,
1728 // clean house a bit.
1729 for (DSScalarMap::iterator I = ScalarMap.begin(),E = ScalarMap.end();I != E;){
1730 I->second.getNode();
// The globals graph is the one with no parent GlobalsGraph pointer.
1735 bool isGlobalsGraph = !GlobalsGraph;
1737 for (NodeListTy::iterator NI = Nodes.begin(), E = Nodes.end(); NI != E; ) {
1740 // Do not remove *any* global nodes in the globals graph.
1741 // This is a special case because such nodes may not have I, M, R flags set.
1742 if (Node.isGlobalNode() && isGlobalsGraph) {
1747 if (Node.isComplete() && !Node.isModified() && !Node.isRead()) {
1748 // This is a useless node if it has no mod/ref info (checked above),
1749 // outgoing edges (which it cannot, as it is not modified in this
1750 // context), and it has no incoming edges. If it is a global node it may
1751 // have all of these properties and still have incoming edges, due to the
1752 // scalar map, so we check those now.
1754 if (Node.getNumReferrers() == Node.getGlobalsList().size()) {
1755 const std::vector<GlobalValue*> &Globals = Node.getGlobalsList();
1757 // Loop through and make sure all of the globals are referring directly
1759 for (unsigned j = 0, e = Globals.size(); j != e; ++j) {
1760 DSNode *N = getNodeForValue(Globals[j]).getNode();
1761 assert(N == &Node && "ScalarMap doesn't match globals list!");
1764 // Make sure NumReferrers still agrees, if so, the node is truly dead.
1765 if (Node.getNumReferrers() == Globals.size()) {
// Only the scalar map references this node: drop those entries too.
1766 for (unsigned j = 0, e = Globals.size(); j != e; ++j)
1767 ScalarMap.erase(Globals[j]);
1768 Node.makeNodeDead();
1769 ++NumTrivialGlobalDNE;
1774 if (Node.getNodeFlags() == 0 && Node.hasNoReferrers()) {
1775 // This node is dead!
1776 NI = Nodes.erase(NI); // Erase & remove from node list.
// Finally, fold away duplicate call sites in both call lists.
1783 removeIdenticalCalls(FunctionCalls);
1784 removeIdenticalCalls(AuxFunctionCalls);
1788 /// markReachableNodes - This method recursively traverses the specified
1789 /// DSNodes, marking any nodes which are reachable. All reachable nodes it adds
1790 /// to the set, which allows it to only traverse visited nodes once.
1792 void DSNode::markReachableNodes(hash_set<const DSNode*> &ReachableNodes) const {
// NOTE(review): null-'this' check — relies on UB-tolerant behavior of old
// compilers; callers invoke this through possibly-null node pointers.
1793 if (this == 0) return;
1794 assert(getForwardNode() == 0 && "Cannot mark a forwarded node!");
1795 if (ReachableNodes.insert(this).second) // Is newly reachable?
1796 for (DSNode::const_edge_iterator I = edge_begin(), E = edge_end();
1798 I->getNode()->markReachableNodes(ReachableNodes);
// markReachableNodes - Add to Nodes everything reachable from this call
// site: the return value, the callee node (for indirect calls), and every
// pointer argument.
1801 void DSCallSite::markReachableNodes(hash_set<const DSNode*> &Nodes) const {
1802 getRetVal().getNode()->markReachableNodes(Nodes);
1803 if (isIndirectCall()) getCalleeNode()->markReachableNodes(Nodes);
1805 for (unsigned i = 0, e = getNumPtrArgs(); i != e; ++i)
1806 getPtrArg(i).getNode()->markReachableNodes(Nodes);
1809 // CanReachAliveNodes - Simple graph walker that recursively traverses the graph
1810 // looking for a node that is marked alive. If an alive node is found, return
1811 // true, otherwise return false. If an alive node is reachable, this node is
1812 // marked as alive...
1814 static bool CanReachAliveNodes(DSNode *N, hash_set<const DSNode*> &Alive,
1815 hash_set<const DSNode*> &Visited,
1816 bool IgnoreGlobals) {
1817 if (N == 0) return false;
1818 assert(N->getForwardNode() == 0 && "Cannot mark a forwarded node!");
1820 // If this is a global node, it will end up in the globals graph anyway, so we
1821 // don't need to worry about it.
1822 if (IgnoreGlobals && N->isGlobalNode()) return false;
1824 // If we know that this node is alive, return so!
1825 if (Alive.count(N)) return true;
1827 // Otherwise, we don't think the node is alive yet, check for infinite
1829 if (Visited.count(N)) return false; // Found a cycle
1830 Visited.insert(N); // No recursion, insert into Visited...
1832 for (DSNode::edge_iterator I = N->edge_begin(),E = N->edge_end(); I != E; ++I)
1833 if (CanReachAliveNodes(I->getNode(), Alive, Visited, IgnoreGlobals)) {
// Transitively alive: mark this whole reachable subgraph alive.
1834 N->markReachableNodes(Alive);
1840 // CallSiteUsesAliveArgs - Return true if the specified call site can reach any
1843 static bool CallSiteUsesAliveArgs(const DSCallSite &CS,
1844 hash_set<const DSNode*> &Alive,
1845 hash_set<const DSNode*> &Visited,
1846 bool IgnoreGlobals) {
// Check the return value, the indirect callee node, then each pointer arg.
1847 if (CanReachAliveNodes(CS.getRetVal().getNode(), Alive, Visited,
1850 if (CS.isIndirectCall() &&
1851 CanReachAliveNodes(CS.getCalleeNode(), Alive, Visited, IgnoreGlobals))
1853 for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i)
1854 if (CanReachAliveNodes(CS.getPtrArg(i).getNode(), Alive, Visited,
1860 // removeDeadNodes - Use a more powerful reachability analysis to eliminate
1861 // subgraphs that are unreachable. This often occurs because the data
1862 // structure doesn't "escape" into it's caller, and thus should be eliminated
1863 // from the caller's graph entirely. This is only appropriate to use when
1866 void DSGraph::removeDeadNodes(unsigned Flags) {
1867 DEBUG(AssertGraphOK(); if (GlobalsGraph) GlobalsGraph->AssertGraphOK());
1869 // Reduce the amount of work we have to do... remove dummy nodes left over by
1871 removeTriviallyDeadNodes();
1873 TIME_REGION(X, "removeDeadNodes");
1875 // FIXME: Merge non-trivially identical call nodes...
1877 // Alive - a set that holds all nodes found to be reachable/alive.
1878 hash_set<const DSNode*> Alive;
1879 std::vector<std::pair<Value*, DSNode*> > GlobalNodes;
1881 // Copy and merge all information about globals to the GlobalsGraph if this is
1882 // not a final pass (where unreachable globals are removed).
1884 // Strip all alloca bits since the current function is only for the BU pass.
1885 // Strip all incomplete bits since they are short-lived properties and they
1886 // will be correctly computed when rematerializing nodes into the functions.
1888 ReachabilityCloner GGCloner(*GlobalsGraph, *this, DSGraph::StripAllocaBit |
1889 DSGraph::StripIncompleteBit);
1891 // Mark all nodes reachable by (non-global) scalar nodes as alive...
1892 { TIME_REGION(Y, "removeDeadNodes:scalarscan");
1893 for (DSScalarMap::iterator I = ScalarMap.begin(), E = ScalarMap.end();
1895 if (isa<GlobalValue>(I->first)) { // Keep track of global nodes
1896 assert(!I->second.isNull() && "Null global node?");
1897 assert(I->second.getNode()->isGlobalNode() && "Should be a global node!");
1898 GlobalNodes.push_back(std::make_pair(I->first, I->second.getNode()));
1900 // Make sure that all globals are cloned over as roots.
1901 if (!(Flags & DSGraph::RemoveUnreachableGlobals)) {
1902 DSGraph::ScalarMapTy::iterator SMI =
1903 GlobalsGraph->getScalarMap().find(I->first);
1904 if (SMI != GlobalsGraph->getScalarMap().end())
1905 GGCloner.merge(SMI->second, I->second);
1907 GGCloner.getClonedNH(I->second);
1910 I->second.getNode()->markReachableNodes(Alive);
1914 // The return values are alive as well.
1915 for (ReturnNodesTy::iterator I = ReturnNodes.begin(), E = ReturnNodes.end();
1917 I->second.getNode()->markReachableNodes(Alive);
1919 // Mark any nodes reachable by primary calls as alive...
1920 for (fc_iterator I = fc_begin(), E = fc_end(); I != E; ++I)
1921 I->markReachableNodes(Alive);
1924 // Now find globals and aux call nodes that are already live or reach a live
1925 // value (which makes them live in turn), and continue till no more are found.
1928 hash_set<const DSNode*> Visited;
1929 hash_set<const DSCallSite*> AuxFCallsAlive;
1932 // If any global node points to a non-global that is "alive", the global is
1933 // "alive" as well... Remove it from the GlobalNodes list so we only have
1934 // unreachable globals in the list.
1937 if (!(Flags & DSGraph::RemoveUnreachableGlobals))
1938 for (unsigned i = 0; i != GlobalNodes.size(); ++i)
1939 if (CanReachAliveNodes(GlobalNodes[i].second, Alive, Visited,
1940 Flags & DSGraph::RemoveUnreachableGlobals)) {
1941 std::swap(GlobalNodes[i--], GlobalNodes.back()); // Move to end to...
1942 GlobalNodes.pop_back(); // erase efficiently
1946 // Mark only unresolvable call nodes for moving to the GlobalsGraph since
1947 // call nodes that get resolved will be difficult to remove from that graph.
1948 // The final unresolved call nodes must be handled specially at the end of
1949 // the BU pass (i.e., in main or other roots of the call graph).
1950 for (afc_iterator CI = afc_begin(), E = afc_end(); CI != E; ++CI)
1951 if (!AuxFCallsAlive.count(&*CI) &&
1952 (CI->isIndirectCall()
1953 || CallSiteUsesAliveArgs(*CI, Alive, Visited,
1954 Flags & DSGraph::RemoveUnreachableGlobals))) {
1955 CI->markReachableNodes(Alive);
1956 AuxFCallsAlive.insert(&*CI);
1961 // Move dead aux function calls to the end of the list
1962 unsigned CurIdx = 0;
1963 for (std::list<DSCallSite>::iterator CI = AuxFunctionCalls.begin(),
1964 E = AuxFunctionCalls.end(); CI != E; )
1965 if (AuxFCallsAlive.count(&*CI))
1968 // Copy and merge global nodes and dead aux call nodes into the
1969 // GlobalsGraph, and all nodes reachable from those nodes. Update their
1970 // target pointers using the GGCloner.
1972 if (!(Flags & DSGraph::RemoveUnreachableGlobals))
1973 GlobalsGraph->AuxFunctionCalls.push_back(DSCallSite(*CI, GGCloner));
1975 AuxFunctionCalls.erase(CI++);
1978 // We are finally done with the GGCloner so we can destroy it.
1981 // At this point, any nodes which are visited, but not alive, are nodes
1982 // which can be removed. Loop over all nodes, eliminating completely
1983 // unreachable nodes.
1985 std::vector<DSNode*> DeadNodes;
1986 DeadNodes.reserve(Nodes.size());
1987 for (NodeListTy::iterator NI = Nodes.begin(), E = Nodes.end(); NI != E;) {
1989 assert(!N->isForwarding() && "Forwarded node in nodes list?");
1991 if (!Alive.count(N)) {
1993 assert(!N->isForwarding() && "Cannot remove a forwarding node!");
1994 DeadNodes.push_back(N);
1995 N->dropAllReferences();
2000 // Remove all unreachable globals from the ScalarMap.
2001 // If flag RemoveUnreachableGlobals is set, GlobalNodes has only dead nodes.
2002 // In either case, the dead nodes will not be in the set Alive.
2003 for (unsigned i = 0, e = GlobalNodes.size(); i != e; ++i)
2004 if (!Alive.count(GlobalNodes[i].second))
2005 ScalarMap.erase(GlobalNodes[i].first);
2007 assert((Flags & DSGraph::RemoveUnreachableGlobals) && "non-dead global");
2009 // Delete all dead nodes now since their referrer counts are zero.
2010 for (unsigned i = 0, e = DeadNodes.size(); i != e; ++i)
2011 delete DeadNodes[i];
2013 DEBUG(AssertGraphOK(); GlobalsGraph->AssertGraphOK());
2016 void DSGraph::AssertNodeContainsGlobal(const DSNode *N, GlobalValue *GV) const {
2017 assert(std::find(N->globals_begin(),N->globals_end(), GV) !=
2018 N->globals_end() && "Global value not in node!");
2021 void DSGraph::AssertCallSiteInGraph(const DSCallSite &CS) const {
2022 if (CS.isIndirectCall()) {
2023 AssertNodeInGraph(CS.getCalleeNode());
2025 if (CS.getNumPtrArgs() && CS.getCalleeNode() == CS.getPtrArg(0).getNode() &&
2026 CS.getCalleeNode() && CS.getCalleeNode()->getGlobals().empty())
2027 std::cerr << "WARNING: WEIRD CALL SITE FOUND!\n";
2030 AssertNodeInGraph(CS.getRetVal().getNode());
2031 for (unsigned j = 0, e = CS.getNumPtrArgs(); j != e; ++j)
2032 AssertNodeInGraph(CS.getPtrArg(j).getNode());
2035 void DSGraph::AssertCallNodesInGraph() const {
2036 for (fc_iterator I = fc_begin(), E = fc_end(); I != E; ++I)
2037 AssertCallSiteInGraph(*I);
2039 void DSGraph::AssertAuxCallNodesInGraph() const {
2040 for (afc_iterator I = afc_begin(), E = afc_end(); I != E; ++I)
2041 AssertCallSiteInGraph(*I);
2044 void DSGraph::AssertGraphOK() const {
2045 for (node_const_iterator NI = node_begin(), E = node_end(); NI != E; ++NI)
2048 for (ScalarMapTy::const_iterator I = ScalarMap.begin(),
2049 E = ScalarMap.end(); I != E; ++I) {
2050 assert(!I->second.isNull() && "Null node in scalarmap!");
2051 AssertNodeInGraph(I->second.getNode());
2052 if (GlobalValue *GV = dyn_cast<GlobalValue>(I->first)) {
2053 assert(I->second.getNode()->isGlobalNode() &&
2054 "Global points to node, but node isn't global?");
2055 AssertNodeContainsGlobal(I->second.getNode(), GV);
2058 AssertCallNodesInGraph();
2059 AssertAuxCallNodesInGraph();
2061 // Check that all pointer arguments to any functions in this graph have
2063 for (ReturnNodesTy::const_iterator RI = ReturnNodes.begin(),
2064 E = ReturnNodes.end();
2066 Function &F = *RI->first;
2067 for (Function::arg_iterator AI = F.arg_begin(); AI != F.arg_end(); ++AI)
2068 if (isPointerType(AI->getType()))
2069 assert(!getNodeForValue(AI).isNull() &&
2070 "Pointer argument must be in the scalar map!");
2074 /// computeNodeMapping - Given roots in two different DSGraphs, traverse the
2075 /// nodes reachable from the two graphs, computing the mapping of nodes from the
2076 /// first to the second graph. This mapping may be many-to-one (i.e. the first
2077 /// graph may have multiple nodes representing one node in the second graph),
2078 /// but it will not work if there is a one-to-many or many-to-many mapping.
2080 void DSGraph::computeNodeMapping(const DSNodeHandle &NH1,
2081 const DSNodeHandle &NH2, NodeMapTy &NodeMap,
2082 bool StrictChecking) {
2083 DSNode *N1 = NH1.getNode(), *N2 = NH2.getNode();
2084 if (N1 == 0 || N2 == 0) return;
2086 DSNodeHandle &Entry = NodeMap[N1];
2087 if (!Entry.isNull()) {
2088 // Termination of recursion!
2089 if (StrictChecking) {
2090 assert(Entry.getNode() == N2 && "Inconsistent mapping detected!");
2091 assert((Entry.getOffset() == (NH2.getOffset()-NH1.getOffset()) ||
2092 Entry.getNode()->isNodeCompletelyFolded()) &&
2093 "Inconsistent mapping detected!");
2098 Entry.setTo(N2, NH2.getOffset()-NH1.getOffset());
2100 // Loop over all of the fields that N1 and N2 have in common, recursively
2101 // mapping the edges together now.
2102 int N2Idx = NH2.getOffset()-NH1.getOffset();
2103 unsigned N2Size = N2->getSize();
2104 if (N2Size == 0) return; // No edges to map to.
2106 for (unsigned i = 0, e = N1->getSize(); i < e; i += DS::PointerSize) {
2107 const DSNodeHandle &N1NH = N1->getLink(i);
2108 // Don't call N2->getLink if not needed (avoiding crash if N2Idx is not
2110 if (!N1NH.isNull()) {
2111 if (unsigned(N2Idx)+i < N2Size)
2112 computeNodeMapping(N1NH, N2->getLink(N2Idx+i), NodeMap);
2114 computeNodeMapping(N1NH,
2115 N2->getLink(unsigned(N2Idx+i) % N2Size), NodeMap);
2121 /// computeGToGGMapping - Compute the mapping of nodes in the global graph to
2122 /// nodes in this graph.
2123 void DSGraph::computeGToGGMapping(NodeMapTy &NodeMap) {
2124 DSGraph &GG = *getGlobalsGraph();
2126 DSScalarMap &SM = getScalarMap();
2127 for (DSScalarMap::global_iterator I = SM.global_begin(),
2128 E = SM.global_end(); I != E; ++I)
2129 DSGraph::computeNodeMapping(SM[*I], GG.getNodeForValue(*I), NodeMap);
2132 /// computeGGToGMapping - Compute the mapping of nodes in the global graph to
2133 /// nodes in this graph. Note that any uses of this method are probably bugs,
2134 /// unless it is known that the globals graph has been merged into this graph!
2135 void DSGraph::computeGGToGMapping(InvNodeMapTy &InvNodeMap) {
2137 computeGToGGMapping(NodeMap);
2139 while (!NodeMap.empty()) {
2140 InvNodeMap.insert(std::make_pair(NodeMap.begin()->second,
2141 NodeMap.begin()->first));
2142 NodeMap.erase(NodeMap.begin());
2147 /// computeCalleeCallerMapping - Given a call from a function in the current
2148 /// graph to the 'Callee' function (which lives in 'CalleeGraph'), compute the
2149 /// mapping of nodes from the callee to nodes in the caller.
2150 void DSGraph::computeCalleeCallerMapping(DSCallSite CS, const Function &Callee,
2151 DSGraph &CalleeGraph,
2152 NodeMapTy &NodeMap) {
2154 DSCallSite CalleeArgs =
2155 CalleeGraph.getCallSiteForArguments(const_cast<Function&>(Callee));
2157 computeNodeMapping(CalleeArgs.getRetVal(), CS.getRetVal(), NodeMap);
2159 unsigned NumArgs = CS.getNumPtrArgs();
2160 if (NumArgs > CalleeArgs.getNumPtrArgs())
2161 NumArgs = CalleeArgs.getNumPtrArgs();
2163 for (unsigned i = 0; i != NumArgs; ++i)
2164 computeNodeMapping(CalleeArgs.getPtrArg(i), CS.getPtrArg(i), NodeMap);
2166 // Map the nodes that are pointed to by globals.
2167 DSScalarMap &CalleeSM = CalleeGraph.getScalarMap();
2168 DSScalarMap &CallerSM = getScalarMap();
2170 if (CalleeSM.global_size() >= CallerSM.global_size()) {
2171 for (DSScalarMap::global_iterator GI = CallerSM.global_begin(),
2172 E = CallerSM.global_end(); GI != E; ++GI)
2173 if (CalleeSM.global_count(*GI))
2174 computeNodeMapping(CalleeSM[*GI], CallerSM[*GI], NodeMap);
2176 for (DSScalarMap::global_iterator GI = CalleeSM.global_begin(),
2177 E = CalleeSM.global_end(); GI != E; ++GI)
2178 if (CallerSM.global_count(*GI))
2179 computeNodeMapping(CalleeSM[*GI], CallerSM[*GI], NodeMap);