1 //===- DataStructure.cpp - Implement the core data structure analysis -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the core data structure functionality.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Analysis/DSGraph.h"
15 #include "llvm/Function.h"
16 #include "llvm/iOther.h"
17 #include "llvm/DerivedTypes.h"
18 #include "llvm/Target/TargetData.h"
19 #include "llvm/Assembly/Writer.h"
20 #include "Support/CommandLine.h"
21 #include "Support/Debug.h"
22 #include "Support/STLExtras.h"
23 #include "Support/Statistic.h"
24 #include "Support/Timer.h"
// Analysis counters reported via -stats. NOTE(review): the listing elides
// lines 25-28/32-33; the surrounding anonymous namespace (or 'static'
// qualifiers) and the 'cl::opt<bool>' declarator line for the option below
// are not visible here — confirm against the full file.
29 Statistic<> NumFolds ("dsnode", "Number of nodes completely folded");
30 Statistic<> NumCallNodesMerged("dsnode", "Number of call nodes merged");
31 Statistic<> NumNodeAllocated ("dsnode", "Number of nodes allocated");
// Tail of a cl::opt<bool> declaration (head elided at line 33): command-line
// flag that makes DSA more conservative when cloning graphs.
34 EnableDSNodeGlobalRootsHack("enable-dsa-globalrootshack", cl::Hidden,
35 cl::desc("Make DSA less aggressive when cloning graphs"));
// NOTE(review): TIME_REGION is #define'd twice below. In the original this
// pair was guarded by an #ifdef/#else (lines 36-43, elided here) selecting
// between a real NamedRegionTimer and a no-op; as listed, the second
// definition would redefine the first. Restore the conditional when editing.
39 #define TIME_REGION(VARNAME, DESC) \
40 NamedRegionTimer VARNAME(DESC)
42 #define TIME_REGION(VARNAME, DESC)
// HandleForwarding - Called (via getNode) when the referenced node has been
// forwarded to another node: follows one step of the forwarding chain,
// adjusting this handle's Offset, and drops the referrer count on the old
// node, severing the forward link when the last referrer goes away.
// NOTE(review): the listing elides lines 49/53/56-60/63-67 — the body that
// re-points this handle at Next, the referrer-severing statements, and the
// clamping of Offset for shrunk nodes are incomplete below; consult the full
// file before modifying.
47 DSNode *DSNodeHandle::HandleForwarding() const {
48 assert(!N->ForwardNH.isNull() && "Can only be invoked if forwarding!");
50 // Handle node forwarding here!
51 DSNode *Next = N->ForwardNH.getNode(); // Cause recursive shrinkage
52 Offset += N->ForwardNH.getOffset();
54 if (--N->NumReferrers == 0) {
55 // Removing the last referrer to the node, sever the forwarding link
// Offset now points past the (possibly shrunk) target node; only legal if
// the target collapsed to size <= 1.
61 if (N->Size <= Offset) {
62 assert(N->Size <= 1 && "Forwarded to shrunk but not collapsed node?");
68 //===----------------------------------------------------------------------===//
69 // DSNode Implementation
70 //===----------------------------------------------------------------------===//
// DSNode constructor: creates an empty (void-typed, size-0, flagless) node
// registered in graph G; if a type T is given, it is merged in immediately,
// which establishes the node's size and link count.
// NOTE(review): lines 77-78 are elided (likely the NumNodeAllocated bump and
// the closing brace) — confirm against the full file.
72 DSNode::DSNode(const Type *T, DSGraph *G)
73 : NumReferrers(0), Size(0), ParentGraph(G), Ty(Type::VoidTy), NodeType(0) {
74 // Add the type entry if it is specified...
75 if (T) mergeTypeInfo(T, 0);
76 G->getNodes().push_back(this);
80 // DSNode copy constructor... do not copy over the referrers list!
// Copies size, type, globals and flags from N into a node owned by graph G.
// NOTE(review): lines 84-86 are elided — presumably the !NullLinks branch
// that copies N.Links instead of null-resizing; as listed, only the
// NullLinks case (same link count, all links null) is visible. Verify.
81 DSNode::DSNode(const DSNode &N, DSGraph *G, bool NullLinks)
82 : NumReferrers(0), Size(N.Size), ParentGraph(G),
83 Ty(N.Ty), Globals(N.Globals), NodeType(N.NodeType) {
87 Links.resize(N.Links.size()); // Create the appropriate number of null links
88 G->getNodes().push_back(this);
92 /// getTargetData - Get the target data object used to construct this node.
// Delegates to the owning graph; ParentGraph is set in every constructor and
// assumed non-null here.
94 const TargetData &DSNode::getTargetData() const {
95 return ParentGraph->getTargetData();
// assertOK - Internal consistency check: a void-typed node must have size 0
// or be a collapsed array; the node must have a parent graph; and every
// global recorded on the node must map back to this node in the graph's
// scalar map.
// NOTE(review): in the first assert, 'Ty != VoidTy || Ty == VoidTy && (...)'
// — the 'Ty == Type::VoidTy' term is redundant (A || !A && B == A || B) and
// relies on && binding tighter than ||; the assert message string (original
// lines 102-103) is elided from this listing.
98 void DSNode::assertOK() const {
99 assert((Ty != Type::VoidTy ||
100 Ty == Type::VoidTy && (Size == 0 ||
101 (NodeType & DSNode::Array))) &&
104 assert(ParentGraph && "Node has no parent?");
105 const DSScalarMap &SM = ParentGraph->getScalarMap();
106 for (unsigned i = 0, e = Globals.size(); i != e; ++i) {
107 assert(SM.count(Globals[i]));
108 assert(SM.find(Globals[i])->second.getNode() == this);
112 /// forwardNode - Mark this node as being obsolete, and all references to it
113 /// should be forwarded to the specified node and offset.
// Preconditions: To != this and this node is not already forwarding. An
// offset into a collapsed/tiny target (Size <= 1) is normalized to 0. The
// only offset equal to To->Size that is legal is 0 (empty node case).
// NOTE(review): lines 123-125 are elided (likely the size/links teardown of
// the forwarded-from node and the closing brace) — confirm before editing.
115 void DSNode::forwardNode(DSNode *To, unsigned Offset) {
116 assert(this != To && "Cannot forward a node to itself!");
117 assert(ForwardNH.isNull() && "Already forwarding from this node!");
118 if (To->Size <= 1) Offset = 0;
119 assert((Offset < To->Size || (Offset == To->Size && Offset == 0)) &&
120 "Forwarded offset is wrong!");
121 ForwardNH.setNode(To);
122 ForwardNH.setOffset(Offset);
128 // addGlobal - Add an entry for a global value to the Globals list. This also
129 // marks the node with the 'G' flag if it does not already have it.
// The Globals vector is kept sorted so membership can be tested with
// lower_bound; duplicates are rejected by the check below.
131 void DSNode::addGlobal(GlobalValue *GV) {
132 // Keep the list sorted.
133 std::vector<GlobalValue*>::iterator I =
134 std::lower_bound(Globals.begin(), Globals.end(), GV);
// Insert only if GV is not already present at the found position.
136 if (I == Globals.end() || *I != GV) {
137 //assert(GV->getType()->getElementType() == Ty);
138 Globals.insert(I, GV);
139 NodeType |= GlobalNode;
143 /// foldNodeCompletely - If we determine that this node has some funny
144 /// behavior happening to it that we cannot represent, we fold it down to a
145 /// single, completely pessimistic, node. This node is represented as a
146 /// single byte with a single TypeEntry of "void".
// NOTE(review): several lines are elided in this listing (150-152, 157-158,
// 160-161, 168, 170, 173, 176, 179, 183-184, 186+), including the small-node
// fast path's size/type reset and closing braces. The visible structure:
// small nodes (size <= 1) are folded in place; larger nodes forward to a
// freshly allocated 1-byte array-of-void node so referrers with offset > 0
// get their offsets corrected, and all outgoing links are merged into the
// destination's single link slot.
148 void DSNode::foldNodeCompletely() {
149 if (isNodeCompletelyFolded()) return; // If this node is already folded...
153 // If this node has a size that is <= 1, we don't need to create a forwarding
155 if (getSize() <= 1) {
156 NodeType |= DSNode::Array;
159 assert(Links.size() <= 1 && "Size is 1, but has more links?");
162 // Create the node we are going to forward to. This is required because
163 // some referrers may have an offset that is > 0. By forcing them to
164 // forward, the forwarder has the opportunity to correct the offset.
165 DSNode *DestNode = new DSNode(0, ParentGraph);
166 DestNode->NodeType = NodeType|DSNode::Array;
167 DestNode->Ty = Type::VoidTy;
// Transfer the global list wholesale; swap avoids a copy.
169 DestNode->Globals.swap(Globals);
171 // Start forwarding to the destination node...
172 forwardNode(DestNode, 0);
174 if (!Links.empty()) {
175 DestNode->Links.reserve(1);
// NH guards against DestNode itself being forwarded during recursive merges.
177 DSNodeHandle NH(DestNode);
178 DestNode->Links.push_back(Links[0]);
180 // If we have links, merge all of our outgoing links together...
181 for (unsigned i = Links.size()-1; i != 0; --i)
182 NH.getNode()->Links[0].mergeWith(Links[i]);
185 DestNode->Links.resize(1);
190 /// isNodeCompletelyFolded - Return true if this node has been completely
191 /// folded down to something that can never be expanded, effectively losing
192 /// all of the field sensitivity that may be present in the node.
// A collapsed node is exactly: one byte, void-typed, with the Array bit set
// (the canonical form produced by foldNodeCompletely).
194 bool DSNode::isNodeCompletelyFolded() const {
195 return getSize() == 1 && Ty == Type::VoidTy && isArray();
199 /// TypeElementWalker Class - Used for implementation of physical subtyping...
// Performs a depth-first, in-order walk over the leaf (first-class) elements
// of an aggregate type, tracking the byte offset of each leaf via
// TargetData. Used by ElementTypesAreCompatible below.
// NOTE(review): this listing elides lines inside the class (202-205, 208-209,
// 212, 214-217, etc.): the anonymous-namespace opener, StackState's Ty/
// Offset/Idx field declarations, the constructor body (push of the root type
// + StepToLeaf), the 'void StepToLeaf() {' header before line 258, Idx
// increments, and closing braces. Consult the full file before editing.
201 class TypeElementWalker {
// StackState - one frame per aggregate currently being traversed.
206 StackState(const Type *T, unsigned Off = 0)
207 : Ty(T), Offset(Off), Idx(0) {}
210 std::vector<StackState> Stack;
211 const TargetData &TD;
213 TypeElementWalker(const Type *T, const TargetData &td) : TD(td) {
218 bool isDone() const { return Stack.empty(); }
219 const Type *getCurrentType() const { return Stack.back().Ty; }
220 unsigned getCurrentOffset() const { return Stack.back().Offset; }
// Advance to the next leaf element (pop + advance, then descend to leaf).
222 void StepToNextType() {
223 PopStackAndAdvance();
228 /// PopStackAndAdvance - Pop the current element off of the stack and
229 /// advance the underlying element to the next contained member.
230 void PopStackAndAdvance() {
231 assert(!Stack.empty() && "Cannot pop an empty stack!");
233 while (!Stack.empty()) {
234 StackState &SS = Stack.back();
235 if (const StructType *ST = dyn_cast<StructType>(SS.Ty)) {
// More struct members remain: bump Offset by the delta between consecutive
// member offsets. (Relies on Idx having been pre-incremented; the increment
// line is elided from this listing.)
237 if (SS.Idx != ST->getElementTypes().size()) {
238 const StructLayout *SL = TD.getStructLayout(ST);
239 SS.Offset += SL->MemberOffsets[SS.Idx]-SL->MemberOffsets[SS.Idx-1];
242 Stack.pop_back(); // At the end of the structure
244 const ArrayType *AT = cast<ArrayType>(SS.Ty);
246 if (SS.Idx != AT->getNumElements()) {
247 SS.Offset += TD.getTypeSize(AT->getElementType());
250 Stack.pop_back(); // At the end of the array
255 /// StepToLeaf - Used by physical subtyping to move to the first leaf node
256 /// on the type stack.
258 if (Stack.empty()) return;
259 while (!Stack.empty() && !Stack.back().Ty->isFirstClassType()) {
260 StackState &SS = Stack.back();
261 if (const StructType *ST = dyn_cast<StructType>(SS.Ty)) {
// Empty aggregates have no leaves: discard and advance past them.
262 if (ST->getElementTypes().empty()) {
264 PopStackAndAdvance();
266 // Step into the structure...
267 assert(SS.Idx < ST->getElementTypes().size());
268 const StructLayout *SL = TD.getStructLayout(ST);
269 Stack.push_back(StackState(ST->getElementTypes()[SS.Idx],
270 SS.Offset+SL->MemberOffsets[SS.Idx]));
273 const ArrayType *AT = cast<ArrayType>(SS.Ty);
274 if (AT->getNumElements() == 0) {
276 PopStackAndAdvance();
278 // Step into the array...
279 assert(SS.Idx < AT->getNumElements());
280 Stack.push_back(StackState(AT->getElementType(),
282 TD.getTypeSize(AT->getElementType())));
288 } // end anonymous namespace
290 /// ElementTypesAreCompatible - Check to see if the specified types are
291 /// "physically" compatible. If so, return true, else return false. We only
292 /// have to check the fields in T1: T2 may be larger than T1. If AllowLargerT1
293 /// is true, then we also allow a larger T1.
// Walks the leaf elements of both types in lockstep; leaves must appear at
// identical byte offsets and be identical or losslessly convertible.
// NOTE(review): the 'return false;' bodies of the two failure checks
// (original lines 301 and 306-307) are elided from this listing.
295 static bool ElementTypesAreCompatible(const Type *T1, const Type *T2,
296 bool AllowLargerT1, const TargetData &TD){
297 TypeElementWalker T1W(T1, TD), T2W(T2, TD);
299 while (!T1W.isDone() && !T2W.isDone()) {
300 if (T1W.getCurrentOffset() != T2W.getCurrentOffset())
// These inner T1/T2 shadow the parameters deliberately: current leaf types.
303 const Type *T1 = T1W.getCurrentType();
304 const Type *T2 = T2W.getCurrentType();
305 if (T1 != T2 && !T1->isLosslesslyConvertibleTo(T2))
308 T1W.StepToNextType();
309 T2W.StepToNextType();
// Compatible if T1 was fully consumed (or a larger T1 is permitted).
312 return AllowLargerT1 || T1W.isDone();
316 /// mergeTypeInfo - This method merges the specified type into the current node
317 /// at the specified offset. This may update the current node's type record if
318 /// this gives more information to the node, it may do nothing to the node if
319 /// this information is already known, or it may merge the node completely (and
320 /// return true) if the information is incompatible with what is already known.
322 /// This method returns true if the node is completely folded, otherwise false.
// NOTE(review): this listing elides many interior lines of this function
// (fall-through statements, return statements, Ty/Size assignments such as
// the 'Ty = NewTy; Size = NewTySize;' updates, case/brace closers). The
// comments below annotate only what is visible; consult the full file before
// changing any logic.
324 bool DSNode::mergeTypeInfo(const Type *NewTy, unsigned Offset,
325 bool FoldIfIncompatible) {
326 const TargetData &TD = getTargetData();
327 // Check to make sure the Size member is up-to-date. Size can be one of the
329 // Size = 0, Ty = Void: Nothing is known about this node.
330 // Size = 0, Ty = FnTy: FunctionPtr doesn't have a size, so we use zero
331 // Size = 1, Ty = Void, Array = 1: The node is collapsed
332 // Otherwise, sizeof(Ty) = Size
// NOTE(review): the second and fourth disjuncts of this assert (lines 335
// and 337) are identical — one of them is redundant and can be removed.
334 assert(((Size == 0 && Ty == Type::VoidTy && !isArray()) ||
335 (Size == 0 && !Ty->isSized() && !isArray()) ||
336 (Size == 1 && Ty == Type::VoidTy && isArray()) ||
337 (Size == 0 && !Ty->isSized() && !isArray()) ||
338 (TD.getTypeSize(Ty) == Size)) &&
339 "Size member of DSNode doesn't match the type structure!");
340 assert(NewTy != Type::VoidTy && "Cannot merge void type into DSNode!");
342 if (Offset == 0 && NewTy == Ty)
343 return false; // This should be a common case, handle it efficiently
345 // Return true immediately if the node is completely folded.
346 if (isNodeCompletelyFolded()) return true;
348 // If this is an array type, eliminate the outside arrays because they won't
349 // be used anyway. This greatly reduces the size of large static arrays used
350 // as global variables, for example.
352 bool WillBeArray = false;
353 while (const ArrayType *AT = dyn_cast<ArrayType>(NewTy)) {
354 // FIXME: we might want to keep small arrays, but must be careful about
355 // things like: [2 x [10000 x int*]]
356 NewTy = AT->getElementType();
360 // Figure out how big the new type we're merging in is...
// Unsized types (e.g. functions) are treated as size 0.
361 unsigned NewTySize = NewTy->isSized() ? TD.getTypeSize(NewTy) : 0;
363 // Otherwise check to see if we can fold this type into the current node. If
364 // we can't, we fold the node completely, if we can, we potentially update our
// Case 1: node is untyped — adopt NewTy outright (the Ty/Size assignments
// on the elided lines 369/372-373) and size the Links vector to one slot per
// pointer-sized word.
367 if (Ty == Type::VoidTy) {
368 // If this is the first type that this node has seen, just accept it without
370 assert(Offset == 0 && !isArray() &&
371 "Cannot have an offset into a void node!");
374 if (WillBeArray) NodeType |= Array;
377 // Calculate the number of outgoing links from this node.
378 Links.resize((Size+DS::PointerSize-1) >> DS::PointerShift);
// Case 2: the incoming type extends past the node's current size.
382 // Handle node expansion case here...
383 if (Offset+NewTySize > Size) {
384 // It is illegal to grow this node if we have treated it as an array of
387 if (FoldIfIncompatible) foldNodeCompletely();
391 if (Offset) { // We could handle this case, but we don't for now...
392 std::cerr << "UNIMP: Trying to merge a growth type into "
393 << "offset != 0: Collapsing!\n";
394 if (FoldIfIncompatible) foldNodeCompletely();
398 // Okay, the situation is nice and simple, we are trying to merge a type in
399 // at offset 0 that is bigger than our current type. Implement this by
400 // switching to the new type and then merge in the smaller one, which should
401 // hit the other code path here. If the other code path decides it's not
402 // ok, it will collapse the node as appropriate.
404 const Type *OldTy = Ty;
407 if (WillBeArray) NodeType |= Array;
410 // Must grow links to be the appropriate size...
411 Links.resize((Size+DS::PointerSize-1) >> DS::PointerShift);
413 // Merge in the old type now... which is guaranteed to be smaller than the
415 return mergeTypeInfo(OldTy, 0);
418 assert(Offset <= Size &&
419 "Cannot merge something into a part of our type that doesn't exist!");
421 // Find the section of Ty that NewTy overlaps with... first we find the
422 // type that starts at offset Offset.
// Descend through Ty until the subtype starting at byte 'Offset' is found;
// O tracks the byte offset at which SubType begins. (The enclosing
// 'while (O < Offset)' header and 'unsigned O = 0;' are on elided lines.)
425 const Type *SubType = Ty;
427 assert(Offset-O < TD.getTypeSize(SubType) && "Offset out of range!");
429 switch (SubType->getPrimitiveID()) {
430 case Type::StructTyID: {
431 const StructType *STy = cast<StructType>(SubType);
432 const StructLayout &SL = *TD.getStructLayout(STy);
// Linear scan for the member containing Offset-O.
434 unsigned i = 0, e = SL.MemberOffsets.size();
435 for (; i+1 < e && SL.MemberOffsets[i+1] <= Offset-O; ++i)
438 // The offset we are looking for must be in the i'th element...
439 SubType = STy->getElementTypes()[i];
440 O += SL.MemberOffsets[i];
443 case Type::ArrayTyID: {
444 SubType = cast<ArrayType>(SubType)->getElementType();
445 unsigned ElSize = TD.getTypeSize(SubType);
446 unsigned Remainder = (Offset-O) % ElSize;
447 O = Offset-Remainder;
// default case (scalar at a nonzero interior offset): incompatible — fold.
451 if (FoldIfIncompatible) foldNodeCompletely();
456 assert(O == Offset && "Could not achieve the correct offset!");
458 // If we found our type exactly, early exit
459 if (SubType == NewTy) return false;
461 // Differing function types don't require us to merge. They are not values anyway.
462 if (isa<FunctionType>(SubType) &&
463 isa<FunctionType>(NewTy)) return false;
465 unsigned SubTypeSize = SubType->isSized() ? TD.getTypeSize(SubType) : 0;
467 // Ok, we are getting desperate now. Check for physical subtyping, where we
468 // just require each element in the node to be compatible.
// The < 256 bounds cap the cost of the element walk for huge types.
469 if (NewTySize <= SubTypeSize && NewTySize && NewTySize < 256 &&
470 SubTypeSize && SubTypeSize < 256 &&
471 ElementTypesAreCompatible(NewTy, SubType, !isArray(), TD))
474 // Okay, so we found the leader type at the offset requested. Search the list
475 // of types that starts at this offset. If SubType is currently an array or
476 // structure, the type desired may actually be the first element of the
// PadSize = size of the current element including any trailing padding that
// belongs to it within its parent aggregate.
479 unsigned PadSize = SubTypeSize; // Size, including pad memory which is ignored
480 while (SubType != NewTy) {
481 const Type *NextSubType = 0;
482 unsigned NextSubTypeSize = 0;
483 unsigned NextPadSize = 0;
484 switch (SubType->getPrimitiveID()) {
485 case Type::StructTyID: {
486 const StructType *STy = cast<StructType>(SubType);
487 const StructLayout &SL = *TD.getStructLayout(STy);
// First member's padded size is the offset of the second member (if any).
488 if (SL.MemberOffsets.size() > 1)
489 NextPadSize = SL.MemberOffsets[1];
491 NextPadSize = SubTypeSize;
492 NextSubType = STy->getElementTypes()[0];
493 NextSubTypeSize = TD.getTypeSize(NextSubType);
496 case Type::ArrayTyID:
497 NextSubType = cast<ArrayType>(SubType)->getElementType();
498 NextSubTypeSize = TD.getTypeSize(NextSubType);
499 NextPadSize = NextSubTypeSize;
505 if (NextSubType == 0)
506 break; // In the default case, break out of the loop
508 if (NextPadSize < NewTySize)
509 break; // Don't allow shrinking to a smaller type than NewTySize
510 SubType = NextSubType;
511 SubTypeSize = NextSubTypeSize;
512 PadSize = NextPadSize;
515 // If we found the type exactly, return it...
516 if (SubType == NewTy)
519 // Check to see if we have a compatible, but different type...
520 if (NewTySize == SubTypeSize) {
521 // Check to see if this type is obviously convertible... int -> uint f.e.
522 if (NewTy->isLosslesslyConvertibleTo(SubType))
525 // Check to see if we have a pointer & integer mismatch going on here,
526 // loading a pointer as a long, for example.
// NOTE(review): relies on && binding tighter than || — correct as written,
// but add explicit parentheses to silence -Wparentheses and aid readers.
528 if (SubType->isInteger() && isa<PointerType>(NewTy) ||
529 NewTy->isInteger() && isa<PointerType>(SubType))
531 } else if (NewTySize > SubTypeSize && NewTySize <= PadSize) {
532 // We are accessing the field, plus some structure padding. Ignore the
533 // structure padding.
// Incompatible types: emit a debug trace (Module* M declared on an elided
// line) and pessimistically collapse the node.
538 if (getParentGraph()->getReturnNodes().size())
539 M = getParentGraph()->getReturnNodes().begin()->first->getParent();
540 DEBUG(std::cerr << "MergeTypeInfo Folding OrigTy: ";
541 WriteTypeSymbolic(std::cerr, Ty, M) << "\n due to:";
542 WriteTypeSymbolic(std::cerr, NewTy, M) << " @ " << Offset << "!\n"
544 WriteTypeSymbolic(std::cerr, SubType, M) << "\n\n");
546 if (FoldIfIncompatible) foldNodeCompletely();
552 // addEdgeTo - Add an edge from the current node to the specified node. This
553 // can cause merging of nodes in the graph.
// If a link already exists at Offset, the target nodes are unified (which
// may cascade into further merges); otherwise the link slot is simply set.
555 void DSNode::addEdgeTo(unsigned Offset, const DSNodeHandle &NH) {
556 if (NH.isNull()) return; // Nothing to do
558 DSNodeHandle &ExistingEdge = getLink(Offset);
559 if (!ExistingEdge.isNull()) {
560 // Merge the two nodes...
561 ExistingEdge.mergeWith(NH);
562 } else { // No merging to perform...
563 setLink(Offset, NH); // Just force a link in there...
568 // MergeSortedVectors - Efficiently merge a vector into another vector where
569 // duplicates are not allowed and both are sorted. This assumes that 'T's are
570 // efficiently copyable and have sane comparison semantics.
// Fast paths handle empty/singleton operands without a temporary; the
// general case does a std::merge into resized Dest then std::unique.
// NOTE(review): the bodies of the first two branches (original lines 578,
// 580: presumably nothing / 'Dest = Src;'), the insert in the Dest.size()==1
// branch (593), and the 'else' opener of the general case are elided from
// this listing.
572 static void MergeSortedVectors(std::vector<GlobalValue*> &Dest,
573 const std::vector<GlobalValue*> &Src) {
574 // By far, the most common cases will be the simple ones. In these cases,
575 // avoid having to allocate a temporary vector...
577 if (Src.empty()) { // Nothing to merge in...
579 } else if (Dest.empty()) { // Just copy the result in...
581 } else if (Src.size() == 1) { // Insert a single element...
582 const GlobalValue *V = Src[0];
583 std::vector<GlobalValue*>::iterator I =
584 std::lower_bound(Dest.begin(), Dest.end(), V);
585 if (I == Dest.end() || *I != Src[0]) // If not already contained...
586 Dest.insert(I, Src[0]);
587 } else if (Dest.size() == 1) {
588 GlobalValue *Tmp = Dest[0]; // Save value in temporary...
589 Dest = Src; // Copy over list...
590 std::vector<GlobalValue*>::iterator I =
591 std::lower_bound(Dest.begin(), Dest.end(), Tmp);
592 if (I == Dest.end() || *I != Tmp) // If not already contained...
596 // Make a copy to the side of Dest...
597 std::vector<GlobalValue*> Old(Dest);
599 // Make space for all of the type entries now...
600 Dest.resize(Dest.size()+Src.size());
602 // Merge the two sorted ranges together... into Dest.
603 std::merge(Old.begin(), Old.end(), Src.begin(), Src.end(), Dest.begin());
605 // Now erase any duplicate entries that may have accumulated into the
606 // vectors (because they were in both of the input sets)
607 Dest.erase(std::unique(Dest.begin(), Dest.end()), Dest.end());
// mergeGlobals - Merge the sorted global list RHS into this node's sorted
// Globals list, discarding duplicates (thin wrapper over MergeSortedVectors).
611 void DSNode::mergeGlobals(const std::vector<GlobalValue*> &RHS) {
612 MergeSortedVectors(Globals, RHS);
615 // MergeNodes - Helper function for DSNode::mergeWith().
616 // This function does the hard work of merging two nodes, CurNodeH
617 // and NH after filtering out trivial cases and making sure that
618 // CurNodeH.offset >= NH.offset.
621 // Since merging may cause either node to go away, we must always
622 // use the node-handles to refer to the nodes. These node handles are
623 // automatically updated during merging, so will always provide access
624 // to the correct node after a merge.
// NOTE(review): this listing elides closing braces and a few statements
// (e.g. the NOffset/NSize refresh after the size-mismatch folds, lines
// 645-647, and the link-clearing statement near line 705). Annotations below
// cover only the visible code.
626 void DSNode::MergeNodes(DSNodeHandle& CurNodeH, DSNodeHandle& NH) {
627 assert(CurNodeH.getOffset() >= NH.getOffset() &&
628 "This should have been enforced in the caller.");
630 // Now we know that Offset >= NH.Offset, so convert it so our "Offset" (with
631 // respect to NH.Offset) is now zero. NOffset is the distance from the base
632 // of our object that N starts from.
634 unsigned NOffset = CurNodeH.getOffset()-NH.getOffset();
635 unsigned NSize = NH.getNode()->getSize();
637 // If the two nodes are of different size, and the smaller node has the array
638 // bit set, collapse!
639 if (NSize != CurNodeH.getNode()->getSize()) {
640 if (NSize < CurNodeH.getNode()->getSize()) {
641 if (NH.getNode()->isArray())
642 NH.getNode()->foldNodeCompletely();
643 } else if (CurNodeH.getNode()->isArray()) {
644 NH.getNode()->foldNodeCompletely();
648 // Merge the type entries of the two nodes together...
649 if (NH.getNode()->Ty != Type::VoidTy)
650 CurNodeH.getNode()->mergeTypeInfo(NH.getNode()->Ty, NOffset);
651 assert(!CurNodeH.getNode()->isDeadNode());
653 // If we are merging a node with a completely folded node, then both nodes are
654 // now completely folded.
656 if (CurNodeH.getNode()->isNodeCompletelyFolded()) {
657 if (!NH.getNode()->isNodeCompletelyFolded()) {
658 NH.getNode()->foldNodeCompletely();
659 assert(NH.getNode() && NH.getOffset() == 0 &&
660 "folding did not make offset 0?");
// Folding re-anchors the handle at offset 0 on a 1-byte node.
661 NOffset = NH.getOffset();
662 NSize = NH.getNode()->getSize();
663 assert(NOffset == 0 && NSize == 1);
665 } else if (NH.getNode()->isNodeCompletelyFolded()) {
666 CurNodeH.getNode()->foldNodeCompletely();
667 assert(CurNodeH.getNode() && CurNodeH.getOffset() == 0 &&
668 "folding did not make offset 0?");
669 NOffset = NH.getOffset();
670 NSize = NH.getNode()->getSize();
671 assert(NOffset == 0 && NSize == 1);
// Re-read N: the folds above may have forwarded either handle; if both now
// resolve to the same node the merge is already complete.
674 DSNode *N = NH.getNode();
675 if (CurNodeH.getNode() == N || N == 0) return;
676 assert(!CurNodeH.getNode()->isDeadNode());
678 // Merge the NodeType information.
679 CurNodeH.getNode()->NodeType |= N->NodeType;
681 // Start forwarding to the new node!
682 N->forwardNode(CurNodeH.getNode(), NOffset);
683 assert(!CurNodeH.getNode()->isDeadNode());
685 // Make all of the outgoing links of N now be outgoing links of CurNodeH.
687 for (unsigned i = 0; i < N->getNumLinks(); ++i) {
688 DSNodeHandle &Link = N->getLink(i << DS::PointerShift);
689 if (Link.getNode()) {
// Compute the offset into the current node at which to merge this link: a
// linear (wrapping) mapping in the common case, but if the current node got
// collapsed by recursive merging, everything merges at offset zero. (The
// 'if (CN->getSize() != 1)' guard is on an elided line before 699.)
696 unsigned MergeOffset = 0;
697 DSNode *CN = CurNodeH.getNode();
699 MergeOffset = ((i << DS::PointerShift)+NOffset) % CN->getSize();
700 CN->addEdgeTo(MergeOffset, Link);
704 // Now that there are no outgoing edges, all of the Links are dead.
707 // Merge the globals list...
708 if (!N->Globals.empty()) {
709 CurNodeH.getNode()->mergeGlobals(N->Globals);
711 // Delete the globals from the old node...
// swap-with-temporary idiom: actually releases the vector's capacity.
712 std::vector<GlobalValue*>().swap(N->Globals);
717 // mergeWith - Merge this node and the specified node, moving all links to and
718 // from the argument node into the current node, deleting the node argument.
719 // Offset indicates what offset the specified node is to be merged into the
722 // The specified node may be a null pointer (in which case, we update it to
723 // point to this node).
// NOTE(review): several guard/return statements are elided in this listing
// (e.g. the 'if (N == 0)' test before line 732, the 'N == this' offset-
// mismatch test before 742, and early returns after the recursive calls).
725 void DSNode::mergeWith(const DSNodeHandle &NH, unsigned Offset) {
726 DSNode *N = NH.getNode();
// Trivial case: merging a node with itself at the same offset is a no-op.
727 if (N == this && NH.getOffset() == Offset)
730 // If the RHS is a null node, make it point to this node!
732 NH.mergeWith(DSNodeHandle(this, Offset));
736 assert(!N->isDeadNode() && !isDeadNode());
737 assert(!hasNoReferrers() && "Should not try to fold a useless node!");
740 // We cannot merge two pieces of the same node together, collapse the node
742 DEBUG(std::cerr << "Attempting to merge two chunks of"
743 << " the same node together!\n");
744 foldNodeCompletely();
748 // If both nodes are not at offset 0, make sure that we are merging the node
749 // at an later offset into the node with the zero offset.
// Normalize so the smaller offset (and, on ties, the larger node) is the
// merge target; recurse with the operands swapped.
751 if (Offset < NH.getOffset()) {
752 N->mergeWith(DSNodeHandle(this, Offset), NH.getOffset());
754 } else if (Offset == NH.getOffset() && getSize() < N->getSize()) {
755 // If the offsets are the same, merge the smaller node into the bigger node
756 N->mergeWith(DSNodeHandle(this, Offset), NH.getOffset());
760 // Ok, now we can merge the two nodes. Use a static helper that works with
761 // two node handles, since "this" may get merged away at intermediate steps.
762 DSNodeHandle CurNodeH(this, Offset);
763 DSNodeHandle NHCopy(NH);
764 DSNode::MergeNodes(CurNodeH, NHCopy);
768 //===----------------------------------------------------------------------===//
769 // ReachabilityCloner Implementation
770 //===----------------------------------------------------------------------===//
// getClonedNH - Return the (possibly newly created) clone in the Dest graph
// of the node referenced by SrcNH, with the handle's offset translated.
// Recursively clones everything reachable from the source node; NodeMap
// memoizes source-node -> dest-handle so cycles terminate.
// NOTE(review): some lines are elided in this listing (e.g. line 782-783,
// where NH is bound to the new clone DN, and loop/brace closers).
772 DSNodeHandle ReachabilityCloner::getClonedNH(const DSNodeHandle &SrcNH) {
773 if (SrcNH.isNull()) return DSNodeHandle();
774 const DSNode *SN = SrcNH.getNode();
776 DSNodeHandle &NH = NodeMap[SN];
777 if (!NH.isNull()) // Node already mapped?
778 return DSNodeHandle(NH.getNode(), NH.getOffset()+SrcNH.getOffset());
// Clone with all links nulled; they are re-established below so that
// recursive clones go through the memo table.
780 DSNode *DN = new DSNode(*SN, &Dest, true /* Null out all links */);
781 DN->maskNodeTypes(BitsToKeep);
784 // Next, recursively clone all outgoing links as necessary. Note that
785 // adding these links can cause the node to collapse itself at any time, and
786 // the current node may be merged with arbitrary other nodes. For this
787 // reason, we must always go through NH.
789 for (unsigned i = 0, e = SN->getNumLinks(); i != e; ++i) {
790 const DSNodeHandle &SrcEdge = SN->getLink(i << DS::PointerShift);
791 if (!SrcEdge.isNull()) {
792 const DSNodeHandle &DestEdge = getClonedNH(SrcEdge);
793 // Compute the offset into the current node at which to
794 // merge this link. In the common case, this is a linear
795 // relation to the offset in the original node (with
796 // wrapping), but if the current node gets collapsed due to
797 // recursive merging, we must make sure to merge in all remaining
798 // links at offset zero.
799 unsigned MergeOffset = 0;
800 DSNode *CN = NH.getNode();
801 if (CN->getSize() != 1)
802 MergeOffset = ((i << DS::PointerShift)+NH.getOffset()
803 - SrcNH.getOffset()) %CN->getSize();
804 CN->addEdgeTo(MergeOffset, DestEdge);
808 // If this node contains any globals, make sure they end up in the scalar
809 // map with the correct offset.
810 for (DSNode::global_iterator I = SN->global_begin(), E = SN->global_end();
812 GlobalValue *GV = *I;
813 const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
814 DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
815 assert(DestGNH.getNode() == NH.getNode() &&"Global mapping inconsistent");
816 Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
817 DestGNH.getOffset()+SrcGNH.getOffset()));
819 if (CloneFlags & DSGraph::UpdateInlinedGlobals)
820 Dest.getInlinedGlobals().insert(GV);
// Return a handle whose offset is relative to the (possibly re-merged) clone.
823 return DSNodeHandle(NH.getNode(), NH.getOffset()+SrcNH.getOffset());
// merge - Merge the source-graph node referenced by SrcNH into the
// destination-graph node referenced by NH, cloning the source node (and
// whatever it reaches) into Dest as needed. Prefers an in-place merge into
// the existing destination node; only allocates a temporary clone when the
// destination offset is smaller than the source offset.
// NOTE(review): this listing elides guards, returns and brace closers
// (e.g. the 'if (NH.isNull())'/return path around lines 830-835, and the
// 'else' opener of the allocation fallback near line 913). Comments below
// describe only the visible code.
826 void ReachabilityCloner::merge(const DSNodeHandle &NH,
827 const DSNodeHandle &SrcNH) {
828 if (SrcNH.isNull()) return; // Noop
830 // If there is no destination node, just clone the source and assign the
831 // destination node to be it.
832 NH.mergeWith(getClonedNH(SrcNH));
836 // Okay, at this point, we know that we have both a destination and a source
837 // node that need to be merged. Check to see if the source node has already
839 const DSNode *SN = SrcNH.getNode();
840 DSNodeHandle &SCNH = NodeMap[SN]; // SourceClonedNodeHandle
841 if (SCNH.getNode()) { // Node already cloned?
842 NH.mergeWith(DSNodeHandle(SCNH.getNode(),
843 SCNH.getOffset()+SrcNH.getOffset()));
845 return; // Nothing to do!
848 // Okay, so the source node has not already been cloned. Instead of creating
849 // a new DSNode, only to merge it into the one we already have, try to perform
850 // the merge in-place. The only case we cannot handle here is when the offset
851 // into the existing node is less than the offset into the virtual node we are
852 // merging in. In this case, we have to extend the existing node, which
853 // requires an allocation anyway.
854 DSNode *DN = NH.getNode(); // Make sure the Offset is up-to-date
855 if (NH.getOffset() >= SrcNH.getOffset()) {
857 if (!DN->isNodeCompletelyFolded()) {
858 // Make sure the destination node is folded if the source node is folded.
859 if (SN->isNodeCompletelyFolded()) {
860 DN->foldNodeCompletely();
862 } else if (SN->getSize() != DN->getSize()) {
863 // If the two nodes are of different size, and the smaller node has the
864 // array bit set, collapse!
// NOTE(review): the 'SN->isArray()' test for this branch appears to be on
// an elided line (866) — only the fold call is visible here.
865 if (SN->getSize() < DN->getSize()) {
867 DN->foldNodeCompletely();
870 } else if (DN->isArray()) {
871 DN->foldNodeCompletely();
876 // Merge the type entries of the two nodes together...
877 if (SN->getType() != Type::VoidTy && !DN->isNodeCompletelyFolded()) {
878 DN->mergeTypeInfo(SN->getType(), NH.getOffset()-SrcNH.getOffset());
883 assert(!DN->isDeadNode());
885 // Merge the NodeType information.
886 DN->mergeNodeFlags(SN->getNodeFlags() & BitsToKeep);
888 // Before we start merging outgoing links and updating the scalar map, make
889 // sure it is known that this is the representative node for the src node.
890 SCNH = DSNodeHandle(DN, NH.getOffset()-SrcNH.getOffset());
892 // If the source node contains any globals, make sure they end up in the
893 // scalar map with the correct offset.
894 if (SN->global_begin() != SN->global_end()) {
895 // Update the globals in the destination node itself.
896 DN->mergeGlobals(SN->getGlobals());
898 // Update the scalar map for the graph we are merging the source node
900 for (DSNode::global_iterator I = SN->global_begin(), E = SN->global_end();
902 GlobalValue *GV = *I;
903 const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
904 DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
905 assert(DestGNH.getNode()==NH.getNode() &&"Global mapping inconsistent");
906 Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
907 DestGNH.getOffset()+SrcGNH.getOffset()));
909 if (CloneFlags & DSGraph::UpdateInlinedGlobals)
910 Dest.getInlinedGlobals().insert(GV);
// Fallback path (destination offset < source offset): clone, then merge.
914 // We cannot handle this case without allocating a temporary node. Fall
915 // back on being simple.
917 DSNode *NewDN = new DSNode(*SN, &Dest, true /* Null out all links */);
918 NewDN->maskNodeTypes(BitsToKeep);
920 unsigned NHOffset = NH.getOffset();
921 NH.mergeWith(DSNodeHandle(NewDN, SrcNH.getOffset()));
922 assert(NH.getNode() &&
923 (NH.getOffset() > NHOffset ||
924 (NH.getOffset() == 0 && NH.getNode()->isNodeCompletelyFolded())) &&
925 "Merging did not adjust the offset!");
927 // Before we start merging outgoing links and updating the scalar map, make
928 // sure it is known that this is the representative node for the src node.
929 SCNH = DSNodeHandle(NH.getNode(), NH.getOffset()-SrcNH.getOffset());
933 // Next, recursively merge all outgoing links as necessary. Note that
934 // adding these links can cause the destination node to collapse itself at
935 // any time, and the current node may be merged with arbitrary other nodes.
936 // For this reason, we must always go through NH.
938 for (unsigned i = 0, e = SN->getNumLinks(); i != e; ++i) {
939 const DSNodeHandle &SrcEdge = SN->getLink(i << DS::PointerShift);
940 if (!SrcEdge.isNull()) {
941 // Compute the offset into the current node at which to
942 // merge this link. In the common case, this is a linear
943 // relation to the offset in the original node (with
944 // wrapping), but if the current node gets collapsed due to
945 // recursive merging, we must make sure to merge in all remaining
946 // links at offset zero.
947 unsigned MergeOffset = 0;
948 DSNode *CN = SCNH.getNode();
949 if (CN->getSize() != 1)
950 MergeOffset = ((i << DS::PointerShift)+SCNH.getOffset()) %CN->getSize();
952 // Perform the recursive merging. Make sure to create a temporary NH,
953 // because the Link can disappear in the process of recursive merging.
954 DSNodeHandle Tmp = CN->getLink(MergeOffset);
/// mergeCallSite - Merge the nodes reachable from the specified src call
/// site into the nodes reachable from DestCS.
void ReachabilityCloner::mergeCallSite(const DSCallSite &DestCS,
                                       const DSCallSite &SrcCS) {
  // Merge the return value handles of the two call sites.
  merge(DestCS.getRetVal(), SrcCS.getRetVal());
  // Only merge as many pointer arguments as BOTH call sites actually carry;
  // extra arguments on either side are left alone.
  unsigned MinArgs = DestCS.getNumPtrArgs();
  if (SrcCS.getNumPtrArgs() < MinArgs) MinArgs = SrcCS.getNumPtrArgs();

  for (unsigned a = 0; a != MinArgs; ++a)
    merge(DestCS.getPtrArg(a), SrcCS.getPtrArg(a));
973 //===----------------------------------------------------------------------===//
974 // DSCallSite Implementation
975 //===----------------------------------------------------------------------===//
// Define here to avoid including iOther.h and BasicBlock.h in DSGraph.h
//
// getCaller - Return the function containing this call site, by walking
// call instruction -> parent basic block -> parent function.
Function &DSCallSite::getCaller() const {
  return *Site.getInstruction()->getParent()->getParent();
// InitNH - Initialize node handle NH to the cloned equivalent of Src, using
// the supplied ReachabilityCloner to (lazily) clone the reachable subgraph.
void DSCallSite::InitNH(DSNodeHandle &NH, const DSNodeHandle &Src,
                        ReachabilityCloner &RC) {
  NH = RC.getClonedNH(Src);
987 //===----------------------------------------------------------------------===//
988 // DSGraph Implementation
989 //===----------------------------------------------------------------------===//
/// getFunctionNames - Return a space separated list of the name of the
/// functions in this graph (if any)
std::string DSGraph::getFunctionNames() const {
  switch (getReturnNodes().size()) {
  case 0: return "Globals graph";   // No functions: this is the globals graph
  case 1: return getReturnNodes().begin()->first->getName();
  // NOTE(review): the default case header (declaring the local 'Return'
  // string) is not visible in this chunk -- presumably a std::string built
  // from all function names; confirm against the full file.
  for (DSGraph::ReturnNodesTy::const_iterator I = getReturnNodes().begin();
       I != getReturnNodes().end(); ++I)
    Return += I->first->getName() + " ";
  Return.erase(Return.end()-1, Return.end());   // Remove last space character
// Copy constructor - clone all nodes, call sites, and the scalar map of G
// into this fresh graph.  The new graph has no globals graph attached.
DSGraph::DSGraph(const DSGraph &G) : GlobalsGraph(0), TD(G.TD) {
  PrintAuxCalls = false;
  // NOTE(review): 'NodeMap' is presumably a local NodeMapTy declared on an
  // elided line; it is only needed during cloning and discarded afterwards.
  cloneInto(G, ScalarMap, ReturnNodes, NodeMap);
// Copy constructor variant that additionally returns the old-node to
// new-node mapping to the caller through NodeMap.
DSGraph::DSGraph(const DSGraph &G, NodeMapTy &NodeMap)
  : GlobalsGraph(0), TD(G.TD) {
  PrintAuxCalls = false;
  cloneInto(G, ScalarMap, ReturnNodes, NodeMap);
DSGraph::~DSGraph() {
  // Clear the containers holding DSNodeHandles first, so their referrer
  // counts on the nodes are dropped before the nodes are destroyed.
  FunctionCalls.clear();
  AuxFunctionCalls.clear();
  InlinedGlobals.clear();
  ReturnNodes.clear();

  // Drop all intra-node references, so that assertions don't fail...
  std::for_each(Nodes.begin(), Nodes.end(),
                std::mem_fun(&DSNode::dropAllReferences));

  // Delete all of the nodes themselves...
  std::for_each(Nodes.begin(), Nodes.end(), deleter<DSNode>);
// dump - Allow inspection of graph in a debugger.  Simply prints the graph
// to standard error via print().
void DSGraph::dump() const { print(std::cerr); }
/// remapLinks - Change all of the Links in the current node according to the
/// specified mapping.  Links whose target is not in the map are left alone.
void DSNode::remapLinks(DSGraph::NodeMapTy &OldNodeMap) {
  for (unsigned i = 0, e = Links.size(); i != e; ++i)
    if (DSNode *N = Links[i].getNode()) {
      DSGraph::NodeMapTy::const_iterator ONMI = OldNodeMap.find(N);
      if (ONMI != OldNodeMap.end()) {
        // Retarget the link at the mapped node, folding the mapping's offset
        // into the link's existing offset.
        Links[i].setNode(ONMI->second.getNode());
        Links[i].setOffset(Links[i].getOffset()+ONMI->second.getOffset());
/// updateFromGlobalGraph - This function rematerializes global nodes and
/// nodes reachable from them from the globals graph into the current graph.
/// It uses the vector InlinedGlobals to avoid cloning and merging globals that
/// are already up-to-date in the current graph.  In practice, in the TD pass,
/// this is likely to be a large fraction of the live global nodes in each
/// function (since most live nodes are likely to have been brought up-to-date
/// in at _some_ caller or callee).
void DSGraph::updateFromGlobalGraph() {
  TIME_REGION(X, "updateFromGlobalGraph");
  ReachabilityCloner RC(*this, *GlobalsGraph, 0);

  // Clone the non-up-to-date global nodes into this graph.
  for (DSScalarMap::global_iterator I = getScalarMap().global_begin(),
         E = getScalarMap().global_end(); I != E; ++I)
    if (InlinedGlobals.count(*I) == 0) { // GNode is not up-to-date
      // Only merge if the globals graph actually has a node for this global.
      DSScalarMap::iterator It = GlobalsGraph->ScalarMap.find(*I);
      if (It != GlobalsGraph->ScalarMap.end())
        RC.merge(getNodeForValue(*I), It->second);
/// cloneInto - Clone the specified DSGraph into the current graph.  The
/// translated ScalarMap for the old function is filled into the OldValMap
/// member, and the translated ReturnNodes map is returned into ReturnNodes.
/// The old-node to new-node correspondence is returned in OldNodeMap.
/// The CloneFlags member controls various aspects of the cloning process.
void DSGraph::cloneInto(const DSGraph &G, DSScalarMap &OldValMap,
                        ReturnNodesTy &OldReturnNodes, NodeMapTy &OldNodeMap,
                        unsigned CloneFlags) {
  TIME_REGION(X, "cloneInto");
  assert(OldNodeMap.empty() && "Returned OldNodeMap should be empty!");
  assert(&G != this && "Cannot clone graph into itself!");

  unsigned FN = Nodes.size();           // First new node...

  // Duplicate all of the nodes, populating the node map...
  Nodes.reserve(FN+G.Nodes.size());

  // Remove alloca or mod/ref bits as specified...
  unsigned BitsToClear = ((CloneFlags & StripAllocaBit)? DSNode::AllocaNode : 0)
    | ((CloneFlags & StripModRefBits)? (DSNode::Modified | DSNode::Read) : 0)
    | ((CloneFlags & StripIncompleteBit)? DSNode::Incomplete : 0);
  BitsToClear |= DSNode::DEAD;  // Clear dead flag...
  for (unsigned i = 0, e = G.Nodes.size(); i != e; ++i) {
    DSNode *Old = G.Nodes[i];
    DSNode *New = new DSNode(*Old, this);   // Copy node into this graph
    New->maskNodeTypes(~BitsToClear);       // Strip the requested flags
    OldNodeMap[Old] = New;

  Timer::addPeakMemoryMeasurement();

  // Rewrite the links in the new nodes to point into the current graph now.
  // Note that this only remaps nodes added by this call, starting at FN.
  for (unsigned i = FN, e = Nodes.size(); i != e; ++i)
    Nodes[i]->remapLinks(OldNodeMap);

  // Copy the scalar map... merging all of the global nodes...
  for (DSScalarMap::const_iterator I = G.ScalarMap.begin(),
         E = G.ScalarMap.end(); I != E; ++I) {
    DSNodeHandle &MappedNode = OldNodeMap[I->second.getNode()];
    DSNodeHandle &H = OldValMap[I->first];
    // Compose the source handle's offset with the mapping's offset.
    H.mergeWith(DSNodeHandle(MappedNode.getNode(),
                             I->second.getOffset()+MappedNode.getOffset()));

    // If this is a global, add the global to this fn or merge if already exists
    if (GlobalValue* GV = dyn_cast<GlobalValue>(I->first)) {
      ScalarMap[GV].mergeWith(H);
      if (CloneFlags & DSGraph::UpdateInlinedGlobals)
        InlinedGlobals.insert(GV);

  if (!(CloneFlags & DontCloneCallNodes)) {
    // Copy the function calls list...
    unsigned FC = FunctionCalls.size();  // FirstCall
    FunctionCalls.reserve(FC+G.FunctionCalls.size());
    for (unsigned i = 0, ei = G.FunctionCalls.size(); i != ei; ++i)
      FunctionCalls.push_back(DSCallSite(G.FunctionCalls[i], OldNodeMap));

  if (!(CloneFlags & DontCloneAuxCallNodes)) {
    // Copy the auxiliary function calls list...
    unsigned FC = AuxFunctionCalls.size();  // FirstCall
    AuxFunctionCalls.reserve(FC+G.AuxFunctionCalls.size());
    for (unsigned i = 0, ei = G.AuxFunctionCalls.size(); i != ei; ++i)
      AuxFunctionCalls.push_back(DSCallSite(G.AuxFunctionCalls[i], OldNodeMap));

  // Map the return node pointers over...
  for (ReturnNodesTy::const_iterator I = G.getReturnNodes().begin(),
         E = G.getReturnNodes().end(); I != E; ++I) {
    const DSNodeHandle &Ret = I->second;
    DSNodeHandle &MappedRet = OldNodeMap[Ret.getNode()];
    OldReturnNodes.insert(std::make_pair(I->first,
                          DSNodeHandle(MappedRet.getNode(),
                                       MappedRet.getOffset()+Ret.getOffset())));
/// mergeInGraph - The method is used for merging graphs together.  If the
/// argument graph is not *this, it makes a clone of the specified graph, then
/// merges the nodes specified in the call site with the formal arguments in the
/// graph.
void DSGraph::mergeInGraph(const DSCallSite &CS, Function &F,
                           const DSGraph &Graph, unsigned CloneFlags) {
  TIME_REGION(X, "mergeInGraph");

  // If this is not a recursive call, clone the graph into this graph...
  if (&Graph != this) {
    // Clone the callee's graph into the current graph, keeping track of where
    // scalars in the old graph _used_ to point, and of the new nodes matching
    // nodes of the old graph.
    ReachabilityCloner RC(*this, Graph, CloneFlags);

    // Set up argument bindings
    Function::aiterator AI = F.abegin();
    for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i, ++AI) {
      // Advance the argument iterator to the first pointer argument...
      while (AI != F.aend() && !isPointerType(AI->getType())) {
#ifndef NDEBUG  // FIXME: We should merge vararg arguments!
        if (AI == F.aend() && !F.getFunctionType()->isVarArg())
          std::cerr << "Bad call to Function: " << F.getName() << "\n";
      if (AI == F.aend()) break;

      // Add the link from the argument scalar to the provided value.
      RC.merge(CS.getPtrArg(i), Graph.getNodeForValue(AI));

    // Map the return node pointer over.
    if (CS.getRetVal().getNode())
      RC.merge(CS.getRetVal(), Graph.getReturnNodeFor(F));

    // If requested, copy the calls or aux-calls lists.
    if (!(CloneFlags & DontCloneCallNodes)) {
      // Copy the function calls list...
      FunctionCalls.reserve(FunctionCalls.size()+Graph.FunctionCalls.size());
      for (unsigned i = 0, ei = Graph.FunctionCalls.size(); i != ei; ++i)
        FunctionCalls.push_back(DSCallSite(Graph.FunctionCalls[i], RC));

    if (!(CloneFlags & DontCloneAuxCallNodes)) {
      // Copy the auxiliary function calls list...
      AuxFunctionCalls.reserve(AuxFunctionCalls.size()+
                               Graph.AuxFunctionCalls.size());
      for (unsigned i = 0, ei = Graph.AuxFunctionCalls.size(); i != ei; ++i)
        AuxFunctionCalls.push_back(DSCallSite(Graph.AuxFunctionCalls[i], RC));

    // If the user requested it, add the nodes that we need to clone to the
    // RootNodes set.
    if (!EnableDSNodeGlobalRootsHack)
      for (unsigned i = 0, e = Graph.Nodes.size(); i != e; ++i)
        if (!Graph.Nodes[i]->getGlobals().empty())
          RC.getClonedNH(Graph.Nodes[i]);

    // NOTE(review): the code below presumably sits in the else branch of the
    // '&Graph != this' test above (the brace line is elided from this chunk):
    // merging a recursive call directly in the current graph, no cloning.
    DSNodeHandle RetVal = getReturnNodeFor(F);

    // Merge the return value with the return value of the context...
    RetVal.mergeWith(CS.getRetVal());

    // Resolve all of the function arguments...
    Function::aiterator AI = F.abegin();

    for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i, ++AI) {
      // Advance the argument iterator to the first pointer argument...
      while (AI != F.aend() && !isPointerType(AI->getType())) {
#ifndef NDEBUG // FIXME: We should merge varargs arguments!!
        if (AI == F.aend() && !F.getFunctionType()->isVarArg())
          std::cerr << "Bad call to Function: " << F.getName() << "\n";
      if (AI == F.aend()) break;

      // Add the link from the argument scalar to the provided value
      DSNodeHandle &NH = getNodeForValue(AI);
      assert(NH.getNode() && "Pointer argument without scalarmap entry?");
      NH.mergeWith(CS.getPtrArg(i));
/// getCallSiteForArguments - Get the arguments and return value bindings for
/// the specified function in the current graph.  Only pointer-typed formal
/// arguments get a node handle; non-pointer arguments are skipped.
DSCallSite DSGraph::getCallSiteForArguments(Function &F) const {
  std::vector<DSNodeHandle> Args;

  for (Function::aiterator I = F.abegin(), E = F.aend(); I != E; ++I)
    if (isPointerType(I->getType()))
      Args.push_back(getNodeForValue(I));

  // Build a call site with an empty llvm::CallSite (no actual instruction).
  return DSCallSite(CallSite(), getReturnNodeFor(F), &F, Args);
// markIncompleteNodes - Mark the specified node as having contents that are not
// known with the current analysis we have performed.  Because a node makes all
// of the nodes it can reach incomplete if the node itself is incomplete, we
// must recursively traverse the data structure graph, marking all reachable
// nodes as incomplete.
static void markIncompleteNode(DSNode *N) {
  // Stop recursion if no node, or if node already marked...
  if (N == 0 || N->isIncomplete()) return;

  // Actually mark the node
  N->setIncompleteMarker();

  // Recursively process children... walking one pointer-sized field at a time.
  for (unsigned i = 0, e = N->getSize(); i < e; i += DS::PointerSize)
    if (DSNode *DSN = N->getLink(i).getNode())
      markIncompleteNode(DSN);
// markIncomplete - Mark everything reachable from the given call site as
// incomplete: its return value and all of its pointer arguments.
static void markIncomplete(DSCallSite &Call) {
  // Then the return value is certainly incomplete!
  markIncompleteNode(Call.getRetVal().getNode());

  // All objects pointed to by function arguments are incomplete!
  for (unsigned i = 0, e = Call.getNumPtrArgs(); i != e; ++i)
    markIncompleteNode(Call.getPtrArg(i).getNode());
// markIncompleteNodes - Traverse the graph, identifying nodes that may be
// modified by other functions that have not been resolved yet.  This marks
// nodes that are reachable through three sources of "unknownness":
//
//   Global Variables, Function Calls, and Incoming Arguments
//
// For any node that may have unknown components (because something outside the
// scope of current analysis may have modified it), the 'Incomplete' flag is
// added to the NodeType.
void DSGraph::markIncompleteNodes(unsigned Flags) {
  // Mark any incoming arguments as incomplete...
  if (Flags & DSGraph::MarkFormalArgs)
    for (ReturnNodesTy::iterator FI = ReturnNodes.begin(), E =ReturnNodes.end();
      Function &F = *FI->first;
      // main's arguments are fully known, so they stay complete.
      if (F.getName() != "main")
        for (Function::aiterator I = F.abegin(), E = F.aend(); I != E; ++I)
          if (isPointerType(I->getType()))
            markIncompleteNode(getNodeForValue(I).getNode());

  // Mark stuff passed into functions calls as being incomplete...
  if (!shouldPrintAuxCalls())
    for (unsigned i = 0, e = FunctionCalls.size(); i != e; ++i)
      markIncomplete(FunctionCalls[i]);
  // Otherwise (aux calls are the active list), mark the aux call sites.
  for (unsigned i = 0, e = AuxFunctionCalls.size(); i != e; ++i)
    markIncomplete(AuxFunctionCalls[i]);

  // Mark all global nodes as incomplete...
  if ((Flags & DSGraph::IgnoreGlobals) == 0)
    for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
      if (Nodes[i]->isGlobalNode() && Nodes[i]->getNumLinks())
        markIncompleteNode(Nodes[i]);
// killIfUselessEdge - If Edge is the sole reference to a node that carries no
// useful information (no flags except possibly Incomplete, void type, not
// collapsed), clear the edge so the pointed-to node becomes reclaimable.
static inline void killIfUselessEdge(DSNodeHandle &Edge) {
  if (DSNode *N = Edge.getNode())  // Is there an edge?
    if (N->getNumReferrers() == 1)  // Does it point to a lonely node?
      // No interesting info?
      if ((N->getNodeFlags() & ~DSNode::Incomplete) == 0 &&
          N->getType() == Type::VoidTy && !N->isNodeCompletelyFolded())
        Edge.setNode(0);  // Kill the edge!
// nodeContainsExternalFunction - Return whether any global held by node N is
// externally defined (a declaration without a body in this module).
static inline bool nodeContainsExternalFunction(const DSNode *N) {
  const std::vector<GlobalValue*> &Globals = N->getGlobals();
  for (unsigned i = 0, e = Globals.size(); i != e; ++i)
    if (Globals[i]->isExternal())
// removeIdenticalCalls - Sort the call-site list and merge/remove duplicate
// and useless call sites, updating the NumCallNodesMerged statistic.
static void removeIdenticalCalls(std::vector<DSCallSite> &Calls) {
  // Remove trivially identical function calls
  unsigned NumFns = Calls.size();
  std::sort(Calls.begin(), Calls.end());  // Sort by callee as primary key!

  // Scan the call list cleaning it up as necessary...
  DSNode   *LastCalleeNode = 0;
  Function *LastCalleeFunc = 0;
  unsigned NumDuplicateCalls = 0;
  bool LastCalleeContainsExternalFunction = false;
  for (unsigned i = 0; i != Calls.size(); ++i) {
    DSCallSite &CS = Calls[i];

    // If the Callee is a useless edge, this must be an unreachable call site,
    // so just drop it by swapping it to the end of the list.
    if (CS.isIndirectCall() && CS.getCalleeNode()->getNumReferrers() == 1 &&
        CS.getCalleeNode()->getNodeFlags() == 0) {  // No useful info?
      std::cerr << "WARNING: Useless call site found??\n";
      CS.swap(Calls.back());

    // If the return value or any arguments point to a void node with no
    // information at all in it, and the call node is the only node to point
    // to it, remove the edge to the node (killing the node).
    killIfUselessEdge(CS.getRetVal());
    for (unsigned a = 0, e = CS.getNumPtrArgs(); a != e; ++a)
      killIfUselessEdge(CS.getPtrArg(a));

    // If this call site calls the same function as the last call site, and if
    // the function pointer contains an external function, this node will
    // never be resolved.  Merge the arguments of the call node because no
    // information will be lost.
    if ((CS.isDirectCall()   && CS.getCalleeFunc() == LastCalleeFunc) ||
        (CS.isIndirectCall() && CS.getCalleeNode() == LastCalleeNode)) {
      ++NumDuplicateCalls;
      if (NumDuplicateCalls == 1) {
        // Lazily compute "external-ness" of the callee on the first duplicate.
        LastCalleeContainsExternalFunction =
          nodeContainsExternalFunction(LastCalleeNode);
        LastCalleeContainsExternalFunction = LastCalleeFunc->isExternal();

      // It is not clear why, but enabling this code makes DSA really
      // sensitive to node forwarding.  Basically, with this enabled, DSA
      // performs different number of inlinings based on which nodes are
      // forwarding or not.  This is clearly a problem, so this code is
      // disabled until this can be resolved.
      if (LastCalleeContainsExternalFunction
          // This should be more than enough context sensitivity!
          // FIXME: Evaluate how many times this is tripped!
          NumDuplicateCalls > 20
        DSCallSite &OCS = Calls[i-1];

        // The node will now be eliminated as a duplicate!  Merge the shorter
        // argument list into the longer one so nothing is lost.
        if (CS.getNumPtrArgs() < OCS.getNumPtrArgs())
        else if (CS.getNumPtrArgs() > OCS.getNumPtrArgs())
      // Not a duplicate of the previous site: remember this callee.
      if (CS.isDirectCall()) {
        LastCalleeFunc = CS.getCalleeFunc();
        LastCalleeNode = CS.getCalleeNode();
      NumDuplicateCalls = 0;
  // Remove adjacent identical call sites left after sorting and merging.
  Calls.erase(std::unique(Calls.begin(), Calls.end()), Calls.end());

  // Track the number of call nodes merged away...
  NumCallNodesMerged += NumFns-Calls.size();

  DEBUG(if (NumFns != Calls.size())
        std::cerr << "Merged " << (NumFns-Calls.size()) << " call nodes.\n";);
// removeTriviallyDeadNodes - After the graph has been constructed, this method
// removes all unreachable nodes that are created because they got merged with
// other nodes in the graph.  These nodes will all be trivially unreachable, so
// we don't have to perform any non-trivial analysis here.
void DSGraph::removeTriviallyDeadNodes() {
  TIME_REGION(X, "removeTriviallyDeadNodes");
  removeIdenticalCalls(FunctionCalls);
  removeIdenticalCalls(AuxFunctionCalls);

  // Loop over all of the nodes in the graph, calling getNode on each field.
  // This will cause all nodes to update their forwarding edges, causing
  // forwarded nodes to be delete-able.
  for (unsigned i = 0, e = Nodes.size(); i != e; ++i) {
    DSNode *N = Nodes[i];
    for (unsigned l = 0, e = N->getNumLinks(); l != e; ++l)
      N->getLink(l*N->getPointerSize()).getNode();

  // NOTE: This code is disabled.  Though it should, in theory, allow us to
  // remove more nodes down below, the scan of the scalar map is incredibly
  // expensive for certain programs (with large SCCs).  In the future, if we can
  // make the scalar map scan more efficient, then we can reenable this.
  { TIME_REGION(X, "removeTriviallyDeadNodes:scalarmap");

  // Likewise, forward any edges from the scalar nodes.  While we are at it,
  // clean house a bit.
  for (DSScalarMap::iterator I = ScalarMap.begin(),E = ScalarMap.end();I != E;){
    // getNode() collapses any forwarding; the handle updates in place.
    I->second.getNode();

  // The globals graph is the one graph that has no parent GlobalsGraph link.
  bool isGlobalsGraph = !GlobalsGraph;

  for (unsigned i = 0; i != Nodes.size(); ++i) {
    DSNode *Node = Nodes[i];

    // Do not remove *any* global nodes in the globals graph.
    // This is a special case because such nodes may not have I, M, R flags set.
    if (Node->isGlobalNode() && isGlobalsGraph)

    if (Node->isComplete() && !Node->isModified() && !Node->isRead()) {
      // This is a useless node if it has no mod/ref info (checked above),
      // outgoing edges (which it cannot, as it is not modified in this
      // context), and it has no incoming edges.  If it is a global node it may
      // have all of these properties and still have incoming edges, due to the
      // scalar map, so we check those now.
      if (Node->getNumReferrers() == Node->getGlobals().size()) {
        const std::vector<GlobalValue*> &Globals = Node->getGlobals();

        // Loop through and make sure all of the globals are referring directly
        // to the node...
        for (unsigned j = 0, e = Globals.size(); j != e; ++j) {
          DSNode *N = getNodeForValue(Globals[j]).getNode();
          assert(N == Node && "ScalarMap doesn't match globals list!");

        // Make sure NumReferrers still agrees, if so, the node is truly dead.
        if (Node->getNumReferrers() == Globals.size()) {
          for (unsigned j = 0, e = Globals.size(); j != e; ++j)
            ScalarMap.erase(Globals[j]);
          Node->makeNodeDead();

    if (Node->getNodeFlags() == 0 && Node->hasNoReferrers()) {
      // This node is dead!  Swap-with-back keeps the vector compact in O(1).
      delete Node;                        // Free memory...
      Nodes[i--] = Nodes.back();
      Nodes.pop_back();                   // Remove from node list...
/// markReachableNodes - This method recursively traverses the specified
/// DSNodes, marking any nodes which are reachable.  All reachable nodes it adds
/// to the set, which allows it to only traverse visited nodes once.
void DSNode::markReachableNodes(hash_set<DSNode*> &ReachableNodes) {
  // NOTE: relies on being callable through a null 'this' pointer (checked
  // below) -- callers invoke it directly on possibly-null getNode() results.
  if (this == 0) return;
  assert(getForwardNode() == 0 && "Cannot mark a forwarded node!");
  if (ReachableNodes.insert(this).second)  // Is newly reachable?
    for (unsigned i = 0, e = getSize(); i < e; i += DS::PointerSize)
      getLink(i).getNode()->markReachableNodes(ReachableNodes);
// markReachableNodes - Add all nodes reachable from this call site (return
// value, indirect callee node, and pointer arguments) to the given set.
void DSCallSite::markReachableNodes(hash_set<DSNode*> &Nodes) {
  getRetVal().getNode()->markReachableNodes(Nodes);
  if (isIndirectCall()) getCalleeNode()->markReachableNodes(Nodes);

  for (unsigned i = 0, e = getNumPtrArgs(); i != e; ++i)
    getPtrArg(i).getNode()->markReachableNodes(Nodes);
// CanReachAliveNodes - Simple graph walker that recursively traverses the graph
// looking for a node that is marked alive.  If an alive node is found, return
// true, otherwise return false.  If an alive node is reachable, this node is
// marked as alive...
static bool CanReachAliveNodes(DSNode *N, hash_set<DSNode*> &Alive,
                               hash_set<DSNode*> &Visited,
                               bool IgnoreGlobals) {
  if (N == 0) return false;
  assert(N->getForwardNode() == 0 && "Cannot mark a forwarded node!");

  // If this is a global node, it will end up in the globals graph anyway, so we
  // don't need to worry about it.
  if (IgnoreGlobals && N->isGlobalNode()) return false;

  // If we know that this node is alive, return so!
  if (Alive.count(N)) return true;

  // Otherwise, we don't think the node is alive yet, check for infinite
  // recursion.
  if (Visited.count(N)) return false;  // Found a cycle
  Visited.insert(N);   // No recursion, insert into Visited...

  // If any outgoing edge reaches an alive node, this node is alive too.
  for (unsigned i = 0, e = N->getSize(); i < e; i += DS::PointerSize)
    if (CanReachAliveNodes(N->getLink(i).getNode(), Alive, Visited,
      N->markReachableNodes(Alive);
// CallSiteUsesAliveArgs - Return true if the specified call site can reach any
// alive nodes through its return value, indirect callee node, or any of its
// pointer arguments.
static bool CallSiteUsesAliveArgs(DSCallSite &CS, hash_set<DSNode*> &Alive,
                                  hash_set<DSNode*> &Visited,
                                  bool IgnoreGlobals) {
  if (CanReachAliveNodes(CS.getRetVal().getNode(), Alive, Visited,
  if (CS.isIndirectCall() &&
      CanReachAliveNodes(CS.getCalleeNode(), Alive, Visited, IgnoreGlobals))
  for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i)
    if (CanReachAliveNodes(CS.getPtrArg(i).getNode(), Alive, Visited,
// removeDeadNodes - Use a more powerful reachability analysis to eliminate
// subgraphs that are unreachable.  This often occurs because the data
// structure doesn't "escape" into it's caller, and thus should be eliminated
// from the caller's graph entirely.  This is only appropriate to use when
// inlining graphs.
void DSGraph::removeDeadNodes(unsigned Flags) {
  DEBUG(AssertGraphOK(); if (GlobalsGraph) GlobalsGraph->AssertGraphOK());

  // Reduce the amount of work we have to do... remove dummy nodes left over by
  // merging...
  removeTriviallyDeadNodes();

  TIME_REGION(X, "removeDeadNodes");

  // FIXME: Merge non-trivially identical call nodes...

  // Alive - a set that holds all nodes found to be reachable/alive.
  hash_set<DSNode*> Alive;
  std::vector<std::pair<Value*, DSNode*> > GlobalNodes;

  // Copy and merge all information about globals to the GlobalsGraph if this is
  // not a final pass (where unreachable globals are removed).
  //
  // Strip all alloca bits since the current function is only for the BU pass.
  // Strip all incomplete bits since they are short-lived properties and they
  // will be correctly computed when rematerializing nodes into the functions.
  ReachabilityCloner GGCloner(*GlobalsGraph, *this, DSGraph::StripAllocaBit |
                              DSGraph::StripIncompleteBit);

  // Mark all nodes reachable by (non-global) scalar nodes as alive...
  { TIME_REGION(Y, "removeDeadNodes:scalarscan");
  for (DSScalarMap::iterator I = ScalarMap.begin(), E = ScalarMap.end(); I !=E;)
    if (isa<GlobalValue>(I->first)) {  // Keep track of global nodes
      assert(I->second.getNode() && "Null global node?");
      assert(I->second.getNode()->isGlobalNode() && "Should be a global node!");
      GlobalNodes.push_back(std::make_pair(I->first, I->second.getNode()));

      // Make sure that all globals are cloned over as roots.
      if (!(Flags & DSGraph::RemoveUnreachableGlobals)) {
        DSGraph::ScalarMapTy::iterator SMI =
          GlobalsGraph->getScalarMap().find(I->first);
        if (SMI != GlobalsGraph->getScalarMap().end())
          GGCloner.merge(SMI->second, I->second);
          GGCloner.getClonedNH(I->second);
      // Non-global scalar:
      DSNode *N = I->second.getNode();

      // Check to see if this is a worthless node generated for non-pointer
      // values, such as integers.  Consider an addition of long types: A+B.
      // Assuming we can track all uses of the value in this context, and it is
      // NOT used as a pointer, we can delete the node.  We will be able to
      // detect this situation if the node pointed to ONLY has Unknown bit set
      // in the node.  In this case, the node is not incomplete, does not point
      // to any other nodes (no mod/ref bits set), and is therefore
      // uninteresting for data structure analysis.  If we run across one of
      // these, prune the scalar pointing to it.
      //
      if (N->getNodeFlags() == DSNode::UnknownNode && !isa<Argument>(I->first))
        ScalarMap.erase(I++);
        N->markReachableNodes(Alive);

  // The return values are alive as well.
  for (ReturnNodesTy::iterator I = ReturnNodes.begin(), E = ReturnNodes.end();
    I->second.getNode()->markReachableNodes(Alive);

  // Mark any nodes reachable by primary calls as alive...
  for (unsigned i = 0, e = FunctionCalls.size(); i != e; ++i)
    FunctionCalls[i].markReachableNodes(Alive);

  // Now find globals and aux call nodes that are already live or reach a live
  // value (which makes them live in turn), and continue till no more are found.
  //
  hash_set<DSNode*> Visited;
  std::vector<unsigned char> AuxFCallsAlive(AuxFunctionCalls.size());

  // If any global node points to a non-global that is "alive", the global is
  // "alive" as well...  Remove it from the GlobalNodes list so we only have
  // unreachable globals in the list.
  //
  if (!(Flags & DSGraph::RemoveUnreachableGlobals))
    for (unsigned i = 0; i != GlobalNodes.size(); ++i)
      if (CanReachAliveNodes(GlobalNodes[i].second, Alive, Visited,
                             Flags & DSGraph::RemoveUnreachableGlobals)) {
        std::swap(GlobalNodes[i--], GlobalNodes.back());  // Move to end to...
        GlobalNodes.pop_back();                           // erase efficiently

  // Mark only unresolvable call nodes for moving to the GlobalsGraph since
  // call nodes that get resolved will be difficult to remove from that graph.
  // The final unresolved call nodes must be handled specially at the end of
  // the BU pass (i.e., in main or other roots of the call graph).
  for (unsigned i = 0, e = AuxFunctionCalls.size(); i != e; ++i)
    if (!AuxFCallsAlive[i] &&
        (AuxFunctionCalls[i].isIndirectCall()
         || CallSiteUsesAliveArgs(AuxFunctionCalls[i], Alive, Visited,
                                  Flags & DSGraph::RemoveUnreachableGlobals))) {
      AuxFunctionCalls[i].markReachableNodes(Alive);
      AuxFCallsAlive[i] = true;

  // Move dead aux function calls to the end of the list
  unsigned CurIdx = 0;
  for (unsigned i = 0, e = AuxFunctionCalls.size(); i != e; ++i)
    if (AuxFCallsAlive[i])
      AuxFunctionCalls[CurIdx++].swap(AuxFunctionCalls[i]);

  // Copy and merge all global nodes and dead aux call nodes into the
  // GlobalsGraph, and all nodes reachable from those nodes
  //
  if (!(Flags & DSGraph::RemoveUnreachableGlobals)) {
    // Copy the unreachable call nodes to the globals graph, updating their
    // target pointers using the GGCloner
    for (unsigned i = CurIdx, e = AuxFunctionCalls.size(); i != e; ++i)
      GlobalsGraph->AuxFunctionCalls.push_back(DSCallSite(AuxFunctionCalls[i],

  // Crop all the useless ones out...
  AuxFunctionCalls.erase(AuxFunctionCalls.begin()+CurIdx,
                         AuxFunctionCalls.end());

  // We are finally done with the GGCloner so we can clear it and then get rid
  // of unused nodes in the GlobalsGraph produced by merging.
  if (GGCloner.clonedNode()) {
    GlobalsGraph->removeTriviallyDeadNodes();

  // At this point, any nodes which are visited, but not alive, are nodes
  // which can be removed.  Loop over all nodes, eliminating completely
  // unreachable nodes.
  //
  std::vector<DSNode*> DeadNodes;
  DeadNodes.reserve(Nodes.size());
  for (unsigned i = 0; i != Nodes.size(); ++i)
    if (!Alive.count(Nodes[i])) {
      DSNode *N = Nodes[i];
      Nodes[i--] = Nodes.back();  // move node to end of vector
      Nodes.pop_back();           // Erase node from alive list.
      DeadNodes.push_back(N);
      N->dropAllReferences();
      assert(Nodes[i]->getForwardNode() == 0 && "Alive forwarded node?");

  // Remove all unreachable globals from the ScalarMap.
  // If flag RemoveUnreachableGlobals is set, GlobalNodes has only dead nodes.
  // In either case, the dead nodes will not be in the set Alive.
  for (unsigned i = 0, e = GlobalNodes.size(); i != e; ++i)
    if (!Alive.count(GlobalNodes[i].second))
      ScalarMap.erase(GlobalNodes[i].first);
      assert((Flags & DSGraph::RemoveUnreachableGlobals) && "non-dead global");

  // Delete all dead nodes now since their referrer counts are zero.
  for (unsigned i = 0, e = DeadNodes.size(); i != e; ++i)
    delete DeadNodes[i];

  DEBUG(AssertGraphOK(); GlobalsGraph->AssertGraphOK());
// AssertGraphOK - Run sanity checks over the whole graph: each node's own
// invariants, scalar map consistency, and that all call-site nodes belong to
// this graph.  Intended to run inside DEBUG builds only.
void DSGraph::AssertGraphOK() const {
  for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
    Nodes[i]->assertOK();

  for (ScalarMapTy::const_iterator I = ScalarMap.begin(),
         E = ScalarMap.end(); I != E; ++I) {
    assert(I->second.getNode() && "Null node in scalarmap!");
    AssertNodeInGraph(I->second.getNode());
    if (GlobalValue *GV = dyn_cast<GlobalValue>(I->first)) {
      assert(I->second.getNode()->isGlobalNode() &&
             "Global points to node, but node isn't global?");
      AssertNodeContainsGlobal(I->second.getNode(), GV);
  AssertCallNodesInGraph();
  AssertAuxCallNodesInGraph();
/// computeNodeMapping - Given roots in two different DSGraphs, traverse the
/// nodes reachable from the two graphs, computing the mapping of nodes from
/// the first to the second graph.  The mapping records both the target node
/// and the relative offset between the two handles.
void DSGraph::computeNodeMapping(const DSNodeHandle &NH1,
                                 const DSNodeHandle &NH2, NodeMapTy &NodeMap,
                                 bool StrictChecking) {
  DSNode *N1 = NH1.getNode(), *N2 = NH2.getNode();
  if (N1 == 0 || N2 == 0) return;

  DSNodeHandle &Entry = NodeMap[N1];
  if (Entry.getNode()) {
    // Termination of recursion!
    assert(!StrictChecking ||
           (Entry.getNode() == N2 &&
            Entry.getOffset() == (NH2.getOffset()-NH1.getOffset())) &&
           "Inconsistent mapping detected!");
  // Record the relative offset between the two handles in the mapping.
  Entry.setOffset(NH2.getOffset()-NH1.getOffset());

  // Loop over all of the fields that N1 and N2 have in common, recursively
  // mapping the edges together now.
  int N2Idx = NH2.getOffset()-NH1.getOffset();
  unsigned N2Size = N2->getSize();
  for (unsigned i = 0, e = N1->getSize(); i < e; i += DS::PointerSize)
    if (unsigned(N2Idx)+i < N2Size)   // Skip fields that fall outside N2
      computeNodeMapping(N1->getLink(i), N2->getLink(N2Idx+i), NodeMap);