1 package Analysis.Disjoint;
3 import Analysis.CallGraph.*;
4 import Analysis.Liveness;
5 import Analysis.ArrayReferencees;
8 import IR.Tree.Modifiers;
13 public class DisjointAnalysis {
16 // data from the compiler
18 public CallGraph callGraph;
19 public Liveness liveness;
20 public ArrayReferencees arrayReferencees;
21 public TypeUtil typeUtil;
22 public int allocationDepth;
25 // used to identify HeapRegionNode objects
26 // A unique ID equates an object in one
27 // ownership graph with an object in another
28 // graph that logically represents the same
30 // start at 10 and increment to reserve some
31 // IDs for special purposes
32 static protected int uniqueIDcount = 10;
35 // An out-of-scope method created by the
36 // analysis that has no parameters, and
37 // appears to allocate the command line
38 // arguments, then invoke the source code's
39 // main method. The purpose of this is to
40 // provide the analysis with an explicit
41 // top-level context with no parameters
42 protected MethodDescriptor mdAnalysisEntry;
43 protected FlatMethod fmAnalysisEntry;
45 // main method defined by source program
46 protected MethodDescriptor mdSourceEntry;
48 // the set of task and/or method descriptors
49 // reachable in call graph
50 protected Set<Descriptor>
53 // current descriptors to visit in fixed-point
54 // interprocedural analysis, prioritized by
55 // dependency in the call graph
56 protected PriorityQueue<DescriptorQWrapper>
59 // a duplication of the above structure, but
60 // for efficient testing of inclusion
61 protected HashSet<Descriptor>
62 descriptorsToVisitSet;
64 // storage for priorities (doesn't make sense)
65 // to add it to the Descriptor class, just in
67 protected Hashtable<Descriptor, Integer>
68 mapDescriptorToPriority;
71 // maps a descriptor to its current partial result
72 // from the intraprocedural fixed-point analysis--
73 // when the interprocedural analysis settles, this
74 // mapping will have the final results for each
76 protected Hashtable<Descriptor, ReachGraph>
77 mapDescriptorToCompleteReachGraph;
79 // maps a descriptor to its known dependents: namely
80 // methods or tasks that call the descriptor's method
81 // AND are part of this analysis (reachable from main)
82 protected Hashtable< Descriptor, Set<Descriptor> >
83 mapDescriptorToSetDependents;
85 // maps each flat new to one analysis abstraction
86 // allocate site object, these exist outside reach graphs
87 protected Hashtable<FlatNew, AllocSite>
88 mapFlatNewToAllocSite;
90 // maps intergraph heap region IDs to intergraph
91 // allocation sites that created them, a redundant
92 // structure for efficiency in some operations
93 protected Hashtable<Integer, AllocSite>
96 // maps a method to its initial heap model (IHM) that
97 // is the set of reachability graphs from every caller
98 // site, all merged together. The reason that we keep
99 // them separate is that any one call site's contribution
100 // to the IHM may change along the path to the fixed point
101 protected Hashtable< Descriptor, Hashtable< FlatCall, ReachGraph > >
102 mapDescriptorToIHMcontributions;
104 // TODO -- CHANGE EDGE/TYPE/FIELD storage!
105 public static final String arrayElementFieldName = "___element_";
// maps an array element type to the synthetic FieldDescriptor used to
// model element references (populated lazily by getArrayField below)
106 static protected Hashtable<TypeDescriptor, FieldDescriptor>
109 // for controlling DOT file output
110 protected boolean writeFinalDOTs;
111 protected boolean writeAllIncrementalDOTs;
113 // supporting DOT output--when we want to write every
114 // partial method result, keep a tally for generating
116 protected Hashtable<Descriptor, Integer>
117 mapDescriptorToNumUpdates;
120 // allocate various structures that are not local
121 // to a single class method--should be done once
// before any method analysis begins; descriptorsToAnalyze is
// populated later by analyzeMethods()
122 protected void allocateStructures() {
123 descriptorsToAnalyze = new HashSet<Descriptor>();
125 mapDescriptorToCompleteReachGraph =
126 new Hashtable<Descriptor, ReachGraph>();
128 mapDescriptorToNumUpdates =
129 new Hashtable<Descriptor, Integer>();
131 mapDescriptorToSetDependents =
132 new Hashtable< Descriptor, Set<Descriptor> >();
134 mapFlatNewToAllocSite =
135 new Hashtable<FlatNew, AllocSite>();
137 mapDescriptorToIHMcontributions =
138 new Hashtable< Descriptor, Hashtable< FlatCall, ReachGraph > >();
140 mapHrnIdToAllocSite =
141 new Hashtable<Integer, AllocSite>();
143 mapTypeToArrayField =
144 new Hashtable <TypeDescriptor, FieldDescriptor>();
146 descriptorsToVisitQ =
147 new PriorityQueue<DescriptorQWrapper>();
149 descriptorsToVisitSet =
150 new HashSet<Descriptor>();
152 mapDescriptorToPriority =
153 new Hashtable<Descriptor, Integer>();
158 // this analysis generates a disjoint reachability
159 // graph for every reachable method in the program
// constructor simply delegates all setup to init()
160 public DisjointAnalysis( State s,
165 ) throws java.io.IOException {
166 init( s, tu, cg, l, ar );
// Stores compiler inputs, configures ReachGraph statics, allocates the
// analysis-wide structures, runs the interprocedural fixed point, times
// it, and then emits DOT/alias output as requested by compiler flags.
169 protected void init( State state,
173 ArrayReferencees arrayReferencees
174 ) throws java.io.IOException {
177 this.typeUtil = typeUtil;
178 this.callGraph = callGraph;
179 this.liveness = liveness;
180 this.arrayReferencees = arrayReferencees;
181 this.allocationDepth = state.DISJOINTALLOCDEPTH;
// final DOTs only when not writing every incremental result
182 this.writeFinalDOTs = state.DISJOINTWRITEDOTS && !state.DISJOINTWRITEALL;
183 this.writeAllIncrementalDOTs = state.DISJOINTWRITEDOTS && state.DISJOINTWRITEALL;
185 // set some static configuration for ReachGraphs
186 ReachGraph.allocationDepth = allocationDepth;
187 ReachGraph.typeUtil = typeUtil;
189 allocateStructures();
191 double timeStartAnalysis = (double) System.nanoTime();
193 // start interprocedural fixed-point computation
196 double timeEndAnalysis = (double) System.nanoTime();
// convert nanoseconds to seconds
197 double dt = (timeEndAnalysis - timeStartAnalysis)/(Math.pow( 10.0, 9.0 ) );
198 String treport = String.format( "The reachability analysis took %.3f sec.", dt );
199 String justtime = String.format( "%.2f", dt );
200 System.out.println( treport );
202 if( writeFinalDOTs && !writeAllIncrementalDOTs ) {
206 if( state.DISJOINTWRITEIHMS ) {
210 if( state.DISJOINTALIASFILE != null ) {
212 // not supporting tasks yet...
215 writeAllAliasesJava( aliasFile,
218 state.DISJOINTALIASTAB,
226 // fixed-point computation over the call graph--when a
227 // method's callees are updated, it must be reanalyzed
228 protected void analyzeMethods() throws java.io.IOException {
231 // This analysis does not support Bamboo at the moment,
232 // but if it does in the future we would initialize the
233 // set of descriptors to analyze as the program-reachable
234 // tasks and the methods callable by them. For Java,
235 // just methods reachable from the main method.
236 System.out.println( "No Bamboo support yet..." );
240 // add all methods transitively reachable from the
241 // source's main to set for analysis
242 mdSourceEntry = typeUtil.getMain();
243 descriptorsToAnalyze.add( mdSourceEntry );
244 descriptorsToAnalyze.addAll(
245 callGraph.getAllMethods( mdSourceEntry )
248 // fabricate an empty calling context that will call
249 // the source's main, but call graph doesn't know
250 // about it, so explicitly add it
251 makeAnalysisEntryMethod( mdSourceEntry );
252 descriptorsToAnalyze.add( mdAnalysisEntry );
255 // topologically sort according to the call graph so
256 // leaf calls are ordered first, smarter analysis order
257 LinkedList<Descriptor> sortedDescriptors =
258 topologicalSort( descriptorsToAnalyze );
260 // add sorted descriptors to priority queue, and duplicate
261 // the queue as a set for efficiently testing whether some
262 // method is marked for analysis
264 Iterator<Descriptor> dItr = sortedDescriptors.iterator();
265 while( dItr.hasNext() ) {
266 Descriptor d = dItr.next();
// priority p reflects topological position so leaf methods pop first
267 mapDescriptorToPriority.put( d, new Integer( p ) );
268 descriptorsToVisitQ.add( new DescriptorQWrapper( p, d ) );
269 descriptorsToVisitSet.add( d );
273 // analyze methods from the priority queue until it is empty
274 while( !descriptorsToVisitQ.isEmpty() ) {
275 Descriptor d = descriptorsToVisitQ.poll().getDescriptor();
276 assert descriptorsToVisitSet.contains( d );
277 descriptorsToVisitSet.remove( d );
279 // because the task or method descriptor just extracted
280 // was in the "to visit" set it either hasn't been analyzed
281 // yet, or some method that it depends on has been
282 // updated. Recompute a complete reachability graph for
283 // this task/method and compare it to any previous result.
284 // If there is a change detected, add any methods/tasks
285 // that depend on this one to the "to visit" set.
287 System.out.println( "Analyzing " + d );
289 ReachGraph rg = analyzeMethod( d );
290 ReachGraph rgPrev = getPartial( d );
292 if( !rg.equals( rgPrev ) ) {
295 // results for d changed, so enqueue dependents
296 // of d for further analysis
297 Iterator<Descriptor> depsItr = getDependents( d ).iterator();
298 while( depsItr.hasNext() ) {
299 Descriptor dNext = depsItr.next();
// Intraprocedural fixed point over one method's flat IR: iterates a
// work set of flat nodes, merging predecessor graphs and applying the
// per-node transfer function until no node's graph changes, then merges
// all return-node graphs into the method's conservative summary.
307 protected ReachGraph analyzeMethod( Descriptor d )
308 throws java.io.IOException {
310 // get the flat code for this descriptor
312 if( d == mdAnalysisEntry ) {
// the fabricated entry method has no compiler-generated flat code
313 fm = fmAnalysisEntry;
315 fm = state.getMethodFlat( d );
318 // intraprocedural work set
319 Set<FlatNode> flatNodesToVisit = new HashSet<FlatNode>();
320 flatNodesToVisit.add( fm );
322 // mapping of current partial results
323 Hashtable<FlatNode, ReachGraph> mapFlatNodeToReachGraph =
324 new Hashtable<FlatNode, ReachGraph>();
326 // the set of return nodes partial results that will be combined as
327 // the final, conservative approximation of the entire method
328 HashSet<FlatReturnNode> setReturns = new HashSet<FlatReturnNode>();
330 while( !flatNodesToVisit.isEmpty() ) {
331 FlatNode fn = (FlatNode) flatNodesToVisit.iterator().next();
332 flatNodesToVisit.remove( fn );
334 //System.out.println( "  "+fn );
336 // effect transfer function defined by this node,
337 // then compare it to the old graph at this node
338 // to see if anything was updated.
340 ReachGraph rg = new ReachGraph();
342 // start by merging all node's parents' graphs
343 for( int i = 0; i < fn.numPrev(); ++i ) {
344 FlatNode pn = fn.getPrev( i );
345 if( mapFlatNodeToReachGraph.containsKey( pn ) ) {
346 ReachGraph rgParent = mapFlatNodeToReachGraph.get( pn );
347 rg.merge( rgParent );
351 // modify rg with appropriate transfer function
352 analyzeFlatNode( d, fm, fn, setReturns, rg );
355 if( takeDebugSnapshots &&
356 d.getSymbol().equals( descSymbolDebug ) ) {
357 debugSnapshot( rg, fn );
361 // if the results of the new graph are different from
362 // the current graph at this node, replace the graph
363 // with the update and enqueue the children
364 ReachGraph rgPrev = mapFlatNodeToReachGraph.get( fn );
365 if( !rg.equals( rgPrev ) ) {
366 mapFlatNodeToReachGraph.put( fn, rg );
368 for( int i = 0; i < fn.numNext(); i++ ) {
369 FlatNode nn = fn.getNext( i );
370 flatNodesToVisit.add( nn );
375 // end by merging all return nodes into a complete
376 // ownership graph that represents all possible heap
377 // states after the flat method returns
378 ReachGraph completeGraph = new ReachGraph();
380 assert !setReturns.isEmpty();
381 Iterator retItr = setReturns.iterator();
382 while( retItr.hasNext() ) {
383 FlatReturnNode frn = (FlatReturnNode) retItr.next();
385 assert mapFlatNodeToReachGraph.containsKey( frn );
386 ReachGraph rgRet = mapFlatNodeToReachGraph.get( frn );
388 completeGraph.merge( rgRet );
391 return completeGraph;
// Transfer function dispatcher: mutates 'rg' in place according to the
// kind of flat node 'fn'; return nodes are collected into setRetNodes
// so the caller can merge them into the method summary.
396 analyzeFlatNode( Descriptor d,
397 FlatMethod fmContaining,
399 HashSet<FlatReturnNode> setRetNodes,
401 ) throws java.io.IOException {
404 // any variables that are no longer live should be
405 // nullified in the graph to reduce edges
406 //rg.nullifyDeadVars( liveness.getLiveInTemps( fmContaining, fn ) );
413 // use node type to decide what transfer function
414 // to apply to the reachability graph
415 switch( fn.kind() ) {
417 case FKind.FlatMethod: {
418 // construct this method's initial heap model (IHM)
419 // since we're working on the FlatMethod, we know
420 // the incoming ReachGraph 'rg' is empty
422 Hashtable<FlatCall, ReachGraph> heapsFromCallers =
423 getIHMcontributions( d );
425 Set entrySet = heapsFromCallers.entrySet();
426 Iterator itr = entrySet.iterator();
427 while( itr.hasNext() ) {
428 Map.Entry me = (Map.Entry) itr.next();
429 FlatCall fc = (FlatCall) me.getKey();
430 ReachGraph rgContrib = (ReachGraph) me.getValue();
432 assert fc.getMethod().equals( d );
434 // some call sites are in same method context though,
435 // and all of them should be merged together first,
436 // then heaps from different contexts should be merged
437 // THIS ASSUMES DIFFERENT CONTEXTS NEED SPECIAL CONSIDERATION!
438 // such as, do allocation sites need to be aged?
440 rg.merge_diffMethodContext( rgContrib );
444 case FKind.FlatOpNode:
445 FlatOpNode fon = (FlatOpNode) fn;
// only plain assignment affects reachability; other ops are no-ops here
446 if( fon.getOp().getOp() == Operation.ASSIGN ) {
449 rg.assignTempXEqualToTempY( lhs, rhs );
453 case FKind.FlatCastNode:
454 FlatCastNode fcn = (FlatCastNode) fn;
458 TypeDescriptor td = fcn.getType();
461 rg.assignTempXEqualToCastedTempY( lhs, rhs, td );
464 case FKind.FlatFieldNode:
465 FlatFieldNode ffn = (FlatFieldNode) fn;
468 fld = ffn.getField();
// immutable non-array fields cannot introduce heap aliasing, skip them
469 if( !fld.getType().isImmutable() || fld.getType().isArray() ) {
470 rg.assignTempXEqualToTempYFieldF( lhs, rhs, fld );
474 case FKind.FlatSetFieldNode:
475 FlatSetFieldNode fsfn = (FlatSetFieldNode) fn;
477 fld = fsfn.getField();
479 if( !fld.getType().isImmutable() || fld.getType().isArray() ) {
480 rg.assignTempXFieldFEqualToTempY( lhs, fld, rhs );
484 case FKind.FlatElementNode:
485 FlatElementNode fen = (FlatElementNode) fn;
488 if( !lhs.getType().isImmutable() || lhs.getType().isArray() ) {
490 assert rhs.getType() != null;
491 assert rhs.getType().isArray();
// array reads are modeled as reads of a synthetic element field
493 TypeDescriptor tdElement = rhs.getType().dereference();
494 FieldDescriptor fdElement = getArrayField( tdElement );
496 rg.assignTempXEqualToTempYFieldF( lhs, rhs, fdElement );
500 case FKind.FlatSetElementNode:
501 FlatSetElementNode fsen = (FlatSetElementNode) fn;
503 if( arrayReferencees.doesNotCreateNewReaching( fsen ) ) {
504 // skip this node if it cannot create new reachability paths
510 if( !rhs.getType().isImmutable() || rhs.getType().isArray() ) {
512 assert lhs.getType() != null;
513 assert lhs.getType().isArray();
515 TypeDescriptor tdElement = lhs.getType().dereference();
516 FieldDescriptor fdElement = getArrayField( tdElement );
518 rg.assignTempXFieldFEqualToTempY( lhs, fdElement, rhs );
// allocation: bind the lhs temp to this FlatNew's abstract alloc site
523 FlatNew fnn = (FlatNew) fn;
525 if( !lhs.getType().isImmutable() || lhs.getType().isArray() ) {
526 AllocSite as = getAllocSiteFromFlatNewPRIVATE( fnn );
527 rg.assignTempEqualToNewAlloc( lhs, as );
531 case FKind.FlatCall: {
532 FlatCall fc = (FlatCall) fn;
533 MethodDescriptor mdCallee = fc.getMethod();
534 FlatMethod fmCallee = state.getMethodFlat( mdCallee );
536 // the transformation for a call site should update the
537 // current heap abstraction with any effects from the callee,
538 // or if the method is virtual, the effects from any possible
539 // callees, so find the set of callees...
540 Set<MethodDescriptor> setPossibleCallees =
541 new HashSet<MethodDescriptor>();
543 if( mdCallee.isStatic() ) {
544 setPossibleCallees.add( mdCallee );
// virtual dispatch: every override reachable via the receiver's type
546 TypeDescriptor typeDesc = fc.getThis().getType();
547 setPossibleCallees.addAll( callGraph.getMethods( mdCallee, typeDesc ) );
550 ReachGraph rgMergeOfEffects = new ReachGraph();
552 Iterator<MethodDescriptor> mdItr = setPossibleCallees.iterator();
553 while( mdItr.hasNext() ) {
554 MethodDescriptor mdPossible = mdItr.next();
555 FlatMethod fmPossible = state.getMethodFlat( mdPossible );
557 addDependent( mdPossible, // callee
560 // don't alter the working graph (rg) until we compute a
561 // result for every possible callee, merge them all together,
562 // then set rg to that
563 ReachGraph rgCopy = new ReachGraph();
566 ReachGraph rgEffect = getPartial( mdPossible );
568 if( rgEffect == null ) {
569 // if this method has never been analyzed just schedule it
570 // for analysis and skip over this call site for now
571 enqueue( mdPossible );
573 rgCopy.resolveMethodCall( fc, fmPossible, rgEffect );
576 rgMergeOfEffects.merge( rgCopy );
580 // now we're done, but BEFORE we set rg = rgMergeOfEffects:
581 // calculate the heap this call site can reach--note this is
582 // not used for the current call site transform, we are
583 // grabbing this heap model for future analysis of the callees,
584 // so if different results emerge we will return to this site
585 ReachGraph heapForThisCall_old =
586 getIHMcontribution( mdCallee, fc );
588 ReachGraph heapForThisCall_cur = rg.makeCalleeView( fc,
591 if( !heapForThisCall_cur.equals( heapForThisCall_old ) ) {
592 // if heap at call site changed, update the contribution,
593 // and reschedule the callee for analysis
594 addIHMcontribution( mdCallee, fc, heapForThisCall_cur );
599 // now that we've taken care of building heap models for
600 // callee analysis, finish this transformation
601 rg = rgMergeOfEffects;
605 case FKind.FlatReturnNode:
606 FlatReturnNode frn = (FlatReturnNode) fn;
607 rhs = frn.getReturnTemp();
608 if( rhs != null && !rhs.getType().isImmutable() ) {
609 rg.assignReturnEqualToTemp( rhs );
611 setRetNodes.add( frn );
617 // dead variables were removed before the above transfer function
618 // was applied, so eliminate heap regions and edges that are no
619 // longer part of the abstractly-live heap graph, and sweep up
620 // and reachability effects that are altered by the reduction
621 //rg.abstractGarbageCollect();
625 // at this point rg should be the correct update
626 // by an above transfer function, or untouched if
627 // the flat node type doesn't affect the heap
631 // this method should generate integers strictly greater than zero!
632 // special "shadow" regions are made from a heap region by negating
// the ID; NOTE(review): the increment of uniqueIDcount presumably
// happens just before the return -- confirm in full source
634 static public Integer generateUniqueHeapRegionNodeID() {
636 return new Integer( uniqueIDcount );
// Lazily builds and caches the synthetic public field descriptor that
// models element references for arrays of the given element type.
641 static public FieldDescriptor getArrayField( TypeDescriptor tdElement ) {
642 FieldDescriptor fdElement = mapTypeToArrayField.get( tdElement );
643 if( fdElement == null ) {
644 fdElement = new FieldDescriptor( new Modifiers( Modifiers.PUBLIC ),
646 arrayElementFieldName,
649 mapTypeToArrayField.put( tdElement, fdElement );
// Writes one DOT graph per analyzed descriptor from the settled
// interprocedural results; I/O errors are deliberately ignored because
// graph output is best-effort diagnostic only.
656 private void writeFinalGraphs() {
657 Set entrySet = mapDescriptorToCompleteReachGraph.entrySet();
658 Iterator itr = entrySet.iterator();
659 while( itr.hasNext() ) {
660 Map.Entry me = (Map.Entry) itr.next();
661 Descriptor d = (Descriptor) me.getKey();
662 ReachGraph rg = (ReachGraph) me.getValue();
665 rg.writeGraph( "COMPLETE"+d,
666 true, // write labels (variables)
667 true, // selectively hide intermediate temp vars
668 true, // prune unreachable heap regions
669 false, // show back edges to confirm graph validity
670 true, // hide subset reachability states
671 true ); // hide edge taints
672 } catch( IOException e ) {}
// Writes one DOT graph per (method, call site) initial-heap-model
// contribution; like writeFinalGraphs, output is best-effort and I/O
// errors are swallowed.
676 private void writeFinalIHMs() {
677 Iterator d2IHMsItr = mapDescriptorToIHMcontributions.entrySet().iterator();
678 while( d2IHMsItr.hasNext() ) {
679 Map.Entry me1 = (Map.Entry) d2IHMsItr.next();
680 Descriptor d = (Descriptor) me1.getKey();
681 Hashtable<FlatCall, ReachGraph> IHMs = (Hashtable<FlatCall, ReachGraph>) me1.getValue();
683 Iterator fc2rgItr = IHMs.entrySet().iterator();
684 while( fc2rgItr.hasNext() ) {
685 Map.Entry me2 = (Map.Entry) fc2rgItr.next();
686 FlatCall fc = (FlatCall) me2.getKey();
687 ReachGraph rg = (ReachGraph) me2.getValue();
690 rg.writeGraph( "IHMPARTFOR"+d+"FROM"+fc,
691 true, // write labels (variables)
692 false, // selectively hide intermediate temp vars
693 false, // prune unreachable heap regions
694 false, // show back edges to confirm graph validity
695 true, // hide subset reachability states
696 true ); // hide edge taints
697 } catch( IOException e ) {}
705 // return just the allocation site associated with one FlatNew node
// (creates and registers it on first request; subsequent calls return
// the cached site from mapFlatNewToAllocSite)
706 protected AllocSite getAllocSiteFromFlatNewPRIVATE( FlatNew fnew ) {
708 if( !mapFlatNewToAllocSite.containsKey( fnew ) ) {
710 (AllocSite) Canonical.makeCanonical( new AllocSite( allocationDepth,
716 // the newest nodes are single objects
717 for( int i = 0; i < allocationDepth; ++i ) {
718 Integer id = generateUniqueHeapRegionNodeID();
719 as.setIthOldest( i, id );
// redundant reverse index so regions can be traced back to their site
720 mapHrnIdToAllocSite.put( id, as );
723 // the oldest node is a summary node
724 as.setSummary( generateUniqueHeapRegionNodeID() );
726 // and one special node is older than all
727 // nodes and shadow nodes for the site
728 as.setSiteSummary( generateUniqueHeapRegionNodeID() );
730 mapFlatNewToAllocSite.put( fnew, as );
733 return mapFlatNewToAllocSite.get( fnew );
738 // return all allocation sites in the method (there is one allocation
739 // site per FlatNew node in a method); computed lazily and cached
740 protected HashSet<AllocSite> getAllocSiteSet(Descriptor d) {
741 if( !mapDescriptorToAllocSiteSet.containsKey(d) ) {
742 buildAllocSiteSet(d);
745 return mapDescriptorToAllocSiteSet.get(d);
// Walks the method's flat IR graph (forward, breadth-agnostic work-set
// traversal) collecting the AllocSite for every FlatNew encountered,
// then caches the set in mapDescriptorToAllocSiteSet.
751 protected void buildAllocSiteSet(Descriptor d) {
752 HashSet<AllocSite> s = new HashSet<AllocSite>();
754 FlatMethod fm = state.getMethodFlat( d );
756 // visit every node in this FlatMethod's IR graph
757 // and make a set of the allocation sites from the
758 // FlatNew node's visited
759 HashSet<FlatNode> visited = new HashSet<FlatNode>();
760 HashSet<FlatNode> toVisit = new HashSet<FlatNode>();
763 while( !toVisit.isEmpty() ) {
764 FlatNode n = toVisit.iterator().next();
766 if( n instanceof FlatNew ) {
767 s.add(getAllocSiteFromFlatNewPRIVATE( (FlatNew) n) );
773 for( int i = 0; i < n.numNext(); ++i ) {
774 FlatNode child = n.getNext( i );
775 if( !visited.contains( child ) ) {
776 toVisit.add( child );
781 mapDescriptorToAllocSiteSet.put( d, s );
// Transitively searches dIn and everything it calls for allocation
// sites that carry a disjoint-analysis flag (non-null analysis id).
785 protected HashSet<AllocSite> getFlaggedAllocSites(Descriptor dIn) {
787 HashSet<AllocSite> out = new HashSet<AllocSite>();
788 HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
789 HashSet<Descriptor> visited = new HashSet<Descriptor>();
793 while( !toVisit.isEmpty() ) {
794 Descriptor d = toVisit.iterator().next();
798 HashSet<AllocSite> asSet = getAllocSiteSet(d);
799 Iterator asItr = asSet.iterator();
800 while( asItr.hasNext() ) {
801 AllocSite as = (AllocSite) asItr.next();
// flagged == explicitly named for this analysis
802 if( as.getDisjointAnalysisId() != null ) {
807 // enqueue callees of this method to be searched for
808 // allocation sites also
809 Set callees = callGraph.getCalleeSet(d);
810 if( callees != null ) {
811 Iterator methItr = callees.iterator();
812 while( methItr.hasNext() ) {
813 MethodDescriptor md = (MethodDescriptor) methItr.next();
815 if( !visited.contains(md) ) {
// Like getFlaggedAllocSites, but rooted at a task: collects allocation
// sites whose allocated class carries flags, over the task and every
// method transitively callable from it.
827 protected HashSet<AllocSite>
828 getFlaggedAllocSitesReachableFromTaskPRIVATE(TaskDescriptor td) {
830 HashSet<AllocSite> asSetTotal = new HashSet<AllocSite>();
831 HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
832 HashSet<Descriptor> visited = new HashSet<Descriptor>();
836 // traverse this task and all methods reachable from this task
837 while( !toVisit.isEmpty() ) {
838 Descriptor d = toVisit.iterator().next();
842 HashSet<AllocSite> asSet = getAllocSiteSet(d);
843 Iterator asItr = asSet.iterator();
844 while( asItr.hasNext() ) {
845 AllocSite as = (AllocSite) asItr.next();
846 TypeDescriptor typed = as.getType();
847 if( typed != null ) {
848 ClassDescriptor cd = typed.getClassDesc();
// only classes with flags participate in task-level disjointness
849 if( cd != null && cd.hasFlags() ) {
855 // enqueue callees of this method to be searched for
856 // allocation sites also
857 Set callees = callGraph.getCalleeSet(d);
858 if( callees != null ) {
859 Iterator methItr = callees.iterator();
860 while( methItr.hasNext() ) {
861 MethodDescriptor md = (MethodDescriptor) methItr.next();
863 if( !visited.contains(md) ) {
// Builds a human-readable histogram: for each count of unique alias
// contexts, how many methods had that count.
877 protected String computeAliasContextHistogram() {
879 Hashtable<Integer, Integer> mapNumContexts2NumDesc =
880 new Hashtable<Integer, Integer>();
882 Iterator itr = mapDescriptorToAllDescriptors.entrySet().iterator();
883 while( itr.hasNext() ) {
884 Map.Entry me = (Map.Entry) itr.next();
885 HashSet<Descriptor> s = (HashSet<Descriptor>) me.getValue();
887 Integer i = mapNumContexts2NumDesc.get( s.size() );
889 i = new Integer( 0 );
// autoboxing: i + 1 increments the tally for this context count
891 mapNumContexts2NumDesc.put( s.size(), i + 1 );
897 itr = mapNumContexts2NumDesc.entrySet().iterator();
898 while( itr.hasNext() ) {
899 Map.Entry me = (Map.Entry) itr.next();
900 Integer c0 = (Integer) me.getKey();
901 Integer d0 = (Integer) me.getValue();
903 s += String.format( "%4d methods had %4d unique alias contexts.\n", d0, c0 );
// NOTE(review): "analayzed" typo in the output string below; fixing it
// changes program output, so it is left as-is in this doc pass
906 s += String.format( "\n%4d total methods analayzed.\n", total );
// number of descriptors (methods/tasks) included in this analysis
911 protected int numMethodsAnalyzed() {
912 return descriptorsToAnalyze.size();
919 // Take in source entry which is the program's compiled entry and
920 // create a new analysis entry, a method that takes no parameters
921 // and appears to allocate the command line arguments and call the
922 // source entry with them. The purpose of this analysis entry is
923 // to provide a top-level method context with no parameters left.
// Side effects: sets this.mdAnalysisEntry and this.fmAnalysisEntry.
924 protected void makeAnalysisEntryMethod( MethodDescriptor mdSourceEntry ) {
926 Modifiers mods = new Modifiers();
927 mods.addModifier( Modifiers.PUBLIC );
928 mods.addModifier( Modifiers.STATIC );
930 TypeDescriptor returnType =
931 new TypeDescriptor( TypeDescriptor.VOID );
933 this.mdAnalysisEntry =
934 new MethodDescriptor( mods,
936 "analysisEntryMethod"
// fabricate a temp that appears to hold the command-line args array
939 TempDescriptor cmdLineArgs =
940 new TempDescriptor( "args",
941 mdSourceEntry.getParamType( 0 )
945 new FlatNew( mdSourceEntry.getParamType( 0 ),
950 TempDescriptor[] sourceEntryArgs = new TempDescriptor[1];
951 sourceEntryArgs[0] = cmdLineArgs;
954 new FlatCall( mdSourceEntry,
960 FlatReturnNode frn = new FlatReturnNode( null );
962 FlatExit fe = new FlatExit();
964 this.fmAnalysisEntry =
965 new FlatMethod( mdAnalysisEntry,
969 this.fmAnalysisEntry.addNext( fn );
// DFS-based topological sort of the descriptors over the call graph;
// dfsVisit prepends finished nodes, so leaves end up first in the list.
976 protected LinkedList<Descriptor> topologicalSort( Set<Descriptor> toSort ) {
978 Set       <Descriptor> discovered = new HashSet   <Descriptor>();
979 LinkedList<Descriptor> sorted     = new LinkedList<Descriptor>();
981 Iterator<Descriptor> itr = toSort.iterator();
982 while( itr.hasNext() ) {
983 Descriptor d = itr.next();
985 if( !discovered.contains( d ) ) {
986 dfsVisit( d, toSort, sorted, discovered );
993 // While we're doing DFS on call graph, remember
994 // dependencies for efficient queuing of methods
995 // during interprocedural analysis:
997 // a dependent of a method descriptor d for this analysis is:
998 // 1) a method or task that invokes d
999 // 2) in the descriptorsToAnalyze set
1000 protected void dfsVisit( Descriptor d,
1001 Set       <Descriptor> toSort,
1002 LinkedList<Descriptor> sorted,
1003 Set       <Descriptor> discovered ) {
1004 discovered.add( d );
1006 // only methods have callers, tasks never do
1007 if( d instanceof MethodDescriptor ) {
1009 MethodDescriptor md = (MethodDescriptor) d;
1011 // the call graph is not aware that we have a fabricated
1012 // analysis entry that calls the program source's entry
1013 if( md == mdSourceEntry ) {
1014 if( !discovered.contains( mdAnalysisEntry ) ) {
1015 addDependent( mdSourceEntry, // callee
1016 mdAnalysisEntry // caller
1018 dfsVisit( mdAnalysisEntry, toSort, sorted, discovered );
1022 // otherwise call graph guides DFS
1023 Iterator itr = callGraph.getCallerSet( md ).iterator();
1024 while( itr.hasNext() ) {
1025 Descriptor dCaller = (Descriptor) itr.next();
1027 // only consider callers in the original set to analyze
1028 if( !toSort.contains( dCaller ) ) {
1032 if( !discovered.contains( dCaller ) ) {
1033 addDependent( md, // callee
1037 dfsVisit( dCaller, toSort, sorted, discovered );
// prepend so the deepest (leaf-most) descriptors sort first
1042 sorted.addFirst( d );
// Schedules d for (re)analysis at its precomputed priority; the set
// mirror prevents duplicate entries in the priority queue.
1046 protected void enqueue( Descriptor d ) {
1047 if( !descriptorsToVisitSet.contains( d ) ) {
1048 Integer priority = mapDescriptorToPriority.get( d );
1049 descriptorsToVisitQ.add( new DescriptorQWrapper( priority,
1052 descriptorsToVisitSet.add( d );
// current partial (or final, once settled) reach graph for d; may be
// null if d has not been analyzed yet
1057 protected ReachGraph getPartial( Descriptor d ) {
1058 return mapDescriptorToCompleteReachGraph.get( d );
// Records the latest partial result for d and, when incremental DOT
// output is enabled, writes a uniquely-numbered snapshot graph.
1061 protected void setPartial( Descriptor d, ReachGraph rg ) {
1062 mapDescriptorToCompleteReachGraph.put( d, rg );
1064 // when the flag for writing out every partial
1065 // result is set, we should spit out the graph,
1066 // but in order to give it a unique name we need
1067 // to track how many partial results for this
1068 // descriptor we've already written out
1069 if( writeAllIncrementalDOTs ) {
1070 if( !mapDescriptorToNumUpdates.containsKey( d ) ) {
1071 mapDescriptorToNumUpdates.put( d, new Integer( 0 ) );
1073 Integer n = mapDescriptorToNumUpdates.get( d );
1076 rg.writeGraph( d+"COMPLETE"+String.format( "%05d", n ),
1077 true, // write labels (variables)
1078 true, // selectively hide intermediate temp vars
1079 true, // prune unreachable heap regions
1080 false, // show back edges to confirm graph validity
1081 false, // show parameter indices (unmaintained!)
1082 true, // hide subset reachability states
1083 true); // hide edge taints
1084 } catch( IOException e ) {}
1086 mapDescriptorToNumUpdates.put( d, n + 1 );
1091 // a dependent of a method descriptor d for this analysis is:
1092 // 1) a method or task that invokes d
1093 // 2) in the descriptorsToAnalyze set
1094 protected void addDependent( Descriptor callee, Descriptor caller ) {
1095 Set<Descriptor> deps = mapDescriptorToSetDependents.get( callee );
1096 if( deps == null ) {
// first dependent for this callee: create the set lazily
1097 deps = new HashSet<Descriptor>();
1100 mapDescriptorToSetDependents.put( callee, deps );
// Returns (never null) the set of callers that must be reanalyzed when
// callee's result changes; installs an empty set on first request.
1103 protected Set<Descriptor> getDependents( Descriptor callee ) {
1104 Set<Descriptor> deps = mapDescriptorToSetDependents.get( callee );
1105 if( deps == null ) {
1106 deps = new HashSet<Descriptor>();
1107 mapDescriptorToSetDependents.put( callee, deps );
// Returns (never null) the per-call-site IHM contribution table for d,
// creating and registering an empty table on first request.
1113 public Hashtable<FlatCall, ReachGraph> getIHMcontributions( Descriptor d ) {
1115 Hashtable<FlatCall, ReachGraph> heapsFromCallers =
1116 mapDescriptorToIHMcontributions.get( d );
1118 if( heapsFromCallers == null ) {
1119 heapsFromCallers = new Hashtable<FlatCall, ReachGraph>();
1120 mapDescriptorToIHMcontributions.put( d, heapsFromCallers );
1123 return heapsFromCallers;
// Single call site's IHM contribution for d; defaults to an empty
// ReachGraph (and stores it) if fc has contributed nothing yet.
1126 public ReachGraph getIHMcontribution( Descriptor d,
1129 Hashtable<FlatCall, ReachGraph> heapsFromCallers =
1130 getIHMcontributions( d );
1132 if( !heapsFromCallers.containsKey( fc ) ) {
1133 heapsFromCallers.put( fc, new ReachGraph() );
1136 return heapsFromCallers.get( fc );
// Replaces call site fc's contribution to d's initial heap model.
1139 public void addIHMcontribution( Descriptor d,
1143 Hashtable<FlatCall, ReachGraph> heapsFromCallers =
1144 getIHMcontributions( d );
1146 heapsFromCallers.put( fc, rg );
1153 // get successive captures of the analysis state
// developer knobs for debugSnapshot below; package-private on purpose
1154 boolean takeDebugSnapshots = false;
1155 String descSymbolDebug = "addSomething";
1156 boolean stopAfterCapture = true;
1158 // increments every visit to debugSnapshot, don't fiddle with it
1159 int debugCounter = 0;
1161 // the value of debugCounter to start reporting the debugCounter
1162 // to the screen to let user know what debug iteration we're at
1163 int numStartCountReport = 0;
1165 // the frequency of debugCounter values to print out, 0 no report
1166 int freqCountReport = 0;
1168 // the debugCounter value at which to start taking snapshots
1169 int iterStartCapture = 0;
1171 // the number of snapshots to take
1172 int numIterToCapture = 300;
// Developer hook called from analyzeMethod when takeDebugSnapshots is
// set: periodically reports the iteration counter and writes numbered
// snapshot graphs within the configured capture window.
1174 void debugSnapshot( ReachGraph rg, FlatNode fn ) {
1175 if( debugCounter > iterStartCapture + numIterToCapture ) {
1180 if( debugCounter > numStartCountReport &&
1181 freqCountReport > 0 &&
1182 debugCounter % freqCountReport == 0
1184 System.out.println( "    @@@ debug counter = "+
1187 if( debugCounter > iterStartCapture ) {
1188 System.out.println( "    @@@ capturing debug "+
1189 (debugCounter - iterStartCapture)+
1192 String.format( "snap%04d",
1193 debugCounter - iterStartCapture );
1195 graphName = graphName + fn;
1198 rg.writeGraph( graphName,
1199 true, // write labels (variables)
1200 true, // selectively hide intermediate temp vars
1201 false, // prune unreachable heap regions
1202 false, // show back edges to confirm graph validity
1203 true, // hide subset reachability states
1204 true );// hide edge taints
1205 } catch( Exception e ) {
1206 System.out.println( "Error writing debug capture." );
1211 if( debugCounter == iterStartCapture + numIterToCapture &&
1214 System.out.println( "Stopping analysis after debug captures." );