import Analysis.CallGraph.*;
import Analysis.Liveness;
import Analysis.ArrayReferencees;
+import Analysis.OoOJava.RBlockRelationAnalysis;
+import Analysis.OoOJava.RBlockStatusAnalysis;
import IR.*;
import IR.Flat.*;
import IR.Tree.Modifiers;
// should attempt to be deterministic
public static boolean determinismDesired;
+ // when we want to enforce determinism in the
+ // analysis we need to sort descriptors rather
+ // than toss them in efficient sets, use this
+ public static DescriptorComparator dComp =
+ new DescriptorComparator();
+
+
// data from the compiler
public State state;
public CallGraph callGraph;
public Liveness liveness;
public ArrayReferencees arrayReferencees;
+ public RBlockRelationAnalysis rblockRel;
+ public RBlockStatusAnalysis rblockStatus;
public TypeUtil typeUtil;
public int allocationDepth;
+
+ protected boolean doEffectsAnalysis = false;
+ protected EffectsAnalysis effectsAnalysis;
// data structure for public interface
- private Hashtable<Descriptor, HashSet<AllocSite> > mapDescriptorToAllocSiteSet;
+ private Hashtable< Descriptor, HashSet<AllocSite> >
+ mapDescriptorToAllocSiteSet;
// for public interface methods to warn that they
// current descriptors to visit in fixed-point
// interprocedural analysis, prioritized by
// dependency in the call graph
- protected Stack<DescriptorQWrapper>
+ protected Stack<Descriptor>
descriptorsToVisitStack;
protected PriorityQueue<DescriptorQWrapper>
descriptorsToVisitQ;
protected Set<Descriptor>
calleesToEnqueue;
-
// maps a descriptor to its current partial result
// from the intraprocedural fixed-point analysis--
// then the interprocedural analysis settles, this
protected Hashtable< Descriptor, Set<Descriptor> >
mapDescriptorToSetDependents;
+ // if the analysis client wants to flag allocation sites
+ // programmatically, it should provide a set of FlatNew
+ // statements--this may be null if unneeded
+ protected Set<FlatNew> sitesToFlag;
+
// maps each flat new to one analysis abstraction
// allocate site object, these exist outside reach graphs
protected Hashtable<FlatNew, AllocSite>
static protected Hashtable<FlatNode, ReachGraph> fn2rg =
new Hashtable<FlatNode, ReachGraph>();
+ private Hashtable<FlatCall, Descriptor> fc2enclosing;
+
// allocate various structures that are not local
// to a single class method--should be done once
- protected void allocateStructures() {
- descriptorsToAnalyze = new HashSet<Descriptor>();
+ protected void allocateStructures() {
+
+ if( determinismDesired ) {
+ // use an ordered set
+ descriptorsToAnalyze = new TreeSet<Descriptor>( dComp );
+ } else {
+ // otherwise use a speedy hashset
+ descriptorsToAnalyze = new HashSet<Descriptor>();
+ }
mapDescriptorToCompleteReachGraph =
new Hashtable<Descriptor, ReachGraph>();
mapBackEdgeToMonotone =
new Hashtable<FlatNode, ReachGraph>();
-
+
mapHrnIdToAllocSite =
new Hashtable<Integer, AllocSite>();
state.DISJOINTDVISITSTACKEESONTOP
) {
descriptorsToVisitStack =
- new Stack<DescriptorQWrapper>();
+ new Stack<Descriptor>();
}
if( state.DISJOINTDVISITPQUE ) {
mapDescriptorToReachGraph =
new Hashtable<Descriptor, ReachGraph>();
+
+ pm = new PointerMethod();
+
+ fc2enclosing = new Hashtable<FlatCall, Descriptor>();
}
TypeUtil tu,
CallGraph cg,
Liveness l,
- ArrayReferencees ar
- ) throws java.io.IOException {
- init( s, tu, cg, l, ar );
+ ArrayReferencees ar,
+ Set<FlatNew> sitesToFlag,
+ RBlockRelationAnalysis rra,
+ RBlockStatusAnalysis rsa
+ ) {
+ init( s, tu, cg, l, ar, sitesToFlag, rra, rsa );
}
protected void init( State state,
TypeUtil typeUtil,
CallGraph callGraph,
Liveness liveness,
- ArrayReferencees arrayReferencees
- ) throws java.io.IOException {
+ ArrayReferencees arrayReferencees,
+ Set<FlatNew> sitesToFlag,
+ RBlockRelationAnalysis rra,
+ RBlockStatusAnalysis rsa
+ ) {
analysisComplete = false;
- this.state = state;
- this.typeUtil = typeUtil;
- this.callGraph = callGraph;
- this.liveness = liveness;
- this.arrayReferencees = arrayReferencees;
+ this.state = state;
+ this.typeUtil = typeUtil;
+ this.callGraph = callGraph;
+ this.liveness = liveness;
+ this.arrayReferencees = arrayReferencees;
+ this.sitesToFlag = sitesToFlag;
+ this.rblockRel = rra;
+ this.rblockStatus = rsa;
+
+ if( rblockRel != null ) {
+ doEffectsAnalysis = true;
+ effectsAnalysis = new EffectsAnalysis();
+ }
+
this.allocationDepth = state.DISJOINTALLOCDEPTH;
this.releaseMode = state.DISJOINTRELEASEMODE;
this.determinismDesired = state.DISJOINTDETERMINISM;
this.stopAfterCapture = state.DISJOINTSNAPSTOPAFTER;
this.snapVisitCounter = 1; // count visits from 1 (user will write 1, means 1st visit)
this.snapNodeCounter = 0; // count nodes from 0
- this.pm=new PointerMethod();
assert
state.DISJOINTDVISITSTACK ||
ReachGraph.allocationDepth = allocationDepth;
ReachGraph.typeUtil = typeUtil;
- ReachGraph.debugCallSiteVisitsUntilExit = state.DISJOINTDEBUGCALLCOUNT;
+ ReachGraph.debugCallSiteVisitStartCapture
+ = state.DISJOINTDEBUGCALLVISITTOSTART;
+
+ ReachGraph.debugCallSiteNumVisitsToCapture
+ = state.DISJOINTDEBUGCALLNUMVISITS;
+
+ ReachGraph.debugCallSiteStopAfter
+ = state.DISJOINTDEBUGCALLSTOPAFTER;
+
+ ReachGraph.debugCallSiteVisitCounter
+ = 0; // count visits from 1, is incremented before first visit
+
+
allocateStructures();
double timeStartAnalysis = (double) System.nanoTime();
// start interprocedural fixed-point computation
- analyzeMethods();
+ try {
+ analyzeMethods();
+ } catch( IOException e ) {
+ throw new Error( "IO Exception while writing disjointness analysis output." );
+ }
+
analysisComplete=true;
double timeEndAnalysis = (double) System.nanoTime();
String justtime = String.format( "%.2f", dt );
System.out.println( treport );
- if( writeFinalDOTs && !writeAllIncrementalDOTs ) {
- writeFinalGraphs();
- }
+ try {
+ if( writeFinalDOTs && !writeAllIncrementalDOTs ) {
+ writeFinalGraphs();
+ }
- if( state.DISJOINTWRITEIHMS ) {
- writeFinalIHMs();
- }
+ if( state.DISJOINTWRITEIHMS ) {
+ writeFinalIHMs();
+ }
- if( state.DISJOINTALIASFILE != null ) {
- if( state.TASK ) {
- writeAllSharing(state.DISJOINTALIASFILE, treport, justtime, state.DISJOINTALIASTAB, state.lines);
- } else {
- writeAllSharingJava(state.DISJOINTALIASFILE,
- treport,
- justtime,
- state.DISJOINTALIASTAB,
- state.lines
- );
+ if( state.DISJOINTWRITEINITCONTEXTS ) {
+ writeInitialContexts();
}
+
+ if( state.DISJOINTALIASFILE != null ) {
+ if( state.TASK ) {
+ writeAllSharing(state.DISJOINTALIASFILE, treport, justtime, state.DISJOINTALIASTAB, state.lines);
+ } else {
+ writeAllSharingJava(state.DISJOINTALIASFILE,
+ treport,
+ justtime,
+ state.DISJOINTALIASTAB,
+ state.lines
+ );
+ }
+ }
+ } catch( IOException e ) {
+ throw new Error( "IO Exception while writing disjointness analysis output." );
+ }
+
+ if( doEffectsAnalysis ) {
+ effectsAnalysis.writeEffects( "effects.txt" );
}
}
// method's callees are updated, it must be reanalyzed
protected void analyzeMethods() throws java.io.IOException {
+ // task or non-task (java) mode determines what the roots
+ // of the call chain are, and establishes the set of methods
+ // reachable from the roots that will be analyzed
+
if( state.TASK ) {
- // This analysis does not support Bamboo at the moment,
- // but if it does in the future we would initialize the
- // set of descriptors to analyze as the program-reachable
- // tasks and the methods callable by them. For Java,
- // just methods reachable from the main method.
- System.out.println( "Bamboo..." );
- Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
+ System.out.println( "Bamboo mode..." );
- while (taskItr.hasNext()) {
- TaskDescriptor td = (TaskDescriptor) taskItr.next();
- if (!descriptorsToAnalyze.contains(td)) {
- descriptorsToAnalyze.add(td);
- descriptorsToAnalyze.addAll(callGraph.getAllMethods(td));
- }
+ Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
+ while( taskItr.hasNext() ) {
+ TaskDescriptor td = (TaskDescriptor) taskItr.next();
+ if( !descriptorsToAnalyze.contains( td ) ) {
+ // add all methods transitively reachable from the
+ // tasks as well
+ descriptorsToAnalyze.add( td );
+ descriptorsToAnalyze.addAll( callGraph.getAllMethods( td ) );
+ }
}
-
+
} else {
+ System.out.println( "Java mode..." );
+
// add all methods transitively reachable from the
// source's main to set for analysis
mdSourceEntry = typeUtil.getMain();
descriptorsToAnalyze.add( mdSourceEntry );
- descriptorsToAnalyze.addAll(
- callGraph.getAllMethods( mdSourceEntry )
- );
-
+ descriptorsToAnalyze.addAll( callGraph.getAllMethods( mdSourceEntry ) );
+
// fabricate an empty calling context that will call
// the source's main, but call graph doesn't know
// about it, so explicitly add it
descriptorsToAnalyze.add( mdAnalysisEntry );
}
- // topologically sort according to the call graph so
- // leaf calls are ordered first, smarter analysis order
- // CHANGED: order leaf calls last!!
- LinkedList<Descriptor> sortedDescriptors =
- topologicalSort( descriptorsToAnalyze );
-
- // add sorted descriptors to priority queue, and duplicate
- // the queue as a set for efficiently testing whether some
- // method is marked for analysis
- int p = 0;
- Iterator<Descriptor> dItr = sortedDescriptors.iterator();
- while( dItr.hasNext() ) {
- Descriptor d = dItr.next();
-
- mapDescriptorToPriority.put( d, new Integer( p ) );
- if( state.DISJOINTDVISITSTACK ||
- state.DISJOINTDVISITSTACKEESONTOP
- ) {
- descriptorsToVisitStack.add( new DescriptorQWrapper( p, d ) );
+ // now, depending on the interprocedural mode for visiting
+ // methods, set up the needed data structures
- } else if( state.DISJOINTDVISITPQUE ) {
+ if( state.DISJOINTDVISITPQUE ) {
+
+ // topologically sort according to the call graph so
+ // leaf calls are last, helps build contexts up first
+ LinkedList<Descriptor> sortedDescriptors =
+ topologicalSort( descriptorsToAnalyze );
+
+ // add sorted descriptors to priority queue, and duplicate
+ // the queue as a set for efficiently testing whether some
+ // method is marked for analysis
+ int p = 0;
+ Iterator<Descriptor> dItr;
+
+ // for the priority queue, give items at the head
+ // of the sorted list a low number (highest priority)
+ while( !sortedDescriptors.isEmpty() ) {
+ Descriptor d = sortedDescriptors.removeFirst();
+ mapDescriptorToPriority.put( d, new Integer( p ) );
descriptorsToVisitQ.add( new DescriptorQWrapper( p, d ) );
+ descriptorsToVisitSet.add( d );
+ ++p;
}
- descriptorsToVisitSet.add( d );
- ++p;
+ } else if( state.DISJOINTDVISITSTACK ||
+ state.DISJOINTDVISITSTACKEESONTOP
+ ) {
+ // if we're doing the stack scheme, just throw the root
+ // method or tasks on the stack
+ if( state.TASK ) {
+ Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
+ while( taskItr.hasNext() ) {
+ TaskDescriptor td = (TaskDescriptor) taskItr.next();
+ descriptorsToVisitStack.add( td );
+ descriptorsToVisitSet.add( td );
+ }
+
+ } else {
+ descriptorsToVisitStack.add( mdAnalysisEntry );
+ descriptorsToVisitSet.add( mdAnalysisEntry );
+ }
+
+ } else {
+ throw new Error( "Unknown method scheduling mode" );
}
- // analyze methods from the priority queue until it is empty
+
+ // analyze scheduled methods until there are no more to visit
while( moreDescriptorsToVisit() ) {
Descriptor d = null;
if( state.DISJOINTDVISITSTACK ||
state.DISJOINTDVISITSTACKEESONTOP
) {
- d = descriptorsToVisitStack.pop().getDescriptor();
+ d = descriptorsToVisitStack.pop();
} else if( state.DISJOINTDVISITPQUE ) {
d = descriptorsToVisitQ.poll().getDescriptor();
if( !rg.equals( rgPrev ) ) {
setPartial( d, rg );
+ if( state.DISJOINTDEBUGSCHEDULING ) {
+ System.out.println( " complete graph changed, scheduling callers for analysis:" );
+ }
+
// results for d changed, so enqueue dependents
// of d for further analysis
Iterator<Descriptor> depsItr = getDependents( d ).iterator();
while( depsItr.hasNext() ) {
Descriptor dNext = depsItr.next();
enqueue( dNext );
- }
- if( state.DISJOINTDVISITSTACKEESONTOP ) {
- depsItr = calleesToEnqueue.iterator();
- while( depsItr.hasNext() ) {
- Descriptor dNext = depsItr.next();
- enqueue( dNext );
+ if( state.DISJOINTDEBUGSCHEDULING ) {
+ System.out.println( " "+dNext );
}
- calleesToEnqueue.clear();
- }
+ }
+ }
- } else {
- // we got the result result as the last visit
- // to this method, but we might need to clean
- // something up
- if( state.DISJOINTDVISITSTACKEESONTOP ) {
- calleesToEnqueue.clear();
+ // whether or not the method under analysis changed,
+ // we may have some callees that are scheduled for
+ // more analysis, and they should go on the top of
+ // the stack now (in other method-visiting modes they
+ // are already enqueued at this point)
+ if( state.DISJOINTDVISITSTACKEESONTOP ) {
+ Iterator<Descriptor> depsItr = calleesToEnqueue.iterator();
+ while( depsItr.hasNext() ) {
+ Descriptor dNext = depsItr.next();
+ enqueue( dNext );
}
- }
-
- }
+ calleesToEnqueue.clear();
+ }
+
+ }
}
protected ReachGraph analyzeMethod( Descriptor d )
rg.merge( rgParent );
}
}
-
+
if( takeDebugSnapshots &&
d.getSymbol().equals( descSymbolDebug )
// nullified in the graph to reduce edges
//rg.nullifyDeadVars( liveness.getLiveInTemps( fmContaining, fn ) );
-
- TempDescriptor lhs;
- TempDescriptor rhs;
- FieldDescriptor fld;
+ TempDescriptor lhs;
+ TempDescriptor rhs;
+ FieldDescriptor fld;
+ TypeDescriptor tdElement;
+ FieldDescriptor fdElement;
+ FlatSESEEnterNode sese;
+ FlatSESEExitNode fsexn;
// use node type to decide what transfer function
// to apply to the reachability graph
if( fon.getOp().getOp() == Operation.ASSIGN ) {
lhs = fon.getDest();
rhs = fon.getLeft();
- rg.assignTempXEqualToTempY( lhs, rhs );
+
+ // before transfer, do effects analysis support
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+ if(rblockStatus.isInCriticalRegion(fmContaining, fn)){
+ // x gets status of y
+ if(!rg.isAccessible(rhs)){
+ rg.makeInaccessible(lhs);
+ }
+ }
+ }
+
+ // transfer func
+ rg.assignTempXEqualToTempY( lhs, rhs );
}
break;
TypeDescriptor td = fcn.getType();
assert td != null;
+
+ // before transfer, do effects analysis support
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+ if(rblockStatus.isInCriticalRegion(fmContaining, fn)){
+ // x gets status of y
+ if(!rg.isAccessible(rhs)){
+ rg.makeInaccessible(lhs);
+ }
+ }
+ }
+ // transfer func
rg.assignTempXEqualToCastedTempY( lhs, rhs, td );
break;
case FKind.FlatFieldNode:
FlatFieldNode ffn = (FlatFieldNode) fn;
+
lhs = ffn.getDst();
rhs = ffn.getSrc();
fld = ffn.getField();
- if( shouldAnalysisTrack( fld.getType() ) ) {
+
+ // before graph transform, possibly inject
+ // a stall-site taint
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+
+ if(rblockStatus.isInCriticalRegion(fmContaining, fn)){
+ // x=y.f, stall y if not accessible
+ // contributes read effects on stall site of y
+ if(!rg.isAccessible(rhs)) {
+ rg.taintStallSite(fn, rhs);
+ }
+
+ // after this, x and y are accessible.
+ rg.makeAccessible(lhs);
+ rg.makeAccessible(rhs);
+ }
+ }
+
+ if( shouldAnalysisTrack( fld.getType() ) ) {
+ // transfer func
rg.assignTempXEqualToTempYFieldF( lhs, rhs, fld );
}
+
+ // after transfer, use updated graph to
+ // do effects analysis
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+ effectsAnalysis.analyzeFlatFieldNode( rg, rhs, fld );
+ }
break;
case FKind.FlatSetFieldNode:
FlatSetFieldNode fsfn = (FlatSetFieldNode) fn;
+
lhs = fsfn.getDst();
fld = fsfn.getField();
rhs = fsfn.getSrc();
+
+ boolean strongUpdate = false;
+
+ // before transfer func, possibly inject
+ // stall-site taints
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+
+ if(rblockStatus.isInCriticalRegion(fmContaining, fn)){
+ // x.f=y , stall x and y if they are not accessible
+ // also contribute write effects on stall site of x
+ if(!rg.isAccessible(lhs)) {
+ rg.taintStallSite(fn, lhs);
+ }
+
+ if(!rg.isAccessible(rhs)) {
+ rg.taintStallSite(fn, rhs);
+ }
+
+ // accessible status update
+ rg.makeAccessible(lhs);
+ rg.makeAccessible(rhs);
+ }
+ }
+
if( shouldAnalysisTrack( fld.getType() ) ) {
- rg.assignTempXFieldFEqualToTempY( lhs, fld, rhs );
+ // transfer func
+ strongUpdate = rg.assignTempXFieldFEqualToTempY( lhs, fld, rhs );
}
+
+ // use transformed graph to do effects analysis
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+ effectsAnalysis.analyzeFlatSetFieldNode( rg, lhs, fld, strongUpdate );
+ }
break;
case FKind.FlatElementNode:
FlatElementNode fen = (FlatElementNode) fn;
+
lhs = fen.getDst();
rhs = fen.getSrc();
- if( shouldAnalysisTrack( lhs.getType() ) ) {
- assert rhs.getType() != null;
- assert rhs.getType().isArray();
-
- TypeDescriptor tdElement = rhs.getType().dereference();
- FieldDescriptor fdElement = getArrayField( tdElement );
-
+ assert rhs.getType() != null;
+ assert rhs.getType().isArray();
+
+ tdElement = rhs.getType().dereference();
+ fdElement = getArrayField( tdElement );
+
+ // before transfer func, possibly inject
+ // stall-site taint
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+
+ if(rblockStatus.isInCriticalRegion(fmContaining, fn)){
+ // x=y[i], stall y if not accessible
+ // contributes read effects on stall site of y
+ // after this, x and y are accessible.
+ if(!rg.isAccessible(rhs)) {
+ rg.taintStallSite(fn, rhs);
+ }
+
+ rg.makeAccessible(lhs);
+ rg.makeAccessible(rhs);
+ }
+ }
+
+ if( shouldAnalysisTrack( lhs.getType() ) ) {
+ // transfer func
rg.assignTempXEqualToTempYFieldF( lhs, rhs, fdElement );
}
+
+ // use transformed graph to do effects analysis
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+ effectsAnalysis.analyzeFlatFieldNode( rg, rhs, fdElement );
+ }
break;
case FKind.FlatSetElementNode:
FlatSetElementNode fsen = (FlatSetElementNode) fn;
- if( arrayReferencees.doesNotCreateNewReaching( fsen ) ) {
- // skip this node if it cannot create new reachability paths
- break;
- }
-
lhs = fsen.getDst();
rhs = fsen.getSrc();
- if( shouldAnalysisTrack( rhs.getType() ) ) {
- assert lhs.getType() != null;
- assert lhs.getType().isArray();
-
- TypeDescriptor tdElement = lhs.getType().dereference();
- FieldDescriptor fdElement = getArrayField( tdElement );
+ assert lhs.getType() != null;
+ assert lhs.getType().isArray();
+
+ tdElement = lhs.getType().dereference();
+ fdElement = getArrayField( tdElement );
+
+ // before transfer func, possibly inject
+ // stall-site taints
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+
+ if(rblockStatus.isInCriticalRegion(fmContaining, fn)){
+ // x[i]=y , stall x and y if they are not accessible
+ // also contribute write effects on stall site of x
+ if(!rg.isAccessible(lhs)) {
+ rg.taintStallSite(fn, lhs);
+ }
+
+ if(!rg.isAccessible(rhs)) {
+ rg.taintStallSite(fn, rhs);
+ }
+
+ // accessible status update
+ rg.makeAccessible(lhs);
+ rg.makeAccessible(rhs);
+ }
+ }
+
+ if( shouldAnalysisTrack( rhs.getType() ) ) {
+ // transfer func, BUT
+ // skip this node if it cannot create new reachability paths
+ if( !arrayReferencees.doesNotCreateNewReaching( fsen ) ) {
+ rg.assignTempXFieldFEqualToTempY( lhs, fdElement, rhs );
+ }
+ }
- rg.assignTempXFieldFEqualToTempY( lhs, fdElement, rhs );
+ // use transformed graph to do effects analysis
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+ effectsAnalysis.analyzeFlatSetFieldNode( rg, lhs, fdElement,
+ false );
}
break;
lhs = fnn.getDst();
if( shouldAnalysisTrack( lhs.getType() ) ) {
AllocSite as = getAllocSiteFromFlatNewPRIVATE( fnn );
- rg.assignTempEqualToNewAlloc( lhs, as );
+
+ // before transform, support effects analysis
+ if (doEffectsAnalysis && fmContaining != fmAnalysisEntry) {
+ if (rblockStatus.isInCriticalRegion(fmContaining, fn)) {
+ // after creating new object, lhs is accessible
+ rg.makeAccessible(lhs);
+ }
+ }
+
+ // transfer func
+ rg.assignTempEqualToNewAlloc( lhs, as );
+ }
+ break;
+
+ case FKind.FlatSESEEnterNode:
+ sese = (FlatSESEEnterNode) fn;
+
+ if( sese.getIsCallerSESEplaceholder() ) {
+ // ignore these dummy rblocks!
+ break;
+ }
+
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+
+ // always remove ALL stall site taints at enter
+ rg.removeAllStallSiteTaints();
+
+ // inject taints for in-set vars
+ rg.taintInSetVars( sese );
+ }
+ break;
+
+ case FKind.FlatSESEExitNode:
+ fsexn = (FlatSESEExitNode) fn;
+ sese = fsexn.getFlatEnter();
+
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+
+ // @ sese exit make all live variables
+ // inaccessible to later parent statements
+ rg.makeInaccessible( liveness.getLiveInTemps( fmContaining, fn ) );
+
+ // always remove ALL stall site taints at exit
+ rg.removeAllStallSiteTaints();
+
+ // remove in-set var taints for the exiting rblock
+ rg.removeInContextTaints( sese );
}
break;
+
case FKind.FlatCall: {
- //TODO: temporal fix for task descriptor case
- //MethodDescriptor mdCaller = fmContaining.getMethod();
Descriptor mdCaller;
- if(fmContaining.getMethod()!=null){
- mdCaller = fmContaining.getMethod();
- }else{
- mdCaller = fmContaining.getTask();
+ if( fmContaining.getMethod() != null ){
+ mdCaller = fmContaining.getMethod();
+ } else {
+ mdCaller = fmContaining.getTask();
}
FlatCall fc = (FlatCall) fn;
MethodDescriptor mdCallee = fc.getMethod();
FlatMethod fmCallee = state.getMethodFlat( mdCallee );
- boolean writeDebugDOTs =
+
+ boolean debugCallSite =
mdCaller.getSymbol().equals( state.DISJOINTDEBUGCALLER ) &&
- mdCallee.getSymbol().equals( state.DISJOINTDEBUGCALLEE );
+ mdCallee.getSymbol().equals( state.DISJOINTDEBUGCALLEE );
+
+ boolean writeDebugDOTs = false;
+ boolean stopAfter = false;
+ if( debugCallSite ) {
+ ++ReachGraph.debugCallSiteVisitCounter;
+ System.out.println( " $$$ Debug call site visit "+
+ ReachGraph.debugCallSiteVisitCounter+
+ " $$$"
+ );
+ if(
+ (ReachGraph.debugCallSiteVisitCounter >=
+ ReachGraph.debugCallSiteVisitStartCapture) &&
+
+ (ReachGraph.debugCallSiteVisitCounter <
+ ReachGraph.debugCallSiteVisitStartCapture +
+ ReachGraph.debugCallSiteNumVisitsToCapture)
+ ) {
+ writeDebugDOTs = true;
+ System.out.println( " $$$ Capturing this call site visit $$$" );
+ if( ReachGraph.debugCallSiteStopAfter &&
+ (ReachGraph.debugCallSiteVisitCounter ==
+ ReachGraph.debugCallSiteVisitStartCapture +
+ ReachGraph.debugCallSiteNumVisitsToCapture - 1)
+ ) {
+ stopAfter = true;
+ }
+ }
+ }
// calculate the heap this call site can reach--note this is
// and reschedule the callee for analysis
addIHMcontribution( mdCallee, fc, heapForThisCall_cur );
+ // map a FlatCall to its enclosing method/task descriptor
+ // so we can write that info out later
+ fc2enclosing.put( fc, mdCaller );
+
+ if( state.DISJOINTDEBUGSCHEDULING ) {
+ System.out.println( " context changed, scheduling callee: "+mdCallee );
+ }
+
if( state.DISJOINTDVISITSTACKEESONTOP ) {
calleesToEnqueue.add( mdCallee );
} else {
}
-
// the transformation for a call site should update the
// current heap abstraction with any effects from the callee,
// or if the method is virtual, the effects from any possible
// callees, so find the set of callees...
- Set<MethodDescriptor> setPossibleCallees =
- new HashSet<MethodDescriptor>();
+ Set<MethodDescriptor> setPossibleCallees;
+ if( determinismDesired ) {
+ // use an ordered set
+ setPossibleCallees = new TreeSet<MethodDescriptor>( dComp );
+ } else {
+ // otherwise use a speedy hashset
+ setPossibleCallees = new HashSet<MethodDescriptor>();
+ }
if( mdCallee.isStatic() ) {
setPossibleCallees.add( mdCallee );
);
}
- ReachGraph rgMergeOfEffects = new ReachGraph();
+ ReachGraph rgMergeOfPossibleCallers = new ReachGraph();
Iterator<MethodDescriptor> mdItr = setPossibleCallees.iterator();
while( mdItr.hasNext() ) {
// don't alter the working graph (rg) until we compute a
// result for every possible callee, merge them all together,
// then set rg to that
- ReachGraph rgCopy = new ReachGraph();
- rgCopy.merge( rg );
+ ReachGraph rgPossibleCaller = new ReachGraph();
+ rgPossibleCaller.merge( rg );
- ReachGraph rgEffect = getPartial( mdPossible );
+ ReachGraph rgPossibleCallee = getPartial( mdPossible );
- if( rgEffect == null ) {
+ if( rgPossibleCallee == null ) {
// if this method has never been analyzed just schedule it
// for analysis and skip over this call site for now
if( state.DISJOINTDVISITSTACKEESONTOP ) {
} else {
enqueue( mdPossible );
}
+
+ if( state.DISJOINTDEBUGSCHEDULING ) {
+ System.out.println( " callee hasn't been analyzed, scheduling: "+mdPossible );
+ }
+
} else {
- rgCopy.resolveMethodCall( fc,
- fmPossible,
- rgEffect,
- callerNodeIDsCopiedToCallee,
- writeDebugDOTs
- );
+ // calculate the method call transform
+ rgPossibleCaller.resolveMethodCall( fc,
+ fmPossible,
+ rgPossibleCallee,
+ callerNodeIDsCopiedToCallee,
+ writeDebugDOTs
+ );
+
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+ if( !rgPossibleCallee.isAccessible( ReachGraph.tdReturn ) ) {
+ rgPossibleCaller.makeInaccessible( fc.getReturnTemp() );
+ }
+ }
+
}
- rgMergeOfEffects.merge( rgCopy );
+ rgMergeOfPossibleCallers.merge( rgPossibleCaller );
+ }
+
+
+ if( stopAfter ) {
+ System.out.println( "$$$ Exiting after requested captures of call site. $$$" );
+ System.exit( 0 );
}
// now that we've taken care of building heap models for
// callee analysis, finish this transformation
- rg = rgMergeOfEffects;
+ rg = rgMergeOfPossibleCallers;
} break;
case FKind.FlatReturnNode:
FlatReturnNode frn = (FlatReturnNode) fn;
rhs = frn.getReturnTemp();
+
+ // before transfer, do effects analysis support
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+ if(!rg.isAccessible(rhs)){
+ rg.makeInaccessible(ReachGraph.tdReturn);
+ }
+ }
+
if( rhs != null && shouldAnalysisTrack( rhs.getType() ) ) {
rg.assignReturnEqualToTemp( rhs );
}
+
setRetNodes.add( frn );
break;
ReachGraph rg = (ReachGraph) me.getValue();
rg.writeGraph( "COMPLETE"+d,
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // hide subset reachability states
- true ); // hide edge taints
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide reachability altogether
+ true, // hide subset reachability states
+ true, // hide predicates
+ false ); // hide edge taints
}
}
FlatCall fc = (FlatCall) me2.getKey();
ReachGraph rg = (ReachGraph) me2.getValue();
- rg.writeGraph( "IHMPARTFOR"+d+"FROM"+fc,
+ rg.writeGraph( "IHMPARTFOR"+d+"FROM"+fc2enclosing.get( fc )+fc,
true, // write labels (variables)
true, // selectively hide intermediate temp vars
+ true, // hide reachability altogether
true, // prune unreachable heap regions
- false, // hide subset reachability states
+ true, // hide subset reachability states
+ false, // hide predicates
true ); // hide edge taints
}
}
}
+
+ private void writeInitialContexts() {
+ Set entrySet = mapDescriptorToInitialContext.entrySet();
+ Iterator itr = entrySet.iterator();
+ while( itr.hasNext() ) {
+ Map.Entry me = (Map.Entry) itr.next();
+ Descriptor d = (Descriptor) me.getKey();
+ ReachGraph rg = (ReachGraph) me.getValue();
+
+ rg.writeGraph( "INITIAL"+d,
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide all reachability
+ true, // hide subset reachability states
+ true, // hide predicates
+ false );// hide edge taints
+ }
+ }
protected ReachGraph getPartial( Descriptor d ) {
true, // write labels (variables)
true, // selectively hide intermediate temp vars
true, // prune unreachable heap regions
- false, // hide subset reachability states
- true ); // hide edge taints
+ false, // hide all reachability
+ true, // hide subset reachability states
+ false, // hide predicates
+ false); // hide edge taints
mapDescriptorToNumUpdates.put( d, n + 1 );
}
// return just the allocation site associated with one FlatNew node
protected AllocSite getAllocSiteFromFlatNewPRIVATE( FlatNew fnew ) {
+ boolean flagProgrammatically = false;
+ if( sitesToFlag != null && sitesToFlag.contains( fnew ) ) {
+ flagProgrammatically = true;
+ }
+
if( !mapFlatNewToAllocSite.containsKey( fnew ) ) {
AllocSite as = AllocSite.factory( allocationDepth,
fnew,
- fnew.getDisjointId()
+ fnew.getDisjointId(),
+ flagProgrammatically
);
// the newest nodes are single objects
protected LinkedList<Descriptor> topologicalSort( Set<Descriptor> toSort ) {
- Set <Descriptor> discovered = new HashSet <Descriptor>();
- LinkedList<Descriptor> sorted = new LinkedList<Descriptor>();
+ Set<Descriptor> discovered;
+
+ if( determinismDesired ) {
+ // use an ordered set
+ discovered = new TreeSet<Descriptor>( dComp );
+ } else {
+ // otherwise use a speedy hashset
+ discovered = new HashSet<Descriptor>();
+ }
+
+ LinkedList<Descriptor> sorted = new LinkedList<Descriptor>();
Iterator<Descriptor> itr = toSort.iterator();
while( itr.hasNext() ) {
protected void enqueue( Descriptor d ) {
+
if( !descriptorsToVisitSet.contains( d ) ) {
- Integer priority = mapDescriptorToPriority.get( d );
if( state.DISJOINTDVISITSTACK ||
state.DISJOINTDVISITSTACKEESONTOP
) {
- descriptorsToVisitStack.add( new DescriptorQWrapper( priority,
- d )
- );
+ descriptorsToVisitStack.add( d );
} else if( state.DISJOINTDVISITPQUE ) {
+ Integer priority = mapDescriptorToPriority.get( d );
descriptorsToVisitQ.add( new DescriptorQWrapper( priority,
d )
);
getIHMcontributions( d );
if( !heapsFromCallers.containsKey( fc ) ) {
- heapsFromCallers.put( fc, new ReachGraph() );
+ return null;
}
return heapsFromCallers.get( fc );
}
+
public void addIHMcontribution( Descriptor d,
FlatCall fc,
ReachGraph rg
heapsFromCallers.put( fc, rg );
}
+
private AllocSite createParameterAllocSite( ReachGraph rg,
- TempDescriptor tempDesc
+ TempDescriptor tempDesc,
+ boolean flagRegions
) {
- FlatNew flatNew = new FlatNew( tempDesc.getType(), // type
- tempDesc, // param temp
- false, // global alloc?
- "param"+tempDesc // disjoint site ID string
- );
+ FlatNew flatNew;
+ if( flagRegions ) {
+ flatNew = new FlatNew( tempDesc.getType(), // type
+ tempDesc, // param temp
+ false, // global alloc?
+ "param"+tempDesc // disjoint site ID string
+ );
+ } else {
+ flatNew = new FlatNew( tempDesc.getType(), // type
+ tempDesc, // param temp
+ false, // global alloc?
+ null // disjoint site ID string
+ );
+ }
+
// create allocation site
AllocSite as = AllocSite.factory( allocationDepth,
flatNew,
- flatNew.getDisjointId()
+ flatNew.getDisjointId(),
+ false
);
for (int i = 0; i < allocationDepth; ++i) {
Integer id = generateUniqueHeapRegionNodeID();
if(i==dimCount){
as = alloc;
}else{
- as = createParameterAllocSite(rg, tempDesc);
+ as = createParameterAllocSite(rg, tempDesc, false);
}
// make a new reference to allocated node
hrnSummary =
if(prevNode==null){
// make a new reference between new summary node and source
- RefEdge edgeToSummary = new RefEdge(srcHRN, // source
+ RefEdge edgeToSummary = new RefEdge(srcHRN, // source
hrnSummary, // dest
typeDesc, // type
fd.getSymbol(), // field name
alpha, // beta
- ExistPredSet.factory(rg.predTrue) // predicates
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
);
rg.addRefEdge(srcHRN, hrnSummary, edgeToSummary);
typeDesc, // type
arrayElementFieldName, // field name
alpha, // beta
- ExistPredSet.factory(rg.predTrue) // predicates
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
);
rg.addRefEdge(prevNode, hrnSummary, edgeToSummary);
typeDesc.setArrayCount(0);
if(!mapToExistingNode.containsKey(typeDesc)){
TempDescriptor tempDesc=new TempDescriptor(type.getSymbol(),typeDesc);
- AllocSite as = createParameterAllocSite(rg, tempDesc);
+ AllocSite as = createParameterAllocSite(rg, tempDesc, false);
// make a new reference to allocated node
HeapRegionNode hrnSummary =
rg.createNewHeapRegionNode(as.getSummary(), // id or null to generate a new one
typeDesc, // type
arrayElementFieldName, // field name
alpha, // beta
- ExistPredSet.factory(rg.predTrue) // predicates
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
);
rg.addRefEdge(prevNode, hrnSummary, edgeToSummary);
prevNode=hrnSummary;
}else{
- HeapRegionNode hrnSummary=mapToExistingNode.get(typeDesc);
+ HeapRegionNode hrnSummary=mapToExistingNode.get(typeDesc);
if(prevNode.getReferenceTo(hrnSummary, typeDesc, arrayElementFieldName)==null){
RefEdge edgeToSummary = new RefEdge(prevNode, // source
hrnSummary, // dest
typeDesc, // type
arrayElementFieldName, // field name
alpha, // beta
- ExistPredSet.factory(rg.predTrue) // predicates
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
);
rg.addRefEdge(prevNode, hrnSummary, edgeToSummary);
}
TempDescriptor tempDesc = fm.getParameter(idx);
- AllocSite as = createParameterAllocSite(rg, tempDesc);
+ AllocSite as = createParameterAllocSite(rg, tempDesc, true);
VariableNode lnX = rg.getVariableNodeFromTemp(tempDesc);
Integer idNewest = as.getIthOldest(0);
HeapRegionNode hrnNewest = rg.id2hrn.get(idNewest);
taskDesc.getParamType(idx), // type
null, // field name
hrnNewest.getAlpha(), // beta
- ExistPredSet.factory(rg.predTrue) // predicates
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
);
rg.addRefEdge(lnX, hrnNewest, edgeNew);
//corresponding allocsite has already been created for a parameter variable.
allocSite=as;
}else{
- allocSite = createParameterAllocSite(rg, td);
+ allocSite = createParameterAllocSite(rg, td, false);
}
String strDesc = allocSite.toStringForDOT()
+ "\\nsummary";
type, // type
fd.getSymbol(), // field name
hrnNewest.getAlpha(), // beta
- ExistPredSet.factory(rg.predTrue) // predicates
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
);
rg.addRefEdge(srcHRN, hrnSummary, edgeToSummary);
fd.getType(), // type
fd.getSymbol(), // field name
srcHRN.getAlpha(), // beta
- ExistPredSet.factory(rg.predTrue) // predicates
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
);
rg.addRefEdge(srcHRN, hrnDst, edgeToSummary);
return asSetTotal;
}
+ public Set<Descriptor> getDescriptorsToAnalyze() {
+ return descriptorsToAnalyze;
+ }
+ public EffectsAnalysis getEffectsAnalysis(){
+ return effectsAnalysis;
+ }
// get successive captures of the analysis state, use compiler
" @@@" );
String graphName;
if( in ) {
- graphName = String.format( "snap%02d_%04din",
+ graphName = String.format( "snap%03d_%04din",
snapVisitCounter,
snapNodeCounter );
} else {
- graphName = String.format( "snap%02d_%04dout",
+ graphName = String.format( "snap%03d_%04dout",
snapVisitCounter,
snapNodeCounter );
}
graphName = graphName + fn;
}
rg.writeGraph( graphName,
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // hide subset reachability states
- true );// hide edge taints
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide reachability
+ true, // hide subset reachability states
+ true, // hide predicates
+ false );// hide edge taints
}
}