// MethodDescriptor maps to HashSet<MethodDescriptor or TaskDescriptor>
protected Hashtable mapCallee2CallerSet;
- protected BaseCallGraph() {}
+ protected BaseCallGraph() {
+ }
protected TypeUtil typeUtil;
// that call the given method
public Set getCallerSet(MethodDescriptor md) {
Set s = (Set) mapCallee2CallerSet.get(md);
-
+
if( s == null ) {
return new HashSet();
}
Stack<ClassDescriptor> possInterfaces=new Stack<ClassDescriptor>();
ClassDescriptor tmpcd=cn;
while(tmpcd!=null) {
- for(Iterator supit=tmpcd.getSuperInterfaces();supit.hasNext();) {
+ for(Iterator supit=tmpcd.getSuperInterfaces(); supit.hasNext(); ) {
possInterfaces.add((ClassDescriptor)supit.next());
}
tmpcd=tmpcd.getSuperDesc();
}
while(!possInterfaces.isEmpty()) {
ClassDescriptor IFdesc=possInterfaces.pop();
- for(Iterator supit=IFdesc.getSuperInterfaces();supit.hasNext();) {
+ for(Iterator supit=IFdesc.getSuperInterfaces(); supit.hasNext(); ) {
possInterfaces.add((ClassDescriptor)supit.next());
}
Set possiblematches=IFdesc.getMethodTable().getSet(md.getSymbol());
- for(Iterator matchit=possiblematches.iterator(); matchit.hasNext();) {
+ for(Iterator matchit=possiblematches.iterator(); matchit.hasNext(); ) {
MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
if (md.matches(matchmd)) {
if (!mapVirtual2ImplementationSet.containsKey(matchmd))
}
}
}
-
+
ClassDescriptor superdesc=cn.getSuperDesc();
if (superdesc!=null) {
Set possiblematches=superdesc.getMethodTable().getSet(md.getSymbol());
- for(Iterator matchit=possiblematches.iterator(); matchit.hasNext();) {
+ for(Iterator matchit=possiblematches.iterator(); matchit.hasNext(); ) {
MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
if (md.matches(matchmd)) {
if (!mapVirtual2ImplementationSet.containsKey(matchmd))
ns.add(md);
Set s=(Set)mapVirtual2ImplementationSet.get(md);
if (s!=null)
- for(Iterator it=s.iterator(); it.hasNext();) {
+ for(Iterator it=s.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
ns.addAll(getMethods(md2));
}
found.add(d);
Set s=(Set)mapCaller2CalleeSet.get(d);
if (s!=null)
- for(Iterator it=s.iterator(); it.hasNext();) {
+ for(Iterator it=s.iterator(); it.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor)it.next();
if( !found.contains(md) ) {
ns.addAll(getMoreMethodCalls(found, md));
Set s=(Set)mapCaller2CalleeSet.get(md);
if (s!=null) {
- for(Iterator it=s.iterator(); it.hasNext();) {
+ for(Iterator it=s.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
if( !callable.contains(md2) ) {
callable.add(md2);
}
return callable;
}
-
- // Returns a set of methods containing SESEs and located at the first
- // in transitive call chain starting from d
+
+ // Returns a set of methods containing SESEs and located at the first
+ // in transitive call chain starting from d
public Set getFirstReachableMethodContainingSESE(Descriptor d,
- Set<MethodDescriptor> methodsContainingSESEs) {
+ Set<MethodDescriptor> methodsContainingSESEs) {
HashSet tovisit = new HashSet();
tovisit.add(d);
HashSet callable = new HashSet();
Set s = (Set) mapCaller2CalleeSet.get(md);
if (s != null) {
- for (Iterator it = s.iterator(); it.hasNext();) {
- MethodDescriptor md2 = (MethodDescriptor) it.next();
- if (!callable.contains(md2)) {
- callable.add(md2);
- if (!methodsContainingSESEs.contains(md2)) {
- // if current method has sese, do not need to go down
- tovisit.add(md2);
- }
- }
- }
+ for (Iterator it = s.iterator(); it.hasNext(); ) {
+ MethodDescriptor md2 = (MethodDescriptor) it.next();
+ if (!callable.contains(md2)) {
+ callable.add(md2);
+ if (!methodsContainingSESEs.contains(md2)) {
+ // if the current method has an SESE, no need to go down further
+ tovisit.add(md2);
+ }
+ }
+ }
}
}
// callable.retainAll(methodsContainingSESEs);
return callable;
}
-
+
private void buildGraph() {
Iterator it=state.getClassSymbolTable().getDescriptorsIterator();
if (fn.kind()==FKind.FlatCall) {
FlatCall fc=(FlatCall)fn;
MethodDescriptor calledmethod=fc.getMethod();
- Set methodsthatcouldbecalled=fc.getThis()==null ? getMethods(calledmethod) :
+ Set methodsthatcouldbecalled=fc.getThis()==null?getMethods(calledmethod):
getMethods(calledmethod, fc.getThis().getType());
-
+
// add caller -> callee maps
if( !mapCaller2CalleeSet.containsKey(caller) ) {
mapCaller2CalleeSet.put(caller, new HashSet() );
if (fm==null)
continue;
analyzeMethod(md, fm);
- for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
if (fn.kind()==FKind.FlatCall) {
FlatCall fcall=(FlatCall)fn;
MethodDescriptor methodd=fcall.getMethod();
if (methodd.getClassDesc()==tu.getClass(TypeUtil.ThreadClass)&&
- methodd.getSymbol().equals("start")&&methodd.numParameters()==0&&!methodd.getModifiers().isStatic()) {
+ methodd.getSymbol().equals("start")&&methodd.numParameters()==0&&!methodd.getModifiers().isStatic()) {
//Have call to start
HashSet ns=new HashSet();
ns.addAll(callees);
}
}
- for(Iterator mdit=callees.iterator();mdit.hasNext();) {
+ for(Iterator mdit=callees.iterator(); mdit.hasNext(); ) {
MethodDescriptor callee=(MethodDescriptor)mdit.next();
if (!discovered.contains(callee)) {
discovered.add(callee);
public static final int SHADOWAGE_oldest = -102;
public static final int SHADOWAGE_summary = -103;
- protected Integer id;
- protected int allocationDepth;
+ protected Integer id;
+ protected int allocationDepth;
protected Vector<Integer> ithOldest;
- protected Integer summary;
- protected FlatNew flatNew;
- protected String disjointId;
- protected boolean isFlagged;
-
-
- public static AllocSite factory( int allocationDepth,
- FlatNew flatNew,
- String disjointId,
- boolean markAsFlagged
- ) {
- AllocSite out = new AllocSite( allocationDepth,
- flatNew,
- disjointId,
- markAsFlagged );
- out = (AllocSite) Canonical.makeCanonical( out );
+ protected Integer summary;
+ protected FlatNew flatNew;
+ protected String disjointId;
+ protected boolean isFlagged;
+
+
+ public static AllocSite factory(int allocationDepth,
+ FlatNew flatNew,
+ String disjointId,
+ boolean markAsFlagged
+ ) {
+ AllocSite out = new AllocSite(allocationDepth,
+ flatNew,
+ disjointId,
+ markAsFlagged);
+ out = (AllocSite) Canonical.makeCanonical(out);
return out;
}
- protected AllocSite( int allocationDepth,
- FlatNew flatNew,
- String disjointId,
- boolean markAsFlagged
- ) {
+ protected AllocSite(int allocationDepth,
+ FlatNew flatNew,
+ String disjointId,
+ boolean markAsFlagged
+ ) {
assert allocationDepth >= 1;
// mark this allocation site as being flagged
// for the analysis if
- // 1) we have a non-null disjointID (a named flagged site)
+ // 1) we have a non-null disjointID (a named flagged site)
// OR
- // 2) the type is a class with Bamboo-parameter flags
+ // 2) the type is a class with Bamboo-parameter flags
// OR
// 3) a client wants to programmatically flag this site,
// such as the OoOJava method effects analysis
}
- ithOldest = new Vector<Integer>( allocationDepth );
+ ithOldest = new Vector<Integer>(allocationDepth);
id = generateUniqueAllocSiteID();
}
static public Integer generateUniqueAllocSiteID() {
++uniqueIDcount;
- return new Integer( uniqueIDcount );
+ return new Integer(uniqueIDcount);
}
public int getUniqueAllocSiteID() {
return allocationDepth;
}
- public void setIthOldest( int i, Integer id ) {
+ public void setIthOldest(int i, Integer id) {
assert i >= 0;
assert i < allocationDepth;
assert id != null;
- ithOldest.add( i, id );
+ ithOldest.add(i, id);
}
- public Integer getIthOldest( int i ) {
+ public Integer getIthOldest(int i) {
assert i >= 0;
assert i < allocationDepth;
- return ithOldest.get( i );
+ return ithOldest.get(i);
}
- public Integer getIthOldestShadow( int i ) {
+ public Integer getIthOldestShadow(int i) {
assert i >= 0;
assert i < allocationDepth;
- return -ithOldest.get( i );
+ return -ithOldest.get(i);
}
public Integer getOldest() {
- return ithOldest.get( allocationDepth - 1 );
+ return ithOldest.get(allocationDepth - 1);
}
public Integer getOldestShadow() {
- return -ithOldest.get( allocationDepth - 1 );
+ return -ithOldest.get(allocationDepth - 1);
}
- public void setSummary( Integer id ) {
+ public void setSummary(Integer id) {
assert id != null;
summary = id;
}
return isFlagged;
}
- public int getAgeCategory( Integer id ) {
+ public int getAgeCategory(Integer id) {
- if( id.equals( summary ) ) {
+ if( id.equals(summary) ) {
return AGE_summary;
}
- if( id.equals( getOldest() ) ) {
+ if( id.equals(getOldest() ) ) {
return AGE_oldest;
}
for( int i = 0; i < allocationDepth - 1; ++i ) {
- if( id.equals( ithOldest.get( i ) ) ) {
+ if( id.equals(ithOldest.get(i) ) ) {
return AGE_in_I;
}
}
return AGE_notInThisSite;
}
- public Integer getAge( Integer id ) {
+ public Integer getAge(Integer id) {
for( int i = 0; i < allocationDepth; ++i ) {
- if( id.equals( ithOldest.get( i ) ) ) {
- return new Integer( i );
+ if( id.equals(ithOldest.get(i) ) ) {
+ return new Integer(i);
}
}
return null;
}
- public int getShadowAgeCategory( Integer id ) {
- if( id.equals( -summary ) ) {
+ public int getShadowAgeCategory(Integer id) {
+ if( id.equals(-summary) ) {
return SHADOWAGE_summary;
}
- if( id.equals( getOldestShadow() ) ) {
+ if( id.equals(getOldestShadow() ) ) {
return SHADOWAGE_oldest;
}
for( int i = 0; i < allocationDepth - 1; ++i ) {
- if( id.equals( getIthOldestShadow( i ) ) ) {
+ if( id.equals(getIthOldestShadow(i) ) ) {
return SHADOWAGE_in_I;
}
}
return SHADOWAGE_notInThisSite;
}
- public Integer getShadowAge( Integer id ) {
+ public Integer getShadowAge(Integer id) {
for( int i = 0; i < allocationDepth - 1; ++i ) {
- if( id.equals( getIthOldestShadow( i ) ) ) {
- return new Integer( -i );
+ if( id.equals(getIthOldestShadow(i) ) ) {
+ return new Integer(-i);
}
}
return null;
}
- public Integer getShadowIDfromID( Integer id ) {
- int ageCat = getAgeCategory( id );
+ public Integer getShadowIDfromID(Integer id) {
+ int ageCat = getAgeCategory(id);
switch( ageCat ) {
-
+
case AGE_summary:
case AGE_oldest:
case AGE_in_I:
return -id;
-
+
case AGE_notInThisSite:
default:
- System.out.println( toStringWithIDs() );
- throw new Error( "ID "+id+" not from this site." );
+ System.out.println(toStringWithIDs() );
+ throw new Error("ID "+id+" not from this site.");
}
}
public String toStringVerbose() {
if( disjointId == null ) {
return "allocSite"+id+" "+
- flatNew.getType().toPrettyString();
+ flatNew.getType().toPrettyString();
}
return "allocSite "+disjointId+" ("+id+") "+
- flatNew.getType().toPrettyString();
+ flatNew.getType().toPrettyString();
}
public String toStringForDOT() {
if( disjointId != null ) {
return "disjoint "+disjointId+"\\n"+toString()+
- "\\n"+getType().toPrettyString();
+ "\\n"+getType().toPrettyString();
} else {
- return toString()+
- "\\n"+getType().toPrettyString();
+ return toString()+
+ "\\n"+getType().toPrettyString();
}
}
public String toStringWithIDs() {
String s = "allocSite"+id+" ";
for( int i = 0; i < ithOldest.size(); ++i ) {
- s += i+"("+ithOldest.get( i )+") ";
+ s += i+"("+ithOldest.get(i)+") ";
}
s += "summary("+summary+")";
return s;
}
- public boolean equalsSpecific( Object o ) {
+ public boolean equalsSpecific(Object o) {
if( o == null ) {
return false;
}
public int hashCodeSpecific() {
return id.hashCode();
- }
+ }
}
// map a task or stall site (both a FlatNode) to a variable
// and then finally to a state machine
protected Hashtable< FlatNode, Hashtable<TempDescriptor, StateMachineForEffects>> fn2var2smfe;
-
+
// remember all the FlatNode/TempDescriptor pairs that have a state machines
// for easy retrieval of all machines
protected Set<Pair<FlatNode, TempDescriptor>> allMachineNamePairs;
}
public StateMachineForEffects getStateMachine(FlatNode fn, TempDescriptor var) {
- Hashtable<TempDescriptor, StateMachineForEffects> var2smfe = fn2var2smfe.get( fn );
+ Hashtable<TempDescriptor, StateMachineForEffects> var2smfe = fn2var2smfe.get(fn);
if( var2smfe == null ) {
var2smfe = new Hashtable<TempDescriptor, StateMachineForEffects>();
- fn2var2smfe.put( fn, var2smfe );
+ fn2var2smfe.put(fn, var2smfe);
}
-
- StateMachineForEffects smfe = var2smfe.get( var );
+
+ StateMachineForEffects smfe = var2smfe.get(var);
if( smfe == null ) {
- smfe = new StateMachineForEffects( fn );
- var2smfe.put( var, smfe );
- allMachineNamePairs.add( new Pair<FlatNode, TempDescriptor>( fn, var ) );
+ smfe = new StateMachineForEffects(fn);
+ var2smfe.put(var, smfe);
+ allMachineNamePairs.add(new Pair<FlatNode, TempDescriptor>(fn, var) );
}
return smfe;
}
- public void addToStateMachine( Taint t,
- Effect e,
- FlatNode currentProgramPoint ) {
+ public void addToStateMachine(Taint t,
+ Effect e,
+ FlatNode currentProgramPoint) {
FlatNode taskOrStallSite;
if( t.isStallSiteTaint() ) {
taskOrStallSite = t.getStallSite();
TempDescriptor var = t.getVar();
- StateMachineForEffects smfe = getStateMachine( taskOrStallSite, var );
+ StateMachineForEffects smfe = getStateMachine(taskOrStallSite, var);
FlatNode whereDefined = t.getWhereDefined();
- smfe.addEffect( whereDefined, e );
+ smfe.addEffect(whereDefined, e);
// reads of pointers make a transition
if( e.getType() == Effect.read &&
((e.getField()!=null && e.getField().getType().isPtr())
- ||(e.getField()==null && e.getAffectedAllocSite().getFlatNew().getType().dereference().isPtr()))) {
-
- smfe.addTransition( whereDefined,
- currentProgramPoint,
- e );
+ ||(e.getField()==null && e.getAffectedAllocSite().getFlatNew().getType().dereference().isPtr()))) {
+
+ smfe.addTransition(whereDefined,
+ currentProgramPoint,
+ e);
}
}
Iterator<FlatNode> fnItr = fn2var2smfe.keySet().iterator();
while( fnItr.hasNext() ) {
FlatNode fn = fnItr.next();
-
- Hashtable<TempDescriptor, StateMachineForEffects>
- var2smfe = fn2var2smfe.get( fn );
-
+
+ Hashtable<TempDescriptor, StateMachineForEffects>
+ var2smfe = fn2var2smfe.get(fn);
+
Iterator<TempDescriptor> varItr = var2smfe.keySet().iterator();
while( varItr.hasNext() ) {
- TempDescriptor var = varItr.next();
+ TempDescriptor var = varItr.next();
- StateMachineForEffects smfe = var2smfe.get( var );
+ StateMachineForEffects smfe = var2smfe.get(var);
- smfe.writeAsDOT( prefix+"statemachine_"+fn.toString()+var.toString() );
+ smfe.writeAsDOT(prefix+"statemachine_"+fn.toString()+var.toString() );
}
}
}
- //TODO JIM! Give me the REAALL number here.
+ //TODO JIM! Give me the REAL number here.
public int getTotalNumOfWeakGroups() {
// TODO Auto-generated method stub
return 1;
// the canon of objects
private static Hashtable<Canonical, Canonical>
- canon = new Hashtable<Canonical, Canonical>();
-
+ canon = new Hashtable<Canonical, Canonical>();
- public static Canonical makeCanonical( Canonical c ) {
- if( canon.containsKey( c ) ) {
- return canon.get( c );
+ public static Canonical makeCanonical(Canonical c) {
+
+ if( canon.containsKey(c) ) {
+ return canon.get(c);
}
-
+
c.canonicalValue = canonicalCount;
++canonicalCount;
- canon.put( c, c );
+ canon.put(c, c);
return c;
}
-
+
// any Canonical with value still 0 is NOT CANONICAL!
private int canonicalValue = 0;
- abstract public boolean equalsSpecific( Object o );
+ abstract public boolean equalsSpecific(Object o);
- final public boolean equals( Object o ) {
+ final public boolean equals(Object o) {
if( o == null ) {
return false;
}
Canonical c = (Canonical) o;
if( this.canonicalValue == 0 ||
- c.canonicalValue == 0
+ c.canonicalValue == 0
) {
- return equalsSpecific( o );
+ return equalsSpecific(o);
}
return this.canonicalValue == c.canonicalValue;
}
-
+
// canonical objects should never be modified
// and therefore have changing hash codes, so
// use a standard canonical hash code method to
abstract public int hashCodeSpecific();
private boolean hasHash = false;
- private int oldHash;
+ private int oldHash;
final public int hashCode() {
-
+
// the quick mode
if( DisjointAnalysis.releaseMode && hasHash ) {
return oldHash;
if( hasHash ) {
if( oldHash != hash ) {
- throw new Error( "A CANONICAL HASH CHANGED" );
+ throw new Error("A CANONICAL HASH CHANGED");
}
} else {
hasHash = true;
oldHash = hash;
}
-
+
return hash;
}
// mapping of a non-trivial operation to its result
- private static Hashtable<CanonicalOp, Canonical>
- op2result = new Hashtable<CanonicalOp, Canonical>();
-
+ private static Hashtable<CanonicalOp, Canonical>
+ op2result = new Hashtable<CanonicalOp, Canonical>();
+
///////////////////////////////////////////////////////////
//
///////////////////////////////////////////////////////////
-
+
// not weighty, don't bother with caching
- public static ReachTuple unionUpArity( ReachTuple rt1,
- ReachTuple rt2 ) {
+ public static ReachTuple unionUpArity(ReachTuple rt1,
+ ReachTuple rt2) {
assert rt1 != null;
assert rt2 != null;
assert rt1.isCanonical();
assert rt1.hrnID == rt2.hrnID;
assert rt1.isMultiObject == rt2.isMultiObject;
assert rt1.isOutOfContext == rt2.isOutOfContext;
-
+
ReachTuple out;
if( rt1.isMultiObject ) {
// on two non-ZERO arity multi regions, union arity is always
// ZERO-OR-MORE
- out = ReachTuple.factory( rt1.hrnID,
- true,
- ReachTuple.ARITY_ZEROORMORE,
- rt1.isOutOfContext );
-
+ out = ReachTuple.factory(rt1.hrnID,
+ true,
+ ReachTuple.ARITY_ZEROORMORE,
+ rt1.isOutOfContext);
+
} else {
// a single object region can only be ARITY_ONE (or zero by
// being absent)
}
// not weighty, no caching
- public static ReachTuple changeHrnIDTo( ReachTuple rt,
- Integer hrnIDToChangeTo ) {
+ public static ReachTuple changeHrnIDTo(ReachTuple rt,
+ Integer hrnIDToChangeTo) {
assert rt != null;
assert hrnIDToChangeTo != null;
- ReachTuple out = ReachTuple.factory( hrnIDToChangeTo,
- rt.isMultiObject,
- rt.arity,
- rt.isOutOfContext
- );
+ ReachTuple out = ReachTuple.factory(hrnIDToChangeTo,
+ rt.isMultiObject,
+ rt.arity,
+ rt.isOutOfContext
+ );
assert out.isCanonical();
return out;
}
- public static ReachState attach( ReachState rs,
- ExistPredSet preds ) {
+ public static ReachState attach(ReachState rs,
+ ExistPredSet preds) {
assert rs != null;
assert preds != null;
assert rs.isCanonical();
assert preds.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSTATE_ATTACH_EXISTPREDSET,
- rs,
- preds );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSTATE_ATTACH_EXISTPREDSET,
+ rs,
+ preds);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachState) result;
}
-
+
// otherwise, no cached result...
ReachState out = new ReachState();
- out.reachTuples.addAll( rs.reachTuples );
- out.preds = Canonical.join( rs.preds,
- preds );
-
- out = (ReachState) makeCanonical( out );
- op2result.put( op, out );
+ out.reachTuples.addAll(rs.reachTuples);
+ out.preds = Canonical.join(rs.preds,
+ preds);
+
+ out = (ReachState) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static ReachState add( ReachState rs,
- ReachTuple rt ) {
+ public static ReachState add(ReachState rs,
+ ReachTuple rt) {
assert rs != null;
assert rt != null;
// this is only safe if we are certain the new tuple's
// ID doesn't already appear in the reach state
- assert rs.containsHrnID( rt.getHrnID(),
- rt.isOutOfContext() ) == null;
-
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSTATE_ADD_REACHTUPLE,
- rs,
- rt );
-
- Canonical result = op2result.get( op );
+ assert rs.containsHrnID(rt.getHrnID(),
+ rt.isOutOfContext() ) == null;
+
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSTATE_ADD_REACHTUPLE,
+ rs,
+ rt);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachState) result;
}
// otherwise, no cached result...
ReachState out = new ReachState();
- out.reachTuples.addAll( rs.reachTuples );
- out.reachTuples.add( rt );
+ out.reachTuples.addAll(rs.reachTuples);
+ out.reachTuples.add(rt);
out.preds = rs.preds;
- out = (ReachState) makeCanonical( out );
- op2result.put( op, out );
+ out = (ReachState) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
-
- public static ReachState unionUpArity( ReachState rs1,
- ReachState rs2 ) {
+
+ public static ReachState unionUpArity(ReachState rs1,
+ ReachState rs2) {
assert rs1 != null;
assert rs2 != null;
assert rs1.isCanonical();
assert rs2.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSTATE_UNIONUPARITY_REACHSTATE,
- rs1,
- rs2 );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSTATE_UNIONUPARITY_REACHSTATE,
+ rs1,
+ rs2);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachState) result;
}
-
+
// otherwise, no cached result...
ReachState out = new ReachState();
Iterator<ReachTuple> rtItr = rs1.iterator();
while( rtItr.hasNext() ) {
ReachTuple rt1 = rtItr.next();
- ReachTuple rt2 = rs2.containsHrnID( rt1.getHrnID(),
- rt1.isOutOfContext()
- );
+ ReachTuple rt2 = rs2.containsHrnID(rt1.getHrnID(),
+ rt1.isOutOfContext()
+ );
if( rt2 != null ) {
- out.reachTuples.add( unionUpArity( rt1, rt2 ) );
+ out.reachTuples.add(unionUpArity(rt1, rt2) );
} else {
- out.reachTuples.add( rt1 );
+ out.reachTuples.add(rt1);
}
}
rtItr = rs2.iterator();
while( rtItr.hasNext() ) {
ReachTuple rt2 = rtItr.next();
- ReachTuple rt1 = rs1.containsHrnID( rt2.getHrnID(),
- rt2.isOutOfContext()
- );
+ ReachTuple rt1 = rs1.containsHrnID(rt2.getHrnID(),
+ rt2.isOutOfContext()
+ );
if( rt1 == null ) {
- out.reachTuples.add( rt2 );
+ out.reachTuples.add(rt2);
}
}
- out.preds = Canonical.join( rs1.getPreds(),
- rs2.getPreds()
- );
-
- out = (ReachState) makeCanonical( out );
- op2result.put( op, out );
+ out.preds = Canonical.join(rs1.getPreds(),
+ rs2.getPreds()
+ );
+
+ out = (ReachState) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static ReachState addUpArity( ReachState rs,
- ReachTuple rt ) {
+ public static ReachState addUpArity(ReachState rs,
+ ReachTuple rt) {
assert rs != null;
assert rt != null;
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSTATE_ADDUPARITY_REACHTUPLE,
- rs,
- rt );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSTATE_ADDUPARITY_REACHTUPLE,
+ rs,
+ rt);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachState) result;
}
// the reason for this add is that we are aware a tuple
// with the same hrnID might already be in the state, so
// if it is we should combine properly
- ReachState rtOnly = ReachState.factory( rt );
- out = Canonical.unionUpArity( rs, rtOnly );
-
- op2result.put( op, out );
+ ReachState rtOnly = ReachState.factory(rt);
+ out = Canonical.unionUpArity(rs, rtOnly);
+
+ op2result.put(op, out);
return out;
}
-
- public static ReachState remove( ReachState rs, ReachTuple rt ) {
+
+ public static ReachState remove(ReachState rs, ReachTuple rt) {
assert rs != null;
assert rt != null;
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSTATE_REMOVE_REACHTUPLE,
- rs,
- rt );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSTATE_REMOVE_REACHTUPLE,
+ rs,
+ rt);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachState) result;
}
- // otherwise, no cached result...
+ // otherwise, no cached result...
ReachState out = new ReachState();
- out.reachTuples.addAll( rs.reachTuples );
- out.reachTuples.remove( rt );
+ out.reachTuples.addAll(rs.reachTuples);
+ out.reachTuples.remove(rt);
out.preds = rs.preds;
- out = (ReachState) makeCanonical( out );
- op2result.put( op, out );
+ out = (ReachState) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
-
-
- public static ReachState ageTuplesFrom( ReachState rs,
- AllocSite as ) {
+
+
+ public static ReachState ageTuplesFrom(ReachState rs,
+ AllocSite as) {
assert rs != null;
assert as != null;
assert rs.isCanonical();
assert as.isCanonical();
-
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSTATE_AGETUPLESFROM_ALLOCSITE,
- rs,
- as );
-
- Canonical result = op2result.get( op );
+
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSTATE_AGETUPLESFROM_ALLOCSITE,
+ rs,
+ as);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachState) result;
}
-
+
// otherwise, no cached result...
ReachState out = new ReachState();
Iterator<ReachTuple> rtItr = rs.iterator();
while( rtItr.hasNext() ) {
ReachTuple rt = rtItr.next();
- Integer hrnID = rt.getHrnID();
- int age = as.getAgeCategory( hrnID );
+ Integer hrnID = rt.getHrnID();
+ int age = as.getAgeCategory(hrnID);
// hrnIDs not associated with
// the site should be left alone, and
if( age == AllocSite.AGE_notInThisSite ||
rt.isOutOfContext()
) {
- out.reachTuples.add( rt );
+ out.reachTuples.add(rt);
} else if( age == AllocSite.AGE_summary ) {
// remember the summary tuple, but don't add it
} else {
assert age == AllocSite.AGE_in_I;
- Integer I = as.getAge( hrnID );
+ Integer I = as.getAge(hrnID);
assert I != null;
// otherwise, we change this hrnID to the
// next older hrnID
- Integer hrnIDToChangeTo = as.getIthOldest( I + 1 );
+ Integer hrnIDToChangeTo = as.getIthOldest(I + 1);
ReachTuple rtAged =
- Canonical.changeHrnIDTo( rt, hrnIDToChangeTo );
- out.reachTuples.add( rtAged );
+ Canonical.changeHrnIDTo(rt, hrnIDToChangeTo);
+ out.reachTuples.add(rtAged);
}
}
// Merge them by arity
// 4. (not handled) we found neither, do nothing
if( rtSummary != null && rtOldest == null ) {
- out.reachTuples.add( rtSummary );
+ out.reachTuples.add(rtSummary);
} else if( rtSummary == null && rtOldest != null ) {
- out.reachTuples.add( ReachTuple.factory( as.getSummary(),
- true, // multi
- rtOldest.getArity(),
- false // out-of-context
- )
- );
-
- } else if( rtSummary != null && rtOldest != null ) {
- out.reachTuples.add( Canonical.unionUpArity( rtSummary,
- ReachTuple.factory( as.getSummary(),
- true, // muli
- rtOldest.getArity(),
- false // out-of-context
- )
- )
- );
+ out.reachTuples.add(ReachTuple.factory(as.getSummary(),
+ true, // multi
+ rtOldest.getArity(),
+ false // out-of-context
+ )
+ );
+
+ } else if( rtSummary != null && rtOldest != null ) {
+ out.reachTuples.add(Canonical.unionUpArity(rtSummary,
+ ReachTuple.factory(as.getSummary(),
+ true, // multi
+ rtOldest.getArity(),
+ false // out-of-context
+ )
+ )
+ );
}
out.preds = rs.preds;
- out = (ReachState) makeCanonical( out );
- op2result.put( op, out );
+ out = (ReachState) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static ReachSet unionORpreds( ReachSet rs1,
- ReachSet rs2 ) {
+ public static ReachSet unionORpreds(ReachSet rs1,
+ ReachSet rs2) {
assert rs1 != null;
assert rs2 != null;
assert rs1.isCanonical();
assert rs2.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSET_UNIONORPREDS_REACHSET,
- rs1,
- rs2 );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSET_UNIONORPREDS_REACHSET,
+ rs1,
+ rs2);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachSet) result;
}
Iterator<ReachState> stateItr = rs1.iterator();
while( stateItr.hasNext() ) {
ReachState state1 = stateItr.next();
- ReachState state2 = rs2.containsIgnorePreds( state1 );
+ ReachState state2 = rs2.containsIgnorePreds(state1);
if( state2 != null ) {
- out.reachStates.add( ReachState.factory( state1.reachTuples,
- Canonical.join( state1.preds,
- state2.preds
- )
- ) );
+ out.reachStates.add(ReachState.factory(state1.reachTuples,
+ Canonical.join(state1.preds,
+ state2.preds
+ )
+ ) );
} else {
- out.reachStates.add( state1 );
+ out.reachStates.add(state1);
}
}
stateItr = rs2.iterator();
while( stateItr.hasNext() ) {
ReachState state2 = stateItr.next();
- ReachState state1 = rs1.containsIgnorePreds( state2 );
+ ReachState state1 = rs1.containsIgnorePreds(state2);
if( state1 == null ) {
- out.reachStates.add( state2 );
+ out.reachStates.add(state2);
}
}
- out = (ReachSet) makeCanonical( out );
- op2result.put( op, out );
+ out = (ReachSet) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
// avoid eploding states we'll take an overapproximation
// by preferring the predicates from the state in the FIRST
// set, so order of arguments matters
- public static ReachSet intersection( ReachSet rs1,
- ReachSet rs2 ) {
+ public static ReachSet intersection(ReachSet rs1,
+ ReachSet rs2) {
assert rs1 != null;
assert rs2 != null;
assert rs1.isCanonical();
assert rs2.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSET_INTERSECTION_REACHSET,
- rs1,
- rs2 );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSET_INTERSECTION_REACHSET,
+ rs1,
+ rs2);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachSet) result;
}
Iterator<ReachState> itr = rs1.iterator();
while( itr.hasNext() ) {
ReachState state1 = (ReachState) itr.next();
- ReachState state2 = rs2.containsIgnorePreds( state1 );
+ ReachState state2 = rs2.containsIgnorePreds(state1);
if( state2 != null ) {
- // prefer the predicates on state1, an overapproximation
- // of state1 preds AND state2 preds
- out.reachStates.add( state1 );
+ // prefer the predicates on state1, an overapproximation
+ // of state1 preds AND state2 preds
+ out.reachStates.add(state1);
}
}
- out = (ReachSet) makeCanonical( out );
- op2result.put( op, out );
+ out = (ReachSet) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static ReachSet add( ReachSet rs,
- ReachState state ) {
- return unionORpreds( rs,
- ReachSet.factory( state )
- );
+ public static ReachSet add(ReachSet rs,
+ ReachState state) {
+ return unionORpreds(rs,
+ ReachSet.factory(state)
+ );
}
- public static ReachSet remove( ReachSet rs,
- ReachState state ) {
+ public static ReachSet remove(ReachSet rs,
+ ReachState state) {
assert rs != null;
assert state != null;
assert rs.isCanonical();
assert state.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSET_REMOVE_REACHSTATE,
- rs,
- state );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSET_REMOVE_REACHSTATE,
+ rs,
+ state);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachSet) result;
}
- // otherwise, no cached result...
+ // otherwise, no cached result...
ReachSet out = new ReachSet();
- out.reachStates.addAll( rs.reachStates );
- out.reachStates.remove( state );
+ out.reachStates.addAll(rs.reachStates);
+ out.reachStates.remove(state);
- out = (ReachSet) makeCanonical( out );
- op2result.put( op, out );
+ out = (ReachSet) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static ReachSet applyChangeSet( ReachSet rs,
- ChangeSet cs,
- boolean keepSourceState ) {
+ public static ReachSet applyChangeSet(ReachSet rs,
+ ChangeSet cs,
+ boolean keepSourceState) {
assert rs != null;
assert cs != null;
assert rs.isCanonical();
assert cs.isCanonical();
- // this primitive operand stuff is just a way to
+ // this primitive operand stuff is just a way to
// ensure distinct inputs to a CanonicalOp
int primOperand;
if( keepSourceState ) {
primOperand = 0x2b;
}
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSET_APPLY_CHANGESET,
- rs,
- cs,
- primOperand );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSET_APPLY_CHANGESET,
+ rs,
+ cs,
+ primOperand);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachSet) result;
}
-
- // otherwise, no cached result...
+
+ // otherwise, no cached result...
ReachSet out = new ReachSet();
Iterator<ReachState> stateItr = rs.iterator();
while( ctItr.hasNext() ) {
ChangeTuple ct = ctItr.next();
- if( stateOrig.equalsIgnorePreds( ct.getStateToMatch() ) ) {
- // use the new state, but the original predicates
- ReachState stateNew =
- ReachState.factory( ct.getStateToAdd().reachTuples,
- stateOrig.preds
- );
- out.reachStates.add( stateNew );
+ if( stateOrig.equalsIgnorePreds(ct.getStateToMatch() ) ) {
+ // use the new state, but the original predicates
+ ReachState stateNew =
+ ReachState.factory(ct.getStateToAdd().reachTuples,
+ stateOrig.preds
+ );
+ out.reachStates.add(stateNew);
changeFound = true;
}
}
if( keepSourceState || !changeFound ) {
- out.reachStates.add( stateOrig );
+ out.reachStates.add(stateOrig);
}
}
- out = (ReachSet) makeCanonical( out );
- op2result.put( op, out );
+ out = (ReachSet) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static ChangeSet unionUpArityToChangeSet( ReachSet rsO,
- ReachSet rsR ) {
+ public static ChangeSet unionUpArityToChangeSet(ReachSet rsO,
+ ReachSet rsR) {
assert rsO != null;
assert rsR != null;
assert rsO.isCanonical();
assert rsR.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSET_UNIONTOCHANGESET_REACHSET,
- rsO,
- rsR );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSET_UNIONTOCHANGESET_REACHSET,
+ rsO,
+ rsR);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ChangeSet) result;
}
-
- // otherwise, no cached result...
+
+ // otherwise, no cached result...
ChangeSet out = ChangeSet.factory();
Iterator<ReachState> itrO = rsO.iterator();
Iterator<ReachTuple> itrRelement = r.iterator();
while( itrRelement.hasNext() ) {
ReachTuple rtR = itrRelement.next();
- ReachTuple rtO = o.containsHrnID( rtR.getHrnID(),
- rtR.isOutOfContext()
- );
+ ReachTuple rtO = o.containsHrnID(rtR.getHrnID(),
+ rtR.isOutOfContext()
+ );
if( rtO != null ) {
- theUnion = Canonical.add( theUnion,
- Canonical.unionUpArity( rtR,
- rtO
- )
- );
+ theUnion = Canonical.add(theUnion,
+ Canonical.unionUpArity(rtR,
+ rtO
+ )
+ );
} else {
- theUnion = Canonical.add( theUnion,
- rtR
- );
+ theUnion = Canonical.add(theUnion,
+ rtR
+ );
}
}
Iterator<ReachTuple> itrOelement = o.iterator();
while( itrOelement.hasNext() ) {
ReachTuple rtO = itrOelement.next();
- ReachTuple rtR = theUnion.containsHrnID( rtO.getHrnID(),
- rtO.isOutOfContext()
- );
+ ReachTuple rtR = theUnion.containsHrnID(rtO.getHrnID(),
+ rtO.isOutOfContext()
+ );
if( rtR == null ) {
- theUnion = Canonical.add( theUnion,
- rtO
- );
+ theUnion = Canonical.add(theUnion,
+ rtO
+ );
}
}
-
+
if( !theUnion.isEmpty() ) {
- out =
- Canonical.union( out,
- ChangeSet.factory(
- ChangeTuple.factory( o, theUnion )
- )
- );
+ out =
+ Canonical.union(out,
+ ChangeSet.factory(
+ ChangeTuple.factory(o, theUnion)
+ )
+ );
}
}
}
assert out.isCanonical();
- op2result.put( op, out );
+ op2result.put(op, out);
return out;
}
- public static ReachSet ageTuplesFrom( ReachSet rs,
- AllocSite as ) {
+ public static ReachSet ageTuplesFrom(ReachSet rs,
+ AllocSite as) {
assert rs != null;
assert as != null;
assert rs.isCanonical();
assert as.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSET_AGETUPLESFROM_ALLOCSITE,
- rs,
- as );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSET_AGETUPLESFROM_ALLOCSITE,
+ rs,
+ as);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachSet) result;
}
-
+
// otherwise, no cached result...
ReachSet out = new ReachSet();
Iterator<ReachState> itrS = rs.iterator();
while( itrS.hasNext() ) {
ReachState state = itrS.next();
- out.reachStates.add( Canonical.ageTuplesFrom( state, as ) );
+ out.reachStates.add(Canonical.ageTuplesFrom(state, as) );
}
-
- out = (ReachSet) makeCanonical( out );
- op2result.put( op, out );
- return out;
+
+ out = (ReachSet) makeCanonical(out);
+ op2result.put(op, out);
+ return out;
}
- public static ReachSet pruneBy( ReachSet rsO,
- ReachSet rsP ) {
+ public static ReachSet pruneBy(ReachSet rsO,
+ ReachSet rsP) {
assert rsO != null;
assert rsP != null;
assert rsO.isCanonical();
assert rsP.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSET_PRUNEBY_REACHSET,
- rsO,
- rsP );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSET_PRUNEBY_REACHSET,
+ rsO,
+ rsP);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachSet) result;
}
-
- // otherwise, no cached result...
+
+ // otherwise, no cached result...
ReachSet out = new ReachSet();
Iterator<ReachState> itrO = rsO.iterator();
while( itrP.hasNext() && !subsetExists ) {
ReachState stateP = itrP.next();
- if( stateP.isSubset( stateO ) ) {
+ if( stateP.isSubset(stateO) ) {
subsetExists = true;
}
}
-
+
if( subsetExists ) {
- out.reachStates.add( stateO );
+ out.reachStates.add(stateO);
}
}
- out = (ReachSet) makeCanonical( out );
- op2result.put( op, out );
- return out;
+ out = (ReachSet) makeCanonical(out);
+ op2result.put(op, out);
+ return out;
}
- public static ChangeSet union( ChangeSet cs1,
- ChangeSet cs2 ) {
+ public static ChangeSet union(ChangeSet cs1,
+ ChangeSet cs2) {
assert cs1 != null;
assert cs2 != null;
assert cs1.isCanonical();
assert cs2.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.CHANGESET_UNION_CHANGESET,
- cs1,
- cs2 );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.CHANGESET_UNION_CHANGESET,
+ cs1,
+ cs2);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ChangeSet) result;
}
-
- // otherwise, no cached result...
+
+ // otherwise, no cached result...
ChangeSet out = new ChangeSet();
- out.changeTuples.addAll( cs1.changeTuples );
- out.changeTuples.addAll( cs2.changeTuples );
+ out.changeTuples.addAll(cs1.changeTuples);
+ out.changeTuples.addAll(cs2.changeTuples);
- out = (ChangeSet) makeCanonical( out );
- op2result.put( op, out );
- return out;
+ out = (ChangeSet) makeCanonical(out);
+ op2result.put(op, out);
+ return out;
}
- public static ChangeSet add( ChangeSet cs,
- ChangeTuple ct ) {
+ public static ChangeSet add(ChangeSet cs,
+ ChangeTuple ct) {
assert cs != null;
assert ct != null;
assert cs.isCanonical();
assert ct.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.CHANGESET_UNION_CHANGETUPLE,
- cs,
- ct );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.CHANGESET_UNION_CHANGETUPLE,
+ cs,
+ ct);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ChangeSet) result;
}
-
- // otherwise, no cached result...
+
+ // otherwise, no cached result...
ChangeSet out = new ChangeSet();
- out.changeTuples.addAll( cs.changeTuples );
- out.changeTuples.add( ct );
-
- out = (ChangeSet) makeCanonical( out );
- op2result.put( op, out );
- return out;
+ out.changeTuples.addAll(cs.changeTuples);
+ out.changeTuples.add(ct);
+
+ out = (ChangeSet) makeCanonical(out);
+ op2result.put(op, out);
+ return out;
}
- public static ExistPredSet join( ExistPredSet eps1,
- ExistPredSet eps2 ) {
+ public static ExistPredSet join(ExistPredSet eps1,
+ ExistPredSet eps2) {
assert eps1 != null;
assert eps2 != null;
assert eps1.isCanonical();
assert eps2.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.EXISTPREDSET_JOIN_EXISTPREDSET,
- eps1,
- eps2 );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.EXISTPREDSET_JOIN_EXISTPREDSET,
+ eps1,
+ eps2);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ExistPredSet) result;
}
-
- // otherwise, no cached result...
+
+ // otherwise, no cached result...
ExistPredSet out = new ExistPredSet();
- out.preds.addAll( eps1.preds );
- out.preds.addAll( eps2.preds );
+ out.preds.addAll(eps1.preds);
+ out.preds.addAll(eps2.preds);
- out = (ExistPredSet) makeCanonical( out );
- op2result.put( op, out );
- return out;
+ out = (ExistPredSet) makeCanonical(out);
+ op2result.put(op, out);
+ return out;
}
- public static ExistPredSet add( ExistPredSet eps,
- ExistPred ep ) {
+ public static ExistPredSet add(ExistPredSet eps,
+ ExistPred ep) {
assert eps != null;
assert eps.isCanonical();
assert ep.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.EXISTPREDSET_ADD_EXISTPRED,
- eps,
- ep );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.EXISTPREDSET_ADD_EXISTPRED,
+ eps,
+ ep);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ExistPredSet) result;
}
-
- // otherwise, no cached result...
+
+ // otherwise, no cached result...
ExistPredSet out = new ExistPredSet();
- out.preds.addAll( eps.preds );
- out.preds.add( ep );
-
- out = (ExistPredSet) makeCanonical( out );
- op2result.put( op, out );
- return out;
+ out.preds.addAll(eps.preds);
+ out.preds.add(ep);
+
+ out = (ExistPredSet) makeCanonical(out);
+ op2result.put(op, out);
+ return out;
}
- public static ReachSet toCallerContext( ReachSet rs,
- AllocSite as ) {
+ public static ReachSet toCallerContext(ReachSet rs,
+ AllocSite as) {
assert rs != null;
assert as != null;
assert rs.isCanonical();
assert as.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSET_TOCALLERCONTEXT_ALLOCSITE,
- rs,
- as );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSET_TOCALLERCONTEXT_ALLOCSITE,
+ rs,
+ as);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachSet) result;
}
Iterator<ReachState> itr = rs.iterator();
while( itr.hasNext() ) {
ReachState state = itr.next();
- out = Canonical.unionORpreds( out,
- Canonical.toCallerContext( state, as )
- );
+ out = Canonical.unionORpreds(out,
+ Canonical.toCallerContext(state, as)
+ );
}
assert out.isCanonical();
- op2result.put( op, out );
+ op2result.put(op, out);
return out;
}
-
- public static ReachSet toCallerContext( ReachState state,
- AllocSite as ) {
+
+ public static ReachSet toCallerContext(ReachState state,
+ AllocSite as) {
assert state != null;
assert as != null;
assert state.isCanonical();
assert as.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSTATE_TOCALLERCONTEXT_ALLOCSITE,
- state,
- as );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSTATE_TOCALLERCONTEXT_ALLOCSITE,
+ state,
+ as);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachSet) result;
}
// -> 0?
// -> 1?
// -> 2S?
- // 2S?* -> {2S*, 2S?*}
+ // 2S?* -> {2S*, 2S?*}
boolean found2Sooc = false;
while( itr.hasNext() ) {
ReachTuple rt = itr.next();
- int age = as.getAgeCategory( rt.getHrnID() );
+ int age = as.getAgeCategory(rt.getHrnID() );
if( age == AllocSite.AGE_notInThisSite ) {
- // things not from the site just go back in
- baseState = Canonical.addUpArity( baseState, rt );
+ // things not from the site just go back in
+ baseState = Canonical.addUpArity(baseState, rt);
} else if( age == AllocSite.AGE_summary ) {
- if( rt.isOutOfContext() ) {
- // if its out-of-context, we only deal here with the ZERO-OR-MORE
- // arity, if ARITY-ONE we'll branch the base state after the loop
- if( rt.getArity() == ReachTuple.ARITY_ZEROORMORE ) {
- // add two overly conservative symbols to reach state (PUNTING)
-
- baseState = Canonical.addUpArity( baseState,
- ReachTuple.factory( as.getSummary(),
- true, // multi
- ReachTuple.ARITY_ZEROORMORE,
- false // out-of-context
- )
- );
+ if( rt.isOutOfContext() ) {
+ // if it's out-of-context, we only deal here with the ZERO-OR-MORE
+ // arity, if ARITY-ONE we'll branch the base state after the loop
+ if( rt.getArity() == ReachTuple.ARITY_ZEROORMORE ) {
+ // add two overly conservative symbols to reach state (PUNTING)
+
+ baseState = Canonical.addUpArity(baseState,
+ ReachTuple.factory(as.getSummary(),
+ true, // multi
+ ReachTuple.ARITY_ZEROORMORE,
+ false // out-of-context
+ )
+ );
+
+ baseState = Canonical.addUpArity(baseState,
+ ReachTuple.factory(as.getSummary(),
+ true, // multi
+ ReachTuple.ARITY_ZEROORMORE,
+ true // out-of-context
+ )
+ );
+ } else {
+ assert rt.getArity() == ReachTuple.ARITY_ONE;
+ found2Sooc = true;
+ }
- baseState = Canonical.addUpArity( baseState,
- ReachTuple.factory( as.getSummary(),
- true, // multi
- ReachTuple.ARITY_ZEROORMORE,
- true // out-of-context
- )
- );
- } else {
- assert rt.getArity() == ReachTuple.ARITY_ONE;
- found2Sooc = true;
- }
-
- } else {
- // the in-context just becomes shadow
- baseState = Canonical.addUpArity( baseState,
- ReachTuple.factory( as.getSummaryShadow(),
- true, // multi
- rt.getArity(),
- false // out-of-context
- )
- );
- }
+ } else {
+ // the in-context just becomes shadow
+ baseState = Canonical.addUpArity(baseState,
+ ReachTuple.factory(as.getSummaryShadow(),
+ true, // multi
+ rt.getArity(),
+ false // out-of-context
+ )
+ );
+ }
} else {
- // otherwise age is in range [0, k]
- Integer I = as.getAge( rt.getHrnID() );
- assert I != null;
- assert !rt.isMultiObject();
- assert rt.getArity() == ReachTuple.ARITY_ONE;
-
- if( rt.isOutOfContext() ) {
- // becomes the in-context version
- baseState = Canonical.addUpArity( baseState,
- ReachTuple.factory( rt.getHrnID(),
- false, // multi
- ReachTuple.ARITY_ONE,
- false // out-of-context
- )
- );
-
- } else {
- // otherwise the ith symbol becomes shadowed
- baseState = Canonical.addUpArity( baseState,
- ReachTuple.factory( -rt.getHrnID(),
- false, // multi
- ReachTuple.ARITY_ONE,
- false // out-of-context
- )
- );
- }
+ // otherwise age is in range [0, k]
+ Integer I = as.getAge(rt.getHrnID() );
+ assert I != null;
+ assert !rt.isMultiObject();
+ assert rt.getArity() == ReachTuple.ARITY_ONE;
+
+ if( rt.isOutOfContext() ) {
+ // becomes the in-context version
+ baseState = Canonical.addUpArity(baseState,
+ ReachTuple.factory(rt.getHrnID(),
+ false, // multi
+ ReachTuple.ARITY_ONE,
+ false // out-of-context
+ )
+ );
+
+ } else {
+ // otherwise the ith symbol becomes shadowed
+ baseState = Canonical.addUpArity(baseState,
+ ReachTuple.factory(-rt.getHrnID(),
+ false, // multi
+ ReachTuple.ARITY_ONE,
+ false // out-of-context
+ )
+ );
+ }
}
}
if( found2Sooc ) {
// make a branch with every possibility of the one-to-many
// mapping for 2S? appended to the baseState
- out = Canonical.add( out,
- Canonical.addUpArity( baseState,
- ReachTuple.factory( as.getSummary(),
- true, // multi
- ReachTuple.ARITY_ONE,
- false // out-of-context
- )
- )
- );
-
- out = Canonical.add( out,
- Canonical.addUpArity( baseState,
- ReachTuple.factory( as.getSummary(),
- true, // multi
- ReachTuple.ARITY_ONE,
- true // out-of-context
- )
- )
- );
+ out = Canonical.add(out,
+ Canonical.addUpArity(baseState,
+ ReachTuple.factory(as.getSummary(),
+ true, // multi
+ ReachTuple.ARITY_ONE,
+ false // out-of-context
+ )
+ )
+ );
+
+ out = Canonical.add(out,
+ Canonical.addUpArity(baseState,
+ ReachTuple.factory(as.getSummary(),
+ true, // multi
+ ReachTuple.ARITY_ONE,
+ true // out-of-context
+ )
+ )
+ );
for( int i = 0; i < as.getAllocationDepth(); ++i ) {
- out = Canonical.add( out,
- Canonical.addUpArity( baseState,
- ReachTuple.factory( as.getIthOldest( i ),
- false, // multi
- ReachTuple.ARITY_ONE,
- true // out-of-context
- )
- )
- );
+ out = Canonical.add(out,
+ Canonical.addUpArity(baseState,
+ ReachTuple.factory(as.getIthOldest(i),
+ false, // multi
+ ReachTuple.ARITY_ONE,
+ true // out-of-context
+ )
+ )
+ );
}
} else {
- // just use current baseState
- out = Canonical.add( out,
- baseState );
+ // just use current baseState
+ out = Canonical.add(out,
+ baseState);
}
assert out.isCanonical();
- op2result.put( op, out );
+ op2result.put(op, out);
return out;
}
- public static ReachSet unshadow( ReachSet rs,
- AllocSite as ) {
+ public static ReachSet unshadow(ReachSet rs,
+ AllocSite as) {
assert rs != null;
assert as != null;
assert rs.isCanonical();
assert as.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSET_UNSHADOW_ALLOCSITE,
- rs,
- as );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSET_UNSHADOW_ALLOCSITE,
+ rs,
+ as);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachSet) result;
}
Iterator<ReachState> itr = rs.iterator();
while( itr.hasNext() ) {
ReachState state = itr.next();
- out = Canonical.add( out,
- Canonical.unshadow( state, as )
- );
+ out = Canonical.add(out,
+ Canonical.unshadow(state, as)
+ );
}
assert out.isCanonical();
- op2result.put( op, out );
+ op2result.put(op, out);
return out;
}
- public static ReachState unshadow( ReachState state,
- AllocSite as ) {
+ public static ReachState unshadow(ReachState state,
+ AllocSite as) {
assert state != null;
assert as != null;
assert state.isCanonical();
assert as.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSTATE_UNSHADOW_ALLOCSITE,
- state,
- as );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSTATE_UNSHADOW_ALLOCSITE,
+ state,
+ as);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachState) result;
}
// -0 -> 0
// -1 -> 1
// -2S -> 2S
-
+
// otherwise, no cached result...
ReachState out = ReachState.factory();
Iterator<ReachTuple> itr = state.iterator();
while( itr.hasNext() ) {
ReachTuple rt = itr.next();
- int age = as.getShadowAgeCategory( rt.getHrnID() );
-
+ int age = as.getShadowAgeCategory(rt.getHrnID() );
+
if( age == AllocSite.SHADOWAGE_notInThisSite ) {
- // things not from the site just go back in
- out = Canonical.addUpArity( out, rt );
+ // things not from the site just go back in
+ out = Canonical.addUpArity(out, rt);
} else {
- assert !rt.isOutOfContext();
-
- // otherwise unshadow it
- out = Canonical.addUpArity( out,
- ReachTuple.factory( -rt.getHrnID(),
- rt.isMultiObject(),
- rt.getArity(),
- false
- )
- );
+ assert !rt.isOutOfContext();
+
+ // otherwise unshadow it
+ out = Canonical.addUpArity(out,
+ ReachTuple.factory(-rt.getHrnID(),
+ rt.isMultiObject(),
+ rt.getArity(),
+ false
+ )
+ );
}
}
- out = Canonical.attach( out,
- state.getPreds()
- );
+ out = Canonical.attach(out,
+ state.getPreds()
+ );
assert out.isCanonical();
- op2result.put( op, out );
+ op2result.put(op, out);
return out;
}
- public static ReachState changePredsTo( ReachState rs,
- ExistPredSet preds ) {
+ public static ReachState changePredsTo(ReachState rs,
+ ExistPredSet preds) {
assert rs != null;
assert rs.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSTATE_CHANGEPREDSTO_EXISTPREDSET,
- rs,
- preds );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSTATE_CHANGEPREDSTO_EXISTPREDSET,
+ rs,
+ preds);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachState) result;
}
-
+
// otherwise, no cached result...
ReachState out = new ReachState();
// just remake state with the true predicate attached
- out.reachTuples.addAll( rs.reachTuples );
+ out.reachTuples.addAll(rs.reachTuples);
out.preds = preds;
-
- out = (ReachState) makeCanonical( out );
- op2result.put( op, out );
+
+ out = (ReachState) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static ReachSet changePredsTo( ReachSet rs,
- ExistPredSet preds ) {
+ public static ReachSet changePredsTo(ReachSet rs,
+ ExistPredSet preds) {
assert rs != null;
assert rs.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.REACHSET_CHANGEPREDSTO_EXISTPREDSET,
- rs,
- preds );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.REACHSET_CHANGEPREDSTO_EXISTPREDSET,
+ rs,
+ preds);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (ReachSet) result;
}
-
+
// otherwise, no cached result...
ReachSet out = ReachSet.factory();
Iterator<ReachState> itr = rs.iterator();
while( itr.hasNext() ) {
ReachState state = itr.next();
- out = Canonical.add( out,
- Canonical.changePredsTo( state,
- preds
- )
- );
+ out = Canonical.add(out,
+ Canonical.changePredsTo(state,
+ preds
+ )
+ );
}
-
- out = (ReachSet) makeCanonical( out );
- op2result.put( op, out );
+
+ out = (ReachSet) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static Taint attach( Taint t,
- ExistPredSet preds ) {
+ public static Taint attach(Taint t,
+ ExistPredSet preds) {
assert t != null;
assert preds != null;
assert t.isCanonical();
assert preds.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.TAINT_ATTACH_EXISTPREDSET,
- t,
- preds );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.TAINT_ATTACH_EXISTPREDSET,
+ t,
+ preds);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (Taint) result;
}
-
+
// otherwise, no cached result...
- Taint out = new Taint( t );
- out.preds = Canonical.join( t.preds,
- preds );
-
- out = (Taint) makeCanonical( out );
- op2result.put( op, out );
+ Taint out = new Taint(t);
+ out.preds = Canonical.join(t.preds,
+ preds);
+
+ out = (Taint) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static TaintSet add( TaintSet ts,
- Taint t ) {
+ public static TaintSet add(TaintSet ts,
+ Taint t) {
assert ts != null;
assert t != null;
assert ts.isCanonical();
assert t.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.TAINTSET_ADD_TAINT,
- ts,
- t );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.TAINTSET_ADD_TAINT,
+ ts,
+ t);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (TaintSet) result;
}
-
- // otherwise, no cached result...
+
+ // otherwise, no cached result...
TaintSet out = new TaintSet();
- out.taints.addAll( ts.taints );
- out.taints.add( t );
-
- out = (TaintSet) makeCanonical( out );
- op2result.put( op, out );
- return out;
+ out.taints.addAll(ts.taints);
+ out.taints.add(t);
+
+ out = (TaintSet) makeCanonical(out);
+ op2result.put(op, out);
+ return out;
}
- public static TaintSet addPTR( TaintSet ts,
- Taint t ) {
+ public static TaintSet addPTR(TaintSet ts,
+ Taint t) {
return add(ts, t);
}
- public static TaintSet union( TaintSet ts1,
- TaintSet ts2 ) {
+ public static TaintSet union(TaintSet ts1,
+ TaintSet ts2) {
assert ts1 != null;
assert ts2 != null;
assert ts1.isCanonical();
assert ts2.isCanonical();
-
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.TAINTSET_UNION_TAINTSET,
- ts1,
- ts2 );
-
- Canonical result = op2result.get( op );
+
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.TAINTSET_UNION_TAINTSET,
+ ts1,
+ ts2);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (TaintSet) result;
}
-
- // otherwise, no cached result...
+
+ // otherwise, no cached result...
TaintSet out = new TaintSet();
// first add everything from 1, and if it was also in 2
Iterator<Taint> tItr = ts1.iterator();
while( tItr.hasNext() ) {
Taint t1 = tItr.next();
- Taint t2 = ts2.containsIgnorePreds( t1 );
+ Taint t2 = ts2.containsIgnorePreds(t1);
if( t2 != null ) {
- Taint tNew = new Taint( t1 );
- tNew.preds = Canonical.join( t1.preds,
- t2.preds
- );
- tNew = (Taint) makeCanonical( tNew );
- out.taints.add( tNew );
+ Taint tNew = new Taint(t1);
+ tNew.preds = Canonical.join(t1.preds,
+ t2.preds
+ );
+ tNew = (Taint) makeCanonical(tNew);
+ out.taints.add(tNew);
} else {
- out.taints.add( t1 );
+ out.taints.add(t1);
}
}
-
+
// then add everything in 2 that wasn't in 1
tItr = ts2.iterator();
while( tItr.hasNext() ) {
Taint t2 = tItr.next();
- Taint t1 = ts1.containsIgnorePreds( t2 );
+ Taint t1 = ts1.containsIgnorePreds(t2);
if( t1 == null ) {
- out.taints.add( t2 );
+ out.taints.add(t2);
}
}
- out = (TaintSet) makeCanonical( out );
- op2result.put( op, out );
- return out;
+ out = (TaintSet) makeCanonical(out);
+ op2result.put(op, out);
+ return out;
}
- public static TaintSet unionPTR( TaintSet ts1,
- TaintSet ts2 ) {
+ public static TaintSet unionPTR(TaintSet ts1,
+ TaintSet ts2) {
assert ts1 != null;
assert ts2 != null;
assert ts1.isCanonical();
assert ts2.isCanonical();
-
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.TAINTSET_UNION_TAINTSET,
- ts1,
- ts2 );
-
- Canonical result = op2result.get( op );
+
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.TAINTSET_UNION_TAINTSET,
+ ts1,
+ ts2);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (TaintSet) result;
}
-
- // otherwise, no cached result...
+
+ // otherwise, no cached result...
TaintSet out = new TaintSet();
out.taints.addAll(ts1.taints);
out.taints.addAll(ts2.taints);
out= (TaintSet) Canonical.makeCanonical(out);
- op2result.put( op, out );
- return out;
+ op2result.put(op, out);
+ return out;
}
- public static TaintSet unionORpreds( TaintSet ts1,
- TaintSet ts2 ) {
+ public static TaintSet unionORpreds(TaintSet ts1,
+ TaintSet ts2) {
assert ts1 != null;
assert ts2 != null;
assert ts1.isCanonical();
assert ts2.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.TAINTSET_UNIONORPREDS_TAINTSET,
- ts1,
- ts2 );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.TAINTSET_UNIONORPREDS_TAINTSET,
+ ts1,
+ ts2);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (TaintSet) result;
}
Iterator<Taint> tItr = ts1.iterator();
while( tItr.hasNext() ) {
Taint t1 = tItr.next();
- Taint t2 = ts2.containsIgnorePreds( t1 );
-
+ Taint t2 = ts2.containsIgnorePreds(t1);
+
if( t2 != null ) {
- Taint tNew = new Taint( t1 );
- tNew.preds = Canonical.join( t1.preds,
- t2.preds
- );
- tNew = (Taint) makeCanonical( tNew );
- out.taints.add( tNew );
+ Taint tNew = new Taint(t1);
+ tNew.preds = Canonical.join(t1.preds,
+ t2.preds
+ );
+ tNew = (Taint) makeCanonical(tNew);
+ out.taints.add(tNew);
} else {
- out.taints.add( t1 );
+ out.taints.add(t1);
}
}
tItr = ts2.iterator();
while( tItr.hasNext() ) {
Taint t2 = tItr.next();
- Taint t1 = ts1.containsIgnorePreds( t2 );
-
+ Taint t1 = ts1.containsIgnorePreds(t2);
+
if( t1 == null ) {
- out.taints.add( t2 );
+ out.taints.add(t2);
}
}
-
- out = (TaintSet) makeCanonical( out );
- op2result.put( op, out );
+
+ out = (TaintSet) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
// BOO, HISS! SESE (rblock) operand does NOT extend
// Canonical, so we can't cache this op by its
// canonical arguments--THINK ABOUT A BETTER WAY!
- public static TaintSet removeInContextTaints( TaintSet ts,
- FlatSESEEnterNode sese ) {
+ public static TaintSet removeInContextTaints(TaintSet ts,
+ FlatSESEEnterNode sese) {
assert ts != null;
assert ts.isCanonical();
assert sese != null;
// unaffected, and if the taint has a non-empty predicate
// it is out of context so it should go through, too
if( t.getSESE() == null ||
- !t.getSESE().equals( sese ) ||
+ !t.getSESE().equals(sese) ||
!t.getPreds().isEmpty()
) {
- out.taints.add( t );
+ out.taints.add(t);
}
}
-
- out = (TaintSet) makeCanonical( out );
+
+ out = (TaintSet) makeCanonical(out);
//op2result.put( op, out ); CRY CRY
return out;
}
// BOO, HISS! SESE (rblock) operand does NOT extend
// Canonical, so we can't cache this op by its
// canonical arguments--THINK ABOUT A BETTER WAY!
- public static TaintSet removeSESETaints( TaintSet ts,
- Set<FlatSESEEnterNode> seseSet ) {
+ public static TaintSet removeSESETaints(TaintSet ts,
+ Set<FlatSESEEnterNode> seseSet) {
assert ts != null;
assert ts.isCanonical();
// it is out of context so it should go through, too
if( t.getSESE() == null ||
!seseSet.contains(t)) {
- out.taints.add( t );
+ out.taints.add(t);
}
}
-
- out = (TaintSet) makeCanonical( out );
+
+ out = (TaintSet) makeCanonical(out);
//op2result.put( op, out ); CRY CRY
return out;
}
- public static TaintSet removeInContextTaintsNP( TaintSet ts,
- FlatSESEEnterNode sese ) {
+ public static TaintSet removeInContextTaintsNP(TaintSet ts,
+ FlatSESEEnterNode sese) {
assert ts != null;
assert ts.isCanonical();
// unaffected, and if the taint has a non-empty predicate
// it is out of context so it should go through, too
if( t.getSESE()!=null && t.getSESE()!=sese) {
- out.taints.add( t );
+ out.taints.add(t);
}
}
-
- out = (TaintSet) makeCanonical( out );
+
+ out = (TaintSet) makeCanonical(out);
return out;
}
- public static TaintSet removeStallSiteTaints( TaintSet ts ) {
+ public static TaintSet removeStallSiteTaints(TaintSet ts) {
assert ts != null;
assert ts.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.TAINTSET_REMOVESTALLSITETAINTS,
- ts,
- ts );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.TAINTSET_REMOVESTALLSITETAINTS,
+ ts,
+ ts);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (TaintSet) result;
}
-
+
// otherwise, no cached result...
TaintSet out = new TaintSet();
// only take non-stall site taints onward
if( t.getStallSite() == null ) {
- out.taints.add( t );
+ out.taints.add(t);
}
}
-
- out = (TaintSet) makeCanonical( out );
- op2result.put( op, out );
+
+ out = (TaintSet) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static Taint changePredsTo( Taint t,
- ExistPredSet preds ) {
+ public static Taint changePredsTo(Taint t,
+ ExistPredSet preds) {
assert t != null;
assert t.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.TAINT_CHANGEPREDSTO_EXISTPREDSET,
- t,
- preds );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.TAINT_CHANGEPREDSTO_EXISTPREDSET,
+ t,
+ preds);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (Taint) result;
}
-
+
// otherwise, no cached result...
- Taint out = new Taint( t.sese,
- t.stallSite,
- t.var,
- t.allocSite,
- t.fnDefined,
- preds
- );
-
- out = (Taint) makeCanonical( out );
- op2result.put( op, out );
+ Taint out = new Taint(t.sese,
+ t.stallSite,
+ t.var,
+ t.allocSite,
+ t.fnDefined,
+ preds
+ );
+
+ out = (Taint) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
- public static TaintSet changePredsTo( TaintSet ts,
- ExistPredSet preds ) {
+ public static TaintSet changePredsTo(TaintSet ts,
+ ExistPredSet preds) {
assert ts != null;
assert ts.isCanonical();
- CanonicalOp op =
- new CanonicalOp( CanonicalOp.TAINTSET_CHANGEPREDSTO_EXISTPREDSET,
- ts,
- preds );
-
- Canonical result = op2result.get( op );
+ CanonicalOp op =
+ new CanonicalOp(CanonicalOp.TAINTSET_CHANGEPREDSTO_EXISTPREDSET,
+ ts,
+ preds);
+
+ Canonical result = op2result.get(op);
if( result != null ) {
return (TaintSet) result;
}
-
+
// otherwise, no cached result...
TaintSet out = TaintSet.factory();
Iterator<Taint> itr = ts.iterator();
while( itr.hasNext() ) {
Taint t = itr.next();
- out = Canonical.add( out,
- Canonical.changePredsTo( t, preds )
- );
+ out = Canonical.add(out,
+ Canonical.changePredsTo(t, preds)
+ );
}
-
- out = (TaintSet) makeCanonical( out );
- op2result.put( op, out );
+
+ out = (TaintSet) makeCanonical(out);
+ op2result.put(op, out);
return out;
}
// BOO, HISS! FlatNode operand does NOT extend
// Canonical, so we can't cache this op by its
// canonical arguments--THINK ABOUT A BETTER WAY!
- public static Taint changeWhereDefined( Taint t,
- FlatNode pp ) {
+ public static Taint changeWhereDefined(Taint t,
+ FlatNode pp) {
assert t != null;
assert t.isCanonical();
// never a cached result...
- Taint out = new Taint( t.sese,
- t.stallSite,
- t.var,
- t.allocSite,
- pp,
- t.preds
- );
-
- out = (Taint) makeCanonical( out );
+ Taint out = new Taint(t.sese,
+ t.stallSite,
+ t.var,
+ t.allocSite,
+ pp,
+ t.preds
+ );
+
+ out = (Taint) makeCanonical(out);
//op2result.put( op, out ); CRY CRY
return out;
}
// BOO, HISS! FlatNode operand does NOT extend
// Canonical, so we can't cache this op by its
// canonical arguments--THINK ABOUT A BETTER WAY!
- public static TaintSet changeWhereDefined( TaintSet ts,
- FlatNode pp ) {
+ public static TaintSet changeWhereDefined(TaintSet ts,
+ FlatNode pp) {
assert ts != null;
assert ts.isCanonical();
Iterator<Taint> itr = ts.iterator();
while( itr.hasNext() ) {
Taint t = itr.next();
- out = Canonical.add( out,
- Canonical.changeWhereDefined( t, pp )
- );
+ out = Canonical.add(out,
+ Canonical.changeWhereDefined(t, pp)
+ );
}
-
- out = (TaintSet) makeCanonical( out );
+
+ out = (TaintSet) makeCanonical(out);
//op2result.put( op, out ); CRY CRY
return out;
}
package Analysis.Disjoint;
-// a CanonicalOperation defines an operation on
+// a CanonicalOperation defines an operation on
// Canonical objects. The Canonical class maps
// an op to its result, so when you ask the
// Canonical static methods to do an op that is
protected int opCode;
protected Canonical operand1;
protected Canonical operand2;
- protected int operand3;
+ protected int operand3;
- public CanonicalOp( int opc,
- Canonical op1,
- Canonical op2 ) {
+ public CanonicalOp(int opc,
+ Canonical op1,
+ Canonical op2) {
this( opc, op1, op2, PRIM_OP_UNUSED );
}
- public CanonicalOp( int opc,
- Canonical op1,
- Canonical op2,
- int op3 ) {
+ public CanonicalOp(int opc,
+ Canonical op1,
+ Canonical op2,
+ int op3) {
assert op1.isCanonical();
assert op2.isCanonical();
opCode = opc;
operand2 = op2;
operand3 = op3;
}
-
+
public int hashCode() {
return opCode ^
- (operand1.getCanonicalValue() << 2) ^
- (operand2.getCanonicalValue() << 1) ^
- (operand3 << 3);
+ (operand1.getCanonicalValue() << 2) ^
+ (operand2.getCanonicalValue() << 1) ^
+ (operand3 << 3);
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
CanonicalOp co = (CanonicalOp) o;
return opCode == co.opCode &&
- (operand1.getCanonicalValue() == co.operand1.getCanonicalValue()) &&
- (operand2.getCanonicalValue() == co.operand2.getCanonicalValue()) &&
- operand3 == co.operand3;
+ (operand1.getCanonicalValue() == co.operand1.getCanonicalValue()) &&
+ (operand2.getCanonicalValue() == co.operand2.getCanonicalValue()) &&
+ operand3 == co.operand3;
}
}
public static ChangeSet factory() {
ChangeSet out = new ChangeSet();
- out = (ChangeSet) Canonical.makeCanonical( out );
+ out = (ChangeSet) Canonical.makeCanonical(out);
return out;
}
- public static ChangeSet factory( ChangeTuple ct ) {
+ public static ChangeSet factory(ChangeTuple ct) {
assert ct != null;
assert ct.isCanonical();
ChangeSet out = new ChangeSet();
- out.changeTuples.add( ct );
- out = (ChangeSet) Canonical.makeCanonical( out );
+ out.changeTuples.add(ct);
+ out = (ChangeSet) Canonical.makeCanonical(out);
return out;
- }
+ }
protected ChangeSet() {
changeTuples = new HashSet<ChangeTuple>();
return changeTuples.isEmpty();
}
- public boolean isSubset( ChangeSet ctsIn ) {
+ public boolean isSubset(ChangeSet ctsIn) {
assert ctsIn != null;
- return ctsIn.changeTuples.containsAll( this.changeTuples );
+ return ctsIn.changeTuples.containsAll(this.changeTuples);
}
- public boolean equalsSpecific( Object o ) {
+ public boolean equalsSpecific(Object o) {
if( o == null ) {
return false;
}
}
ChangeSet cts = (ChangeSet) o;
- return changeTuples.equals( cts.changeTuples );
+ return changeTuples.equals(cts.changeTuples);
}
public int hashCodeSpecific() {
protected ReachState toMatch;
protected ReachState toAdd;
- public static ChangeTuple factory( ReachState toMatch,
- ReachState toAdd ) {
+ public static ChangeTuple factory(ReachState toMatch,
+ ReachState toAdd) {
// we don't care about the predicates hanging on
// change tuple states, so always set them to empty
// to ensure change tuple sets work out
ReachState toMatchNoPreds =
- ReachState.factory( toMatch.reachTuples,
- ExistPredSet.factory()
- );
+ ReachState.factory(toMatch.reachTuples,
+ ExistPredSet.factory()
+ );
ReachState toAddNoPreds =
- ReachState.factory( toAdd.reachTuples,
- ExistPredSet.factory()
- );
- ChangeTuple out = new ChangeTuple( toMatchNoPreds,
- toAddNoPreds );
- out = (ChangeTuple) Canonical.makeCanonical( out );
+ ReachState.factory(toAdd.reachTuples,
+ ExistPredSet.factory()
+ );
+ ChangeTuple out = new ChangeTuple(toMatchNoPreds,
+ toAddNoPreds);
+ out = (ChangeTuple) Canonical.makeCanonical(out);
return out;
}
- protected ChangeTuple( ReachState toMatch,
- ReachState toAdd ) {
+ protected ChangeTuple(ReachState toMatch,
+ ReachState toAdd) {
this.toMatch = toMatch;
this.toAdd = toAdd;
}
public ReachState getStateToAdd() {
return toAdd;
}
-
- public boolean equalsSpecific( Object o ) {
+
+ public boolean equalsSpecific(Object o) {
if( o == null ) {
return false;
}
-
+
if( !(o instanceof ChangeTuple) ) {
return false;
}
ChangeTuple ct = (ChangeTuple) o;
return
- toMatch.equals( ct.toMatch ) &&
- toAdd.equals( ct.toAdd );
+ toMatch.equals(ct.toMatch) &&
+ toAdd.equals(ct.toAdd);
}
public int hashCodeSpecific() {
public class DescriptorComparator implements Comparator {
- public int compare( Object o1, Object o2 ) {
+ public int compare(Object o1, Object o2) {
assert o1 instanceof Descriptor;
assert o2 instanceof Descriptor;
return d1.getNum() - d2.getNum();
}
-
+
}
private int priority;
private Descriptor d;
- public DescriptorQWrapper( Integer p, Descriptor d ) {
+ public DescriptorQWrapper(Integer p, Descriptor d) {
priority = p.intValue();
this.d = d;
}
- public DescriptorQWrapper( int p, Descriptor d ) {
+ public DescriptorQWrapper(int p, Descriptor d) {
priority = p;
this.d = d;
}
public Descriptor getDescriptor() {
return d;
}
-
- public int compareTo( Object o ) throws ClassCastException {
+
+ public int compareTo(Object o) throws ClassCastException {
if( !(o instanceof DescriptorQWrapper) ) {
throw new ClassCastException();
if( !( o instanceof DescriptorQWrapper) ) {
return false;
}
-
+
DescriptorQWrapper dqw = (DescriptorQWrapper) o;
- return d.equals( dqw.d );
- }
+ return d.equals(dqw.d);
+ }
}
public class DisjointAnalysis implements HeapAnalysis {
-
+
///////////////////////////////////////////
//
// Public interface to discover possible
// if an object allocated at the target site may be
// reachable from both an object from root1 and an
// object allocated at root2, return TRUE
- public boolean mayBothReachTarget( FlatMethod fm,
- FlatNew fnRoot1,
- FlatNew fnRoot2,
- FlatNew fnTarget ) {
-
- AllocSite asr1 = getAllocationSiteFromFlatNew( fnRoot1 );
- AllocSite asr2 = getAllocationSiteFromFlatNew( fnRoot2 );
+ public boolean mayBothReachTarget(FlatMethod fm,
+ FlatNew fnRoot1,
+ FlatNew fnRoot2,
+ FlatNew fnTarget) {
+
+ AllocSite asr1 = getAllocationSiteFromFlatNew(fnRoot1);
+ AllocSite asr2 = getAllocationSiteFromFlatNew(fnRoot2);
assert asr1.isFlagged();
assert asr2.isFlagged();
- AllocSite ast = getAllocationSiteFromFlatNew( fnTarget );
- ReachGraph rg = getPartial( fm.getMethod() );
+ AllocSite ast = getAllocationSiteFromFlatNew(fnTarget);
+ ReachGraph rg = getPartial(fm.getMethod() );
- return rg.mayBothReachTarget( asr1, asr2, ast );
+ return rg.mayBothReachTarget(asr1, asr2, ast);
}
// similar to the method above, return TRUE if ever
// more than one object from the root allocation site
// may reach an object from the target site
- public boolean mayManyReachTarget( FlatMethod fm,
- FlatNew fnRoot,
- FlatNew fnTarget ) {
-
- AllocSite asr = getAllocationSiteFromFlatNew( fnRoot );
+ public boolean mayManyReachTarget(FlatMethod fm,
+ FlatNew fnRoot,
+ FlatNew fnTarget) {
+
+ AllocSite asr = getAllocationSiteFromFlatNew(fnRoot);
assert asr.isFlagged();
-
- AllocSite ast = getAllocationSiteFromFlatNew( fnTarget );
- ReachGraph rg = getPartial( fm.getMethod() );
-
- return rg.mayManyReachTarget( asr, ast );
+
+ AllocSite ast = getAllocationSiteFromFlatNew(fnTarget);
+ ReachGraph rg = getPartial(fm.getMethod() );
+
+ return rg.mayManyReachTarget(asr, ast);
}
-
+
public HashSet<AllocSite>
- getFlaggedAllocationSitesReachableFromTask(TaskDescriptor td) {
+ getFlaggedAllocationSitesReachableFromTask(TaskDescriptor td) {
checkAnalysisComplete();
return getFlaggedAllocationSitesReachableFromTaskPRIVATE(td);
}
-
+
public AllocSite getAllocationSiteFromFlatNew(FlatNew fn) {
checkAnalysisComplete();
return getAllocSiteFromFlatNewPRIVATE(fn);
- }
-
+ }
+
public AllocSite getAllocationSiteFromHeapRegionNodeID(Integer id) {
checkAnalysisComplete();
return mapHrnIdToAllocSite.get(id);
}
-
+
public Set<HeapRegionNode> hasPotentialSharing(Descriptor taskOrMethod,
int paramIndex1,
int paramIndex2) {
assert(rg != null);
return rg.mayReachSharedObjects(fm, paramIndex1, paramIndex2);
}
-
+
public Set<HeapRegionNode> hasPotentialSharing(Descriptor taskOrMethod,
int paramIndex, AllocSite alloc) {
checkAnalysisComplete();
ReachGraph rg = mapDescriptorToCompleteReachGraph.get(taskOrMethod);
FlatMethod fm=state.getMethodFlat(taskOrMethod);
- assert (rg != null);
+ assert(rg != null);
return rg.mayReachSharedObjects(fm, paramIndex, alloc);
}
checkAnalysisComplete();
ReachGraph rg = mapDescriptorToCompleteReachGraph.get(taskOrMethod);
FlatMethod fm=state.getMethodFlat(taskOrMethod);
- assert (rg != null);
+ assert(rg != null);
return rg.mayReachSharedObjects(fm, paramIndex, alloc);
}
AllocSite alloc1, AllocSite alloc2) {
checkAnalysisComplete();
ReachGraph rg = mapDescriptorToCompleteReachGraph.get(taskOrMethod);
- assert (rg != null);
+ assert(rg != null);
return rg.mayReachSharedObjects(alloc1, alloc2);
}
-
+
public String prettyPrintNodeSet(Set<HeapRegionNode> s) {
checkAnalysisComplete();
AllocSite as = n.getAllocSite();
if (as == null) {
- out += " " + n.toString() + ",\n";
+ out += " " + n.toString() + ",\n";
} else {
- out += " " + n.toString() + ": " + as.toStringVerbose()
- + ",\n";
+ out += " " + n.toString() + ": " + as.toStringVerbose()
+ + ",\n";
}
}
out += "}\n";
return out;
}
-
+
// use the methods given above to check every possible sharing class
// between task parameters and flagged allocation sites reachable
// from the task
- public void writeAllSharing(String outputFile,
+ public void writeAllSharing(String outputFile,
String timeReport,
String justTime,
boolean tabularOutput,
int numLines
)
- throws java.io.IOException {
+ throws java.io.IOException {
checkAnalysisComplete();
BufferedWriter bw = new BufferedWriter(new FileWriter(outputFile));
TaskDescriptor td = (TaskDescriptor) taskItr.next();
if (!tabularOutput) {
- bw.write("\n---------" + td + "--------\n");
+ bw.write("\n---------" + td + "--------\n");
}
HashSet<AllocSite> allocSites = getFlaggedAllocationSitesReachableFromTask(td);
FlatMethod fm = state.getMethodFlat(td);
for (int i = 0; i < fm.numParameters(); ++i) {
- // skip parameters with types that cannot reference
- // into the heap
- if( !shouldAnalysisTrack( fm.getParameter( i ).getType() ) ) {
- continue;
- }
-
- // for the ith parameter check for sharing classes to all
- // higher numbered parameters
- for (int j = i + 1; j < fm.numParameters(); ++j) {
-
- // skip parameters with types that cannot reference
- // into the heap
- if( !shouldAnalysisTrack( fm.getParameter( j ).getType() ) ) {
- continue;
- }
-
-
- common = hasPotentialSharing(td, i, j);
- if (!common.isEmpty()) {
- foundSomeSharing = true;
- ++numSharing;
- if (!tabularOutput) {
- bw.write("Potential sharing between parameters " + i
- + " and " + j + ".\n");
- bw.write(prettyPrintNodeSet(common) + "\n");
- }
- }
- }
-
- // for the ith parameter, check for sharing classes against
- // the set of allocation sites reachable from this
- // task context
- Iterator allocItr = allocSites.iterator();
- while (allocItr.hasNext()) {
- AllocSite as = (AllocSite) allocItr.next();
- common = hasPotentialSharing(td, i, as);
- if (!common.isEmpty()) {
- foundSomeSharing = true;
- ++numSharing;
- if (!tabularOutput) {
- bw.write("Potential sharing between parameter " + i
- + " and " + as.getFlatNew() + ".\n");
- bw.write(prettyPrintNodeSet(common) + "\n");
- }
- }
- }
+ // skip parameters with types that cannot reference
+ // into the heap
+ if( !shouldAnalysisTrack(fm.getParameter(i).getType() ) ) {
+ continue;
+ }
+
+ // for the ith parameter check for sharing classes to all
+ // higher numbered parameters
+ for (int j = i + 1; j < fm.numParameters(); ++j) {
+
+ // skip parameters with types that cannot reference
+ // into the heap
+ if( !shouldAnalysisTrack(fm.getParameter(j).getType() ) ) {
+ continue;
+ }
+
+
+ common = hasPotentialSharing(td, i, j);
+ if (!common.isEmpty()) {
+ foundSomeSharing = true;
+ ++numSharing;
+ if (!tabularOutput) {
+ bw.write("Potential sharing between parameters " + i
+ + " and " + j + ".\n");
+ bw.write(prettyPrintNodeSet(common) + "\n");
+ }
+ }
+ }
+
+ // for the ith parameter, check for sharing classes against
+ // the set of allocation sites reachable from this
+ // task context
+ Iterator allocItr = allocSites.iterator();
+ while (allocItr.hasNext()) {
+ AllocSite as = (AllocSite) allocItr.next();
+ common = hasPotentialSharing(td, i, as);
+ if (!common.isEmpty()) {
+ foundSomeSharing = true;
+ ++numSharing;
+ if (!tabularOutput) {
+ bw.write("Potential sharing between parameter " + i
+ + " and " + as.getFlatNew() + ".\n");
+ bw.write(prettyPrintNodeSet(common) + "\n");
+ }
+ }
+ }
}
// for each allocation site check for sharing classes with
HashSet<AllocSite> outerChecked = new HashSet<AllocSite>();
Iterator allocItr1 = allocSites.iterator();
while (allocItr1.hasNext()) {
- AllocSite as1 = (AllocSite) allocItr1.next();
-
- Iterator allocItr2 = allocSites.iterator();
- while (allocItr2.hasNext()) {
- AllocSite as2 = (AllocSite) allocItr2.next();
-
- if (!outerChecked.contains(as2)) {
- common = hasPotentialSharing(td, as1, as2);
-
- if (!common.isEmpty()) {
- foundSomeSharing = true;
- ++numSharing;
- if (!tabularOutput) {
- bw.write("Potential sharing between "
- + as1.getFlatNew() + " and "
- + as2.getFlatNew() + ".\n");
- bw.write(prettyPrintNodeSet(common) + "\n");
- }
- }
- }
- }
+ AllocSite as1 = (AllocSite) allocItr1.next();
+
+ Iterator allocItr2 = allocSites.iterator();
+ while (allocItr2.hasNext()) {
+ AllocSite as2 = (AllocSite) allocItr2.next();
+
+ if (!outerChecked.contains(as2)) {
+ common = hasPotentialSharing(td, as1, as2);
+
+ if (!common.isEmpty()) {
+ foundSomeSharing = true;
+ ++numSharing;
+ if (!tabularOutput) {
+ bw.write("Potential sharing between "
+ + as1.getFlatNew() + " and "
+ + as2.getFlatNew() + ".\n");
+ bw.write(prettyPrintNodeSet(common) + "\n");
+ }
+ }
+ }
+ }
- outerChecked.add(as1);
+ outerChecked.add(as1);
}
if (!foundSomeSharing) {
- if (!tabularOutput) {
- bw.write("No sharing between flagged objects in Task " + td
- + ".\n");
- }
+ if (!tabularOutput) {
+ bw.write("No sharing between flagged objects in Task " + td
+ + ".\n");
+ }
}
}
-
+
if (tabularOutput) {
bw.write(" & " + numSharing + " & " + justTime + " & " + numLines
+ " & " + numMethodsAnalyzed() + " \\\\\n");
}
-
+
// this version of writeAllSharing is for Java programs that have no tasks
// ***********************************
// WARNING: THIS DOES NOT DO THE RIGHT THING, REPORTS 0 ALWAYS!
// It should use mayBothReachTarget and mayManyReachTarget like
// OoOJava does to query analysis results
// ***********************************
- public void writeAllSharingJava(String outputFile,
+ public void writeAllSharingJava(String outputFile,
String timeReport,
String justTime,
boolean tabularOutput,
int numLines
)
- throws java.io.IOException {
+ throws java.io.IOException {
checkAnalysisComplete();
assert !state.TASK;
int numSharing = 0;
BufferedWriter bw = new BufferedWriter(new FileWriter(outputFile));
-
+
bw.write("Conducting disjoint reachability analysis with allocation depth = "
+ allocationDepth + "\n");
bw.write(timeReport + "\n\n");
Iterator allocItr2 = allocSites.iterator();
while (allocItr2.hasNext()) {
- AllocSite as2 = (AllocSite) allocItr2.next();
-
- if (!outerChecked.contains(as2)) {
- Set<HeapRegionNode> common = hasPotentialSharing(d,
- as1, as2);
-
- if (!common.isEmpty()) {
- foundSomeSharing = true;
- bw.write("Potential sharing between "
- + as1.getDisjointAnalysisId() + " and "
- + as2.getDisjointAnalysisId() + ".\n");
- bw.write(prettyPrintNodeSet(common) + "\n");
- ++numSharing;
- }
- }
+ AllocSite as2 = (AllocSite) allocItr2.next();
+
+ if (!outerChecked.contains(as2)) {
+ Set<HeapRegionNode> common = hasPotentialSharing(d,
+ as1, as2);
+
+ if (!common.isEmpty()) {
+ foundSomeSharing = true;
+ bw.write("Potential sharing between "
+ + as1.getDisjointAnalysisId() + " and "
+ + as2.getDisjointAnalysisId() + ".\n");
+ bw.write(prettyPrintNodeSet(common) + "\n");
+ ++numSharing;
+ }
+ }
}
outerChecked.add(as1);
bw.close();
}
-
+
///////////////////////////////////////////
//
// end public interface
if( !analysisComplete ) {
throw new Error("Warning: public interface method called while analysis is running.");
}
- }
+ }
// should attempt to be deterministic
public static boolean determinismDesired;
- // when we want to enforce determinism in the
+ // when we want to enforce determinism in the
// analysis we need to sort descriptors rather
// than toss them in efficient sets, use this
public static DescriptorComparator dComp =
// data from the compiler
- public State state;
- public CallGraph callGraph;
- public Liveness liveness;
+ public State state;
+ public CallGraph callGraph;
+ public Liveness liveness;
public ArrayReferencees arrayReferencees;
public RBlockRelationAnalysis rblockRel;
- public TypeUtil typeUtil;
- public int allocationDepth;
+ public TypeUtil typeUtil;
+ public int allocationDepth;
protected boolean doEffectsAnalysis = false;
protected EffectsAnalysis effectsAnalysis;
protected BuildStateMachines buildStateMachines;
-
+
// data structure for public interface
- private Hashtable< Descriptor, HashSet<AllocSite> >
- mapDescriptorToAllocSiteSet;
+ private Hashtable< Descriptor, HashSet<AllocSite> >
+ mapDescriptorToAllocSiteSet;
+
-
// for public interface methods to warn that they
// are grabbing results during analysis
private boolean analysisComplete;
// provide the analysis with an explicit
// top-level context with no parameters
protected MethodDescriptor mdAnalysisEntry;
- protected FlatMethod fmAnalysisEntry;
+ protected FlatMethod fmAnalysisEntry;
// main method defined by source program
protected MethodDescriptor mdSourceEntry;
// the set of task and/or method descriptors
// reachable in call graph
- protected Set<Descriptor>
- descriptorsToAnalyze;
+ protected Set<Descriptor>
+ descriptorsToAnalyze;
// current descriptors to visit in fixed-point
// interprocedural analysis, prioritized by
// dependency in the call graph
protected Stack<Descriptor>
- descriptorsToVisitStack;
- protected PriorityQueue<DescriptorQWrapper>
- descriptorsToVisitQ;
-
+ descriptorsToVisitStack;
+ protected PriorityQueue<DescriptorQWrapper>
+ descriptorsToVisitQ;
+
// a duplication of the above structure, but
// for efficient testing of inclusion
- protected HashSet<Descriptor>
- descriptorsToVisitSet;
+ protected HashSet<Descriptor>
+ descriptorsToVisitSet;
// storage for priorities (doesn't make sense)
// to add it to the Descriptor class, just in
// this analysis
- protected Hashtable<Descriptor, Integer>
- mapDescriptorToPriority;
+ protected Hashtable<Descriptor, Integer>
+ mapDescriptorToPriority;
// when analyzing a method and scheduling more:
// remember set of callee's enqueued for analysis
// so they can be put on top of the callers in
// the stack-visit mode
protected Set<Descriptor>
- calleesToEnqueue;
+ calleesToEnqueue;
// maps a descriptor to its current partial result
// from the intraprocedural fixed-point analysis--
// then the interprocedural analysis settles, this
// mapping will have the final results for each
// method descriptor
- protected Hashtable<Descriptor, ReachGraph>
- mapDescriptorToCompleteReachGraph;
+ protected Hashtable<Descriptor, ReachGraph>
+ mapDescriptorToCompleteReachGraph;
// maps a descriptor to its known dependents: namely
// methods or tasks that call the descriptor's method
// AND are part of this analysis (reachable from main)
protected Hashtable< Descriptor, Set<Descriptor> >
- mapDescriptorToSetDependents;
+ mapDescriptorToSetDependents;
// if the analysis client wants to flag allocation sites
// programmatically, it should provide a set of FlatNew
// maps each flat new to one analysis abstraction
// allocate site object, these exist outside reach graphs
protected Hashtable<FlatNew, AllocSite>
- mapFlatNewToAllocSite;
+ mapFlatNewToAllocSite;
// maps intergraph heap region IDs to intergraph
// allocation sites that created them, a redundant
// structure for efficiency in some operations
protected Hashtable<Integer, AllocSite>
- mapHrnIdToAllocSite;
+ mapHrnIdToAllocSite;
// maps a method to its initial heap model (IHM) that
// is the set of reachability graphs from every caller
// them separate is that any one call site's contribution
// to the IHM may changed along the path to the fixed point
protected Hashtable< Descriptor, Hashtable< FlatCall, ReachGraph > >
- mapDescriptorToIHMcontributions;
+ mapDescriptorToIHMcontributions;
// additionally, keep a mapping from descriptors to the
// merged in-coming initial context, because we want this
// initial context to be STRICTLY MONOTONIC
protected Hashtable<Descriptor, ReachGraph>
- mapDescriptorToInitialContext;
+ mapDescriptorToInitialContext;
// make the result for back edges analysis-wide STRICTLY
// MONOTONIC as well, but notice we use FlatNode as the
// key for this map: in case we want to consider other
// nodes as back edge's in future implementations
protected Hashtable<FlatNode, ReachGraph>
- mapBackEdgeToMonotone;
+ mapBackEdgeToMonotone;
public static final String arrayElementFieldName = "___element_";
static protected Hashtable<TypeDescriptor, FieldDescriptor>
- mapTypeToArrayField;
+ mapTypeToArrayField;
protected boolean suppressOutput;
// partial method result, keep a tally for generating
// unique filenames
protected Hashtable<Descriptor, Integer>
- mapDescriptorToNumUpdates;
-
- //map task descriptor to initial task parameter
+ mapDescriptorToNumUpdates;
+
+ //map task descriptor to initial task parameter
protected Hashtable<Descriptor, ReachGraph>
- mapDescriptorToReachGraph;
+ mapDescriptorToReachGraph;
protected PointerMethod pm;
static protected Hashtable<FlatNode, ReachGraph> fn2rgAtEnter =
new Hashtable<FlatNode, ReachGraph>();
- private Hashtable<FlatCall, Descriptor> fc2enclosing;
-
+ private Hashtable<FlatCall, Descriptor> fc2enclosing;
+
Accessible accessible;
// allocate various structures that are not local
// to a single class method--should be done once
protected void allocateStructures() {
-
+
if( determinismDesired ) {
// use an ordered set
- descriptorsToAnalyze = new TreeSet<Descriptor>( dComp );
+ descriptorsToAnalyze = new TreeSet<Descriptor>(dComp);
} else {
// otherwise use a speedy hashset
descriptorsToAnalyze = new HashSet<Descriptor>();
mapDescriptorToSetDependents =
new Hashtable< Descriptor, Set<Descriptor> >();
- mapFlatNewToAllocSite =
+ mapFlatNewToAllocSite =
new Hashtable<FlatNew, AllocSite>();
mapDescriptorToIHMcontributions =
new Hashtable< Descriptor, Hashtable< FlatCall, ReachGraph > >();
mapDescriptorToInitialContext =
- new Hashtable<Descriptor, ReachGraph>();
+ new Hashtable<Descriptor, ReachGraph>();
mapBackEdgeToMonotone =
new Hashtable<FlatNode, ReachGraph>();
-
+
mapHrnIdToAllocSite =
new Hashtable<Integer, AllocSite>();
- mapTypeToArrayField =
+ mapTypeToArrayField =
new Hashtable <TypeDescriptor, FieldDescriptor>();
if( state.DISJOINTDVISITSTACK ||
- state.DISJOINTDVISITSTACKEESONTOP
+ state.DISJOINTDVISITSTACKEESONTOP
) {
descriptorsToVisitStack =
new Stack<Descriptor>();
mapDescriptorToPriority =
new Hashtable<Descriptor, Integer>();
-
- calleesToEnqueue =
- new HashSet<Descriptor>();
+
+ calleesToEnqueue =
+ new HashSet<Descriptor>();
mapDescriptorToAllocSiteSet =
- new Hashtable<Descriptor, HashSet<AllocSite> >();
-
- mapDescriptorToReachGraph =
- new Hashtable<Descriptor, ReachGraph>();
+ new Hashtable<Descriptor, HashSet<AllocSite> >();
+
+ mapDescriptorToReachGraph =
+ new Hashtable<Descriptor, ReachGraph>();
pm = new PointerMethod();
// this analysis generates a disjoint reachability
// graph for every reachable method in the program
- public DisjointAnalysis( State s,
- TypeUtil tu,
- CallGraph cg,
- Liveness l,
- ArrayReferencees ar,
- Set<FlatNew> sitesToFlag,
- RBlockRelationAnalysis rra
- ) {
- init( s, tu, cg, l, ar, sitesToFlag, rra, null, false );
+ public DisjointAnalysis(State s,
+ TypeUtil tu,
+ CallGraph cg,
+ Liveness l,
+ ArrayReferencees ar,
+ Set<FlatNew> sitesToFlag,
+ RBlockRelationAnalysis rra
+ ) {
+ init(s, tu, cg, l, ar, sitesToFlag, rra, null, false);
}
- public DisjointAnalysis( State s,
- TypeUtil tu,
- CallGraph cg,
- Liveness l,
- ArrayReferencees ar,
- Set<FlatNew> sitesToFlag,
- RBlockRelationAnalysis rra,
- boolean suppressOutput
- ) {
- init( s, tu, cg, l, ar, sitesToFlag, rra, null, suppressOutput );
+ public DisjointAnalysis(State s,
+ TypeUtil tu,
+ CallGraph cg,
+ Liveness l,
+ ArrayReferencees ar,
+ Set<FlatNew> sitesToFlag,
+ RBlockRelationAnalysis rra,
+ boolean suppressOutput
+ ) {
+ init(s, tu, cg, l, ar, sitesToFlag, rra, null, suppressOutput);
}
- public DisjointAnalysis( State s,
- TypeUtil tu,
- CallGraph cg,
- Liveness l,
- ArrayReferencees ar,
- Set<FlatNew> sitesToFlag,
- RBlockRelationAnalysis rra,
- BuildStateMachines bsm,
- boolean suppressOutput
- ) {
- init( s, tu, cg, l, ar, sitesToFlag, rra, bsm, suppressOutput );
+ public DisjointAnalysis(State s,
+ TypeUtil tu,
+ CallGraph cg,
+ Liveness l,
+ ArrayReferencees ar,
+ Set<FlatNew> sitesToFlag,
+ RBlockRelationAnalysis rra,
+ BuildStateMachines bsm,
+ boolean suppressOutput
+ ) {
+ init(s, tu, cg, l, ar, sitesToFlag, rra, bsm, suppressOutput);
}
-
- protected void init( State state,
- TypeUtil typeUtil,
- CallGraph callGraph,
- Liveness liveness,
- ArrayReferencees arrayReferencees,
- Set<FlatNew> sitesToFlag,
- RBlockRelationAnalysis rra,
- BuildStateMachines bsm,
- boolean suppressOutput
- ) {
-
+
+ protected void init(State state,
+ TypeUtil typeUtil,
+ CallGraph callGraph,
+ Liveness liveness,
+ ArrayReferencees arrayReferencees,
+ Set<FlatNew> sitesToFlag,
+ RBlockRelationAnalysis rra,
+ BuildStateMachines bsm,
+ boolean suppressOutput
+ ) {
+
analysisComplete = false;
-
+
this.state = state;
this.typeUtil = typeUtil;
this.callGraph = callGraph;
if( rblockRel != null ) {
doEffectsAnalysis = true;
effectsAnalysis = new EffectsAnalysis();
-
+
//note: instead of reachgraph's isAccessible, using the result of accessible analysis
//since accessible gives us more accurate results
accessible=new Accessible(state, callGraph, rra, liveness);
accessible.doAnalysis();
}
-
+
this.allocationDepth = state.DISJOINTALLOCDEPTH;
this.releaseMode = state.DISJOINTRELEASEMODE;
this.determinismDesired = state.DISJOINTDETERMINISM;
this.snapNodeCounter = 0; // count nodes from 0
assert
- state.DISJOINTDVISITSTACK ||
- state.DISJOINTDVISITPQUE ||
- state.DISJOINTDVISITSTACKEESONTOP;
+ state.DISJOINTDVISITSTACK ||
+ state.DISJOINTDVISITPQUE ||
+ state.DISJOINTDVISITSTACKEESONTOP;
assert !(state.DISJOINTDVISITSTACK && state.DISJOINTDVISITPQUE);
assert !(state.DISJOINTDVISITSTACK && state.DISJOINTDVISITSTACKEESONTOP);
assert !(state.DISJOINTDVISITPQUE && state.DISJOINTDVISITSTACKEESONTOP);
-
+
// set some static configuration for ReachGraphs
ReachGraph.allocationDepth = allocationDepth;
ReachGraph.typeUtil = typeUtil;
ReachGraph.debugCallSiteStopAfter
= state.DISJOINTDEBUGCALLSTOPAFTER;
- ReachGraph.debugCallSiteVisitCounter
+ ReachGraph.debugCallSiteVisitCounter
= 0; // count visits from 1, is incremented before first visit
-
+
EffectsAnalysis.state = state;
EffectsAnalysis.buildStateMachines = buildStateMachines;
if( suppressOutput ) {
- System.out.println( "* Running disjoint reachability analysis with output suppressed! *" );
+ System.out.println("* Running disjoint reachability analysis with output suppressed! *");
}
allocateStructures();
try {
analyzeMethods();
} catch( IOException e ) {
- throw new Error( "IO Exception while writing disjointness analysis output." );
+ throw new Error("IO Exception while writing disjointness analysis output.");
}
analysisComplete=true;
double timeEndAnalysis = (double) System.nanoTime();
- double dt = (timeEndAnalysis - timeStartAnalysis)/(Math.pow( 10.0, 9.0 ) );
+ double dt = (timeEndAnalysis - timeStartAnalysis)/(Math.pow(10.0, 9.0) );
String treport;
if( sitesToFlag != null ) {
- treport = String.format( "Disjoint reachability analysis flagged %d sites and took %.3f sec.", sitesToFlag.size(), dt );
- if(sitesToFlag.size()>0){
- treport+="\nFlagged sites:"+"\n"+sitesToFlag.toString();
+ treport = String.format("Disjoint reachability analysis flagged %d sites and took %.3f sec.", sitesToFlag.size(), dt);
+ if(sitesToFlag.size()>0) {
+ treport+="\nFlagged sites:"+"\n"+sitesToFlag.toString();
}
} else {
- treport = String.format( "Disjoint reachability analysis took %.3f sec.", dt );
+ treport = String.format("Disjoint reachability analysis took %.3f sec.", dt);
}
- String justtime = String.format( "%.2f", dt );
- System.out.println( treport );
+ String justtime = String.format("%.2f", dt);
+ System.out.println(treport);
try {
if( writeFinalDOTs && !writeAllIncrementalDOTs ) {
- writeFinalGraphs();
+ writeFinalGraphs();
}
if( state.DISJOINTWRITEIHMS && !suppressOutput ) {
- writeFinalIHMs();
+ writeFinalIHMs();
}
if( state.DISJOINTWRITEINITCONTEXTS && !suppressOutput ) {
- writeInitialContexts();
+ writeInitialContexts();
}
if( state.DISJOINTALIASFILE != null && !suppressOutput ) {
- if( state.TASK ) {
- writeAllSharing(state.DISJOINTALIASFILE, treport, justtime, state.DISJOINTALIASTAB, state.lines);
- } else {
- writeAllSharingJava(state.DISJOINTALIASFILE,
- treport,
- justtime,
- state.DISJOINTALIASTAB,
- state.lines
- );
- }
+ if( state.TASK ) {
+ writeAllSharing(state.DISJOINTALIASFILE, treport, justtime, state.DISJOINTALIASTAB, state.lines);
+ } else {
+ writeAllSharingJava(state.DISJOINTALIASFILE,
+ treport,
+ justtime,
+ state.DISJOINTALIASTAB,
+ state.lines
+ );
+ }
}
if( state.RCR ) {
- buildStateMachines.writeStateMachines();
+ buildStateMachines.writeStateMachines();
}
} catch( IOException e ) {
- throw new Error( "IO Exception while writing disjointness analysis output." );
+ throw new Error("IO Exception while writing disjointness analysis output.");
}
}
return !descriptorsToVisitQ.isEmpty();
}
- throw new Error( "Neither descriptor visiting mode set" );
+ throw new Error("Neither descriptor visiting mode set");
}
// fixed-point computation over the call graph--when a
// method's callees are updated, it must be reanalyzed
- protected void analyzeMethods() throws java.io.IOException {
+ protected void analyzeMethods() throws java.io.IOException {
// task or non-task (java) mode determines what the roots
// of the call chain are, and establishes the set of methods
// reachable from the roots that will be analyzed
-
+
if( state.TASK ) {
if( !suppressOutput ) {
- System.out.println( "Bamboo mode..." );
+ System.out.println("Bamboo mode...");
}
-
- Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
+
+ Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
while( taskItr.hasNext() ) {
- TaskDescriptor td = (TaskDescriptor) taskItr.next();
- if( !descriptorsToAnalyze.contains( td ) ) {
- // add all methods transitively reachable from the
- // tasks as well
- descriptorsToAnalyze.add( td );
- descriptorsToAnalyze.addAll( callGraph.getAllMethods( td ) );
- }
- }
-
+ TaskDescriptor td = (TaskDescriptor) taskItr.next();
+ if( !descriptorsToAnalyze.contains(td) ) {
+ // add all methods transitively reachable from the
+ // tasks as well
+ descriptorsToAnalyze.add(td);
+ descriptorsToAnalyze.addAll(callGraph.getAllMethods(td) );
+ }
+ }
+
} else {
if( !suppressOutput ) {
- System.out.println( "Java mode..." );
+ System.out.println("Java mode...");
}
// add all methods transitively reachable from the
// source's main to set for analysis
mdSourceEntry = typeUtil.getMain();
- descriptorsToAnalyze.add( mdSourceEntry );
- descriptorsToAnalyze.addAll( callGraph.getAllMethods( mdSourceEntry ) );
-
+ descriptorsToAnalyze.add(mdSourceEntry);
+ descriptorsToAnalyze.addAll(callGraph.getAllMethods(mdSourceEntry) );
+
// fabricate an empty calling context that will call
// the source's main, but call graph doesn't know
// about it, so explicitly add it
- makeAnalysisEntryMethod( mdSourceEntry );
- descriptorsToAnalyze.add( mdAnalysisEntry );
+ makeAnalysisEntryMethod(mdSourceEntry);
+ descriptorsToAnalyze.add(mdAnalysisEntry);
}
- // now, depending on the interprocedural mode for visiting
+ // now, depending on the interprocedural mode for visiting
// methods, set up the needed data structures
if( state.DISJOINTDVISITPQUE ) {
-
- // topologically sort according to the call graph so
+
+ // topologically sort according to the call graph so
// leaf calls are last, helps build contexts up first
- LinkedList<Descriptor> sortedDescriptors =
- topologicalSort( descriptorsToAnalyze );
+ LinkedList<Descriptor> sortedDescriptors =
+ topologicalSort(descriptorsToAnalyze);
// add sorted descriptors to priority queue, and duplicate
// the queue as a set for efficiently testing whether some
// for the priority queue, give items at the head
// of the sorted list a low number (highest priority)
while( !sortedDescriptors.isEmpty() ) {
- Descriptor d = sortedDescriptors.removeFirst();
- mapDescriptorToPriority.put( d, new Integer( p ) );
- descriptorsToVisitQ.add( new DescriptorQWrapper( p, d ) );
- descriptorsToVisitSet.add( d );
- ++p;
+ Descriptor d = sortedDescriptors.removeFirst();
+ mapDescriptorToPriority.put(d, new Integer(p) );
+ descriptorsToVisitQ.add(new DescriptorQWrapper(p, d) );
+ descriptorsToVisitSet.add(d);
+ ++p;
}
} else if( state.DISJOINTDVISITSTACK ||
- state.DISJOINTDVISITSTACKEESONTOP
+ state.DISJOINTDVISITSTACKEESONTOP
) {
// if we're doing the stack scheme, just throw the root
// method or tasks on the stack
if( state.TASK ) {
- Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
- while( taskItr.hasNext() ) {
+ Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
+ while( taskItr.hasNext() ) {
TaskDescriptor td = (TaskDescriptor) taskItr.next();
- descriptorsToVisitStack.add( td );
- descriptorsToVisitSet.add( td );
- }
-
+ descriptorsToVisitStack.add(td);
+ descriptorsToVisitSet.add(td);
+ }
+
} else {
- descriptorsToVisitStack.add( mdAnalysisEntry );
- descriptorsToVisitSet.add( mdAnalysisEntry );
+ descriptorsToVisitStack.add(mdAnalysisEntry);
+ descriptorsToVisitSet.add(mdAnalysisEntry);
}
} else {
- throw new Error( "Unknown method scheduling mode" );
+ throw new Error("Unknown method scheduling mode");
}
if( state.DISJOINTDVISITSTACK ||
state.DISJOINTDVISITSTACKEESONTOP
) {
- d = descriptorsToVisitStack.pop();
+ d = descriptorsToVisitStack.pop();
} else if( state.DISJOINTDVISITPQUE ) {
- d = descriptorsToVisitQ.poll().getDescriptor();
+ d = descriptorsToVisitQ.poll().getDescriptor();
}
- assert descriptorsToVisitSet.contains( d );
- descriptorsToVisitSet.remove( d );
+ assert descriptorsToVisitSet.contains(d);
+ descriptorsToVisitSet.remove(d);
// because the task or method descriptor just extracted
// was in the "to visit" set it either hasn't been analyzed
// that depend on this one to the "to visit" set.
if( !suppressOutput ) {
- System.out.println( "Analyzing " + d );
+ System.out.println("Analyzing " + d);
}
if( state.DISJOINTDVISITSTACKEESONTOP ) {
- assert calleesToEnqueue.isEmpty();
- }
-
- ReachGraph rg = analyzeMethod( d );
- ReachGraph rgPrev = getPartial( d );
-
- if( !rg.equals( rgPrev ) ) {
- setPartial( d, rg );
-
- if( state.DISJOINTDEBUGSCHEDULING ) {
- System.out.println( " complete graph changed, scheduling callers for analysis:" );
- }
-
- // results for d changed, so enqueue dependents
- // of d for further analysis
- Iterator<Descriptor> depsItr = getDependents( d ).iterator();
+ assert calleesToEnqueue.isEmpty();
+ }
+
+ ReachGraph rg = analyzeMethod(d);
+ ReachGraph rgPrev = getPartial(d);
+
+ if( !rg.equals(rgPrev) ) {
+ setPartial(d, rg);
+
+ if( state.DISJOINTDEBUGSCHEDULING ) {
+ System.out.println(" complete graph changed, scheduling callers for analysis:");
+ }
+
+ // results for d changed, so enqueue dependents
+ // of d for further analysis
+ Iterator<Descriptor> depsItr = getDependents(d).iterator();
while( depsItr.hasNext() ) {
Descriptor dNext = depsItr.next();
- enqueue( dNext );
+ enqueue(dNext);
- if( state.DISJOINTDEBUGSCHEDULING ) {
- System.out.println( " "+dNext );
- }
+ if( state.DISJOINTDEBUGSCHEDULING ) {
+ System.out.println(" "+dNext);
+ }
}
}
// whether or not the method under analysis changed,
- // we may have some callees that are scheduled for
+ // we may have some callees that are scheduled for
// more analysis, and they should go on the top of
// the stack now (in other method-visiting modes they
// are already enqueued at this point
if( state.DISJOINTDVISITSTACKEESONTOP ) {
- Iterator<Descriptor> depsItr = calleesToEnqueue.iterator();
- while( depsItr.hasNext() ) {
- Descriptor dNext = depsItr.next();
- enqueue( dNext );
- }
- calleesToEnqueue.clear();
- }
-
- }
+ Iterator<Descriptor> depsItr = calleesToEnqueue.iterator();
+ while( depsItr.hasNext() ) {
+ Descriptor dNext = depsItr.next();
+ enqueue(dNext);
+ }
+ calleesToEnqueue.clear();
+ }
+
+ }
}
- protected ReachGraph analyzeMethod( Descriptor d )
- throws java.io.IOException {
+ protected ReachGraph analyzeMethod(Descriptor d)
+ throws java.io.IOException {
// get the flat code for this descriptor
FlatMethod fm;
if( d == mdAnalysisEntry ) {
fm = fmAnalysisEntry;
} else {
- fm = state.getMethodFlat( d );
+ fm = state.getMethodFlat(d);
}
- pm.analyzeMethod( fm );
+ pm.analyzeMethod(fm);
// intraprocedural work set
Set<FlatNode> flatNodesToVisit = new HashSet<FlatNode>();
- flatNodesToVisit.add( fm );
+ flatNodesToVisit.add(fm);
// if determinism is desired by client, shadow the
// set with a queue to make visit order deterministic
Queue<FlatNode> flatNodesToVisitQ = null;
if( determinismDesired ) {
flatNodesToVisitQ = new LinkedList<FlatNode>();
- flatNodesToVisitQ.add( fm );
+ flatNodesToVisitQ.add(fm);
}
-
+
// mapping of current partial results
Hashtable<FlatNode, ReachGraph> mapFlatNodeToReachGraph =
new Hashtable<FlatNode, ReachGraph>();
while( !flatNodesToVisit.isEmpty() ) {
- FlatNode fn;
+ FlatNode fn;
if( determinismDesired ) {
- assert !flatNodesToVisitQ.isEmpty();
- fn = flatNodesToVisitQ.remove();
+ assert !flatNodesToVisitQ.isEmpty();
+ fn = flatNodesToVisitQ.remove();
} else {
- fn = flatNodesToVisit.iterator().next();
+ fn = flatNodesToVisit.iterator().next();
}
- flatNodesToVisit.remove( fn );
+ flatNodesToVisit.remove(fn);
// effect transfer function defined by this node,
// then compare it to the old graph at this node
ReachGraph rg = new ReachGraph();
TaskDescriptor taskDesc;
- if(fn instanceof FlatMethod && (taskDesc=((FlatMethod)fn).getTask())!=null){
- if(mapDescriptorToReachGraph.containsKey(taskDesc)){
- // retrieve existing reach graph if it is not first time
- rg=mapDescriptorToReachGraph.get(taskDesc);
- }else{
- // create initial reach graph for a task
- rg=createInitialTaskReachGraph((FlatMethod)fn);
- rg.globalSweep();
- mapDescriptorToReachGraph.put(taskDesc, rg);
- }
+ if(fn instanceof FlatMethod && (taskDesc=((FlatMethod)fn).getTask())!=null) {
+ if(mapDescriptorToReachGraph.containsKey(taskDesc)) {
+ // retrieve existing reach graph if it is not first time
+ rg=mapDescriptorToReachGraph.get(taskDesc);
+ } else {
+ // create initial reach graph for a task
+ rg=createInitialTaskReachGraph((FlatMethod)fn);
+ rg.globalSweep();
+ mapDescriptorToReachGraph.put(taskDesc, rg);
+ }
}
// start by merging all node's parents' graphs
for( int i = 0; i < pm.numPrev(fn); ++i ) {
FlatNode pn = pm.getPrev(fn,i);
- if( mapFlatNodeToReachGraph.containsKey( pn ) ) {
- ReachGraph rgParent = mapFlatNodeToReachGraph.get( pn );
- rg.merge( rgParent );
+ if( mapFlatNodeToReachGraph.containsKey(pn) ) {
+ ReachGraph rgParent = mapFlatNodeToReachGraph.get(pn);
+ rg.merge(rgParent);
}
}
-
- if( takeDebugSnapshots &&
- d.getSymbol().equals( descSymbolDebug )
+
+ if( takeDebugSnapshots &&
+ d.getSymbol().equals(descSymbolDebug)
) {
- debugSnapshot( rg, fn, true );
+ debugSnapshot(rg, fn, true);
}
// modify rg with appropriate transfer function
- rg = analyzeFlatNode( d, fm, fn, setReturns, rg );
+ rg = analyzeFlatNode(d, fm, fn, setReturns, rg);
- if( takeDebugSnapshots &&
- d.getSymbol().equals( descSymbolDebug )
+ if( takeDebugSnapshots &&
+ d.getSymbol().equals(descSymbolDebug)
) {
- debugSnapshot( rg, fn, false );
- ++snapNodeCounter;
+ debugSnapshot(rg, fn, false);
+ ++snapNodeCounter;
}
-
+
// if the results of the new graph are different from
// the current graph at this node, replace the graph
// with the update and enqueue the children
- ReachGraph rgPrev = mapFlatNodeToReachGraph.get( fn );
- if( !rg.equals( rgPrev ) ) {
- mapFlatNodeToReachGraph.put( fn, rg );
+ ReachGraph rgPrev = mapFlatNodeToReachGraph.get(fn);
+ if( !rg.equals(rgPrev) ) {
+ mapFlatNodeToReachGraph.put(fn, rg);
- for( int i = 0; i < pm.numNext( fn ); i++ ) {
- FlatNode nn = pm.getNext( fn, i );
+ for( int i = 0; i < pm.numNext(fn); i++ ) {
+ FlatNode nn = pm.getNext(fn, i);
- flatNodesToVisit.add( nn );
- if( determinismDesired ) {
- flatNodesToVisitQ.add( nn );
- }
+ flatNodesToVisit.add(nn);
+ if( determinismDesired ) {
+ flatNodesToVisitQ.add(nn);
+ }
}
}
}
while( retItr.hasNext() ) {
FlatReturnNode frn = (FlatReturnNode) retItr.next();
- assert mapFlatNodeToReachGraph.containsKey( frn );
- ReachGraph rgRet = mapFlatNodeToReachGraph.get( frn );
+ assert mapFlatNodeToReachGraph.containsKey(frn);
+ ReachGraph rgRet = mapFlatNodeToReachGraph.get(frn);
- completeGraph.merge( rgRet );
+ completeGraph.merge(rgRet);
}
- if( takeDebugSnapshots &&
- d.getSymbol().equals( descSymbolDebug )
+ if( takeDebugSnapshots &&
+ d.getSymbol().equals(descSymbolDebug)
) {
// increment that we've visited the debug snap
// method, and reset the node counter
- System.out.println( " @@@ debug snap at visit "+snapVisitCounter );
+ System.out.println(" @@@ debug snap at visit "+snapVisitCounter);
++snapVisitCounter;
snapNodeCounter = 0;
- if( snapVisitCounter == visitStartCapture + numVisitsToCapture &&
- stopAfterCapture
+ if( snapVisitCounter == visitStartCapture + numVisitsToCapture &&
+ stopAfterCapture
) {
- System.out.println( "!!! Stopping analysis after debug snap captures. !!!" );
- System.exit( 0 );
+ System.out.println("!!! Stopping analysis after debug snap captures. !!!");
+ System.exit(0);
}
}
return completeGraph;
}
-
+
protected ReachGraph
- analyzeFlatNode( Descriptor d,
- FlatMethod fmContaining,
- FlatNode fn,
- HashSet<FlatReturnNode> setRetNodes,
- ReachGraph rg
- ) throws java.io.IOException {
-
-
+ analyzeFlatNode(Descriptor d,
+ FlatMethod fmContaining,
+ FlatNode fn,
+ HashSet<FlatReturnNode> setRetNodes,
+ ReachGraph rg
+ ) throws java.io.IOException {
+
+
// any variables that are no longer live should be
// nullified in the graph to reduce edges
//rg.nullifyDeadVars( liveness.getLiveInTemps( fmContaining, fn ) );
- TempDescriptor lhs;
- TempDescriptor rhs;
- FieldDescriptor fld;
- TypeDescriptor tdElement;
- FieldDescriptor fdElement;
+ TempDescriptor lhs;
+ TempDescriptor rhs;
+ FieldDescriptor fld;
+ TypeDescriptor tdElement;
+ FieldDescriptor fdElement;
FlatSESEEnterNode sese;
- FlatSESEExitNode fsexn;
+ FlatSESEExitNode fsexn;
//Stores the flatnode's reach graph at enter
ReachGraph rgOnEnter = new ReachGraph();
- rgOnEnter.merge( rg );
+ rgOnEnter.merge(rg);
fn2rgAtEnter.put(fn, rgOnEnter);
-
+
// use node type to decide what transfer function
// to apply to the reachability graph
switch( fn.kind() ) {
case FKind.FlatGenReachNode: {
FlatGenReachNode fgrn = (FlatGenReachNode) fn;
-
- System.out.println( " Generating reach graph for program point: "+fgrn.getGraphName() );
-
- rg.writeGraph( "genReach"+fgrn.getGraphName(),
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // hide reachability altogether
- true, // hide subset reachability states
- true, // hide predicates
- true ); // hide edge taints
+
+ System.out.println(" Generating reach graph for program point: "+fgrn.getGraphName() );
+
+ rg.writeGraph("genReach"+fgrn.getGraphName(),
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide reachability altogether
+ true, // hide subset reachability states
+ true, // hide predicates
+ true); // hide edge taints
} break;
// since we're working on the FlatMethod, we know
// the incoming ReachGraph 'rg' is empty
- Hashtable<FlatCall, ReachGraph> heapsFromCallers =
- getIHMcontributions( d );
+ Hashtable<FlatCall, ReachGraph> heapsFromCallers =
+ getIHMcontributions(d);
Set entrySet = heapsFromCallers.entrySet();
Iterator itr = entrySet.iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- FlatCall fc = (FlatCall) me.getKey();
- ReachGraph rgContrib = (ReachGraph) me.getValue();
+ Map.Entry me = (Map.Entry)itr.next();
+ FlatCall fc = (FlatCall) me.getKey();
+ ReachGraph rgContrib = (ReachGraph) me.getValue();
- assert fc.getMethod().equals( d );
+ assert fc.getMethod().equals(d);
- rg.merge( rgContrib );
+ rg.merge(rgContrib);
}
// additionally, we are enforcing STRICT MONOTONICITY for the
// method's initial context, so grow the context by whatever
// the previously computed context was, and put the most
// up-to-date context back in the map
- ReachGraph rgPrevContext = mapDescriptorToInitialContext.get( d );
- rg.merge( rgPrevContext );
- mapDescriptorToInitialContext.put( d, rg );
+ ReachGraph rgPrevContext = mapDescriptorToInitialContext.get(d);
+ rg.merge(rgPrevContext);
+ mapDescriptorToInitialContext.put(d, rg);
} break;
-
+
case FKind.FlatOpNode:
FlatOpNode fon = (FlatOpNode) fn;
if( fon.getOp().getOp() == Operation.ASSIGN ) {
lhs = fon.getDest();
rhs = fon.getLeft();
- // before transfer, do effects analysis support
- if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
- if(rblockRel.isPotentialStallSite(fn)){
- // x gets status of y
+ // before transfer, do effects analysis support
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+ if(rblockRel.isPotentialStallSite(fn)) {
+ // x gets status of y
// if(!rg.isAccessible(rhs)){
- if(!accessible.isAccessible(fn, rhs)){
- rg.makeInaccessible(lhs);
- }
- }
- }
+ if(!accessible.isAccessible(fn, rhs)) {
+ rg.makeInaccessible(lhs);
+ }
+ }
+ }
- // transfer func
- rg.assignTempXEqualToTempY( lhs, rhs );
+ // transfer func
+ rg.assignTempXEqualToTempY(lhs, rhs);
}
break;
// before transfer, do effects analysis support
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
- if(rblockRel.isPotentialStallSite(fn)){
- // x gets status of y
+ if(rblockRel.isPotentialStallSite(fn)) {
+ // x gets status of y
// if(!rg.isAccessible(rhs)){
- if(!accessible.isAccessible(fn,rhs)){
- rg.makeInaccessible(lhs);
- }
- }
+ if(!accessible.isAccessible(fn,rhs)) {
+ rg.makeInaccessible(lhs);
+ }
+ }
}
-
+
// transfer func
- rg.assignTempXEqualToCastedTempY( lhs, rhs, td );
+ rg.assignTempXEqualToCastedTempY(lhs, rhs, td);
break;
case FKind.FlatFieldNode:
// a stall-site taint
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
- if(rblockRel.isPotentialStallSite(fn)){
- // x=y.f, stall y if not accessible
- // contributes read effects on stall site of y
+ if(rblockRel.isPotentialStallSite(fn)) {
+ // x=y.f, stall y if not accessible
+ // contributes read effects on stall site of y
// if(!rg.isAccessible(rhs)) {
- if(!accessible.isAccessible(fn,rhs)) {
- rg.taintStallSite(fn, rhs);
- }
+ if(!accessible.isAccessible(fn,rhs)) {
+ rg.taintStallSite(fn, rhs);
+ }
- // after this, x and y are accessbile.
- rg.makeAccessible(lhs);
- rg.makeAccessible(rhs);
- }
+ // after this, x and y are accessbile.
+ rg.makeAccessible(lhs);
+ rg.makeAccessible(rhs);
+ }
}
- if( shouldAnalysisTrack( fld.getType() ) ) {
- // transfer func
- rg.assignTempXEqualToTempYFieldF( lhs, rhs, fld, fn );
- }
+ if( shouldAnalysisTrack(fld.getType() ) ) {
+ // transfer func
+ rg.assignTempXEqualToTempYFieldF(lhs, rhs, fld, fn);
+ }
// after transfer, use updated graph to
// do effects analysis
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
- effectsAnalysis.analyzeFlatFieldNode( rg, rhs, fld, fn );
+ effectsAnalysis.analyzeFlatFieldNode(rg, rhs, fld, fn);
}
break;
// stall-site taints
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
- if(rblockRel.isPotentialStallSite(fn)){
- // x.y=f , stall x and y if they are not accessible
- // also contribute write effects on stall site of x
+ if(rblockRel.isPotentialStallSite(fn)) {
+ // x.y=f , stall x and y if they are not accessible
+ // also contribute write effects on stall site of x
// if(!rg.isAccessible(lhs)) {
- if(!accessible.isAccessible(fn,lhs)) {
- rg.taintStallSite(fn, lhs);
- }
+ if(!accessible.isAccessible(fn,lhs)) {
+ rg.taintStallSite(fn, lhs);
+ }
// if(!rg.isAccessible(rhs)) {
- if(!accessible.isAccessible(fn,rhs)) {
- rg.taintStallSite(fn, rhs);
- }
+ if(!accessible.isAccessible(fn,rhs)) {
+ rg.taintStallSite(fn, rhs);
+ }
- // accessible status update
- rg.makeAccessible(lhs);
- rg.makeAccessible(rhs);
- }
+ // accessible status update
+ rg.makeAccessible(lhs);
+ rg.makeAccessible(rhs);
+ }
}
- if( shouldAnalysisTrack( fld.getType() ) ) {
- // transfer func
- strongUpdate = rg.assignTempXFieldFEqualToTempY( lhs, fld, rhs, fn );
- }
+ if( shouldAnalysisTrack(fld.getType() ) ) {
+ // transfer func
+ strongUpdate = rg.assignTempXFieldFEqualToTempY(lhs, fld, rhs, fn);
+ }
// use transformed graph to do effects analysis
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
- effectsAnalysis.analyzeFlatSetFieldNode( rg, lhs, fld, fn, strongUpdate );
+ effectsAnalysis.analyzeFlatSetFieldNode(rg, lhs, fld, fn, strongUpdate);
}
break;
assert rhs.getType().isArray();
tdElement = rhs.getType().dereference();
- fdElement = getArrayField( tdElement );
+ fdElement = getArrayField(tdElement);
// before transfer func, possibly inject
// stall-site taint
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
- if(rblockRel.isPotentialStallSite(fn)){
- // x=y.f, stall y if not accessible
- // contributes read effects on stall site of y
- // after this, x and y are accessbile.
+ if(rblockRel.isPotentialStallSite(fn)) {
+ // x=y.f, stall y if not accessible
+ // contributes read effects on stall site of y
+ // after this, x and y are accessbile.
// if(!rg.isAccessible(rhs)) {
- if(!accessible.isAccessible(fn,rhs)) {
- rg.taintStallSite(fn, rhs);
- }
+ if(!accessible.isAccessible(fn,rhs)) {
+ rg.taintStallSite(fn, rhs);
+ }
- rg.makeAccessible(lhs);
- rg.makeAccessible(rhs);
- }
+ rg.makeAccessible(lhs);
+ rg.makeAccessible(rhs);
+ }
}
- if( shouldAnalysisTrack( lhs.getType() ) ) {
- // transfer func
- rg.assignTempXEqualToTempYFieldF( lhs, rhs, fdElement, fn );
+ if( shouldAnalysisTrack(lhs.getType() ) ) {
+ // transfer func
+ rg.assignTempXEqualToTempYFieldF(lhs, rhs, fdElement, fn);
}
// use transformed graph to do effects analysis
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
- effectsAnalysis.analyzeFlatFieldNode( rg, rhs, fdElement, fn );
- }
+ effectsAnalysis.analyzeFlatFieldNode(rg, rhs, fdElement, fn);
+ }
break;
case FKind.FlatSetElementNode:
rhs = fsen.getSrc();
assert lhs.getType() != null;
- assert lhs.getType().isArray();
+ assert lhs.getType().isArray();
tdElement = lhs.getType().dereference();
- fdElement = getArrayField( tdElement );
+ fdElement = getArrayField(tdElement);
// before transfer func, possibly inject
// stall-site taints
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
-
- if(rblockRel.isPotentialStallSite(fn)){
- // x.y=f , stall x and y if they are not accessible
- // also contribute write effects on stall site of x
+
+ if(rblockRel.isPotentialStallSite(fn)) {
+ // x.y=f , stall x and y if they are not accessible
+ // also contribute write effects on stall site of x
// if(!rg.isAccessible(lhs)) {
- if(!accessible.isAccessible(fn,lhs)) {
- rg.taintStallSite(fn, lhs);
- }
+ if(!accessible.isAccessible(fn,lhs)) {
+ rg.taintStallSite(fn, lhs);
+ }
// if(!rg.isAccessible(rhs)) {
- if(!accessible.isAccessible(fn,rhs)) {
- rg.taintStallSite(fn, rhs);
- }
-
- // accessible status update
- rg.makeAccessible(lhs);
- rg.makeAccessible(rhs);
- }
+ if(!accessible.isAccessible(fn,rhs)) {
+ rg.taintStallSite(fn, rhs);
+ }
+
+ // accessible status update
+ rg.makeAccessible(lhs);
+ rg.makeAccessible(rhs);
+ }
}
- if( shouldAnalysisTrack( rhs.getType() ) ) {
- // transfer func, BUT
- // skip this node if it cannot create new reachability paths
- if( !arrayReferencees.doesNotCreateNewReaching( fsen ) ) {
- rg.assignTempXFieldFEqualToTempY( lhs, fdElement, rhs, fn );
- }
+ if( shouldAnalysisTrack(rhs.getType() ) ) {
+ // transfer func, BUT
+ // skip this node if it cannot create new reachability paths
+ if( !arrayReferencees.doesNotCreateNewReaching(fsen) ) {
+ rg.assignTempXFieldFEqualToTempY(lhs, fdElement, rhs, fn);
+ }
}
// use transformed graph to do effects analysis
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
- effectsAnalysis.analyzeFlatSetFieldNode( rg, lhs, fdElement, fn,
- false );
+ effectsAnalysis.analyzeFlatSetFieldNode(rg, lhs, fdElement, fn,
+ false);
}
break;
-
+
case FKind.FlatNew:
FlatNew fnn = (FlatNew) fn;
lhs = fnn.getDst();
- if( shouldAnalysisTrack( lhs.getType() ) ) {
- AllocSite as = getAllocSiteFromFlatNewPRIVATE( fnn );
-
- // before transform, support effects analysis
- if (doEffectsAnalysis && fmContaining != fmAnalysisEntry) {
- if (rblockRel.isPotentialStallSite(fn)) {
- // after creating new object, lhs is accessible
- rg.makeAccessible(lhs);
- }
- }
+ if( shouldAnalysisTrack(lhs.getType() ) ) {
+ AllocSite as = getAllocSiteFromFlatNewPRIVATE(fnn);
+
+ // before transform, support effects analysis
+ if (doEffectsAnalysis && fmContaining != fmAnalysisEntry) {
+ if (rblockRel.isPotentialStallSite(fn)) {
+ // after creating new object, lhs is accessible
+ rg.makeAccessible(lhs);
+ }
+ }
- // transfer func
- rg.assignTempEqualToNewAlloc( lhs, as );
+ // transfer func
+ rg.assignTempEqualToNewAlloc(lhs, as);
}
break;
sese = (FlatSESEEnterNode) fn;
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
-
- // always remove ALL stall site taints at enter
- rg.removeAllStallSiteTaints();
- // inject taints for in-set vars
- rg.taintInSetVars( sese );
+ // always remove ALL stall site taints at enter
+ rg.removeAllStallSiteTaints();
+
+ // inject taints for in-set vars
+ rg.taintInSetVars(sese);
}
break;
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
- // @ sese exit make all live variables
- // inaccessible to later parent statements
- rg.makeInaccessible( liveness.getLiveInTemps( fmContaining, fn ) );
-
- // always remove ALL stall site taints at exit
- rg.removeAllStallSiteTaints();
-
- // remove in-set var taints for the exiting rblock
- rg.removeInContextTaints( sese );
+ // @ sese exit make all live variables
+ // inaccessible to later parent statements
+ rg.makeInaccessible(liveness.getLiveInTemps(fmContaining, fn) );
+
+ // always remove ALL stall site taints at exit
+ rg.removeAllStallSiteTaints();
+
+ // remove in-set var taints for the exiting rblock
+ rg.removeInContextTaints(sese);
}
break;
case FKind.FlatCall: {
Descriptor mdCaller;
- if( fmContaining.getMethod() != null ){
- mdCaller = fmContaining.getMethod();
+ if( fmContaining.getMethod() != null ) {
+ mdCaller = fmContaining.getMethod();
} else {
- mdCaller = fmContaining.getTask();
- }
- FlatCall fc = (FlatCall) fn;
+ mdCaller = fmContaining.getTask();
+ }
+ FlatCall fc = (FlatCall) fn;
MethodDescriptor mdCallee = fc.getMethod();
- FlatMethod fmCallee = state.getMethodFlat( mdCallee );
+ FlatMethod fmCallee = state.getMethodFlat(mdCallee);
- if( mdCallee.getSymbol().equals( "genReach" ) ) {
- rg.writeGraph( "genReach"+d,
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // hide reachability altogether
- true, // hide subset reachability states
- true, // hide predicates
- true ); // hide edge taints
- break;
+ if( mdCallee.getSymbol().equals("genReach") ) {
+ rg.writeGraph("genReach"+d,
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide reachability altogether
+ true, // hide subset reachability states
+ true, // hide predicates
+ true); // hide edge taints
+ break;
}
-
+
boolean debugCallSite =
- mdCaller.getSymbol().equals( state.DISJOINTDEBUGCALLER ) &&
- mdCallee.getSymbol().equals( state.DISJOINTDEBUGCALLEE );
+ mdCaller.getSymbol().equals(state.DISJOINTDEBUGCALLER) &&
+ mdCallee.getSymbol().equals(state.DISJOINTDEBUGCALLEE);
boolean writeDebugDOTs = false;
boolean stopAfter = false;
if( debugCallSite ) {
- ++ReachGraph.debugCallSiteVisitCounter;
- System.out.println( " $$$ Debug call site visit "+
- ReachGraph.debugCallSiteVisitCounter+
- " $$$"
- );
- if(
- (ReachGraph.debugCallSiteVisitCounter >=
- ReachGraph.debugCallSiteVisitStartCapture) &&
-
- (ReachGraph.debugCallSiteVisitCounter <
- ReachGraph.debugCallSiteVisitStartCapture +
- ReachGraph.debugCallSiteNumVisitsToCapture)
- ) {
- writeDebugDOTs = true;
- System.out.println( " $$$ Capturing this call site visit $$$" );
- if( ReachGraph.debugCallSiteStopAfter &&
- (ReachGraph.debugCallSiteVisitCounter ==
- ReachGraph.debugCallSiteVisitStartCapture +
- ReachGraph.debugCallSiteNumVisitsToCapture - 1)
- ) {
- stopAfter = true;
- }
- }
+ ++ReachGraph.debugCallSiteVisitCounter;
+ System.out.println(" $$$ Debug call site visit "+
+ ReachGraph.debugCallSiteVisitCounter+
+ " $$$"
+ );
+ if(
+ (ReachGraph.debugCallSiteVisitCounter >=
+ ReachGraph.debugCallSiteVisitStartCapture) &&
+
+ (ReachGraph.debugCallSiteVisitCounter <
+ ReachGraph.debugCallSiteVisitStartCapture +
+ ReachGraph.debugCallSiteNumVisitsToCapture)
+ ) {
+ writeDebugDOTs = true;
+ System.out.println(" $$$ Capturing this call site visit $$$");
+ if( ReachGraph.debugCallSiteStopAfter &&
+ (ReachGraph.debugCallSiteVisitCounter ==
+ ReachGraph.debugCallSiteVisitStartCapture +
+ ReachGraph.debugCallSiteNumVisitsToCapture - 1)
+ ) {
+ stopAfter = true;
+ }
+ }
}
// not used for the current call site transform, we are
// grabbing this heap model for future analysis of the callees,
// so if different results emerge we will return to this site
- ReachGraph heapForThisCall_old =
- getIHMcontribution( mdCallee, fc );
+ ReachGraph heapForThisCall_old =
+ getIHMcontribution(mdCallee, fc);
// the computation of the callee-reachable heap
// is useful for making the callee starting point
// and for applying the call site transfer function
- Set<Integer> callerNodeIDsCopiedToCallee =
+ Set<Integer> callerNodeIDsCopiedToCallee =
new HashSet<Integer>();
- ReachGraph heapForThisCall_cur =
- rg.makeCalleeView( fc,
- fmCallee,
- callerNodeIDsCopiedToCallee,
- writeDebugDOTs
- );
+ ReachGraph heapForThisCall_cur =
+ rg.makeCalleeView(fc,
+ fmCallee,
+ callerNodeIDsCopiedToCallee,
+ writeDebugDOTs
+ );
// enforce that a call site contribution can only
// monotonically increase
- heapForThisCall_cur.merge( heapForThisCall_old );
+ heapForThisCall_cur.merge(heapForThisCall_old);
- if( !heapForThisCall_cur.equals( heapForThisCall_old ) ) {
- // if heap at call site changed, update the contribution,
- // and reschedule the callee for analysis
- addIHMcontribution( mdCallee, fc, heapForThisCall_cur );
+ if( !heapForThisCall_cur.equals(heapForThisCall_old) ) {
+ // if heap at call site changed, update the contribution,
+ // and reschedule the callee for analysis
+ addIHMcontribution(mdCallee, fc, heapForThisCall_cur);
- // map a FlatCall to its enclosing method/task descriptor
- // so we can write that info out later
- fc2enclosing.put( fc, mdCaller );
+ // map a FlatCall to its enclosing method/task descriptor
+ // so we can write that info out later
+ fc2enclosing.put(fc, mdCaller);
- if( state.DISJOINTDEBUGSCHEDULING ) {
- System.out.println( " context changed, scheduling callee: "+mdCallee );
- }
+ if( state.DISJOINTDEBUGSCHEDULING ) {
+ System.out.println(" context changed, scheduling callee: "+mdCallee);
+ }
- if( state.DISJOINTDVISITSTACKEESONTOP ) {
- calleesToEnqueue.add( mdCallee );
- } else {
- enqueue( mdCallee );
- }
+ if( state.DISJOINTDVISITSTACKEESONTOP ) {
+ calleesToEnqueue.add(mdCallee);
+ } else {
+ enqueue(mdCallee);
+ }
}
// callees, so find the set of callees...
Set<MethodDescriptor> setPossibleCallees;
if( determinismDesired ) {
- // use an ordered set
- setPossibleCallees = new TreeSet<MethodDescriptor>( dComp );
+ // use an ordered set
+ setPossibleCallees = new TreeSet<MethodDescriptor>(dComp);
} else {
- // otherwise use a speedy hashset
- setPossibleCallees = new HashSet<MethodDescriptor>();
+ // otherwise use a speedy hashset
+ setPossibleCallees = new HashSet<MethodDescriptor>();
}
- if( mdCallee.isStatic() ) {
- setPossibleCallees.add( mdCallee );
+ if( mdCallee.isStatic() ) {
+ setPossibleCallees.add(mdCallee);
} else {
TypeDescriptor typeDesc = fc.getThis().getType();
- setPossibleCallees.addAll( callGraph.getMethods( mdCallee,
- typeDesc )
- );
+ setPossibleCallees.addAll(callGraph.getMethods(mdCallee,
+ typeDesc)
+ );
}
ReachGraph rgMergeOfPossibleCallers = new ReachGraph();
Iterator<MethodDescriptor> mdItr = setPossibleCallees.iterator();
while( mdItr.hasNext() ) {
- MethodDescriptor mdPossible = mdItr.next();
- FlatMethod fmPossible = state.getMethodFlat( mdPossible );
-
- addDependent( mdPossible, // callee
- d ); // caller
-
- // don't alter the working graph (rg) until we compute a
- // result for every possible callee, merge them all together,
- // then set rg to that
- ReachGraph rgPossibleCaller = new ReachGraph();
- rgPossibleCaller.merge( rg );
-
- ReachGraph rgPossibleCallee = getPartial( mdPossible );
-
- if( rgPossibleCallee == null ) {
- // if this method has never been analyzed just schedule it
- // for analysis and skip over this call site for now
- if( state.DISJOINTDVISITSTACKEESONTOP ) {
- calleesToEnqueue.add( mdPossible );
- } else {
- enqueue( mdPossible );
- }
-
- if( state.DISJOINTDEBUGSCHEDULING ) {
- System.out.println( " callee hasn't been analyzed, scheduling: "+mdPossible );
- }
-
- } else {
- // calculate the method call transform
- rgPossibleCaller.resolveMethodCall( fc,
- fmPossible,
- rgPossibleCallee,
- callerNodeIDsCopiedToCallee,
- writeDebugDOTs
- );
-
- if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
+ MethodDescriptor mdPossible = mdItr.next();
+ FlatMethod fmPossible = state.getMethodFlat(mdPossible);
+
+ addDependent(mdPossible, // callee
+ d); // caller
+
+ // don't alter the working graph (rg) until we compute a
+ // result for every possible callee, merge them all together,
+ // then set rg to that
+ ReachGraph rgPossibleCaller = new ReachGraph();
+ rgPossibleCaller.merge(rg);
+
+ ReachGraph rgPossibleCallee = getPartial(mdPossible);
+
+ if( rgPossibleCallee == null ) {
+ // if this method has never been analyzed just schedule it
+ // for analysis and skip over this call site for now
+ if( state.DISJOINTDVISITSTACKEESONTOP ) {
+ calleesToEnqueue.add(mdPossible);
+ } else {
+ enqueue(mdPossible);
+ }
+
+ if( state.DISJOINTDEBUGSCHEDULING ) {
+ System.out.println(" callee hasn't been analyzed, scheduling: "+mdPossible);
+ }
+
+ } else {
+ // calculate the method call transform
+ rgPossibleCaller.resolveMethodCall(fc,
+ fmPossible,
+ rgPossibleCallee,
+ callerNodeIDsCopiedToCallee,
+ writeDebugDOTs
+ );
+
+ if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
// if( !rgPossibleCallee.isAccessible( ReachGraph.tdReturn ) ) {
- if( !accessible.isAccessible(fn, ReachGraph.tdReturn ) ) {
- rgPossibleCaller.makeInaccessible( fc.getReturnTemp() );
- }
- }
+ if( !accessible.isAccessible(fn, ReachGraph.tdReturn) ) {
+ rgPossibleCaller.makeInaccessible(fc.getReturnTemp() );
+ }
+ }
+
+ }
- }
-
- rgMergeOfPossibleCallers.merge( rgPossibleCaller );
+ rgMergeOfPossibleCallers.merge(rgPossibleCaller);
}
if( stopAfter ) {
- System.out.println( "$$$ Exiting after requested captures of call site. $$$" );
- System.exit( 0 );
+ System.out.println("$$$ Exiting after requested captures of call site. $$$");
+ System.exit(0);
}
//need to consider more
FlatNode nextFN=fmCallee.getNext(0);
if( nextFN instanceof FlatSESEEnterNode ) {
- FlatSESEEnterNode calleeSESE=(FlatSESEEnterNode)nextFN;
- if(!calleeSESE.getIsLeafSESE()){
- rg.makeInaccessible( liveness.getLiveInTemps( fmContaining, fn ) );
- }
+ FlatSESEEnterNode calleeSESE=(FlatSESEEnterNode)nextFN;
+ if(!calleeSESE.getIsLeafSESE()) {
+ rg.makeInaccessible(liveness.getLiveInTemps(fmContaining, fn) );
+ }
}
-
+
} break;
-
+
case FKind.FlatReturnNode:
FlatReturnNode frn = (FlatReturnNode) fn;
// before transfer, do effects analysis support
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
// if(!rg.isAccessible(rhs)){
- if(!accessible.isAccessible(fn,rhs)){
- rg.makeInaccessible(ReachGraph.tdReturn);
- }
+ if(!accessible.isAccessible(fn,rhs)) {
+ rg.makeInaccessible(ReachGraph.tdReturn);
+ }
}
- if( rhs != null && shouldAnalysisTrack( rhs.getType() ) ) {
- rg.assignReturnEqualToTemp( rhs );
+ if( rhs != null && shouldAnalysisTrack(rhs.getType() ) ) {
+ rg.assignReturnEqualToTemp(rhs);
}
- setRetNodes.add( frn );
+ setRetNodes.add(frn);
break;
} // end switch
-
+
// dead variables were removed before the above transfer function
// was applied, so eliminate heap regions and edges that are no
// longer part of the abstractly-live heap graph, and sweep up
// back edges are strictly monotonic
- if( pm.isBackEdge( fn ) ) {
- ReachGraph rgPrevResult = mapBackEdgeToMonotone.get( fn );
- rg.merge( rgPrevResult );
- mapBackEdgeToMonotone.put( fn, rg );
+ if( pm.isBackEdge(fn) ) {
+ ReachGraph rgPrevResult = mapBackEdgeToMonotone.get(fn);
+ rg.merge(rgPrevResult);
+ mapBackEdgeToMonotone.put(fn, rg);
}
-
+
// at this point rg should be the correct update
// by an above transfer function, or untouched if
// the flat node type doesn't affect the heap
}
-
+
// this method should generate integers strictly greater than zero!
// special "shadow" regions are made from a heap region by negating
// the ID
static public Integer generateUniqueHeapRegionNodeID() {
++uniqueIDcount;
- return new Integer( uniqueIDcount );
+ return new Integer(uniqueIDcount);
}
-
- static public FieldDescriptor getArrayField( TypeDescriptor tdElement ) {
- FieldDescriptor fdElement = mapTypeToArrayField.get( tdElement );
+
+ static public FieldDescriptor getArrayField(TypeDescriptor tdElement) {
+ FieldDescriptor fdElement = mapTypeToArrayField.get(tdElement);
if( fdElement == null ) {
- fdElement = new FieldDescriptor( new Modifiers( Modifiers.PUBLIC ),
- tdElement,
- arrayElementFieldName,
- null,
- false );
- mapTypeToArrayField.put( tdElement, fdElement );
+ fdElement = new FieldDescriptor(new Modifiers(Modifiers.PUBLIC),
+ tdElement,
+ arrayElementFieldName,
+ null,
+ false);
+ mapTypeToArrayField.put(tdElement, fdElement);
}
return fdElement;
}
-
-
+
+
private void writeFinalGraphs() {
Set entrySet = mapDescriptorToCompleteReachGraph.entrySet();
Iterator itr = entrySet.iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- Descriptor d = (Descriptor) me.getKey();
+ Map.Entry me = (Map.Entry)itr.next();
+ Descriptor d = (Descriptor) me.getKey();
ReachGraph rg = (ReachGraph) me.getValue();
String graphName;
if( d instanceof TaskDescriptor ) {
- graphName = "COMPLETEtask"+d;
+ graphName = "COMPLETEtask"+d;
} else {
- graphName = "COMPLETE"+d;
+ graphName = "COMPLETE"+d;
}
- rg.writeGraph( graphName,
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // hide reachability altogether
- true, // hide subset reachability states
- true, // hide predicates
- false ); // hide edge taints
+ rg.writeGraph(graphName,
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide reachability altogether
+ true, // hide subset reachability states
+ true, // hide predicates
+ false); // hide edge taints
}
}
private void writeFinalIHMs() {
Iterator d2IHMsItr = mapDescriptorToIHMcontributions.entrySet().iterator();
while( d2IHMsItr.hasNext() ) {
- Map.Entry me1 = (Map.Entry) d2IHMsItr.next();
- Descriptor d = (Descriptor) me1.getKey();
- Hashtable<FlatCall, ReachGraph> IHMs = (Hashtable<FlatCall, ReachGraph>) me1.getValue();
+ Map.Entry me1 = (Map.Entry)d2IHMsItr.next();
+ Descriptor d = (Descriptor) me1.getKey();
+ Hashtable<FlatCall, ReachGraph> IHMs = (Hashtable<FlatCall, ReachGraph>)me1.getValue();
Iterator fc2rgItr = IHMs.entrySet().iterator();
while( fc2rgItr.hasNext() ) {
- Map.Entry me2 = (Map.Entry) fc2rgItr.next();
- FlatCall fc = (FlatCall) me2.getKey();
- ReachGraph rg = (ReachGraph) me2.getValue();
-
- rg.writeGraph( "IHMPARTFOR"+d+"FROM"+fc2enclosing.get( fc )+fc,
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // hide reachability altogether
- true, // prune unreachable heap regions
- true, // hide subset reachability states
- false, // hide predicates
- true ); // hide edge taints
+ Map.Entry me2 = (Map.Entry)fc2rgItr.next();
+ FlatCall fc = (FlatCall) me2.getKey();
+ ReachGraph rg = (ReachGraph) me2.getValue();
+
+ rg.writeGraph("IHMPARTFOR"+d+"FROM"+fc2enclosing.get(fc)+fc,
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // hide reachability altogether
+ true, // prune unreachable heap regions
+ true, // hide subset reachability states
+ false, // hide predicates
+ true); // hide edge taints
}
}
}
Set entrySet = mapDescriptorToInitialContext.entrySet();
Iterator itr = entrySet.iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- Descriptor d = (Descriptor) me.getKey();
+ Map.Entry me = (Map.Entry)itr.next();
+ Descriptor d = (Descriptor) me.getKey();
ReachGraph rg = (ReachGraph) me.getValue();
- rg.writeGraph( "INITIAL"+d,
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // hide all reachability
- true, // hide subset reachability states
- true, // hide predicates
- false );// hide edge taints
+ rg.writeGraph("INITIAL"+d,
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide all reachability
+ true, // hide subset reachability states
+ true, // hide predicates
+ false); // hide edge taints
}
}
-
- protected ReachGraph getPartial( Descriptor d ) {
- return mapDescriptorToCompleteReachGraph.get( d );
+
+ protected ReachGraph getPartial(Descriptor d) {
+ return mapDescriptorToCompleteReachGraph.get(d);
}
- protected void setPartial( Descriptor d, ReachGraph rg ) {
- mapDescriptorToCompleteReachGraph.put( d, rg );
+ protected void setPartial(Descriptor d, ReachGraph rg) {
+ mapDescriptorToCompleteReachGraph.put(d, rg);
// when the flag for writing out every partial
// result is set, we should spit out the graph,
// to track how many partial results for this
// descriptor we've already written out
if( writeAllIncrementalDOTs ) {
- if( !mapDescriptorToNumUpdates.containsKey( d ) ) {
- mapDescriptorToNumUpdates.put( d, new Integer( 0 ) );
+ if( !mapDescriptorToNumUpdates.containsKey(d) ) {
+ mapDescriptorToNumUpdates.put(d, new Integer(0) );
}
- Integer n = mapDescriptorToNumUpdates.get( d );
-
+ Integer n = mapDescriptorToNumUpdates.get(d);
+
String graphName;
if( d instanceof TaskDescriptor ) {
- graphName = d+"COMPLETEtask"+String.format( "%05d", n );
+ graphName = d+"COMPLETEtask"+String.format("%05d", n);
} else {
- graphName = d+"COMPLETE"+String.format( "%05d", n );
- }
-
- rg.writeGraph( graphName,
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // hide all reachability
- true, // hide subset reachability states
- false, // hide predicates
- false); // hide edge taints
-
- mapDescriptorToNumUpdates.put( d, n + 1 );
+ graphName = d+"COMPLETE"+String.format("%05d", n);
+ }
+
+ rg.writeGraph(graphName,
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide all reachability
+ true, // hide subset reachability states
+ false, // hide predicates
+ false); // hide edge taints
+
+ mapDescriptorToNumUpdates.put(d, n + 1);
}
}
// return just the allocation site associated with one FlatNew node
- protected AllocSite getAllocSiteFromFlatNewPRIVATE( FlatNew fnew ) {
+ protected AllocSite getAllocSiteFromFlatNewPRIVATE(FlatNew fnew) {
boolean flagProgrammatically = false;
- if( sitesToFlag != null && sitesToFlag.contains( fnew ) ) {
+ if( sitesToFlag != null && sitesToFlag.contains(fnew) ) {
flagProgrammatically = true;
}
- if( !mapFlatNewToAllocSite.containsKey( fnew ) ) {
- AllocSite as = AllocSite.factory( allocationDepth,
- fnew,
- fnew.getDisjointId(),
- flagProgrammatically
- );
+ if( !mapFlatNewToAllocSite.containsKey(fnew) ) {
+ AllocSite as = AllocSite.factory(allocationDepth,
+ fnew,
+ fnew.getDisjointId(),
+ flagProgrammatically
+ );
// the newest nodes are single objects
for( int i = 0; i < allocationDepth; ++i ) {
Integer id = generateUniqueHeapRegionNodeID();
- as.setIthOldest( i, id );
- mapHrnIdToAllocSite.put( id, as );
+ as.setIthOldest(i, id);
+ mapHrnIdToAllocSite.put(id, as);
}
// the oldest node is a summary node
- as.setSummary( generateUniqueHeapRegionNodeID() );
+ as.setSummary(generateUniqueHeapRegionNodeID() );
- mapFlatNewToAllocSite.put( fnew, as );
+ mapFlatNewToAllocSite.put(fnew, as);
}
- return mapFlatNewToAllocSite.get( fnew );
+ return mapFlatNewToAllocSite.get(fnew);
}
- public static boolean shouldAnalysisTrack( TypeDescriptor type ) {
+ public static boolean shouldAnalysisTrack(TypeDescriptor type) {
// don't track primitive types, but an array
// of primitives is heap memory
if( type.isImmutable() ) {
return true;
}
- protected int numMethodsAnalyzed() {
+ protected int numMethodsAnalyzed() {
return descriptorsToAnalyze.size();
}
-
-
-
-
+
+
+
+
// Take in source entry which is the program's compiled entry and
// create a new analysis entry, a method that takes no parameters
// and appears to allocate the command line arguments and call the
// source entry with them. The purpose of this analysis entry is
// to provide a top-level method context with no parameters left.
- protected void makeAnalysisEntryMethod( MethodDescriptor mdSourceEntry ) {
+ protected void makeAnalysisEntryMethod(MethodDescriptor mdSourceEntry) {
Modifiers mods = new Modifiers();
- mods.addModifier( Modifiers.PUBLIC );
- mods.addModifier( Modifiers.STATIC );
+ mods.addModifier(Modifiers.PUBLIC);
+ mods.addModifier(Modifiers.STATIC);
- TypeDescriptor returnType =
- new TypeDescriptor( TypeDescriptor.VOID );
+ TypeDescriptor returnType =
+ new TypeDescriptor(TypeDescriptor.VOID);
- this.mdAnalysisEntry =
- new MethodDescriptor( mods,
- returnType,
- "analysisEntryMethod"
- );
+ this.mdAnalysisEntry =
+ new MethodDescriptor(mods,
+ returnType,
+ "analysisEntryMethod"
+ );
- TempDescriptor cmdLineArgs =
- new TempDescriptor( "args",
- mdSourceEntry.getParamType( 0 )
- );
+ TempDescriptor cmdLineArgs =
+ new TempDescriptor("args",
+ mdSourceEntry.getParamType(0)
+ );
+
+ FlatNew fn =
+ new FlatNew(mdSourceEntry.getParamType(0),
+ cmdLineArgs,
+ false // is global
+ );
- FlatNew fn =
- new FlatNew( mdSourceEntry.getParamType( 0 ),
- cmdLineArgs,
- false // is global
- );
-
TempDescriptor[] sourceEntryArgs = new TempDescriptor[1];
sourceEntryArgs[0] = cmdLineArgs;
-
- FlatCall fc =
- new FlatCall( mdSourceEntry,
- null, // dst temp
- null, // this temp
- sourceEntryArgs
- );
- FlatReturnNode frn = new FlatReturnNode( null );
+ FlatCall fc =
+ new FlatCall(mdSourceEntry,
+ null, // dst temp
+ null, // this temp
+ sourceEntryArgs
+ );
+
+ FlatReturnNode frn = new FlatReturnNode(null);
FlatExit fe = new FlatExit();
- this.fmAnalysisEntry =
- new FlatMethod( mdAnalysisEntry,
- fe
- );
+ this.fmAnalysisEntry =
+ new FlatMethod(mdAnalysisEntry,
+ fe
+ );
- this.fmAnalysisEntry.addNext( fn );
- fn.addNext( fc );
- fc.addNext( frn );
- frn.addNext( fe );
+ this.fmAnalysisEntry.addNext(fn);
+ fn.addNext(fc);
+ fc.addNext(frn);
+ frn.addNext(fe);
}
- protected LinkedList<Descriptor> topologicalSort( Set<Descriptor> toSort ) {
+ protected LinkedList<Descriptor> topologicalSort(Set<Descriptor> toSort) {
Set<Descriptor> discovered;
if( determinismDesired ) {
// use an ordered set
- discovered = new TreeSet<Descriptor>( dComp );
+ discovered = new TreeSet<Descriptor>(dComp);
} else {
// otherwise use a speedy hashset
discovered = new HashSet<Descriptor>();
}
LinkedList<Descriptor> sorted = new LinkedList<Descriptor>();
-
+
Iterator<Descriptor> itr = toSort.iterator();
while( itr.hasNext() ) {
Descriptor d = itr.next();
-
- if( !discovered.contains( d ) ) {
- dfsVisit( d, toSort, sorted, discovered );
+
+ if( !discovered.contains(d) ) {
+ dfsVisit(d, toSort, sorted, discovered);
}
}
-
+
return sorted;
}
-
+
// While we're doing DFS on call graph, remember
// dependencies for efficient queuing of methods
// during interprocedural analysis:
// a dependent of a method decriptor d for this analysis is:
// 1) a method or task that invokes d
// 2) in the descriptorsToAnalyze set
- protected void dfsVisit( Descriptor d,
- Set <Descriptor> toSort,
- LinkedList<Descriptor> sorted,
- Set <Descriptor> discovered ) {
- discovered.add( d );
-
+ protected void dfsVisit(Descriptor d,
+ Set <Descriptor> toSort,
+ LinkedList<Descriptor> sorted,
+ Set <Descriptor> discovered) {
+ discovered.add(d);
+
// only methods have callers, tasks never do
if( d instanceof MethodDescriptor ) {
// the call graph is not aware that we have a fabricated
// analysis entry that calls the program source's entry
if( md == mdSourceEntry ) {
- if( !discovered.contains( mdAnalysisEntry ) ) {
- addDependent( mdSourceEntry, // callee
- mdAnalysisEntry // caller
- );
- dfsVisit( mdAnalysisEntry, toSort, sorted, discovered );
- }
+ if( !discovered.contains(mdAnalysisEntry) ) {
+ addDependent(mdSourceEntry, // callee
+ mdAnalysisEntry // caller
+ );
+ dfsVisit(mdAnalysisEntry, toSort, sorted, discovered);
+ }
}
// otherwise call graph guides DFS
- Iterator itr = callGraph.getCallerSet( md ).iterator();
+ Iterator itr = callGraph.getCallerSet(md).iterator();
while( itr.hasNext() ) {
Descriptor dCaller = (Descriptor) itr.next();
-
+
// only consider callers in the original set to analyze
- if( !toSort.contains( dCaller ) ) {
+ if( !toSort.contains(dCaller) ) {
continue;
- }
-
- if( !discovered.contains( dCaller ) ) {
- addDependent( md, // callee
- dCaller // caller
- );
-
- dfsVisit( dCaller, toSort, sorted, discovered );
+ }
+
+ if( !discovered.contains(dCaller) ) {
+ addDependent(md, // callee
+ dCaller // caller
+ );
+
+ dfsVisit(dCaller, toSort, sorted, discovered);
}
}
}
-
+
// for leaf-nodes last now!
- sorted.addLast( d );
+ sorted.addLast(d);
}
- protected void enqueue( Descriptor d ) {
+ protected void enqueue(Descriptor d) {
- if( !descriptorsToVisitSet.contains( d ) ) {
+ if( !descriptorsToVisitSet.contains(d) ) {
if( state.DISJOINTDVISITSTACK ||
state.DISJOINTDVISITSTACKEESONTOP
) {
- descriptorsToVisitStack.add( d );
+ descriptorsToVisitStack.add(d);
} else if( state.DISJOINTDVISITPQUE ) {
- Integer priority = mapDescriptorToPriority.get( d );
- descriptorsToVisitQ.add( new DescriptorQWrapper( priority,
- d )
- );
+ Integer priority = mapDescriptorToPriority.get(d);
+ descriptorsToVisitQ.add(new DescriptorQWrapper(priority,
+ d)
+ );
}
- descriptorsToVisitSet.add( d );
+ descriptorsToVisitSet.add(d);
}
}
// a dependent of a method decriptor d for this analysis is:
// 1) a method or task that invokes d
// 2) in the descriptorsToAnalyze set
- protected void addDependent( Descriptor callee, Descriptor caller ) {
- Set<Descriptor> deps = mapDescriptorToSetDependents.get( callee );
+ protected void addDependent(Descriptor callee, Descriptor caller) {
+ Set<Descriptor> deps = mapDescriptorToSetDependents.get(callee);
if( deps == null ) {
deps = new HashSet<Descriptor>();
}
- deps.add( caller );
- mapDescriptorToSetDependents.put( callee, deps );
+ deps.add(caller);
+ mapDescriptorToSetDependents.put(callee, deps);
}
-
- protected Set<Descriptor> getDependents( Descriptor callee ) {
- Set<Descriptor> deps = mapDescriptorToSetDependents.get( callee );
+
+ protected Set<Descriptor> getDependents(Descriptor callee) {
+ Set<Descriptor> deps = mapDescriptorToSetDependents.get(callee);
if( deps == null ) {
deps = new HashSet<Descriptor>();
- mapDescriptorToSetDependents.put( callee, deps );
+ mapDescriptorToSetDependents.put(callee, deps);
}
return deps;
}
-
- public Hashtable<FlatCall, ReachGraph> getIHMcontributions( Descriptor d ) {
- Hashtable<FlatCall, ReachGraph> heapsFromCallers =
- mapDescriptorToIHMcontributions.get( d );
-
+ public Hashtable<FlatCall, ReachGraph> getIHMcontributions(Descriptor d) {
+
+ Hashtable<FlatCall, ReachGraph> heapsFromCallers =
+ mapDescriptorToIHMcontributions.get(d);
+
if( heapsFromCallers == null ) {
heapsFromCallers = new Hashtable<FlatCall, ReachGraph>();
- mapDescriptorToIHMcontributions.put( d, heapsFromCallers );
+ mapDescriptorToIHMcontributions.put(d, heapsFromCallers);
}
-
+
return heapsFromCallers;
}
- public ReachGraph getIHMcontribution( Descriptor d,
- FlatCall fc
- ) {
- Hashtable<FlatCall, ReachGraph> heapsFromCallers =
- getIHMcontributions( d );
+ public ReachGraph getIHMcontribution(Descriptor d,
+ FlatCall fc
+ ) {
+ Hashtable<FlatCall, ReachGraph> heapsFromCallers =
+ getIHMcontributions(d);
- if( !heapsFromCallers.containsKey( fc ) ) {
+ if( !heapsFromCallers.containsKey(fc) ) {
return null;
}
- return heapsFromCallers.get( fc );
+ return heapsFromCallers.get(fc);
}
- public void addIHMcontribution( Descriptor d,
- FlatCall fc,
- ReachGraph rg
- ) {
- Hashtable<FlatCall, ReachGraph> heapsFromCallers =
- getIHMcontributions( d );
+ public void addIHMcontribution(Descriptor d,
+ FlatCall fc,
+ ReachGraph rg
+ ) {
+ Hashtable<FlatCall, ReachGraph> heapsFromCallers =
+ getIHMcontributions(d);
- heapsFromCallers.put( fc, rg );
+ heapsFromCallers.put(fc, rg);
}
- private AllocSite createParameterAllocSite( ReachGraph rg,
- TempDescriptor tempDesc,
- boolean flagRegions
- ) {
-
+ private AllocSite createParameterAllocSite(ReachGraph rg,
+ TempDescriptor tempDesc,
+ boolean flagRegions
+ ) {
+
FlatNew flatNew;
if( flagRegions ) {
- flatNew = new FlatNew( tempDesc.getType(), // type
- tempDesc, // param temp
- false, // global alloc?
- "param"+tempDesc // disjoint site ID string
- );
+ flatNew = new FlatNew(tempDesc.getType(), // type
+ tempDesc, // param temp
+ false, // global alloc?
+ "param"+tempDesc // disjoint site ID string
+ );
} else {
- flatNew = new FlatNew( tempDesc.getType(), // type
- tempDesc, // param temp
- false, // global alloc?
- null // disjoint site ID string
- );
+ flatNew = new FlatNew(tempDesc.getType(), // type
+ tempDesc, // param temp
+ false, // global alloc?
+ null // disjoint site ID string
+ );
}
// create allocation site
- AllocSite as = AllocSite.factory( allocationDepth,
- flatNew,
- flatNew.getDisjointId(),
- false
- );
+ AllocSite as = AllocSite.factory(allocationDepth,
+ flatNew,
+ flatNew.getDisjointId(),
+ false
+ );
for (int i = 0; i < allocationDepth; ++i) {
- Integer id = generateUniqueHeapRegionNodeID();
- as.setIthOldest(i, id);
- mapHrnIdToAllocSite.put(id, as);
+ Integer id = generateUniqueHeapRegionNodeID();
+ as.setIthOldest(i, id);
+ mapHrnIdToAllocSite.put(id, as);
}
// the oldest node is a summary node
- as.setSummary( generateUniqueHeapRegionNodeID() );
-
+ as.setSummary(generateUniqueHeapRegionNodeID() );
+
rg.age(as);
-
+
return as;
-
+
}
-private Set<FieldDescriptor> getFieldSetTobeAnalyzed(TypeDescriptor typeDesc){
-
- Set<FieldDescriptor> fieldSet=new HashSet<FieldDescriptor>();
- if(!typeDesc.isImmutable()){
- ClassDescriptor classDesc = typeDesc.getClassDesc();
- for (Iterator it = classDesc.getFields(); it.hasNext();) {
- FieldDescriptor field = (FieldDescriptor) it.next();
- TypeDescriptor fieldType = field.getType();
- if (shouldAnalysisTrack( fieldType )) {
- fieldSet.add(field);
- }
- }
+ private Set<FieldDescriptor> getFieldSetTobeAnalyzed(TypeDescriptor typeDesc) {
+
+ Set<FieldDescriptor> fieldSet=new HashSet<FieldDescriptor>();
+ if(!typeDesc.isImmutable()) {
+ ClassDescriptor classDesc = typeDesc.getClassDesc();
+ for (Iterator it = classDesc.getFields(); it.hasNext(); ) {
+ FieldDescriptor field = (FieldDescriptor) it.next();
+ TypeDescriptor fieldType = field.getType();
+ if (shouldAnalysisTrack(fieldType)) {
+ fieldSet.add(field);
+ }
+ }
}
return fieldSet;
-
-}
- private HeapRegionNode createMultiDeimensionalArrayHRN(ReachGraph rg, AllocSite alloc, HeapRegionNode srcHRN, FieldDescriptor fd, Hashtable<HeapRegionNode, HeapRegionNode> map, Hashtable<TypeDescriptor, HeapRegionNode> mapToExistingNode, ReachSet alpha ){
-
- int dimCount=fd.getType().getArrayCount();
- HeapRegionNode prevNode=null;
- HeapRegionNode arrayEntryNode=null;
- for(int i=dimCount;i>0;i--){
- TypeDescriptor typeDesc=fd.getType().dereference();//hack to get instance of type desc
- typeDesc.setArrayCount(i);
- TempDescriptor tempDesc=new TempDescriptor(typeDesc.getSymbol(),typeDesc);
- HeapRegionNode hrnSummary ;
- if(!mapToExistingNode.containsKey(typeDesc)){
- AllocSite as;
- if(i==dimCount){
- as = alloc;
- }else{
- as = createParameterAllocSite(rg, tempDesc, false);
- }
- // make a new reference to allocated node
- hrnSummary =
- rg.createNewHeapRegionNode(as.getSummary(), // id or null to generate a new one
- false, // single object?
- true, // summary?
- false, // out-of-context?
- as.getType(), // type
- as, // allocation site
- alpha, // inherent reach
- alpha, // current reach
- ExistPredSet.factory(rg.predTrue), // predicates
- tempDesc.toString() // description
- );
- rg.id2hrn.put(as.getSummary(),hrnSummary);
-
- mapToExistingNode.put(typeDesc, hrnSummary);
- }else{
- hrnSummary=mapToExistingNode.get(typeDesc);
- }
-
- if(prevNode==null){
- // make a new reference between new summary node and source
- RefEdge edgeToSummary = new RefEdge(srcHRN, // source
- hrnSummary, // dest
- typeDesc, // type
- fd.getSymbol(), // field name
- alpha, // beta
- ExistPredSet.factory(rg.predTrue), // predicates
- null
- );
-
- rg.addRefEdge(srcHRN, hrnSummary, edgeToSummary);
- prevNode=hrnSummary;
- arrayEntryNode=hrnSummary;
- }else{
- // make a new reference between summary nodes of array
- RefEdge edgeToSummary = new RefEdge(prevNode, // source
- hrnSummary, // dest
- typeDesc, // type
- arrayElementFieldName, // field name
- alpha, // beta
- ExistPredSet.factory(rg.predTrue), // predicates
- null
- );
-
- rg.addRefEdge(prevNode, hrnSummary, edgeToSummary);
- prevNode=hrnSummary;
- }
-
+ }
+
+ private HeapRegionNode createMultiDeimensionalArrayHRN(ReachGraph rg, AllocSite alloc, HeapRegionNode srcHRN, FieldDescriptor fd, Hashtable<HeapRegionNode, HeapRegionNode> map, Hashtable<TypeDescriptor, HeapRegionNode> mapToExistingNode, ReachSet alpha) {
+
+ int dimCount=fd.getType().getArrayCount();
+ HeapRegionNode prevNode=null;
+ HeapRegionNode arrayEntryNode=null;
+ for(int i=dimCount; i>0; i--) {
+ TypeDescriptor typeDesc=fd.getType().dereference(); //hack to get instance of type desc
+ typeDesc.setArrayCount(i);
+ TempDescriptor tempDesc=new TempDescriptor(typeDesc.getSymbol(),typeDesc);
+ HeapRegionNode hrnSummary;
+ if(!mapToExistingNode.containsKey(typeDesc)) {
+ AllocSite as;
+ if(i==dimCount) {
+ as = alloc;
+ } else {
+ as = createParameterAllocSite(rg, tempDesc, false);
+ }
+ // make a new reference to allocated node
+ hrnSummary =
+ rg.createNewHeapRegionNode(as.getSummary(), // id or null to generate a new one
+ false, // single object?
+ true, // summary?
+ false, // out-of-context?
+ as.getType(), // type
+ as, // allocation site
+ alpha, // inherent reach
+ alpha, // current reach
+ ExistPredSet.factory(rg.predTrue), // predicates
+ tempDesc.toString() // description
+ );
+ rg.id2hrn.put(as.getSummary(),hrnSummary);
+
+ mapToExistingNode.put(typeDesc, hrnSummary);
+ } else {
+ hrnSummary=mapToExistingNode.get(typeDesc);
+ }
+
+ if(prevNode==null) {
+ // make a new reference between new summary node and source
+ RefEdge edgeToSummary = new RefEdge(srcHRN, // source
+ hrnSummary, // dest
+ typeDesc, // type
+ fd.getSymbol(), // field name
+ alpha, // beta
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
+ );
+
+ rg.addRefEdge(srcHRN, hrnSummary, edgeToSummary);
+ prevNode=hrnSummary;
+ arrayEntryNode=hrnSummary;
+ } else {
+ // make a new reference between summary nodes of array
+ RefEdge edgeToSummary = new RefEdge(prevNode, // source
+ hrnSummary, // dest
+ typeDesc, // type
+ arrayElementFieldName, // field name
+ alpha, // beta
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
+ );
+
+ rg.addRefEdge(prevNode, hrnSummary, edgeToSummary);
+ prevNode=hrnSummary;
+ }
+
+ }
+
+ // create a new obj node if obj has at least one non-primitive field
+ TypeDescriptor type=fd.getType();
+ if(getFieldSetTobeAnalyzed(type).size()>0) {
+ TypeDescriptor typeDesc=type.dereference();
+ typeDesc.setArrayCount(0);
+ if(!mapToExistingNode.containsKey(typeDesc)) {
+ TempDescriptor tempDesc=new TempDescriptor(type.getSymbol(),typeDesc);
+ AllocSite as = createParameterAllocSite(rg, tempDesc, false);
+ // make a new reference to allocated node
+ HeapRegionNode hrnSummary =
+ rg.createNewHeapRegionNode(as.getSummary(), // id or null to generate a new one
+ false, // single object?
+ true, // summary?
+ false, // out-of-context?
+ typeDesc, // type
+ as, // allocation site
+ alpha, // inherent reach
+ alpha, // current reach
+ ExistPredSet.factory(rg.predTrue), // predicates
+ tempDesc.toString() // description
+ );
+ rg.id2hrn.put(as.getSummary(),hrnSummary);
+ mapToExistingNode.put(typeDesc, hrnSummary);
+ RefEdge edgeToSummary = new RefEdge(prevNode, // source
+ hrnSummary, // dest
+ typeDesc, // type
+ arrayElementFieldName, // field name
+ alpha, // beta
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
+ );
+ rg.addRefEdge(prevNode, hrnSummary, edgeToSummary);
+ prevNode=hrnSummary;
+ } else {
+ HeapRegionNode hrnSummary=mapToExistingNode.get(typeDesc);
+ if(prevNode.getReferenceTo(hrnSummary, typeDesc, arrayElementFieldName)==null) {
+ RefEdge edgeToSummary = new RefEdge(prevNode, // source
+ hrnSummary, // dest
+ typeDesc, // type
+ arrayElementFieldName, // field name
+ alpha, // beta
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
+ );
+ rg.addRefEdge(prevNode, hrnSummary, edgeToSummary);
}
-
- // create a new obj node if obj has at least one non-primitive field
- TypeDescriptor type=fd.getType();
- if(getFieldSetTobeAnalyzed(type).size()>0){
- TypeDescriptor typeDesc=type.dereference();
- typeDesc.setArrayCount(0);
- if(!mapToExistingNode.containsKey(typeDesc)){
- TempDescriptor tempDesc=new TempDescriptor(type.getSymbol(),typeDesc);
- AllocSite as = createParameterAllocSite(rg, tempDesc, false);
- // make a new reference to allocated node
- HeapRegionNode hrnSummary =
- rg.createNewHeapRegionNode(as.getSummary(), // id or null to generate a new one
- false, // single object?
- true, // summary?
- false, // out-of-context?
- typeDesc, // type
- as, // allocation site
- alpha, // inherent reach
- alpha, // current reach
- ExistPredSet.factory(rg.predTrue), // predicates
- tempDesc.toString() // description
- );
- rg.id2hrn.put(as.getSummary(),hrnSummary);
- mapToExistingNode.put(typeDesc, hrnSummary);
- RefEdge edgeToSummary = new RefEdge(prevNode, // source
- hrnSummary, // dest
- typeDesc, // type
- arrayElementFieldName, // field name
- alpha, // beta
- ExistPredSet.factory(rg.predTrue), // predicates
- null
- );
- rg.addRefEdge(prevNode, hrnSummary, edgeToSummary);
- prevNode=hrnSummary;
- }else{
- HeapRegionNode hrnSummary=mapToExistingNode.get(typeDesc);
- if(prevNode.getReferenceTo(hrnSummary, typeDesc, arrayElementFieldName)==null){
- RefEdge edgeToSummary = new RefEdge(prevNode, // source
- hrnSummary, // dest
- typeDesc, // type
- arrayElementFieldName, // field name
- alpha, // beta
- ExistPredSet.factory(rg.predTrue), // predicates
- null
- );
- rg.addRefEdge(prevNode, hrnSummary, edgeToSummary);
- }
- prevNode=hrnSummary;
- }
+ prevNode=hrnSummary;
+ }
}
-
- map.put(arrayEntryNode, prevNode);
- return arrayEntryNode;
-}
-private ReachGraph createInitialTaskReachGraph(FlatMethod fm) {
+ map.put(arrayEntryNode, prevNode);
+ return arrayEntryNode;
+ }
+
+ private ReachGraph createInitialTaskReachGraph(FlatMethod fm) {
ReachGraph rg = new ReachGraph();
TaskDescriptor taskDesc = fm.getTask();
-
+
for (int idx = 0; idx < taskDesc.numParameters(); idx++) {
- Descriptor paramDesc = taskDesc.getParameter(idx);
- TypeDescriptor paramTypeDesc = taskDesc.getParamType(idx);
-
- // setup data structure
- Set<HashMap<HeapRegionNode, FieldDescriptor>> workSet =
- new HashSet<HashMap<HeapRegionNode, FieldDescriptor>>();
- Hashtable<TypeDescriptor, HeapRegionNode> mapTypeToExistingSummaryNode =
- new Hashtable<TypeDescriptor, HeapRegionNode>();
- Hashtable<HeapRegionNode, HeapRegionNode> mapToFirstDimensionArrayNode =
- new Hashtable<HeapRegionNode, HeapRegionNode>();
- Set<String> doneSet = new HashSet<String>();
-
- TempDescriptor tempDesc = fm.getParameter(idx);
-
- AllocSite as = createParameterAllocSite(rg, tempDesc, true);
- VariableNode lnX = rg.getVariableNodeFromTemp(tempDesc);
- Integer idNewest = as.getIthOldest(0);
- HeapRegionNode hrnNewest = rg.id2hrn.get(idNewest);
+ Descriptor paramDesc = taskDesc.getParameter(idx);
+ TypeDescriptor paramTypeDesc = taskDesc.getParamType(idx);
+
+ // setup data structure
+ Set<HashMap<HeapRegionNode, FieldDescriptor>> workSet =
+ new HashSet<HashMap<HeapRegionNode, FieldDescriptor>>();
+ Hashtable<TypeDescriptor, HeapRegionNode> mapTypeToExistingSummaryNode =
+ new Hashtable<TypeDescriptor, HeapRegionNode>();
+ Hashtable<HeapRegionNode, HeapRegionNode> mapToFirstDimensionArrayNode =
+ new Hashtable<HeapRegionNode, HeapRegionNode>();
+ Set<String> doneSet = new HashSet<String>();
+
+ TempDescriptor tempDesc = fm.getParameter(idx);
+
+ AllocSite as = createParameterAllocSite(rg, tempDesc, true);
+ VariableNode lnX = rg.getVariableNodeFromTemp(tempDesc);
+ Integer idNewest = as.getIthOldest(0);
+ HeapRegionNode hrnNewest = rg.id2hrn.get(idNewest);
+
+ // make a new reference to allocated node
+ RefEdge edgeNew = new RefEdge(lnX, // source
+ hrnNewest, // dest
+ taskDesc.getParamType(idx), // type
+ null, // field name
+ hrnNewest.getAlpha(), // beta
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
+ );
+ rg.addRefEdge(lnX, hrnNewest, edgeNew);
+
+ // set-up a work set for class field
+ ClassDescriptor classDesc = paramTypeDesc.getClassDesc();
+ for (Iterator it = classDesc.getFields(); it.hasNext(); ) {
+ FieldDescriptor fd = (FieldDescriptor) it.next();
+ TypeDescriptor fieldType = fd.getType();
+ if (shouldAnalysisTrack(fieldType)) {
+ HashMap<HeapRegionNode, FieldDescriptor> newMap = new HashMap<HeapRegionNode, FieldDescriptor>();
+ newMap.put(hrnNewest, fd);
+ workSet.add(newMap);
+ }
+ }
- // make a new reference to allocated node
- RefEdge edgeNew = new RefEdge(lnX, // source
- hrnNewest, // dest
- taskDesc.getParamType(idx), // type
- null, // field name
- hrnNewest.getAlpha(), // beta
- ExistPredSet.factory(rg.predTrue), // predicates
- null
- );
- rg.addRefEdge(lnX, hrnNewest, edgeNew);
-
- // set-up a work set for class field
- ClassDescriptor classDesc = paramTypeDesc.getClassDesc();
- for (Iterator it = classDesc.getFields(); it.hasNext();) {
- FieldDescriptor fd = (FieldDescriptor) it.next();
- TypeDescriptor fieldType = fd.getType();
- if (shouldAnalysisTrack( fieldType )) {
- HashMap<HeapRegionNode, FieldDescriptor> newMap = new HashMap<HeapRegionNode, FieldDescriptor>();
- newMap.put(hrnNewest, fd);
+ int uniqueIdentifier = 0;
+ while (!workSet.isEmpty()) {
+ HashMap<HeapRegionNode, FieldDescriptor> map = workSet
+ .iterator().next();
+ workSet.remove(map);
+
+ Set<HeapRegionNode> key = map.keySet();
+ HeapRegionNode srcHRN = key.iterator().next();
+ FieldDescriptor fd = map.get(srcHRN);
+ TypeDescriptor type = fd.getType();
+ String doneSetIdentifier = srcHRN.getIDString() + "_" + fd;
+
+ if (!doneSet.contains(doneSetIdentifier)) {
+ doneSet.add(doneSetIdentifier);
+ if (!mapTypeToExistingSummaryNode.containsKey(type)) {
+ // create new summary Node
+ TempDescriptor td = new TempDescriptor("temp"
+ + uniqueIdentifier, type);
+
+ AllocSite allocSite;
+ if(type.equals(paramTypeDesc)) {
+ //corresponding allocsite has already been created for a parameter variable.
+ allocSite=as;
+ } else {
+ allocSite = createParameterAllocSite(rg, td, false);
+ }
+ String strDesc = allocSite.toStringForDOT()
+ + "\\nsummary";
+ TypeDescriptor allocType=allocSite.getType();
+
+ HeapRegionNode hrnSummary;
+ if(allocType.isArray() && allocType.getArrayCount()>0) {
+ hrnSummary=createMultiDeimensionalArrayHRN(rg,allocSite,srcHRN,fd,mapToFirstDimensionArrayNode,mapTypeToExistingSummaryNode,hrnNewest.getAlpha());
+ } else {
+ hrnSummary =
+ rg.createNewHeapRegionNode(allocSite.getSummary(), // id or null to generate a new one
+ false, // single object?
+ true, // summary?
+ false, // out-of-context?
+ allocSite.getType(), // type
+ allocSite, // allocation site
+ hrnNewest.getAlpha(), // inherent reach
+ hrnNewest.getAlpha(), // current reach
+ ExistPredSet.factory(rg.predTrue), // predicates
+ strDesc // description
+ );
+ rg.id2hrn.put(allocSite.getSummary(),hrnSummary);
+
+ // make a new reference to summary node
+ RefEdge edgeToSummary = new RefEdge(srcHRN, // source
+ hrnSummary, // dest
+ type, // type
+ fd.getSymbol(), // field name
+ hrnNewest.getAlpha(), // beta
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
+ );
+
+ rg.addRefEdge(srcHRN, hrnSummary, edgeToSummary);
+ }
+ uniqueIdentifier++;
+
+ mapTypeToExistingSummaryNode.put(type, hrnSummary);
+
+ // set-up a work set for fields of the class
+ Set<FieldDescriptor> fieldTobeAnalyzed=getFieldSetTobeAnalyzed(type);
+ for (Iterator iterator = fieldTobeAnalyzed.iterator(); iterator
+ .hasNext(); ) {
+ FieldDescriptor fieldDescriptor = (FieldDescriptor) iterator
+ .next();
+ HeapRegionNode newDstHRN;
+ if(mapToFirstDimensionArrayNode.containsKey(hrnSummary)) {
+            //related heap region node already exists.
+ newDstHRN=mapToFirstDimensionArrayNode.get(hrnSummary);
+ } else {
+ newDstHRN=hrnSummary;
+ }
+ doneSetIdentifier = newDstHRN.getIDString() + "_" + fieldDescriptor;
+ if(!doneSet.contains(doneSetIdentifier)) {
+ // add new work item
+ HashMap<HeapRegionNode, FieldDescriptor> newMap =
+ new HashMap<HeapRegionNode, FieldDescriptor>();
+ newMap.put(newDstHRN, fieldDescriptor);
workSet.add(newMap);
+ }
}
+
+ } else {
+ // if there exists corresponding summary node
+ HeapRegionNode hrnDst=mapTypeToExistingSummaryNode.get(type);
+
+ RefEdge edgeToSummary = new RefEdge(srcHRN, // source
+ hrnDst, // dest
+ fd.getType(), // type
+ fd.getSymbol(), // field name
+ srcHRN.getAlpha(), // beta
+ ExistPredSet.factory(rg.predTrue), // predicates
+ null
+ );
+ rg.addRefEdge(srcHRN, hrnDst, edgeToSummary);
+
+ }
}
-
- int uniqueIdentifier = 0;
- while (!workSet.isEmpty()) {
- HashMap<HeapRegionNode, FieldDescriptor> map = workSet
- .iterator().next();
- workSet.remove(map);
-
- Set<HeapRegionNode> key = map.keySet();
- HeapRegionNode srcHRN = key.iterator().next();
- FieldDescriptor fd = map.get(srcHRN);
- TypeDescriptor type = fd.getType();
- String doneSetIdentifier = srcHRN.getIDString() + "_" + fd;
-
- if (!doneSet.contains(doneSetIdentifier)) {
- doneSet.add(doneSetIdentifier);
- if (!mapTypeToExistingSummaryNode.containsKey(type)) {
- // create new summary Node
- TempDescriptor td = new TempDescriptor("temp"
- + uniqueIdentifier, type);
-
- AllocSite allocSite;
- if(type.equals(paramTypeDesc)){
- //corresponding allocsite has already been created for a parameter variable.
- allocSite=as;
- }else{
- allocSite = createParameterAllocSite(rg, td, false);
- }
- String strDesc = allocSite.toStringForDOT()
- + "\\nsummary";
- TypeDescriptor allocType=allocSite.getType();
-
- HeapRegionNode hrnSummary;
- if(allocType.isArray() && allocType.getArrayCount()>0){
- hrnSummary=createMultiDeimensionalArrayHRN(rg,allocSite,srcHRN,fd,mapToFirstDimensionArrayNode,mapTypeToExistingSummaryNode,hrnNewest.getAlpha());
- }else{
- hrnSummary =
- rg.createNewHeapRegionNode(allocSite.getSummary(), // id or null to generate a new one
- false, // single object?
- true, // summary?
- false, // out-of-context?
- allocSite.getType(), // type
- allocSite, // allocation site
- hrnNewest.getAlpha(), // inherent reach
- hrnNewest.getAlpha(), // current reach
- ExistPredSet.factory(rg.predTrue), // predicates
- strDesc // description
- );
- rg.id2hrn.put(allocSite.getSummary(),hrnSummary);
-
- // make a new reference to summary node
- RefEdge edgeToSummary = new RefEdge(srcHRN, // source
- hrnSummary, // dest
- type, // type
- fd.getSymbol(), // field name
- hrnNewest.getAlpha(), // beta
- ExistPredSet.factory(rg.predTrue), // predicates
- null
- );
-
- rg.addRefEdge(srcHRN, hrnSummary, edgeToSummary);
- }
- uniqueIdentifier++;
-
- mapTypeToExistingSummaryNode.put(type, hrnSummary);
-
- // set-up a work set for fields of the class
- Set<FieldDescriptor> fieldTobeAnalyzed=getFieldSetTobeAnalyzed(type);
- for (Iterator iterator = fieldTobeAnalyzed.iterator(); iterator
- .hasNext();) {
- FieldDescriptor fieldDescriptor = (FieldDescriptor) iterator
- .next();
- HeapRegionNode newDstHRN;
- if(mapToFirstDimensionArrayNode.containsKey(hrnSummary)){
- //related heap region node is already exsited.
- newDstHRN=mapToFirstDimensionArrayNode.get(hrnSummary);
- }else{
- newDstHRN=hrnSummary;
- }
- doneSetIdentifier = newDstHRN.getIDString() + "_" + fieldDescriptor;
- if(!doneSet.contains(doneSetIdentifier)){
- // add new work item
- HashMap<HeapRegionNode, FieldDescriptor> newMap =
- new HashMap<HeapRegionNode, FieldDescriptor>();
- newMap.put(newDstHRN, fieldDescriptor);
- workSet.add(newMap);
- }
- }
-
- }else{
- // if there exists corresponding summary node
- HeapRegionNode hrnDst=mapTypeToExistingSummaryNode.get(type);
-
- RefEdge edgeToSummary = new RefEdge(srcHRN, // source
- hrnDst, // dest
- fd.getType(), // type
- fd.getSymbol(), // field name
- srcHRN.getAlpha(), // beta
- ExistPredSet.factory(rg.predTrue), // predicates
- null
- );
- rg.addRefEdge(srcHRN, hrnDst, edgeToSummary);
-
- }
- }
- }
- }
+ }
+ }
return rg;
-}
+ }
// return all allocation sites in the method (there is one allocation
// site per FlatNew node in a method)
-private HashSet<AllocSite> getAllocationSiteSet(Descriptor d) {
- if( !mapDescriptorToAllocSiteSet.containsKey(d) ) {
- buildAllocationSiteSet(d);
- }
+ private HashSet<AllocSite> getAllocationSiteSet(Descriptor d) {
+ if( !mapDescriptorToAllocSiteSet.containsKey(d) ) {
+ buildAllocationSiteSet(d);
+ }
- return mapDescriptorToAllocSiteSet.get(d);
+ return mapDescriptorToAllocSiteSet.get(d);
-}
+ }
-private void buildAllocationSiteSet(Descriptor d) {
+ private void buildAllocationSiteSet(Descriptor d) {
HashSet<AllocSite> s = new HashSet<AllocSite>();
FlatMethod fm;
mapDescriptorToAllocSiteSet.put(d, s);
}
- private HashSet<AllocSite> getFlaggedAllocationSites(Descriptor dIn) {
-
- HashSet<AllocSite> out = new HashSet<AllocSite>();
- HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
- HashSet<Descriptor> visited = new HashSet<Descriptor>();
-
- toVisit.add(dIn);
-
- while (!toVisit.isEmpty()) {
- Descriptor d = toVisit.iterator().next();
- toVisit.remove(d);
- visited.add(d);
-
- HashSet<AllocSite> asSet = getAllocationSiteSet(d);
- Iterator asItr = asSet.iterator();
- while (asItr.hasNext()) {
- AllocSite as = (AllocSite) asItr.next();
- if (as.getDisjointAnalysisId() != null) {
- out.add(as);
- }
- }
-
- // enqueue callees of this method to be searched for
- // allocation sites also
- Set callees = callGraph.getCalleeSet(d);
- if (callees != null) {
- Iterator methItr = callees.iterator();
- while (methItr.hasNext()) {
- MethodDescriptor md = (MethodDescriptor) methItr.next();
-
- if (!visited.contains(md)) {
- toVisit.add(md);
- }
- }
- }
- }
-
- return out;
+ private HashSet<AllocSite> getFlaggedAllocationSites(Descriptor dIn) {
+
+ HashSet<AllocSite> out = new HashSet<AllocSite>();
+ HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
+ HashSet<Descriptor> visited = new HashSet<Descriptor>();
+
+ toVisit.add(dIn);
+
+ while (!toVisit.isEmpty()) {
+ Descriptor d = toVisit.iterator().next();
+ toVisit.remove(d);
+ visited.add(d);
+
+ HashSet<AllocSite> asSet = getAllocationSiteSet(d);
+ Iterator asItr = asSet.iterator();
+ while (asItr.hasNext()) {
+ AllocSite as = (AllocSite) asItr.next();
+ if (as.getDisjointAnalysisId() != null) {
+ out.add(as);
+ }
+ }
+
+ // enqueue callees of this method to be searched for
+ // allocation sites also
+ Set callees = callGraph.getCalleeSet(d);
+ if (callees != null) {
+ Iterator methItr = callees.iterator();
+ while (methItr.hasNext()) {
+ MethodDescriptor md = (MethodDescriptor) methItr.next();
+
+ if (!visited.contains(md)) {
+ toVisit.add(md);
+ }
}
-
-
-private HashSet<AllocSite>
-getFlaggedAllocationSitesReachableFromTaskPRIVATE(TaskDescriptor td) {
-
- HashSet<AllocSite> asSetTotal = new HashSet<AllocSite>();
- HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
- HashSet<Descriptor> visited = new HashSet<Descriptor>();
-
- toVisit.add(td);
-
- // traverse this task and all methods reachable from this task
- while( !toVisit.isEmpty() ) {
- Descriptor d = toVisit.iterator().next();
- toVisit.remove(d);
- visited.add(d);
-
- HashSet<AllocSite> asSet = getAllocationSiteSet(d);
- Iterator asItr = asSet.iterator();
- while( asItr.hasNext() ) {
+ }
+ }
+
+ return out;
+ }
+
+
+ private HashSet<AllocSite>
+ getFlaggedAllocationSitesReachableFromTaskPRIVATE(TaskDescriptor td) {
+
+ HashSet<AllocSite> asSetTotal = new HashSet<AllocSite>();
+ HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
+ HashSet<Descriptor> visited = new HashSet<Descriptor>();
+
+ toVisit.add(td);
+
+ // traverse this task and all methods reachable from this task
+ while( !toVisit.isEmpty() ) {
+ Descriptor d = toVisit.iterator().next();
+ toVisit.remove(d);
+ visited.add(d);
+
+ HashSet<AllocSite> asSet = getAllocationSiteSet(d);
+ Iterator asItr = asSet.iterator();
+ while( asItr.hasNext() ) {
AllocSite as = (AllocSite) asItr.next();
TypeDescriptor typed = as.getType();
if( typed != null ) {
asSetTotal.add(as);
}
}
- }
+ }
- // enqueue callees of this method to be searched for
- // allocation sites also
- Set callees = callGraph.getCalleeSet(d);
- if( callees != null ) {
+ // enqueue callees of this method to be searched for
+ // allocation sites also
+ Set callees = callGraph.getCalleeSet(d);
+ if( callees != null ) {
Iterator methItr = callees.iterator();
while( methItr.hasNext() ) {
MethodDescriptor md = (MethodDescriptor) methItr.next();
toVisit.add(md);
}
}
+ }
}
- }
- return asSetTotal;
-}
+ return asSetTotal;
+ }
public Set<Descriptor> getDescriptorsToAnalyze() {
return descriptorsToAnalyze;
}
- public EffectsAnalysis getEffectsAnalysis(){
+ public EffectsAnalysis getEffectsAnalysis() {
return effectsAnalysis;
}
-
- public ReachGraph getReachGraph(Descriptor d){
+
+ public ReachGraph getReachGraph(Descriptor d) {
return mapDescriptorToCompleteReachGraph.get(d);
}
-
- public ReachGraph getEnterReachGraph(FlatNode fn){
+
+ public ReachGraph getEnterReachGraph(FlatNode fn) {
return fn2rgAtEnter.get(fn);
}
-
+
// get successive captures of the analysis state, use compiler
// flags to control
boolean takeDebugSnapshots = false;
- String descSymbolDebug = null;
+ String descSymbolDebug = null;
boolean stopAfterCapture = false;
- int snapVisitCounter = 0;
- int snapNodeCounter = 0;
- int visitStartCapture = 0;
- int numVisitsToCapture = 0;
+ int snapVisitCounter = 0;
+ int snapNodeCounter = 0;
+ int visitStartCapture = 0;
+ int numVisitsToCapture = 0;
- void debugSnapshot( ReachGraph rg, FlatNode fn, boolean in ) {
+ void debugSnapshot(ReachGraph rg, FlatNode fn, boolean in) {
if( snapVisitCounter > visitStartCapture + numVisitsToCapture ) {
return;
}
}
if( snapVisitCounter >= visitStartCapture ) {
- System.out.println( " @@@ snapping visit="+snapVisitCounter+
- ", node="+snapNodeCounter+
- " @@@" );
+ System.out.println(" @@@ snapping visit="+snapVisitCounter+
+ ", node="+snapNodeCounter+
+ " @@@");
String graphName;
if( in ) {
- graphName = String.format( "snap%03d_%04din",
- snapVisitCounter,
- snapNodeCounter );
+ graphName = String.format("snap%03d_%04din",
+ snapVisitCounter,
+ snapNodeCounter);
} else {
- graphName = String.format( "snap%03d_%04dout",
- snapVisitCounter,
- snapNodeCounter );
+ graphName = String.format("snap%03d_%04dout",
+ snapVisitCounter,
+ snapNodeCounter);
}
if( fn != null ) {
graphName = graphName + fn;
}
- rg.writeGraph( graphName,
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // hide reachability
- false, // hide subset reachability states
- true, // hide predicates
- true ); // hide edge taints
+ rg.writeGraph(graphName,
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide reachability
+ false, // hide subset reachability states
+ true, // hide predicates
+ true); // hide edge taints
}
}
}
Effect in = (Effect) o;
-
- if (affectedAllocSite.equals(in.getAffectedAllocSite())
- && type == in.getType()
+
+ if (affectedAllocSite.equals(in.getAffectedAllocSite())
+ && type == in.getType()
&& ((field!=null&&field.equals(in.getField()))||
- (field==null&&in.getField()==null))) {
+ (field==null&&in.getField()==null))) {
return true;
} else {
return false;
import Analysis.Pointer.AllocFactory.AllocNode;
/////////////////////////////////////////////
-//
+//
// Effects analysis computes read/write/strong
// update and other sorts of effects for the
// scope of a method or rblock. The effects
private Hashtable<FlatSESEEnterNode, Hashtable<Taint, Set<Effect>> > sese2te;
private Hashtable<FlatNode, Hashtable<Taint, Set<Effect>> > stallSite2te;
- public static State state;
+ public static State state;
public static BuildStateMachines buildStateMachines;
public Set<Effect> getEffects(Taint t) {
- Taint tNoPreds = Canonical.changePredsTo( t,
- ReachGraph.predsEmpty
- );
+ Taint tNoPreds = Canonical.changePredsTo(t,
+ ReachGraph.predsEmpty
+ );
return taint2effects.get(tNoPreds);
}
}
protected void add(Taint t, Effect e, FlatNode currentProgramPoint) {
- Taint tNoPreds = Canonical.changePredsTo( t,
- ReachGraph.predsEmpty
- );
+ Taint tNoPreds = Canonical.changePredsTo(t,
+ ReachGraph.predsEmpty
+ );
if( state.RCR ) {
- buildStateMachines.addToStateMachine( t, e, currentProgramPoint );
+ buildStateMachines.addToStateMachine(t, e, currentProgramPoint);
}
// add to the global bag
if( t.getSESE() != null ) {
FlatSESEEnterNode sese = t.getSESE();
- Hashtable<Taint, Set<Effect>> te = sese2te.get( sese );
+ Hashtable<Taint, Set<Effect>> te = sese2te.get(sese);
if( te == null ) {
- te = new Hashtable<Taint, Set<Effect>>();
+ te = new Hashtable<Taint, Set<Effect>>();
}
Set<Effect> effects = te.get(tNoPreds);
if (effects == null) {
- effects = new HashSet<Effect>();
+ effects = new HashSet<Effect>();
}
effects.add(e);
te.put(tNoPreds, effects);
assert t.getStallSite() != null;
FlatNode stallSite = t.getStallSite();
- Hashtable<Taint, Set<Effect>> te = stallSite2te.get( stallSite );
+ Hashtable<Taint, Set<Effect>> te = stallSite2te.get(stallSite);
if( te == null ) {
- te = new Hashtable<Taint, Set<Effect>>();
+ te = new Hashtable<Taint, Set<Effect>>();
}
Set<Effect> effects = te.get(tNoPreds);
if (effects == null) {
- effects = new HashSet<Effect>();
+ effects = new HashSet<Effect>();
}
effects.add(e);
te.put(tNoPreds, effects);
stallSite2te.put(stallSite, te);
- }
+ }
}
- public Hashtable<Taint, Set<Effect>> get( FlatSESEEnterNode sese ) {
+ public Hashtable<Taint, Set<Effect>> get(FlatSESEEnterNode sese) {
return sese2te.get(sese);
}
- public Hashtable<Taint, Set<Effect>> get( FlatNode stallSite ) {
+ public Hashtable<Taint, Set<Effect>> get(FlatNode stallSite) {
return stallSite2te.get(stallSite);
}
return;
}
- for (Iterator<RefEdge> iterator = vn.iteratorToReferencees(); iterator.hasNext();) {
- RefEdge edge = iterator.next();
- TaintSet taintSet = edge.getTaints();
+ for (Iterator<RefEdge> iterator = vn.iteratorToReferencees(); iterator.hasNext(); ) {
+ RefEdge edge = iterator.next();
+ TaintSet taintSet = edge.getTaints();
AllocSite affectedAlloc = edge.getDst().getAllocSite();
- Effect effect = new Effect(affectedAlloc, Effect.read, fld);
+ Effect effect = new Effect(affectedAlloc, Effect.read, fld);
- for (Iterator<Taint> taintSetIter = taintSet.iterator(); taintSetIter.hasNext();) {
- Taint taint = taintSetIter.next();
- add(taint, effect, currentProgramPoint);
+ for (Iterator<Taint> taintSetIter = taintSet.iterator(); taintSetIter.hasNext(); ) {
+ Taint taint = taintSetIter.next();
+ add(taint, effect, currentProgramPoint);
}
}
}
public void analyzeFlatFieldNode(Set<Edge> sources, FieldDescriptor fld, FlatNode currentProgramPoint) {
- for (Edge edge:sources) {
- TaintSet taintSet = edge.getTaints();
- Alloc affectedAlloc = edge.getDst().getAllocSite();
- Effect effect = new Effect(affectedAlloc, Effect.read, fld);
+ for (Edge edge : sources) {
+ TaintSet taintSet = edge.getTaints();
+ Alloc affectedAlloc = edge.getDst().getAllocSite();
+ Effect effect = new Effect(affectedAlloc, Effect.read, fld);
if (taintSet!=null)
- for (Taint taint:taintSet.getTaints()) {
+ for (Taint taint : taintSet.getTaints()) {
add(taint, effect, currentProgramPoint);
}
}
return;
}
- for (Iterator<RefEdge> iterator = vn.iteratorToReferencees(); iterator.hasNext();) {
- RefEdge edge = iterator.next();
- TaintSet taintSet = edge.getTaints();
+ for (Iterator<RefEdge> iterator = vn.iteratorToReferencees(); iterator.hasNext(); ) {
+ RefEdge edge = iterator.next();
+ TaintSet taintSet = edge.getTaints();
AllocSite affectedAlloc = edge.getDst().getAllocSite();
- Effect effect = new Effect(affectedAlloc, Effect.write, fld);
- Effect effectSU = null;
+ Effect effect = new Effect(affectedAlloc, Effect.write, fld);
+ Effect effectSU = null;
if (strongUpdate) {
- effectSU = new Effect(affectedAlloc, Effect.strongupdate, fld);
+ effectSU = new Effect(affectedAlloc, Effect.strongupdate, fld);
}
- for (Iterator<Taint> taintSetIter = taintSet.iterator(); taintSetIter.hasNext();) {
- Taint taint = taintSetIter.next();
- add( taint, effect, currentProgramPoint );
+ for (Iterator<Taint> taintSetIter = taintSet.iterator(); taintSetIter.hasNext(); ) {
+ Taint taint = taintSetIter.next();
+ add(taint, effect, currentProgramPoint);
- if (strongUpdate) {
- add( taint, effectSU, currentProgramPoint );
- }
+ if (strongUpdate) {
+ add(taint, effectSU, currentProgramPoint);
+ }
}
}
}
public void analyzeFlatSetFieldNode(Set<Edge> dstedges, FieldDescriptor fld, FlatNode currentProgramPoint) {
- for (Edge edge:dstedges) {
+ for (Edge edge : dstedges) {
TaintSet taintSet = edge.getTaints();
Alloc affectedAlloc = edge.getDst().getAllocSite();
Effect effect = new Effect(affectedAlloc, Effect.write, fld);
if (taintSet!=null)
- for (Taint taint:taintSet.getTaints()) {
- add(taint, effect, currentProgramPoint );
+ for (Taint taint : taintSet.getTaints()) {
+ add(taint, effect, currentProgramPoint);
}
}
}
public String toString() {
- return taint2effects.toString();
+ return taint2effects.toString();
}
- public void writeEffects( String outfile ) {
+ public void writeEffects(String outfile) {
try {
BufferedWriter bw = new BufferedWriter(new FileWriter(outfile));
-
- bw.write( "Effects\n---------------\n\n" );
+
+ bw.write("Effects\n---------------\n\n");
Iterator meItr = taint2effects.entrySet().iterator();
while( meItr.hasNext() ) {
- Map.Entry me = (Map.Entry) meItr.next();
- Taint taint = (Taint) me.getKey();
- Set<Effect> effects = (Set<Effect>) me.getValue();
-
- Iterator<Effect> eItr = effects.iterator();
- while( eItr.hasNext() ) {
- Effect e = eItr.next();
-
- bw.write( taint+"-->"+e+"\n" );
- }
+ Map.Entry me = (Map.Entry)meItr.next();
+ Taint taint = (Taint) me.getKey();
+ Set<Effect> effects = (Set<Effect>)me.getValue();
+
+ Iterator<Effect> eItr = effects.iterator();
+ while( eItr.hasNext() ) {
+ Effect e = eItr.next();
+
+ bw.write(taint+"-->"+e+"\n");
+ }
}
bw.close();
- } catch( IOException e ) {}
+ } catch( IOException e ) {
+ }
}
/*
* public MethodEffects getMethodEffectsByMethodContext(MethodContext mc){
* return mapMethodContextToMethodEffects.get(mc); }
- *
+ *
* public void createNewMapping(MethodContext mcNew) { if(!methodeffects)
* return; if (!mapMethodContextToMethodEffects.containsKey(mcNew)) {
* MethodEffects meNew = new MethodEffects();
* return; MethodEffects me = mapMethodContextToMethodEffects.get(mc);
* me.analyzeFlatFieldNode(og, srcDesc, fieldDesc);
* mapMethodContextToMethodEffects.put(mc, me); }
- *
+ *
* public void analyzeFlatSetFieldNode(MethodContext mc, OwnershipGraph og,
* TempDescriptor dstDesc, FieldDescriptor fieldDesc) { if(!methodeffects)
* return; MethodEffects me = mapMethodContextToMethodEffects.get(mc);
* me.analyzeFlatSetFieldNode(og, dstDesc, fieldDesc);
* mapMethodContextToMethodEffects.put(mc, me); }
- *
+ *
* public void analyzeFlatSetElementNode(MethodContext mc, OwnershipGraph og,
* TempDescriptor dstDesc, FieldDescriptor fieldDesc) { if(!methodeffects)
* return; MethodEffects me = mapMethodContextToMethodEffects.get(mc);
* me.analyzeFlatSetElementNode(og, dstDesc, fieldDesc);
* mapMethodContextToMethodEffects.put(mc, me); }
- *
+ *
* public void analyzeFlatElementNode(MethodContext mc, OwnershipGraph og,
* TempDescriptor dstDesc, FieldDescriptor fieldDesc) { if(!methodeffects)
* return; MethodEffects me = mapMethodContextToMethodEffects.get(mc);
* me.analyzeFlatElementNode(og, dstDesc, fieldDesc);
* mapMethodContextToMethodEffects.put(mc, me); }
- *
- *
+ *
+ *
* public void writeMethodEffectsResult() throws IOException {
- *
+ *
* try { BufferedWriter bw = new BufferedWriter(new FileWriter(
* "MethodEffects_report.txt"));
- *
+ *
* Set<MethodContext> mcSet = mapMethodContextToMethodEffects.keySet();
* Iterator<MethodContext> mcIter = mcSet.iterator(); while (mcIter.hasNext())
* { MethodContext mc = mcIter.next(); MethodDescriptor md =
* (MethodDescriptor) mc.getDescriptor();
- *
+ *
* int startIdx = 0; if (!md.isStatic()) { startIdx = 1; }
- *
+ *
* MethodEffects me = mapMethodContextToMethodEffects.get(mc); EffectsSet
* effectsSet = me.getEffects();
- *
+ *
* bw.write("Method " + mc + " :\n"); for (int i = startIdx; i <
* md.numParameters() + startIdx; i++) {
- *
+ *
* String paramName = md.getParamName(i - startIdx);
- *
+ *
* Set<EffectsKey> effectSet = effectsSet.getReadingSet(i); String keyStr =
* "{"; if (effectSet != null) { Iterator<EffectsKey> effectIter =
* effectSet.iterator(); while (effectIter.hasNext()) { EffectsKey key =
* effectIter.next(); keyStr += " " + key; } } keyStr += " }";
* bw.write(" Paramter " + paramName + " ReadingSet=" + keyStr + "\n");
- *
+ *
* effectSet = effectsSet.getWritingSet(new Integer(i)); keyStr = "{"; if
* (effectSet != null) { Iterator<EffectsKey> effectIter =
* effectSet.iterator(); while (effectIter.hasNext()) { EffectsKey key =
* effectIter.next(); keyStr += " " + key; } }
- *
+ *
* keyStr += " }"; bw.write(" Paramter " + paramName + " WritingngSet=" +
* keyStr + "\n");
- *
+ *
* } bw.write("\n");
- *
+ *
* }
- *
+ *
* bw.close(); } catch (IOException e) { System.err.println(e); }
- *
+ *
* }
*/
-
+
public Hashtable<Taint, Set<Effect>> getAllEffects() {
return taint2effects;
}
// Existence predicates in the callee final-result
// graph are relevant on the caller's callee-reachable
// graph parts. Any callee result elements with
-// predicates not satisfied in the caller are not
+// predicates not satisfied in the caller are not
// mapped in the call site transfer function
-public class ExistPred extends Canonical {
+public class ExistPred extends Canonical {
// there are several types of predicates, note that
// there are not subclasses of the ExistPred class
public static final int TYPE_EDGE = 0x414b;
protected int predType;
- // true predicates always evaluate to true
+ // true predicates always evaluate to true
// A node existence predicate is satisfied if the heap
// region ID defining a node is part of the given graph
// The reach state may be null--if not the predicate is
// satisfied when the edge exists AND it has the state.
- protected Integer n_hrnID;
+ protected Integer n_hrnID;
protected ReachState ne_state;
// An edge existence predicate is satisfied if the elements
// the source of an edge is *either* a variable
// node or a heap region node
protected TempDescriptor e_tdSrc;
- protected Integer e_hrnSrcID;
+ protected Integer e_hrnSrcID;
// the source of an edge might be out of the callee
// context but in the caller graph, a normal caller
// heap region or variable, OR it might be out of the
// caller context ALSO: an ooc node in the caller
- protected boolean e_srcOutCalleeContext;
- protected boolean e_srcOutCallerContext;
+ protected boolean e_srcOutCalleeContext;
+ protected boolean e_srcOutCallerContext;
// dst is always a heap region
- protected Integer e_hrnDstID;
+ protected Integer e_hrnDstID;
// a reference has a field name and type
protected TypeDescriptor e_type;
- protected String e_field;
+ protected String e_field;
// if the taint is non-null then the predicate
// is true only if the edge exists AND has the
// taint--ONLY ONE of the ne_state or e_taint
// may be non-null for an edge predicate
- protected Taint e_taint;
+ protected Taint e_taint;
// a static debug flag for higher abstraction code
// to enable debug info at this level
public static boolean debug = false;
-
+
// to make the true predicate
public static ExistPred factory() {
ExistPred out = new ExistPred();
- out = (ExistPred) Canonical.makeCanonical( out );
+ out = (ExistPred) Canonical.makeCanonical(out);
return out;
}
-
+
protected ExistPred() {
this.predType = TYPE_TRUE;
ne_state = null;
}
// node predicates
- public static ExistPred factory( Integer hrnID,
- ReachState state ) {
+ public static ExistPred factory(Integer hrnID,
+ ReachState state) {
- ExistPred out = new ExistPred( hrnID, state );
+ ExistPred out = new ExistPred(hrnID, state);
- out = (ExistPred) Canonical.makeCanonical( out );
+ out = (ExistPred) Canonical.makeCanonical(out);
return out;
}
-
- protected ExistPred( Integer hrnID,
- ReachState state ) {
+
+ protected ExistPred(Integer hrnID,
+ ReachState state) {
assert hrnID != null;
this.n_hrnID = hrnID;
this.ne_state = state;
}
// edge predicates
- public static ExistPred factory( TempDescriptor tdSrc,
- Integer hrnSrcID,
- Integer hrnDstID,
- TypeDescriptor type,
- String field,
- ReachState state,
- Taint taint,
- boolean srcOutCalleeContext,
- boolean srcOutCallerContext ) {
-
- ExistPred out = new ExistPred( tdSrc,
- hrnSrcID,
- hrnDstID,
- type,
- field,
- state,
- taint,
- srcOutCalleeContext,
- srcOutCallerContext );
-
- out = (ExistPred) Canonical.makeCanonical( out );
+ public static ExistPred factory(TempDescriptor tdSrc,
+ Integer hrnSrcID,
+ Integer hrnDstID,
+ TypeDescriptor type,
+ String field,
+ ReachState state,
+ Taint taint,
+ boolean srcOutCalleeContext,
+ boolean srcOutCallerContext) {
+
+ ExistPred out = new ExistPred(tdSrc,
+ hrnSrcID,
+ hrnDstID,
+ type,
+ field,
+ state,
+ taint,
+ srcOutCalleeContext,
+ srcOutCallerContext);
+
+ out = (ExistPred) Canonical.makeCanonical(out);
return out;
}
- protected ExistPred( TempDescriptor tdSrc,
- Integer hrnSrcID,
- Integer hrnDstID,
- TypeDescriptor type,
- String field,
- ReachState state,
- Taint taint,
- boolean srcOutCalleeContext,
- boolean srcOutCallerContext ) {
-
- assert (tdSrc == null) || (hrnSrcID == null);
+ protected ExistPred(TempDescriptor tdSrc,
+ Integer hrnSrcID,
+ Integer hrnDstID,
+ TypeDescriptor type,
+ String field,
+ ReachState state,
+ Taint taint,
+ boolean srcOutCalleeContext,
+ boolean srcOutCallerContext) {
+
+ assert(tdSrc == null) || (hrnSrcID == null);
assert hrnDstID != null;
assert type != null;
- assert (state == null) || (taint == null);
-
+ assert(state == null) || (taint == null);
+
// fields can be null when the edge is from
// a variable node to a heap region!
this.e_hrnSrcID = hrnSrcID;
this.e_hrnDstID = hrnDstID;
this.e_type = type;
- this.e_field = field;
+ this.e_field = field;
this.ne_state = state;
this.e_taint = taint;
this.predType = TYPE_EDGE;
}
// for node or edge, check inputs
- public static ExistPred factory( Integer hrnID,
- TempDescriptor tdSrc,
- Integer hrnSrcID,
- Integer hrnDstID,
- TypeDescriptor type,
- String field,
- ReachState state,
- Taint taint,
- boolean srcOutCalleeContext,
- boolean srcOutCallerContext ) {
+ public static ExistPred factory(Integer hrnID,
+ TempDescriptor tdSrc,
+ Integer hrnSrcID,
+ Integer hrnDstID,
+ TypeDescriptor type,
+ String field,
+ ReachState state,
+ Taint taint,
+ boolean srcOutCalleeContext,
+ boolean srcOutCallerContext) {
ExistPred out;
if( hrnID != null ) {
- out = new ExistPred( hrnID, state );
+ out = new ExistPred(hrnID, state);
} else {
- out = new ExistPred( tdSrc,
- hrnSrcID,
- hrnDstID,
- type,
- field,
- state,
- taint,
- srcOutCalleeContext,
- srcOutCallerContext );
+ out = new ExistPred(tdSrc,
+ hrnSrcID,
+ hrnDstID,
+ type,
+ field,
+ state,
+ taint,
+ srcOutCalleeContext,
+ srcOutCallerContext);
}
-
- out = (ExistPred) Canonical.makeCanonical( out );
+
+ out = (ExistPred) Canonical.makeCanonical(out);
return out;
}
// only consider the subest of the caller elements that
// are reachable by callee when testing predicates--if THIS
- // predicate is satisfied, return the predicate set of the
+ // predicate is satisfied, return the predicate set of the
// element that satisfied it, or null for false
- public ExistPredSet isSatisfiedBy( ReachGraph rg,
- Set<Integer> calleeReachableNodes
- ) {
+ public ExistPredSet isSatisfiedBy(ReachGraph rg,
+ Set<Integer> calleeReachableNodes
+ ) {
if( predType == TYPE_TRUE ) {
- return ExistPredSet.factory( ExistPred.factory() );
+ return ExistPredSet.factory(ExistPred.factory() );
}
if( predType == TYPE_NODE ) {
// first find node
- HeapRegionNode hrn = rg.id2hrn.get( n_hrnID );
+ HeapRegionNode hrn = rg.id2hrn.get(n_hrnID);
if( hrn == null ) {
- return null;
+ return null;
}
- if( !calleeReachableNodes.contains( n_hrnID ) ) {
- return null;
+ if( !calleeReachableNodes.contains(n_hrnID) ) {
+ return null;
}
// when the state is null we're done!
if( ne_state == null ) {
- return hrn.getPreds();
+ return hrn.getPreds();
} else {
- // otherwise look for state too
+ // otherwise look for state too
- // TODO: contains OR containsSuperSet OR containsWithZeroes??
- ReachState stateCaller = hrn.getAlpha().containsIgnorePreds( ne_state );
-
- if( stateCaller == null ) {
- return null;
+ // TODO: contains OR containsSuperSet OR containsWithZeroes??
+ ReachState stateCaller = hrn.getAlpha().containsIgnorePreds(ne_state);
- } else {
- // it was here, return the predicates on the state!!
- return stateCaller.getPreds();
- }
+ if( stateCaller == null ) {
+ return null;
+
+ } else {
+ // it was here, return the predicates on the state!!
+ return stateCaller.getPreds();
+ }
}
// unreachable program point!
}
-
+
if( predType == TYPE_EDGE ) {
// first establish whether the source of the
// reference edge exists
VariableNode vnSrc = null;
if( e_tdSrc != null ) {
- vnSrc = rg.td2vn.get( e_tdSrc );
+ vnSrc = rg.td2vn.get(e_tdSrc);
}
HeapRegionNode hrnSrc = null;
if( e_hrnSrcID != null ) {
- hrnSrc = rg.id2hrn.get( e_hrnSrcID );
+ hrnSrc = rg.id2hrn.get(e_hrnSrcID);
}
- assert (vnSrc == null) || (hrnSrc == null);
-
+ assert(vnSrc == null) || (hrnSrc == null);
+
// the source is not present in graph
if( vnSrc == null && hrnSrc == null ) {
- return null;
+ return null;
}
RefSrcNode rsn;
if( vnSrc != null ) {
- rsn = vnSrc;
- assert e_srcOutCalleeContext;
- assert !e_srcOutCallerContext;
+ rsn = vnSrc;
+ assert e_srcOutCalleeContext;
+ assert !e_srcOutCallerContext;
} else {
- assert !(e_srcOutCalleeContext && e_srcOutCallerContext);
+ assert !(e_srcOutCalleeContext && e_srcOutCallerContext);
- if( e_srcOutCalleeContext ) {
- if( calleeReachableNodes.contains( e_hrnSrcID ) ) {
- return null;
- }
+ if( e_srcOutCalleeContext ) {
+ if( calleeReachableNodes.contains(e_hrnSrcID) ) {
+ return null;
+ }
- } else if( e_srcOutCallerContext ) {
- if( !hrnSrc.isOutOfContext() ) {
- return null;
- }
+ } else if( e_srcOutCallerContext ) {
+ if( !hrnSrc.isOutOfContext() ) {
+ return null;
+ }
- } else {
+ } else {
- if( !calleeReachableNodes.contains( e_hrnSrcID ) ) {
- return null;
- }
- if( hrnSrc.isOutOfContext() ) {
- return null;
- }
+ if( !calleeReachableNodes.contains(e_hrnSrcID) ) {
+ return null;
+ }
+ if( hrnSrc.isOutOfContext() ) {
+ return null;
+ }
- }
+ }
- rsn = hrnSrc;
+ rsn = hrnSrc;
}
// is the destination present?
- HeapRegionNode hrnDst = rg.id2hrn.get( e_hrnDstID );
+ HeapRegionNode hrnDst = rg.id2hrn.get(e_hrnDstID);
if( hrnDst == null ) {
- return null;
+ return null;
}
- if( !calleeReachableNodes.contains( e_hrnDstID ) ) {
- return null;
+ if( !calleeReachableNodes.contains(e_hrnDstID) ) {
+ return null;
}
// is there an edge between them with the given
// type and field?
// TODO: type OR a subtype?
- RefEdge edge = rsn.getReferenceTo( hrnDst,
- e_type,
- e_field );
+ RefEdge edge = rsn.getReferenceTo(hrnDst,
+ e_type,
+ e_field);
if( edge == null ) {
- return null;
+ return null;
}
// when the state and taint are null we're done!
- if( ne_state == null &&
+ if( ne_state == null &&
e_taint == null ) {
- return edge.getPreds();
+ return edge.getPreds();
} else if( ne_state != null ) {
- // otherwise look for state too
+ // otherwise look for state too
- // TODO: contains OR containsSuperSet OR containsWithZeroes??
- ReachState stateCaller = edge.getBeta().containsIgnorePreds( ne_state );
-
- if( stateCaller == null ) {
- return null;
+ // TODO: contains OR containsSuperSet OR containsWithZeroes??
+ ReachState stateCaller = edge.getBeta().containsIgnorePreds(ne_state);
- } else {
- // it was here, return the predicates on the state!!
- return stateCaller.getPreds();
- }
+ if( stateCaller == null ) {
+ return null;
+
+ } else {
+ // it was here, return the predicates on the state!!
+ return stateCaller.getPreds();
+ }
} else {
- // otherwise look for taint
+ // otherwise look for taint
+
+ Taint tCaller = edge.getTaints().containsIgnorePreds(e_taint);
- Taint tCaller = edge.getTaints().containsIgnorePreds( e_taint );
-
- if( tCaller == null ) {
- return null;
+ if( tCaller == null ) {
+ return null;
- } else {
- // it was here, return the predicates on the taint!!
- return tCaller.getPreds();
- }
+ } else {
+ // it was here, return the predicates on the taint!!
+ return tCaller.getPreds();
+ }
}
// unreachable program point!
}
- throw new Error( "Unknown predicate type" );
+ throw new Error("Unknown predicate type");
}
- public boolean equalsSpecific( Object o ) {
+ public boolean equalsSpecific(Object o) {
if( o == null ) {
return false;
}
if( ne_state == null ) {
if( pred.ne_state != null ) {
- return false;
+ return false;
}
- } else if( !ne_state.equals( pred.ne_state ) ) {
+ } else if( !ne_state.equals(pred.ne_state) ) {
return false;
}
-
+
if( n_hrnID == null ) {
if( pred.n_hrnID != null ) {
- return false;
+ return false;
}
- } else if( !n_hrnID.equals( pred.n_hrnID ) ) {
+ } else if( !n_hrnID.equals(pred.n_hrnID) ) {
return false;
}
-
+
if( e_tdSrc == null ) {
if( pred.e_tdSrc != null ) {
- return false;
+ return false;
}
- } else if( !e_tdSrc.equals( pred.e_tdSrc ) ) {
+ } else if( !e_tdSrc.equals(pred.e_tdSrc) ) {
return false;
}
if( e_hrnSrcID == null ) {
if( pred.e_hrnSrcID != null ) {
- return false;
+ return false;
}
} else {
- if( !e_hrnSrcID.equals( pred.e_hrnSrcID ) ) {
- return false;
+ if( !e_hrnSrcID.equals(pred.e_hrnSrcID) ) {
+ return false;
}
if( e_srcOutCalleeContext != pred.e_srcOutCalleeContext ) {
- return false;
+ return false;
}
if( e_srcOutCallerContext != pred.e_srcOutCallerContext ) {
- return false;
+ return false;
}
}
if( e_hrnDstID == null ) {
if( pred.e_hrnDstID != null ) {
- return false;
+ return false;
}
- } else if( !e_hrnDstID.equals( pred.e_hrnDstID ) ) {
+ } else if( !e_hrnDstID.equals(pred.e_hrnDstID) ) {
return false;
}
-
+
if( e_type == null ) {
if( pred.e_type != null ) {
- return false;
+ return false;
}
- } else if( !e_type.equals( pred.e_type ) ) {
+ } else if( !e_type.equals(pred.e_type) ) {
return false;
}
-
+
if( e_field == null ) {
if( pred.e_field != null ) {
- return false;
+ return false;
}
- } else if( !e_field.equals( pred.e_field ) ) {
+ } else if( !e_field.equals(pred.e_field) ) {
return false;
}
if( e_taint == null ) {
if( pred.e_taint != null ) {
- return false;
+ return false;
}
- } else if( !e_taint.equals( pred.e_taint ) ) {
+ } else if( !e_taint.equals(pred.e_taint) ) {
return false;
}
int hash = n_hrnID.intValue()*17;
if( ne_state != null ) {
- hash ^= ne_state.hashCode();
+ hash ^= ne_state.hashCode();
}
return hash;
}
-
+
if( predType == TYPE_EDGE ) {
- int hash = 0;
+ int hash = 0;
hash += e_type.hashCode()*17;
if( e_field != null ) {
- hash += e_field.hashCode()*7;
+ hash += e_field.hashCode()*7;
}
-
+
if( e_tdSrc != null ) {
- hash ^= e_tdSrc.hashCode()*11;
+ hash ^= e_tdSrc.hashCode()*11;
} else {
- hash ^= e_hrnSrcID.hashCode()*11;
- if( e_srcOutCalleeContext ) {
- hash ^= 0xf1aeb;
- }
- if( e_srcOutCallerContext ) {
- hash ^= 0x875d;
- }
+ hash ^= e_hrnSrcID.hashCode()*11;
+ if( e_srcOutCalleeContext ) {
+ hash ^= 0xf1aeb;
+ }
+ if( e_srcOutCallerContext ) {
+ hash ^= 0x875d;
+ }
}
hash += e_hrnDstID.hashCode();
if( ne_state != null ) {
- hash ^= ne_state.hashCode();
+ hash ^= ne_state.hashCode();
}
if( e_taint != null ) {
- hash ^= e_taint.hashCode();
+ hash ^= e_taint.hashCode();
}
-
+
return hash;
}
- throw new Error( "Unknown predicate type" );
+ throw new Error("Unknown predicate type");
}
-
+
public String toString() {
if( predType == TYPE_TRUE ) {
return "t";
if( predType == TYPE_NODE ) {
String s = n_hrnID.toString();
if( ne_state != null ) {
- s += "w"+ne_state;
+ s += "w"+ne_state;
}
return s;
}
if( predType == TYPE_EDGE ) {
String s = "(";
-
+
if( e_tdSrc != null ) {
- s += e_tdSrc.toString();
+ s += e_tdSrc.toString();
} else {
- s += e_hrnSrcID.toString();
+ s += e_hrnSrcID.toString();
}
if( e_srcOutCalleeContext ) {
- s += "(ooCLEEc)";
+ s += "(ooCLEEc)";
}
if( e_srcOutCallerContext ) {
- s += "(ooCLERc)";
+ s += "(ooCLERc)";
}
s += "-->"+e_hrnDstID+")";
if( ne_state != null ) {
- s += "w"+ne_state;
+ s += "w"+ne_state;
}
if( e_taint != null ) {
- s += "w"+e_taint;
+ s += "w"+e_taint;
}
return s;
}
- throw new Error( "Unknown predicate type" );
+ throw new Error("Unknown predicate type");
}
-
+
}
public static boolean debug = false;
-
+
public static ExistPredSet factory() {
ExistPredSet out = new ExistPredSet();
- out = (ExistPredSet) Canonical.makeCanonical( out );
+ out = (ExistPredSet) Canonical.makeCanonical(out);
return out;
}
- public static ExistPredSet factory( ExistPred pred ) {
+ public static ExistPredSet factory(ExistPred pred) {
ExistPredSet out = new ExistPredSet();
- out.preds.add( pred );
- out = (ExistPredSet) Canonical.makeCanonical( out );
+ out.preds.add(pred);
+ out = (ExistPredSet) Canonical.makeCanonical(out);
return out;
}
preds = new HashSet<ExistPred>();
}
-
+
public Iterator<ExistPred> iterator() {
return preds.iterator();
}
-
+
// only consider the subset of the caller elements that
// are reachable by callee when testing predicates
- public ExistPredSet isSatisfiedBy( ReachGraph rg,
- Set<Integer> calleeReachableNodes
- ) {
+ public ExistPredSet isSatisfiedBy(ReachGraph rg,
+ Set<Integer> calleeReachableNodes
+ ) {
ExistPredSet predsOut = null;
-
+
Iterator<ExistPred> predItr = preds.iterator();
while( predItr.hasNext() ) {
ExistPredSet predsFromSatisfier =
- predItr.next().isSatisfiedBy( rg,
- calleeReachableNodes );
+ predItr.next().isSatisfiedBy(rg,
+ calleeReachableNodes);
if( predsFromSatisfier != null ) {
- if( predsOut == null ) {
- predsOut = predsFromSatisfier;
- } else {
- predsOut = Canonical.join( predsOut,
- predsFromSatisfier );
- }
+ if( predsOut == null ) {
+ predsOut = predsFromSatisfier;
+ } else {
+ predsOut = Canonical.join(predsOut,
+ predsFromSatisfier);
+ }
}
}
-
+
return predsOut;
}
}
- public boolean equalsSpecific( Object o ) {
+ public boolean equalsSpecific(Object o) {
if( o == null ) {
return false;
}
ExistPredSet eps = (ExistPredSet) o;
- return preds.equals( eps.preds );
+ return preds.equals(eps.preds);
}
ExistPred pred = predItr.next();
s += pred.toString();
if( predItr.hasNext() ) {
- s += " ||\\n";
+ s += " ||\\n";
}
}
s += "]";
return s;
}
-
+
public String toString() {
String s = "P[";
Iterator<ExistPred> predItr = preds.iterator();
ExistPred pred = predItr.next();
s += pred.toString();
if( predItr.hasNext() ) {
- s += " || ";
+ s += " || ";
}
}
s += "]";
// some reachability states are inherent
// to a node by its definition
- protected ReachSet inherent;
+ protected ReachSet inherent;
// use alpha for the current reach states
// and alphaNew during iterative calculations
protected ExistPredSet preds;
- public HeapRegionNode( Integer id,
- boolean isSingleObject,
- boolean isFlagged,
- boolean isNewSummary,
- boolean isOutOfContext,
- TypeDescriptor type,
- AllocSite allocSite,
- ReachSet inherent,
- ReachSet alpha,
- ExistPredSet preds,
- String description
- ) {
+ public HeapRegionNode(Integer id,
+ boolean isSingleObject,
+ boolean isFlagged,
+ boolean isNewSummary,
+ boolean isOutOfContext,
+ TypeDescriptor type,
+ AllocSite allocSite,
+ ReachSet inherent,
+ ReachSet alpha,
+ ExistPredSet preds,
+ String description
+ ) {
this.id = id;
this.isSingleObject = isSingleObject;
}
public HeapRegionNode copy() {
- return new HeapRegionNode( id,
- isSingleObject,
- isFlagged,
- isNewSummary,
- isOutOfContext,
- type,
- allocSite,
- inherent,
- alpha,
- preds,
- description );
+ return new HeapRegionNode(id,
+ isSingleObject,
+ isFlagged,
+ isNewSummary,
+ isOutOfContext,
+ type,
+ allocSite,
+ inherent,
+ alpha,
+ preds,
+ description);
}
// alpha and preds contribute towards reaching the
// fixed point, so use this method to determine if
// a node is "equal" to some previous visit, basically
- public boolean equalsIncludingAlphaAndPreds( HeapRegionNode hrn ) {
+ public boolean equalsIncludingAlphaAndPreds(HeapRegionNode hrn) {
- return equals( hrn ) &&
- alpha.equals( hrn.alpha ) &&
- preds.equals( hrn.preds );
+ return equals(hrn) &&
+ alpha.equals(hrn.alpha) &&
+ preds.equals(hrn.preds);
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
HeapRegionNode hrn = (HeapRegionNode) o;
- if( !id.equals( hrn.getID() ) ) {
+ if( !id.equals(hrn.getID() ) ) {
return false;
}
}
- // in other words, this node is not functionally
+ // in other words, this node is not functionally
// part of the graph (anymore)
public boolean isWiped() {
- return
+ return
getNumReferencers() == 0 &&
getNumReferencees() == 0;
}
- public void addReferencer( RefEdge edge ) {
+ public void addReferencer(RefEdge edge) {
assert edge != null;
- referencers.add( edge );
+ referencers.add(edge);
}
- public void removeReferencer( RefEdge edge ) {
+ public void removeReferencer(RefEdge edge) {
assert edge != null;
- assert referencers.contains( edge );
+ assert referencers.contains(edge);
- referencers.remove( edge );
+ referencers.remove(edge);
}
- public RefEdge getReferenceFrom( RefSrcNode rsn,
- TypeDescriptor type,
- String field
- ) {
+ public RefEdge getReferenceFrom(RefSrcNode rsn,
+ TypeDescriptor type,
+ String field
+ ) {
assert rsn != null;
Iterator<RefEdge> itrEdge = referencers.iterator();
while( itrEdge.hasNext() ) {
RefEdge edge = itrEdge.next();
- if( edge.getSrc().equals( rsn ) &&
- edge.typeEquals( type ) &&
- edge.fieldEquals( field )
+ if( edge.getSrc().equals(rsn) &&
+ edge.typeEquals(type) &&
+ edge.fieldEquals(field)
) {
return edge;
}
public TypeDescriptor getType() {
return type;
- }
+ }
public AllocSite getAllocSite() {
return allocSite;
}
-
+
public ReachSet getInherent() {
return inherent;
}
-
+
public ReachSet getAlpha() {
return alpha;
}
- public void setAlpha( ReachSet alpha ) {
+ public void setAlpha(ReachSet alpha) {
this.alpha = alpha;
}
return alphaNew;
}
- public void setAlphaNew( ReachSet alpha ) {
+ public void setAlphaNew(ReachSet alpha) {
this.alphaNew = alpha;
}
return preds;
}
- public void setPreds( ExistPredSet preds ) {
+ public void setPreds(ExistPredSet preds) {
this.preds = preds;
}
Iterator<ReachTuple> rtItr = state.iterator();
while( rtItr.hasNext() ) {
- ReachTuple rt = rtItr.next();
+ ReachTuple rt = rtItr.next();
- if( !rt.isOutOfContext() ) {
- return false;
- }
+ if( !rt.isOutOfContext() ) {
+ return false;
+ }
}
}
return true;
}
-
+
public String getIDString() {
String s;
if( id < 0 ) {
- s = "minus" + new Integer( -id ).toString();
+ s = "minus" + new Integer(-id).toString();
} else {
s = id.toString();
}
public String getDescription() {
return description;
- }
+ }
- public String toStringDOT( boolean hideReach,
- boolean hideSubsetReach,
- boolean hidePreds ) {
+ public String toStringDOT(boolean hideReach,
+ boolean hideSubsetReach,
+ boolean hidePreds) {
String attributes = "";
-
+
if( isSingleObject ) {
attributes += "shape=box";
} else {
",label=\"ID"+getIDString()+"\\n"+
typeStr+"\\n"+
description;
-
+
if( !hideReach ) {
- s += "\\n"+alpha.toStringEscNewline( hideSubsetReach );
+ s += "\\n"+alpha.toStringEscNewline(hideSubsetReach);
}
if( !hidePreds ) {
s += "\\n"+preds.toStringEscNewline();
}
-
+
return s+"\"]";
}
protected Integer id;
protected Boolean ooc;
- public HrnIdOoc( Integer id, Boolean ooc ) {
+ public HrnIdOoc(Integer id, Boolean ooc) {
this.id = id;
this.ooc = ooc;
}
return ooc;
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
HrnIdOoc hio = (HrnIdOoc) o;
- return
- id.equals( hio.id ) &&
- ooc.equals( hio.ooc );
+ return
+ id.equals(hio.id) &&
+ ooc.equals(hio.ooc);
}
public int hashCode() {
toprocess.remove(fn);
HashSet<FlatNode> myset=new HashSet<FlatNode>();
if (!analysisCares(fn)) {
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
if (map.containsKey(fn.getPrev(i)))
myset.addAll(map.get(fn.getPrev(i)));
}
}
if (!map.containsKey(fn)||!map.get(fn).equals(myset)) {
map.put(fn, myset);
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
toprocess.add(fn.getNext(i));
}
}
}
- for(Iterator<FlatNode> it=map.keySet().iterator();it.hasNext();) {
+ for(Iterator<FlatNode> it=map.keySet().iterator(); it.hasNext(); ) {
FlatNode fn=it.next();
if (analysisCares(fn)) {
HashSet<FlatNode> myset=new HashSet<FlatNode>();
- for(int i=0;i<fn.numPrev();i++) {
- if (map.containsKey(fn.getPrev(i)))
- myset.addAll(map.get(fn.getPrev(i)));
+ for(int i=0; i<fn.numPrev(); i++) {
+ if (map.containsKey(fn.getPrev(i)))
+ myset.addAll(map.get(fn.getPrev(i)));
}
if (!prevmap.containsKey(fn))
prevmap.put(fn, new Vector());
- for(Iterator<FlatNode> it2=myset.iterator();it2.hasNext();) {
+ for(Iterator<FlatNode> it2=myset.iterator(); it2.hasNext(); ) {
FlatNode fnprev=it2.next();
if (!nextmap.containsKey(fnprev))
nextmap.put(fnprev, new Vector());
Vector<FlatNode> vfn=nextmap.get(fn);
if (vfn==null)
return 0;
- else
+ else
return vfn.size();
}
case FKind.FlatGenReachNode:
case FKind.FlatExit:
return true;
+
case FKind.FlatCastNode:
FlatCastNode fcn=(FlatCastNode)fn;
TypeDescriptor td=fcn.getType();
return td.isPtr();
+
case FKind.FlatOpNode:
FlatOpNode fon = (FlatOpNode) fn;
return fon.getOp().getOp()==Operation.ASSIGN&&fon.getLeft().getType().isPtr();
+
default:
return false;
}
}
private void merge() {
- for(Pair<FlatNode, TempDescriptor> machinepair: bsm.getAllMachineNames()) {
+ for(Pair<FlatNode, TempDescriptor> machinepair : bsm.getAllMachineNames()) {
StateMachineForEffects sm=bsm.getStateMachine(machinepair);
merge(sm);
}
do {
mergeAgain=false;
HashMap<Pair<SMFEState, FieldDescriptor>, Set<SMFEState>> revMap=buildReverse(backMap);
- for(Map.Entry<Pair<SMFEState,FieldDescriptor>, Set<SMFEState>> entry:revMap.entrySet()) {
+ for(Map.Entry<Pair<SMFEState,FieldDescriptor>, Set<SMFEState>> entry : revMap.entrySet()) {
if (entry.getValue().size()>1) {
SMFEState first=null;
- for(SMFEState state:entry.getValue()) {
+ for(SMFEState state : entry.getValue()) {
if (removedStates.contains(state))
continue;
if (first==null) {
private HashMap<Pair<SMFEState, FieldDescriptor>, Set<SMFEState>> buildReverse(HashMap<SMFEState, Set<Pair<SMFEState, FieldDescriptor>>> backMap) {
HashMap<Pair<SMFEState, FieldDescriptor>, Set<SMFEState>> revMap=new HashMap<Pair<SMFEState, FieldDescriptor>, Set<SMFEState>>();
- for(Map.Entry<SMFEState, Set<Pair<SMFEState, FieldDescriptor>>>entry:backMap.entrySet()) {
+ for(Map.Entry<SMFEState, Set<Pair<SMFEState, FieldDescriptor>>>entry : backMap.entrySet()) {
SMFEState state=entry.getKey();
- for(Pair<SMFEState, FieldDescriptor> pair:entry.getValue()) {
+ for(Pair<SMFEState, FieldDescriptor> pair : entry.getValue()) {
if (!revMap.containsKey(pair))
revMap.put(pair, new HashSet<SMFEState>());
revMap.get(pair).add(state);
backMap.get(state1).addAll(backMap.get(state2));
//merge outgoing transitions
- for(Map.Entry<Effect, Set<SMFEState>> entry:state2.e2states.entrySet()) {
+ for(Map.Entry<Effect, Set<SMFEState>> entry : state2.e2states.entrySet()) {
Effect e=entry.getKey();
Set<SMFEState> states=entry.getValue();
if (state1.e2states.containsKey(e)) {
- for(SMFEState statetoadd:states) {
+ for(SMFEState statetoadd : states) {
if (!state1.e2states.get(e).add(statetoadd)) {
//already added...reduce reference count
statetoadd.refCount--;
}
//fix up the backmap of the edges we point to
- for(SMFEState st:states1) {
+ for(SMFEState st : states1) {
HashSet<Pair<SMFEState, FieldDescriptor>> toRemove=new HashSet<Pair<SMFEState, FieldDescriptor>>();
HashSet<Pair<SMFEState, FieldDescriptor>> toAdd=new HashSet<Pair<SMFEState, FieldDescriptor>>();
- for(Pair<SMFEState, FieldDescriptor> backpair:backMap.get(st)) {
+ for(Pair<SMFEState, FieldDescriptor> backpair : backMap.get(st)) {
if (backpair.getFirst()==state2) {
Pair<SMFEState, FieldDescriptor> newpair=new Pair<SMFEState, FieldDescriptor>(state1, backpair.getSecond());
toRemove.add(backpair);
}
//Fix up our new incoming edges
- for(Pair<SMFEState,FieldDescriptor> fromStatePair:backMap.get(state2)) {
+ for(Pair<SMFEState,FieldDescriptor> fromStatePair : backMap.get(state2)) {
SMFEState fromState=fromStatePair.getFirst();
- for(Map.Entry<Effect, Set<SMFEState>> fromEntry:fromState.e2states.entrySet()) {
+ for(Map.Entry<Effect, Set<SMFEState>> fromEntry : fromState.e2states.entrySet()) {
Effect e=fromEntry.getKey();
Set<SMFEState> states=fromEntry.getValue();
if (states.contains(state2)) {
states.remove(state2);
- if(states.add(state1) && !fromState.equals(state2)) {
- state1.refCount++;
- }
+ if(states.add(state1) && !fromState.equals(state2)) {
+ state1.refCount++;
+ }
}
}
}
private void prune() {
- for(Pair<FlatNode, TempDescriptor> machinepair: bsm.getAllMachineNames()) {
+ for(Pair<FlatNode, TempDescriptor> machinepair : bsm.getAllMachineNames()) {
StateMachineForEffects sm=bsm.getStateMachine(machinepair);
pruneNonConflictingStates(sm);
pruneEffects(sm);
}
private void pruneEffects(StateMachineForEffects sm) {
- for(Iterator<FlatNode> fnit=sm.fn2state.keySet().iterator(); fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=sm.fn2state.keySet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
SMFEState state=sm.fn2state.get(fn);
- for(Iterator<Effect> efit=state.effects.iterator();efit.hasNext();) {
+ for(Iterator<Effect> efit=state.effects.iterator(); efit.hasNext(); ) {
Effect e=efit.next();
//Is it a conflicting effect?
if (state.getConflicts().contains(e))
private void pruneNonConflictingStates(StateMachineForEffects sm) {
Set<SMFEState> canReachConflicts=buildConflictsAndMap(sm);
- for(Iterator<FlatNode> fnit=sm.fn2state.keySet().iterator(); fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=sm.fn2state.keySet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
SMFEState state=sm.fn2state.get(fn);
if (canReachConflicts.contains(state)) {
- for(Iterator<Effect> efit=state.e2states.keySet().iterator(); efit.hasNext();) {
+ for(Iterator<Effect> efit=state.e2states.keySet().iterator(); efit.hasNext(); ) {
Effect e=efit.next();
Set<SMFEState> stateset=state.e2states.get(e);
- for(Iterator<SMFEState> stit=stateset.iterator(); stit.hasNext();) {
+ for(Iterator<SMFEState> stit=stateset.iterator(); stit.hasNext(); ) {
SMFEState tostate=stit.next();
if(!canReachConflicts.contains(tostate))
stit.remove();
}
}
}
-
+
private HashMap<SMFEState, Set<Pair<SMFEState, FieldDescriptor>>> buildBackMap(StateMachineForEffects sm) {
return buildBackMap(sm, null);
}
if (!state.getConflicts().isEmpty()&&conflictStates!=null) {
conflictStates.add(state);
}
- for(Effect e:state.getEffectsAllowed()) {
- for(SMFEState stateout:state.transitionsTo(e)) {
+ for(Effect e : state.getEffectsAllowed()) {
+ for(SMFEState stateout : state.transitionsTo(e)) {
if (!backMap.containsKey(stateout)) {
toprocess.add(stateout);
backMap.put(stateout, new HashSet<Pair<SMFEState,FieldDescriptor>>());
return backMap;
}
-
+
private Set<SMFEState> buildConflictsAndMap(StateMachineForEffects sm) {
Set<SMFEState> conflictStates=new HashSet<SMFEState>();
HashMap<SMFEState, Set<Pair<SMFEState,FieldDescriptor>>> backMap=buildBackMap(sm, conflictStates);
while(!toprocess.isEmpty()) {
SMFEState state=toprocess.pop();
- for(Pair<SMFEState,FieldDescriptor> instatepair:backMap.get(state)) {
+ for(Pair<SMFEState,FieldDescriptor> instatepair : backMap.get(state)) {
SMFEState instate=instatepair.getFirst();
if (!canReachConflicts.contains(instate)) {
toprocess.add(instate);
}
return canReachConflicts;
}
-
+
private void groupStateMachines() {
- for(Pair<FlatNode, TempDescriptor> machinePair: bsm.getAllMachineNames()) {
+ for(Pair<FlatNode, TempDescriptor> machinePair : bsm.getAllMachineNames()) {
FlatNode fn=machinePair.getFirst();
StateMachineForEffects sm=bsm.getStateMachine(machinePair);
Set<FlatSESEEnterNode> taskSet=taskAnalysis.getPossibleExecutingRBlocks(fn);
- for(FlatSESEEnterNode sese:taskSet) {
+ for(FlatSESEEnterNode sese : taskSet) {
if (!groupMap.containsKey(sese))
groupMap.put(sese, new HashSet<StateMachineForEffects>());
groupMap.get(sese).add(sm);
private void computeConflictEffects() {
//Loop through all state machines
- for(Pair<FlatNode, TempDescriptor> machinePair: bsm.getAllMachineNames()) {
+ for(Pair<FlatNode, TempDescriptor> machinePair : bsm.getAllMachineNames()) {
FlatNode fn=machinePair.getFirst();
StateMachineForEffects sm=bsm.getStateMachine(machinePair);
Set<FlatSESEEnterNode> taskSet=taskAnalysis.getPossibleExecutingRBlocks(fn);
- for(FlatSESEEnterNode sese:taskSet) {
+ for(FlatSESEEnterNode sese : taskSet) {
Set<StateMachineForEffects> smgroup=groupMap.get(sese);
computeConflictingEffects(sm, smgroup);
}
}
}
-
+
private void computeConflictingEffects(StateMachineForEffects sm, Set<StateMachineForEffects> smgroup) {
boolean isStall=sm.getStallorSESE().kind()!=FKind.FlatSESEEnterNode;
- for(SMFEState state:sm.getStates()) {
- for(Effect e:state.getEffectsAllowed()) {
+ for(SMFEState state : sm.getStates()) {
+ for(Effect e : state.getEffectsAllowed()) {
Alloc a=e.getAffectedAllocSite();
FieldDescriptor fd=e.getField();
int type=e.getType();
if (!isStall&&Effect.isWrite(type)) {
hasConflict=true;
} else {
- for(StateMachineForEffects othersm:smgroup) {
+ for(StateMachineForEffects othersm : smgroup) {
boolean otherIsStall=othersm.getStallorSESE().kind()!=FKind.FlatSESEEnterNode;
//Stall sites can't conflict with each other
if (isStall&&otherIsStall) continue;
private void protectAgainstEvilTasks() {
- for( Pair<FlatNode, TempDescriptor> machinepair: bsm.getAllMachineNames() ) {
- StateMachineForEffects sm = bsm.getStateMachine( machinepair );
- protectAgainstEvilTasks( sm );
+ for( Pair<FlatNode, TempDescriptor> machinepair : bsm.getAllMachineNames() ) {
+ StateMachineForEffects sm = bsm.getStateMachine(machinepair);
+ protectAgainstEvilTasks(sm);
}
}
- private void protectAgainstEvilTasks( StateMachineForEffects sm ) {
+ private void protectAgainstEvilTasks(StateMachineForEffects sm) {
// first identify the set of <Alloc, Field> pairs for which this
// traverser will both read and write, remember the read effect
Set<Effect> allocAndFieldRW = new HashSet<Effect>();
- for( Pair<Alloc, FieldDescriptor> af: sm.effectsMap.keySet() ) {
- Integer effectType = sm.effectsMap.get( af );
+ for( Pair<Alloc, FieldDescriptor> af : sm.effectsMap.keySet() ) {
+ Integer effectType = sm.effectsMap.get(af);
if( (effectType & Effect.read) != 0 &&
(effectType & Effect.write) != 0
) {
- allocAndFieldRW.add( new Effect( af.getFirst(),
- Effect.read,
- af.getSecond()
- )
- );
+ allocAndFieldRW.add(new Effect(af.getFirst(),
+ Effect.read,
+ af.getSecond()
+ )
+ );
}
}
// next check the state machine: if an effect that initiates
// a transition is in the allocAndFieldRW set, then mark it
// as... POSSIBLY EVIL!!!!!
- for( SMFEState state: sm.getStates() ) {
- for( Effect effect: state.getTransitionEffects() ) {
- if( allocAndFieldRW.contains( effect ) ) {
- sm.addPossiblyEvilEffect( effect );
- }
+ for( SMFEState state : sm.getStates() ) {
+ for( Effect effect : state.getTransitionEffects() ) {
+ if( allocAndFieldRW.contains(effect) ) {
+ sm.addPossiblyEvilEffect(effect);
+ }
}
}
}
// use to disable improvements for comparison
protected static final boolean DISABLE_STRONG_UPDATES = false;
protected static final boolean DISABLE_GLOBAL_SWEEP = false;
-
+
// a special out-of-scope temp
- protected static final TempDescriptor tdReturn = new TempDescriptor( "_Return___" );
+ protected static final TempDescriptor tdReturn = new TempDescriptor("_Return___");
// predicate constants
- public static final ExistPred predTrue = ExistPred.factory(); // if no args, true
+ public static final ExistPred predTrue = ExistPred.factory(); // if no args, true
public static final ExistPredSet predsEmpty = ExistPredSet.factory();
- public static final ExistPredSet predsTrue = ExistPredSet.factory( predTrue );
-
+ public static final ExistPredSet predsTrue = ExistPredSet.factory(predTrue);
+
// some frequently used reachability constants
protected static final ReachState rstateEmpty = ReachState.factory();
- protected static final ReachSet rsetEmpty = ReachSet.factory();
- protected static final ReachSet rsetWithEmptyState = Canonical.changePredsTo( ReachSet.factory( rstateEmpty ),
- predsTrue );
+ protected static final ReachSet rsetEmpty = ReachSet.factory();
+ protected static final ReachSet rsetWithEmptyState = Canonical.changePredsTo(ReachSet.factory(rstateEmpty),
+ predsTrue);
// from DisjointAnalysis for convenience
- protected static int allocationDepth = -1;
+ protected static int allocationDepth = -1;
protected static TypeUtil typeUtil = null;
- protected static State state = null;
+ protected static State state = null;
// variable and heap region nodes indexed by unique ID
// convenient set of alloc sites for all heap regions
// present in the graph without having to search
- public Set<AllocSite> allocSites;
-
+ public Set<AllocSite> allocSites;
+
// set of inaccessible variables for current program statement
// with respect to stall-site analysis
public Set<TempDescriptor> inaccessibleVars;
inaccessibleVars = new HashSet<TempDescriptor>();
}
-
+
// temp descriptors are globally unique and map to
// exactly one variable node, easy
- protected VariableNode getVariableNodeFromTemp( TempDescriptor td ) {
+ protected VariableNode getVariableNodeFromTemp(TempDescriptor td) {
assert td != null;
- if( !td2vn.containsKey( td ) ) {
- td2vn.put( td, new VariableNode( td ) );
+ if( !td2vn.containsKey(td) ) {
+ td2vn.put(td, new VariableNode(td) );
}
- return td2vn.get( td );
+ return td2vn.get(td);
}
- //This method is created for client modules to access the Reachgraph
+ //This method is created for client modules to access the Reachgraph
//after the analysis is done and no modifications are to be made.
- public VariableNode getVariableNodeNoMutation( TempDescriptor td ) {
+ public VariableNode getVariableNodeNoMutation(TempDescriptor td) {
assert td != null;
- if( !td2vn.containsKey( td ) ) {
+ if( !td2vn.containsKey(td) ) {
return null;
}
- return td2vn.get( td );
+ return td2vn.get(td);
}
-
- public boolean hasVariable( TempDescriptor td ) {
- return td2vn.containsKey( td );
+
+ public boolean hasVariable(TempDescriptor td) {
+ return td2vn.containsKey(td);
}
// If a heap region or edge or variable should be
// in another graph, make a new object with
// equivalent properties for a new graph
- public boolean belongsToThis( RefSrcNode rsn ) {
+ public boolean belongsToThis(RefSrcNode rsn) {
if( rsn instanceof VariableNode ) {
VariableNode vn = (VariableNode) rsn;
- return this.td2vn.get( vn.getTempDescriptor() ) == vn;
+ return this.td2vn.get(vn.getTempDescriptor() ) == vn;
}
HeapRegionNode hrn = (HeapRegionNode) rsn;
- return this.id2hrn.get( hrn.getID() ) == hrn;
+ return this.id2hrn.get(hrn.getID() ) == hrn;
}
-
+
// in the merge() operation) or to create new heap
// regions with a new unique ID
protected HeapRegionNode
- createNewHeapRegionNode( Integer id,
- boolean isSingleObject,
- boolean isNewSummary,
- boolean isOutOfContext,
- TypeDescriptor type,
- AllocSite allocSite,
- ReachSet inherent,
- ReachSet alpha,
- ExistPredSet preds,
- String description
- ) {
+ createNewHeapRegionNode(Integer id,
+ boolean isSingleObject,
+ boolean isNewSummary,
+ boolean isOutOfContext,
+ TypeDescriptor type,
+ AllocSite allocSite,
+ ReachSet inherent,
+ ReachSet alpha,
+ ExistPredSet preds,
+ String description
+ ) {
TypeDescriptor typeToUse = null;
if( allocSite != null ) {
typeToUse = allocSite.getType();
- allocSites.add( allocSite );
+ allocSites.add(allocSite);
} else {
typeToUse = type;
}
if( allocSite != null && allocSite.isFlagged() ) {
markForAnalysis = true;
}
-
+
if( allocSite == null ) {
assert !markForAnalysis;
if( inherent == null ) {
if( markForAnalysis ) {
- inherent =
- Canonical.changePredsTo(
- ReachSet.factory(
- ReachState.factory(
- ReachTuple.factory( id,
- !isSingleObject,
- ReachTuple.ARITY_ONE,
- false // out-of-context
- )
- )
- ),
- predsTrue
- );
+ inherent =
+ Canonical.changePredsTo(
+ ReachSet.factory(
+ ReachState.factory(
+ ReachTuple.factory(id,
+ !isSingleObject,
+ ReachTuple.ARITY_ONE,
+ false // out-of-context
+ )
+ )
+ ),
+ predsTrue
+ );
} else {
inherent = rsetWithEmptyState;
}
assert preds != null;
- HeapRegionNode hrn = new HeapRegionNode( id,
- isSingleObject,
- markForAnalysis,
- isNewSummary,
- isOutOfContext,
- typeToUse,
- allocSite,
- inherent,
- alpha,
- preds,
- description );
- id2hrn.put( id, hrn );
+ HeapRegionNode hrn = new HeapRegionNode(id,
+ isSingleObject,
+ markForAnalysis,
+ isNewSummary,
+ isOutOfContext,
+ typeToUse,
+ allocSite,
+ inherent,
+ alpha,
+ preds,
+ description);
+ id2hrn.put(id, hrn);
return hrn;
}
// list of referencers and referencees.
//
////////////////////////////////////////////////
- protected void addRefEdge( RefSrcNode referencer,
- HeapRegionNode referencee,
- RefEdge edge ) {
+ protected void addRefEdge(RefSrcNode referencer,
+ HeapRegionNode referencee,
+ RefEdge edge) {
assert referencer != null;
assert referencee != null;
assert edge != null;
assert edge.getSrc() == referencer;
assert edge.getDst() == referencee;
- assert belongsToThis( referencer );
- assert belongsToThis( referencee );
+ assert belongsToThis(referencer);
+ assert belongsToThis(referencee);
// edges are getting added twice to graphs now, the
// kind that should have abstract facts merged--use
// this check to prevent that
- assert referencer.getReferenceTo( referencee,
- edge.getType(),
- edge.getField()
- ) == null;
+ assert referencer.getReferenceTo(referencee,
+ edge.getType(),
+ edge.getField()
+ ) == null;
- referencer.addReferencee( edge );
- referencee.addReferencer( edge );
+ referencer.addReferencee(edge);
+ referencee.addReferencer(edge);
}
- protected void removeRefEdge( RefEdge e ) {
- removeRefEdge( e.getSrc(),
- e.getDst(),
- e.getType(),
- e.getField() );
+ protected void removeRefEdge(RefEdge e) {
+ removeRefEdge(e.getSrc(),
+ e.getDst(),
+ e.getType(),
+ e.getField() );
}
- protected void removeRefEdge( RefSrcNode referencer,
- HeapRegionNode referencee,
- TypeDescriptor type,
- String field ) {
+ protected void removeRefEdge(RefSrcNode referencer,
+ HeapRegionNode referencee,
+ TypeDescriptor type,
+ String field) {
assert referencer != null;
assert referencee != null;
-
- RefEdge edge = referencer.getReferenceTo( referencee,
- type,
- field );
+
+ RefEdge edge = referencer.getReferenceTo(referencee,
+ type,
+ field);
assert edge != null;
- assert edge == referencee.getReferenceFrom( referencer,
- type,
- field );
-
- referencer.removeReferencee( edge );
- referencee.removeReferencer( edge );
+ assert edge == referencee.getReferenceFrom(referencer,
+ type,
+ field);
+
+ referencer.removeReferencee(edge);
+ referencee.removeReferencer(edge);
}
// return whether at least one edge was removed
- protected boolean clearRefEdgesFrom( RefSrcNode referencer,
- TypeDescriptor type,
- String field,
- boolean removeAll ) {
+ protected boolean clearRefEdgesFrom(RefSrcNode referencer,
+ TypeDescriptor type,
+ String field,
+ boolean removeAll) {
assert referencer != null;
boolean atLeastOneEdgeRemoved = false;
while( i.hasNext() ) {
RefEdge edge = i.next();
- if( removeAll ||
- (edge.typeEquals( type ) && edge.fieldEquals( field ))
- ){
+ if( removeAll ||
+ (edge.typeEquals(type) && edge.fieldEquals(field))
+ ) {
HeapRegionNode referencee = edge.getDst();
-
- removeRefEdge( referencer,
- referencee,
- edge.getType(),
- edge.getField() );
- atLeastOneEdgeRemoved = true;
+ removeRefEdge(referencer,
+ referencee,
+ edge.getType(),
+ edge.getField() );
+
+ atLeastOneEdgeRemoved = true;
}
}
return atLeastOneEdgeRemoved;
}
- protected void clearRefEdgesTo( HeapRegionNode referencee,
- TypeDescriptor type,
- String field,
- boolean removeAll ) {
+ protected void clearRefEdgesTo(HeapRegionNode referencee,
+ TypeDescriptor type,
+ String field,
+ boolean removeAll) {
assert referencee != null;
// get a copy of the set to iterate over, otherwise
while( i.hasNext() ) {
RefEdge edge = i.next();
- if( removeAll ||
- (edge.typeEquals( type ) && edge.fieldEquals( field ))
- ){
+ if( removeAll ||
+ (edge.typeEquals(type) && edge.fieldEquals(field))
+ ) {
RefSrcNode referencer = edge.getSrc();
- removeRefEdge( referencer,
- referencee,
- edge.getType(),
- edge.getField() );
+ removeRefEdge(referencer,
+ referencee,
+ edge.getType(),
+ edge.getField() );
}
}
}
- protected void clearNonVarRefEdgesTo( HeapRegionNode referencee ) {
+ protected void clearNonVarRefEdgesTo(HeapRegionNode referencee) {
assert referencee != null;
// get a copy of the set to iterate over, otherwise
RefEdge edge = i.next();
RefSrcNode referencer = edge.getSrc();
if( !(referencer instanceof VariableNode) ) {
- removeRefEdge( referencer,
- referencee,
- edge.getType(),
- edge.getField() );
+ removeRefEdge(referencer,
+ referencee,
+ edge.getType(),
+ edge.getField() );
}
}
}
// this is a common operation in many transfer functions: we want
// to add an edge, but if there is already such an edge we should
// merge the properties of the existing and the new edges
- protected void addEdgeOrMergeWithExisting( RefEdge edgeNew ) {
+ protected void addEdgeOrMergeWithExisting(RefEdge edgeNew) {
RefSrcNode src = edgeNew.getSrc();
- assert belongsToThis( src );
+ assert belongsToThis(src);
HeapRegionNode dst = edgeNew.getDst();
- assert belongsToThis( dst );
+ assert belongsToThis(dst);
// look to see if an edge with same field exists
// and merge with it, otherwise just add the edge
- RefEdge edgeExisting = src.getReferenceTo( dst,
- edgeNew.getType(),
- edgeNew.getField()
- );
-
+ RefEdge edgeExisting = src.getReferenceTo(dst,
+ edgeNew.getType(),
+ edgeNew.getField()
+ );
+
if( edgeExisting != null ) {
edgeExisting.setBeta(
- Canonical.unionORpreds( edgeExisting.getBeta(),
- edgeNew.getBeta()
- )
- );
+ Canonical.unionORpreds(edgeExisting.getBeta(),
+ edgeNew.getBeta()
+ )
+ );
edgeExisting.setPreds(
- Canonical.join( edgeExisting.getPreds(),
- edgeNew.getPreds()
- )
- );
+ Canonical.join(edgeExisting.getPreds(),
+ edgeNew.getPreds()
+ )
+ );
edgeExisting.setTaints(
- Canonical.unionORpreds( edgeExisting.getTaints(),
- edgeNew.getTaints()
- )
- );
-
- } else {
- addRefEdge( src, dst, edgeNew );
+ Canonical.unionORpreds(edgeExisting.getTaints(),
+ edgeNew.getTaints()
+ )
+ );
+
+ } else {
+ addRefEdge(src, dst, edgeNew);
}
}
//
////////////////////////////////////////////////////
- public void assignTempXEqualToTempY( TempDescriptor x,
- TempDescriptor y ) {
- assignTempXEqualToCastedTempY( x, y, null );
+ public void assignTempXEqualToTempY(TempDescriptor x,
+ TempDescriptor y) {
+ assignTempXEqualToCastedTempY(x, y, null);
}
- public void assignTempXEqualToCastedTempY( TempDescriptor x,
- TempDescriptor y,
- TypeDescriptor tdCast ) {
+ public void assignTempXEqualToCastedTempY(TempDescriptor x,
+ TempDescriptor y,
+ TypeDescriptor tdCast) {
+
+ VariableNode lnX = getVariableNodeFromTemp(x);
+ VariableNode lnY = getVariableNodeFromTemp(y);
- VariableNode lnX = getVariableNodeFromTemp( x );
- VariableNode lnY = getVariableNodeFromTemp( y );
-
- clearRefEdgesFrom( lnX, null, null, true );
+ clearRefEdgesFrom(lnX, null, null, true);
// note it is possible that the types of temps in the
// flat node to analyze will reveal that some typed
Iterator<RefEdge> itrYhrn = lnY.iteratorToReferencees();
while( itrYhrn.hasNext() ) {
- RefEdge edgeY = itrYhrn.next();
+ RefEdge edgeY = itrYhrn.next();
HeapRegionNode referencee = edgeY.getDst();
- RefEdge edgeNew = edgeY.copy();
+ RefEdge edgeNew = edgeY.copy();
- if( !isSuperiorType( x.getType(), edgeY.getType() ) ) {
- impossibleEdges.add( edgeY );
+ if( !isSuperiorType(x.getType(), edgeY.getType() ) ) {
+ impossibleEdges.add(edgeY);
continue;
}
- edgeNew.setSrc( lnX );
-
+ edgeNew.setSrc(lnX);
+
if( tdCast == null ) {
- edgeNew.setType( mostSpecificType( y.getType(),
- edgeY.getType(),
- referencee.getType()
- )
- );
+ edgeNew.setType(mostSpecificType(y.getType(),
+ edgeY.getType(),
+ referencee.getType()
+ )
+ );
} else {
- edgeNew.setType( mostSpecificType( y.getType(),
- edgeY.getType(),
- referencee.getType(),
- tdCast
- )
- );
+ edgeNew.setType(mostSpecificType(y.getType(),
+ edgeY.getType(),
+ referencee.getType(),
+ tdCast
+ )
+ );
}
- edgeNew.setField( null );
+ edgeNew.setField(null);
- addRefEdge( lnX, referencee, edgeNew );
+ addRefEdge(lnX, referencee, edgeNew);
}
Iterator<RefEdge> itrImp = impossibleEdges.iterator();
while( itrImp.hasNext() ) {
RefEdge edgeImp = itrImp.next();
- removeRefEdge( edgeImp );
+ removeRefEdge(edgeImp);
}
}
- public void assignTempXEqualToTempYFieldF( TempDescriptor x,
- TempDescriptor y,
- FieldDescriptor f,
- FlatNode currentProgramPoint
- ) {
-
- VariableNode lnX = getVariableNodeFromTemp( x );
- VariableNode lnY = getVariableNodeFromTemp( y );
+ public void assignTempXEqualToTempYFieldF(TempDescriptor x,
+ TempDescriptor y,
+ FieldDescriptor f,
+ FlatNode currentProgramPoint
+ ) {
+
+ VariableNode lnX = getVariableNodeFromTemp(x);
+ VariableNode lnY = getVariableNodeFromTemp(y);
- clearRefEdgesFrom( lnX, null, null, true );
+ clearRefEdgesFrom(lnX, null, null, true);
// note it is possible that the types of temps in the
// flat node to analyze will reveal that some typed
Iterator<RefEdge> itrYhrn = lnY.iteratorToReferencees();
while( itrYhrn.hasNext() ) {
- RefEdge edgeY = itrYhrn.next();
+ RefEdge edgeY = itrYhrn.next();
HeapRegionNode hrnY = edgeY.getDst();
- ReachSet betaY = edgeY.getBeta();
+ ReachSet betaY = edgeY.getBeta();
Iterator<RefEdge> itrHrnFhrn = hrnY.iteratorToReferencees();
while( itrHrnFhrn.hasNext() ) {
- RefEdge edgeHrn = itrHrnFhrn.next();
+ RefEdge edgeHrn = itrHrnFhrn.next();
HeapRegionNode hrnHrn = edgeHrn.getDst();
- ReachSet betaHrn = edgeHrn.getBeta();
+ ReachSet betaHrn = edgeHrn.getBeta();
// prune edges that are not a matching field
- if( edgeHrn.getType() != null &&
- !edgeHrn.getField().equals( f.getSymbol() )
+ if( edgeHrn.getType() != null &&
+ !edgeHrn.getField().equals(f.getSymbol() )
) {
continue;
}
// check for impossible edges
- if( !isSuperiorType( x.getType(), edgeHrn.getType() ) ) {
- impossibleEdges.add( edgeHrn );
+ if( !isSuperiorType(x.getType(), edgeHrn.getType() ) ) {
+ impossibleEdges.add(edgeHrn);
continue;
}
TypeDescriptor tdNewEdge =
- mostSpecificType( edgeHrn.getType(),
- hrnHrn.getType()
- );
+ mostSpecificType(edgeHrn.getType(),
+ hrnHrn.getType()
+ );
+
+ TaintSet taints = Canonical.unionORpreds(edgeHrn.getTaints(),
+ edgeY.getTaints()
+ );
+ if( state.RCR ) {
+ // the DFJ way to generate taints changes for field statements
+ taints = Canonical.changeWhereDefined(taints,
+ currentProgramPoint);
+ }
- TaintSet taints = Canonical.unionORpreds( edgeHrn.getTaints(),
- edgeY.getTaints()
- );
- if( state.RCR ) {
- // the DFJ way to generate taints changes for field statements
- taints = Canonical.changeWhereDefined( taints,
- currentProgramPoint );
- }
-
- RefEdge edgeNew = new RefEdge( lnX,
- hrnHrn,
- tdNewEdge,
- null,
- Canonical.intersection( betaY, betaHrn ),
- predsTrue,
- taints
- );
+ RefEdge edgeNew = new RefEdge(lnX,
+ hrnHrn,
+ tdNewEdge,
+ null,
+ Canonical.intersection(betaY, betaHrn),
+ predsTrue,
+ taints
+ );
- addEdgeOrMergeWithExisting( edgeNew );
+ addEdgeOrMergeWithExisting(edgeNew);
}
}
Iterator<RefEdge> itrImp = impossibleEdges.iterator();
while( itrImp.hasNext() ) {
RefEdge edgeImp = itrImp.next();
- removeRefEdge( edgeImp );
+ removeRefEdge(edgeImp);
}
// anytime you might remove edges between heap regions
- // you must global sweep to clean up broken reachability
+ // you must global sweep to clean up broken reachability
if( !impossibleEdges.isEmpty() ) {
if( !DISABLE_GLOBAL_SWEEP ) {
globalSweep();
// return whether a strong update was actually effected
- public boolean assignTempXFieldFEqualToTempY( TempDescriptor x,
- FieldDescriptor f,
- TempDescriptor y,
- FlatNode currentProgramPoint
- ) {
+ public boolean assignTempXFieldFEqualToTempY(TempDescriptor x,
+ FieldDescriptor f,
+ TempDescriptor y,
+ FlatNode currentProgramPoint
+ ) {
- VariableNode lnX = getVariableNodeFromTemp( x );
- VariableNode lnY = getVariableNodeFromTemp( y );
+ VariableNode lnX = getVariableNodeFromTemp(x);
+ VariableNode lnY = getVariableNodeFromTemp(y);
HashSet<HeapRegionNode> nodesWithNewAlpha = new HashSet<HeapRegionNode>();
HashSet<RefEdge> edgesWithNewBeta = new HashSet<RefEdge>();
Iterator<RefEdge> itrXhrn = lnX.iteratorToReferencees();
while( itrXhrn.hasNext() ) {
- RefEdge edgeX = itrXhrn.next();
+ RefEdge edgeX = itrXhrn.next();
HeapRegionNode hrnX = edgeX.getDst();
- // we can do a strong update here if one of two cases holds
+ // we can do a strong update here if one of two cases holds
if( f != null &&
- f != DisjointAnalysis.getArrayField( f.getType() ) &&
- ( (hrnX.getNumReferencers() == 1) || // case 1
- (hrnX.isSingleObject() && lnX.getNumReferencees() == 1) // case 2
- )
- ) {
- if( !DISABLE_STRONG_UPDATES ) {
- strongUpdateCond = true;
-
- boolean atLeastOne =
- clearRefEdgesFrom( hrnX,
- f.getType(),
- f.getSymbol(),
- false );
- if( atLeastOne ) {
- edgeRemovedByStrongUpdate = true;
- }
- }
+ f != DisjointAnalysis.getArrayField(f.getType() ) &&
+ ( (hrnX.getNumReferencers() == 1) || // case 1
+ (hrnX.isSingleObject() && lnX.getNumReferencees() == 1) // case 2
+ )
+ ) {
+ if( !DISABLE_STRONG_UPDATES ) {
+ strongUpdateCond = true;
+
+ boolean atLeastOne =
+ clearRefEdgesFrom(hrnX,
+ f.getType(),
+ f.getSymbol(),
+ false);
+ if( atLeastOne ) {
+ edgeRemovedByStrongUpdate = true;
+ }
+ }
}
}
-
+
// then do all token propagation
itrXhrn = lnX.iteratorToReferencees();
while( itrXhrn.hasNext() ) {
- RefEdge edgeX = itrXhrn.next();
+ RefEdge edgeX = itrXhrn.next();
HeapRegionNode hrnX = edgeX.getDst();
- ReachSet betaX = edgeX.getBeta();
- ReachSet R = Canonical.intersection( hrnX.getAlpha(),
- edgeX.getBeta()
- );
+ ReachSet betaX = edgeX.getBeta();
+ ReachSet R = Canonical.intersection(hrnX.getAlpha(),
+ edgeX.getBeta()
+ );
Iterator<RefEdge> itrYhrn = lnY.iteratorToReferencees();
while( itrYhrn.hasNext() ) {
- RefEdge edgeY = itrYhrn.next();
+ RefEdge edgeY = itrYhrn.next();
HeapRegionNode hrnY = edgeY.getDst();
- ReachSet O = edgeY.getBeta();
+ ReachSet O = edgeY.getBeta();
// check for impossible edges
- if( !isSuperiorType( f.getType(), edgeY.getType() ) ) {
- impossibleEdges.add( edgeY );
+ if( !isSuperiorType(f.getType(), edgeY.getType() ) ) {
+ impossibleEdges.add(edgeY);
continue;
}
// propagate tokens over nodes starting from hrnSrc, and it will
// take care of propagating back up edges from any touched nodes
- ChangeSet Cy = Canonical.unionUpArityToChangeSet( O, R );
- propagateTokensOverNodes( hrnY, Cy, nodesWithNewAlpha, edgesWithNewBeta );
+ ChangeSet Cy = Canonical.unionUpArityToChangeSet(O, R);
+ propagateTokensOverNodes(hrnY, Cy, nodesWithNewAlpha, edgesWithNewBeta);
// then propagate back just up the edges from hrn
- ChangeSet Cx = Canonical.unionUpArityToChangeSet( R, O );
- HashSet<RefEdge> todoEdges = new HashSet<RefEdge>();
+ ChangeSet Cx = Canonical.unionUpArityToChangeSet(R, O);
+ HashSet<RefEdge> todoEdges = new HashSet<RefEdge>();
Hashtable<RefEdge, ChangeSet> edgePlannedChanges =
new Hashtable<RefEdge, ChangeSet>();
Iterator<RefEdge> referItr = hrnX.iteratorToReferencers();
while( referItr.hasNext() ) {
RefEdge edgeUpstream = referItr.next();
- todoEdges.add( edgeUpstream );
- edgePlannedChanges.put( edgeUpstream, Cx );
+ todoEdges.add(edgeUpstream);
+ edgePlannedChanges.put(edgeUpstream, Cx);
}
- propagateTokensOverEdges( todoEdges,
- edgePlannedChanges,
- edgesWithNewBeta );
+ propagateTokensOverEdges(todoEdges,
+ edgePlannedChanges,
+ edgesWithNewBeta);
}
}
// then go back through and add the new edges
itrXhrn = lnX.iteratorToReferencees();
while( itrXhrn.hasNext() ) {
- RefEdge edgeX = itrXhrn.next();
+ RefEdge edgeX = itrXhrn.next();
HeapRegionNode hrnX = edgeX.getDst();
-
+
Iterator<RefEdge> itrYhrn = lnY.iteratorToReferencees();
while( itrYhrn.hasNext() ) {
- RefEdge edgeY = itrYhrn.next();
+ RefEdge edgeY = itrYhrn.next();
HeapRegionNode hrnY = edgeY.getDst();
// skip impossible edges here, we already marked them
// when computing reachability propagations above
- if( !isSuperiorType( f.getType(), edgeY.getType() ) ) {
+ if( !isSuperiorType(f.getType(), edgeY.getType() ) ) {
continue;
}
-
+
// prepare the new reference edge hrnX.f -> hrnY
- TypeDescriptor tdNewEdge =
- mostSpecificType( y.getType(),
- edgeY.getType(),
- hrnY.getType()
- );
-
- TaintSet taints = edgeY.getTaints();
-
- if( state.RCR ) {
- // the DFJ way to generate taints changes for field statements
- taints = Canonical.changeWhereDefined( taints,
- currentProgramPoint );
- }
-
- RefEdge edgeNew =
- new RefEdge( hrnX,
- hrnY,
- tdNewEdge,
- f.getSymbol(),
- Canonical.changePredsTo(
- Canonical.pruneBy( edgeY.getBeta(),
- hrnX.getAlpha()
- ),
- predsTrue
- ),
- predsTrue,
- taints
- );
-
- addEdgeOrMergeWithExisting( edgeNew );
+ TypeDescriptor tdNewEdge =
+ mostSpecificType(y.getType(),
+ edgeY.getType(),
+ hrnY.getType()
+ );
+
+ TaintSet taints = edgeY.getTaints();
+
+ if( state.RCR ) {
+ // the DFJ way to generate taints changes for field statements
+ taints = Canonical.changeWhereDefined(taints,
+ currentProgramPoint);
+ }
+
+ RefEdge edgeNew =
+ new RefEdge(hrnX,
+ hrnY,
+ tdNewEdge,
+ f.getSymbol(),
+ Canonical.changePredsTo(
+ Canonical.pruneBy(edgeY.getBeta(),
+ hrnX.getAlpha()
+ ),
+ predsTrue
+ ),
+ predsTrue,
+ taints
+ );
+
+ addEdgeOrMergeWithExisting(edgeNew);
}
}
Iterator<RefEdge> itrImp = impossibleEdges.iterator();
while( itrImp.hasNext() ) {
RefEdge edgeImp = itrImp.next();
- removeRefEdge( edgeImp );
+ removeRefEdge(edgeImp);
}
// if there was a strong update, make sure to improve
- // reachability with a global sweep
- if( edgeRemovedByStrongUpdate || !impossibleEdges.isEmpty() ) {
+ // reachability with a global sweep
+ if( edgeRemovedByStrongUpdate || !impossibleEdges.isEmpty() ) {
if( !DISABLE_GLOBAL_SWEEP ) {
- globalSweep();
+ globalSweep();
}
- }
+ }
return edgeRemovedByStrongUpdate;
}
- public void assignReturnEqualToTemp( TempDescriptor x ) {
+ public void assignReturnEqualToTemp(TempDescriptor x) {
- VariableNode lnR = getVariableNodeFromTemp( tdReturn );
- VariableNode lnX = getVariableNodeFromTemp( x );
+ VariableNode lnR = getVariableNodeFromTemp(tdReturn);
+ VariableNode lnX = getVariableNodeFromTemp(x);
- clearRefEdgesFrom( lnR, null, null, true );
+ clearRefEdgesFrom(lnR, null, null, true);
Iterator<RefEdge> itrXhrn = lnX.iteratorToReferencees();
while( itrXhrn.hasNext() ) {
- RefEdge edgeX = itrXhrn.next();
+ RefEdge edgeX = itrXhrn.next();
HeapRegionNode referencee = edgeX.getDst();
- RefEdge edgeNew = edgeX.copy();
- edgeNew.setSrc( lnR );
- edgeNew.setTaints( Canonical.changePredsTo( edgeNew.getTaints(),
- predsTrue
- )
- );
+ RefEdge edgeNew = edgeX.copy();
+ edgeNew.setSrc(lnR);
+ edgeNew.setTaints(Canonical.changePredsTo(edgeNew.getTaints(),
+ predsTrue
+ )
+ );
- addRefEdge( lnR, referencee, edgeNew );
+ addRefEdge(lnR, referencee, edgeNew);
}
}
- public void assignTempEqualToNewAlloc( TempDescriptor x,
- AllocSite as ) {
+ public void assignTempEqualToNewAlloc(TempDescriptor x,
+ AllocSite as) {
assert x != null;
assert as != null;
- age( as );
+ age(as);
// after the age operation the newest (or zero-ith oldest)
// node associated with the allocation site should have
// no references to it as if it were a newly allocated
// heap region
- Integer idNewest = as.getIthOldest( 0 );
- HeapRegionNode hrnNewest = id2hrn.get( idNewest );
- assert hrnNewest != null;
+ Integer idNewest = as.getIthOldest(0);
+ HeapRegionNode hrnNewest = id2hrn.get(idNewest);
+ assert hrnNewest != null;
- VariableNode lnX = getVariableNodeFromTemp( x );
- clearRefEdgesFrom( lnX, null, null, true );
+ VariableNode lnX = getVariableNodeFromTemp(x);
+ clearRefEdgesFrom(lnX, null, null, true);
// make a new reference to allocated node
TypeDescriptor type = as.getType();
RefEdge edgeNew =
- new RefEdge( lnX, // source
- hrnNewest, // dest
- type, // type
- null, // field name
- hrnNewest.getAlpha(), // beta
- predsTrue, // predicates
- TaintSet.factory() // taints
- );
+ new RefEdge(lnX, // source
+ hrnNewest, // dest
+ type, // type
+ null, // field name
+ hrnNewest.getAlpha(), // beta
+ predsTrue, // predicates
+ TaintSet.factory() // taints
+ );
- addRefEdge( lnX, hrnNewest, edgeNew );
+ addRefEdge(lnX, hrnNewest, edgeNew);
}
// site, attempts to retrieve the heap region nodes using the
// integer id's contained in the allocation site should always
// return non-null heap regions.
- public void age( AllocSite as ) {
+ public void age(AllocSite as) {
- // keep track of allocation sites that are represented
+ // keep track of allocation sites that are represented
// in this graph for efficiency with other operations
- allocSites.add( as );
+ allocSites.add(as);
// if there is a k-th oldest node, it merges into
// the summary node
Integer idK = as.getOldest();
- if( id2hrn.containsKey( idK ) ) {
- HeapRegionNode hrnK = id2hrn.get( idK );
+ if( id2hrn.containsKey(idK) ) {
+ HeapRegionNode hrnK = id2hrn.get(idK);
// retrieve the summary node, or make it
// from scratch
- HeapRegionNode hrnSummary = getSummaryNode( as, false );
-
- mergeIntoSummary( hrnK, hrnSummary );
+ HeapRegionNode hrnSummary = getSummaryNode(as, false);
+
+ mergeIntoSummary(hrnK, hrnSummary);
}
// move down the line of heap region nodes
for( int i = allocationDepth - 1; i > 0; --i ) {
// only do the transfer if the i-1 node exists
- Integer idImin1th = as.getIthOldest( i - 1 );
- if( id2hrn.containsKey( idImin1th ) ) {
- HeapRegionNode hrnImin1 = id2hrn.get( idImin1th );
- if( hrnImin1.isWiped() ) {
- // there is no info on this node, just skip
- continue;
- }
-
- // either retrieve or make target of transfer
- HeapRegionNode hrnI = getIthNode( as, i, false );
-
- transferOnto( hrnImin1, hrnI );
+ Integer idImin1th = as.getIthOldest(i - 1);
+ if( id2hrn.containsKey(idImin1th) ) {
+ HeapRegionNode hrnImin1 = id2hrn.get(idImin1th);
+ if( hrnImin1.isWiped() ) {
+ // there is no info on this node, just skip
+ continue;
+ }
+
+ // either retrieve or make target of transfer
+ HeapRegionNode hrnI = getIthNode(as, i, false);
+
+ transferOnto(hrnImin1, hrnI);
}
}
// as stated above, the newest node should have had its
// references moved over to the second oldest, so we wipe newest
// in preparation for being the new object to assign something to
- HeapRegionNode hrn0 = getIthNode( as, 0, false );
- wipeOut( hrn0, true );
+ HeapRegionNode hrn0 = getIthNode(as, 0, false);
+ wipeOut(hrn0, true);
// now tokens in reachability sets need to "age" also
Iterator itrAllHRNodes = id2hrn.entrySet().iterator();
while( itrAllHRNodes.hasNext() ) {
- Map.Entry me = (Map.Entry) itrAllHRNodes.next();
+ Map.Entry me = (Map.Entry)itrAllHRNodes.next();
HeapRegionNode hrnToAge = (HeapRegionNode) me.getValue();
-
- ageTuplesFrom( as, hrnToAge );
+
+ ageTuplesFrom(as, hrnToAge);
Iterator<RefEdge> itrEdges = hrnToAge.iteratorToReferencers();
while( itrEdges.hasNext() ) {
- ageTuplesFrom( as, itrEdges.next() );
+ ageTuplesFrom(as, itrEdges.next() );
}
}
// after tokens have been aged, reset newest node's reachability
// and a brand new node has a "true" predicate
- hrn0.setAlpha( hrn0.getInherent() );
- hrn0.setPreds( predsTrue );
+ hrn0.setAlpha(hrn0.getInherent() );
+ hrn0.setPreds(predsTrue);
}
// either retrieve or create the needed heap region node
- protected HeapRegionNode getSummaryNode( AllocSite as,
- boolean shadow ) {
+ protected HeapRegionNode getSummaryNode(AllocSite as,
+ boolean shadow) {
Integer idSummary;
if( shadow ) {
idSummary = as.getSummary();
}
- HeapRegionNode hrnSummary = id2hrn.get( idSummary );
+ HeapRegionNode hrnSummary = id2hrn.get(idSummary);
if( hrnSummary == null ) {
String strDesc = as.toStringForDOT()+"\\nsummary";
- hrnSummary =
- createNewHeapRegionNode( idSummary, // id or null to generate a new one
- false, // single object?
- true, // summary?
- false, // out-of-context?
- as.getType(), // type
- as, // allocation site
- null, // inherent reach
- null, // current reach
- predsEmpty, // predicates
- strDesc // description
- );
- }
-
+ hrnSummary =
+ createNewHeapRegionNode(idSummary, // id or null to generate a new one
+ false, // single object?
+ true, // summary?
+ false, // out-of-context?
+ as.getType(), // type
+ as, // allocation site
+ null, // inherent reach
+ null, // current reach
+ predsEmpty, // predicates
+ strDesc // description
+ );
+ }
+
return hrnSummary;
}
// either retrieve or create the needed heap region node
- protected HeapRegionNode getIthNode( AllocSite as,
- Integer i,
- boolean shadow ) {
+ protected HeapRegionNode getIthNode(AllocSite as,
+ Integer i,
+ boolean shadow) {
Integer idIth;
if( shadow ) {
- idIth = as.getIthOldestShadow( i );
+ idIth = as.getIthOldestShadow(i);
} else {
- idIth = as.getIthOldest( i );
+ idIth = as.getIthOldest(i);
}
-
- HeapRegionNode hrnIth = id2hrn.get( idIth );
-
+
+ HeapRegionNode hrnIth = id2hrn.get(idIth);
+
if( hrnIth == null ) {
String strDesc = as.toStringForDOT()+"\\n"+i+" oldest";
- hrnIth = createNewHeapRegionNode( idIth, // id or null to generate a new one
- true, // single object?
- false, // summary?
- false, // out-of-context?
- as.getType(), // type
- as, // allocation site
- null, // inherent reach
- null, // current reach
- predsEmpty, // predicates
- strDesc // description
- );
+ hrnIth = createNewHeapRegionNode(idIth, // id or null to generate a new one
+ true, // single object?
+ false, // summary?
+ false, // out-of-context?
+ as.getType(), // type
+ as, // allocation site
+ null, // inherent reach
+ null, // current reach
+ predsEmpty, // predicates
+ strDesc // description
+ );
}
return hrnIth;
}
- protected void mergeIntoSummary( HeapRegionNode hrn,
- HeapRegionNode hrnSummary ) {
+ protected void mergeIntoSummary(HeapRegionNode hrn,
+ HeapRegionNode hrnSummary) {
assert hrnSummary.isNewSummary();
// assert that these nodes belong to THIS graph
- assert belongsToThis( hrn );
- assert belongsToThis( hrnSummary );
+ assert belongsToThis(hrn);
+ assert belongsToThis(hrnSummary);
assert hrn != hrnSummary;
while( itrReferencee.hasNext() ) {
RefEdge edge = itrReferencee.next();
RefEdge edgeMerged = edge.copy();
- edgeMerged.setSrc( hrnSummary );
+ edgeMerged.setSrc(hrnSummary);
HeapRegionNode hrnReferencee = edge.getDst();
- RefEdge edgeSummary =
- hrnSummary.getReferenceTo( hrnReferencee,
- edge.getType(),
- edge.getField()
- );
-
+ RefEdge edgeSummary =
+ hrnSummary.getReferenceTo(hrnReferencee,
+ edge.getType(),
+ edge.getField()
+ );
+
if( edgeSummary == null ) {
// the merge is trivial, nothing to be done
- addRefEdge( hrnSummary, hrnReferencee, edgeMerged );
+ addRefEdge(hrnSummary, hrnReferencee, edgeMerged);
} else {
// otherwise an edge from the referencer to hrnSummary exists already
// and the edge referencer->hrn should be merged with it
- edgeSummary.setBeta(
- Canonical.unionORpreds( edgeMerged.getBeta(),
- edgeSummary.getBeta()
- )
- );
- edgeSummary.setPreds(
- Canonical.join( edgeMerged.getPreds(),
- edgeSummary.getPreds()
- )
- );
+ edgeSummary.setBeta(
+ Canonical.unionORpreds(edgeMerged.getBeta(),
+ edgeSummary.getBeta()
+ )
+ );
+ edgeSummary.setPreds(
+ Canonical.join(edgeMerged.getPreds(),
+ edgeSummary.getPreds()
+ )
+ );
}
}
while( itrReferencer.hasNext() ) {
RefEdge edge = itrReferencer.next();
RefEdge edgeMerged = edge.copy();
- edgeMerged.setDst( hrnSummary );
+ edgeMerged.setDst(hrnSummary);
RefSrcNode onReferencer = edge.getSrc();
- RefEdge edgeSummary =
- onReferencer.getReferenceTo( hrnSummary,
- edge.getType(),
- edge.getField()
- );
+ RefEdge edgeSummary =
+ onReferencer.getReferenceTo(hrnSummary,
+ edge.getType(),
+ edge.getField()
+ );
if( edgeSummary == null ) {
// the merge is trivial, nothing to be done
- addRefEdge( onReferencer, hrnSummary, edgeMerged );
+ addRefEdge(onReferencer, hrnSummary, edgeMerged);
} else {
// otherwise an edge from the referencer to alpha_S exists already
// and the edge referencer->alpha_K should be merged with it
- edgeSummary.setBeta(
- Canonical.unionORpreds( edgeMerged.getBeta(),
- edgeSummary.getBeta()
- )
- );
- edgeSummary.setPreds(
- Canonical.join( edgeMerged.getPreds(),
- edgeSummary.getPreds()
- )
- );
+ edgeSummary.setBeta(
+ Canonical.unionORpreds(edgeMerged.getBeta(),
+ edgeSummary.getBeta()
+ )
+ );
+ edgeSummary.setPreds(
+ Canonical.join(edgeMerged.getPreds(),
+ edgeSummary.getPreds()
+ )
+ );
}
}
// then merge hrn reachability into hrnSummary
- hrnSummary.setAlpha(
- Canonical.unionORpreds( hrnSummary.getAlpha(),
- hrn.getAlpha()
- )
- );
-
+ hrnSummary.setAlpha(
+ Canonical.unionORpreds(hrnSummary.getAlpha(),
+ hrn.getAlpha()
+ )
+ );
+
hrnSummary.setPreds(
- Canonical.join( hrnSummary.getPreds(),
- hrn.getPreds()
- )
- );
-
+ Canonical.join(hrnSummary.getPreds(),
+ hrn.getPreds()
+ )
+ );
+
// and afterward, this node is gone
- wipeOut( hrn, true );
+ wipeOut(hrn, true);
}
- protected void transferOnto( HeapRegionNode hrnA,
- HeapRegionNode hrnB ) {
+ protected void transferOnto(HeapRegionNode hrnA,
+ HeapRegionNode hrnB) {
- assert belongsToThis( hrnA );
- assert belongsToThis( hrnB );
+ assert belongsToThis(hrnA);
+ assert belongsToThis(hrnB);
assert hrnA != hrnB;
// clear references in and out of node b?
// copy each: (edge in and out of A) to B
Iterator<RefEdge> itrReferencee = hrnA.iteratorToReferencees();
while( itrReferencee.hasNext() ) {
- RefEdge edge = itrReferencee.next();
+ RefEdge edge = itrReferencee.next();
HeapRegionNode hrnReferencee = edge.getDst();
- RefEdge edgeNew = edge.copy();
- edgeNew.setSrc( hrnB );
- edgeNew.setDst( hrnReferencee );
+ RefEdge edgeNew = edge.copy();
+ edgeNew.setSrc(hrnB);
+ edgeNew.setDst(hrnReferencee);
- addRefEdge( hrnB, hrnReferencee, edgeNew );
+ addRefEdge(hrnB, hrnReferencee, edgeNew);
}
Iterator<RefEdge> itrReferencer = hrnA.iteratorToReferencers();
while( itrReferencer.hasNext() ) {
- RefEdge edge = itrReferencer.next();
+ RefEdge edge = itrReferencer.next();
RefSrcNode rsnReferencer = edge.getSrc();
- RefEdge edgeNew = edge.copy();
- edgeNew.setSrc( rsnReferencer );
- edgeNew.setDst( hrnB );
+ RefEdge edgeNew = edge.copy();
+ edgeNew.setSrc(rsnReferencer);
+ edgeNew.setDst(hrnB);
- addRefEdge( rsnReferencer, hrnB, edgeNew );
+ addRefEdge(rsnReferencer, hrnB, edgeNew);
}
// replace hrnB reachability and preds with hrnA's
- hrnB.setAlpha( hrnA.getAlpha() );
- hrnB.setPreds( hrnA.getPreds() );
+ hrnB.setAlpha(hrnA.getAlpha() );
+ hrnB.setPreds(hrnA.getPreds() );
// after transfer, wipe out source
- wipeOut( hrnA, true );
+ wipeOut(hrnA, true);
}
// because the node is still hanging around in the graph, just
// not mechanically connected or have any reach or predicate
// information on it anymore--lots of ops can use this
- protected void wipeOut( HeapRegionNode hrn,
- boolean wipeVariableReferences ) {
+ protected void wipeOut(HeapRegionNode hrn,
+ boolean wipeVariableReferences) {
- assert belongsToThis( hrn );
+ assert belongsToThis(hrn);
- clearRefEdgesFrom( hrn, null, null, true );
+ clearRefEdgesFrom(hrn, null, null, true);
if( wipeVariableReferences ) {
- clearRefEdgesTo( hrn, null, null, true );
+ clearRefEdgesTo(hrn, null, null, true);
} else {
- clearNonVarRefEdgesTo( hrn );
+ clearNonVarRefEdgesTo(hrn);
}
- hrn.setAlpha( rsetEmpty );
- hrn.setPreds( predsEmpty );
+ hrn.setAlpha(rsetEmpty);
+ hrn.setPreds(predsEmpty);
}
- protected void ageTuplesFrom( AllocSite as, RefEdge edge ) {
- edge.setBeta(
- Canonical.ageTuplesFrom( edge.getBeta(),
- as
- )
- );
+ protected void ageTuplesFrom(AllocSite as, RefEdge edge) {
+ edge.setBeta(
+ Canonical.ageTuplesFrom(edge.getBeta(),
+ as
+ )
+ );
}
- protected void ageTuplesFrom( AllocSite as, HeapRegionNode hrn ) {
- hrn.setAlpha(
- Canonical.ageTuplesFrom( hrn.getAlpha(),
- as
- )
- );
+ protected void ageTuplesFrom(AllocSite as, HeapRegionNode hrn) {
+ hrn.setAlpha(
+ Canonical.ageTuplesFrom(hrn.getAlpha(),
+ as
+ )
+ );
}
- protected void propagateTokensOverNodes( HeapRegionNode nPrime,
- ChangeSet c0,
- HashSet<HeapRegionNode> nodesWithNewAlpha,
- HashSet<RefEdge> edgesWithNewBeta ) {
+ protected void propagateTokensOverNodes(HeapRegionNode nPrime,
+ ChangeSet c0,
+ HashSet<HeapRegionNode> nodesWithNewAlpha,
+ HashSet<RefEdge> edgesWithNewBeta) {
HashSet<HeapRegionNode> todoNodes
= new HashSet<HeapRegionNode>();
- todoNodes.add( nPrime );
-
+ todoNodes.add(nPrime);
+
HashSet<RefEdge> todoEdges
= new HashSet<RefEdge>();
-
+
Hashtable<HeapRegionNode, ChangeSet> nodePlannedChanges
= new Hashtable<HeapRegionNode, ChangeSet>();
- nodePlannedChanges.put( nPrime, c0 );
+ nodePlannedChanges.put(nPrime, c0);
Hashtable<RefEdge, ChangeSet> edgePlannedChanges
= new Hashtable<RefEdge, ChangeSet>();
// first propagate change sets everywhere they can go
while( !todoNodes.isEmpty() ) {
HeapRegionNode n = todoNodes.iterator().next();
- ChangeSet C = nodePlannedChanges.get( n );
+ ChangeSet C = nodePlannedChanges.get(n);
Iterator<RefEdge> referItr = n.iteratorToReferencers();
while( referItr.hasNext() ) {
RefEdge edge = referItr.next();
- todoEdges.add( edge );
+ todoEdges.add(edge);
- if( !edgePlannedChanges.containsKey( edge ) ) {
- edgePlannedChanges.put( edge,
- ChangeSet.factory()
- );
+ if( !edgePlannedChanges.containsKey(edge) ) {
+ edgePlannedChanges.put(edge,
+ ChangeSet.factory()
+ );
}
- edgePlannedChanges.put( edge,
- Canonical.union( edgePlannedChanges.get( edge ),
- C
- )
- );
+ edgePlannedChanges.put(edge,
+ Canonical.union(edgePlannedChanges.get(edge),
+ C
+ )
+ );
}
Iterator<RefEdge> refeeItr = n.iteratorToReferencees();
while( refeeItr.hasNext() ) {
- RefEdge edgeF = refeeItr.next();
+ RefEdge edgeF = refeeItr.next();
HeapRegionNode m = edgeF.getDst();
ChangeSet changesToPass = ChangeSet.factory();
Iterator<ChangeTuple> itrCprime = C.iterator();
while( itrCprime.hasNext() ) {
ChangeTuple c = itrCprime.next();
- if( edgeF.getBeta().containsIgnorePreds( c.getStateToMatch() )
- != null
- ) {
- changesToPass = Canonical.add( changesToPass, c );
+ if( edgeF.getBeta().containsIgnorePreds(c.getStateToMatch() )
+ != null
+ ) {
+ changesToPass = Canonical.add(changesToPass, c);
}
}
if( !changesToPass.isEmpty() ) {
- if( !nodePlannedChanges.containsKey( m ) ) {
- nodePlannedChanges.put( m, ChangeSet.factory() );
+ if( !nodePlannedChanges.containsKey(m) ) {
+ nodePlannedChanges.put(m, ChangeSet.factory() );
}
- ChangeSet currentChanges = nodePlannedChanges.get( m );
+ ChangeSet currentChanges = nodePlannedChanges.get(m);
- if( !changesToPass.isSubset( currentChanges ) ) {
+ if( !changesToPass.isSubset(currentChanges) ) {
- nodePlannedChanges.put( m,
- Canonical.union( currentChanges,
- changesToPass
- )
- );
- todoNodes.add( m );
+ nodePlannedChanges.put(m,
+ Canonical.union(currentChanges,
+ changesToPass
+ )
+ );
+ todoNodes.add(m);
}
}
}
- todoNodes.remove( n );
+ todoNodes.remove(n);
}
// then apply all of the changes for each node at once
Iterator itrMap = nodePlannedChanges.entrySet().iterator();
while( itrMap.hasNext() ) {
- Map.Entry me = (Map.Entry) itrMap.next();
+ Map.Entry me = (Map.Entry)itrMap.next();
HeapRegionNode n = (HeapRegionNode) me.getKey();
- ChangeSet C = (ChangeSet) me.getValue();
+ ChangeSet C = (ChangeSet) me.getValue();
// this propagation step is with respect to one change,
// so we capture the full change from the old alpha:
- ReachSet localDelta = Canonical.applyChangeSet( n.getAlpha(),
- C,
- true
- );
+ ReachSet localDelta = Canonical.applyChangeSet(n.getAlpha(),
+ C,
+ true
+ );
// but this propagation may be only one of many concurrent
// possible changes, so keep a running union with the node's
// partially updated new alpha set
- n.setAlphaNew( Canonical.unionORpreds( n.getAlphaNew(),
- localDelta
- )
- );
+ n.setAlphaNew(Canonical.unionORpreds(n.getAlphaNew(),
+ localDelta
+ )
+ );
- nodesWithNewAlpha.add( n );
+ nodesWithNewAlpha.add(n);
}
- propagateTokensOverEdges( todoEdges,
- edgePlannedChanges,
- edgesWithNewBeta
- );
+ propagateTokensOverEdges(todoEdges,
+ edgePlannedChanges,
+ edgesWithNewBeta
+ );
}
- protected void propagateTokensOverEdges( HashSet <RefEdge> todoEdges,
- Hashtable<RefEdge, ChangeSet> edgePlannedChanges,
- HashSet <RefEdge> edgesWithNewBeta ) {
-
+ protected void propagateTokensOverEdges(HashSet <RefEdge> todoEdges,
+ Hashtable<RefEdge, ChangeSet> edgePlannedChanges,
+ HashSet <RefEdge> edgesWithNewBeta) {
+
// first propagate all change tuples everywhere they can go
while( !todoEdges.isEmpty() ) {
RefEdge edgeE = todoEdges.iterator().next();
- todoEdges.remove( edgeE );
+ todoEdges.remove(edgeE);
- if( !edgePlannedChanges.containsKey( edgeE ) ) {
- edgePlannedChanges.put( edgeE,
- ChangeSet.factory()
- );
+ if( !edgePlannedChanges.containsKey(edgeE) ) {
+ edgePlannedChanges.put(edgeE,
+ ChangeSet.factory()
+ );
}
- ChangeSet C = edgePlannedChanges.get( edgeE );
+ ChangeSet C = edgePlannedChanges.get(edgeE);
ChangeSet changesToPass = ChangeSet.factory();
Iterator<ChangeTuple> itrC = C.iterator();
while( itrC.hasNext() ) {
ChangeTuple c = itrC.next();
- if( edgeE.getBeta().containsIgnorePreds( c.getStateToMatch() )
- != null
- ) {
- changesToPass = Canonical.add( changesToPass, c );
+ if( edgeE.getBeta().containsIgnorePreds(c.getStateToMatch() )
+ != null
+ ) {
+ changesToPass = Canonical.add(changesToPass, c);
}
}
while( referItr.hasNext() ) {
RefEdge edgeF = referItr.next();
- if( !edgePlannedChanges.containsKey( edgeF ) ) {
- edgePlannedChanges.put( edgeF,
- ChangeSet.factory()
- );
+ if( !edgePlannedChanges.containsKey(edgeF) ) {
+ edgePlannedChanges.put(edgeF,
+ ChangeSet.factory()
+ );
}
- ChangeSet currentChanges = edgePlannedChanges.get( edgeF );
+ ChangeSet currentChanges = edgePlannedChanges.get(edgeF);
- if( !changesToPass.isSubset( currentChanges ) ) {
- todoEdges.add( edgeF );
- edgePlannedChanges.put( edgeF,
- Canonical.union( currentChanges,
- changesToPass
- )
- );
+ if( !changesToPass.isSubset(currentChanges) ) {
+ todoEdges.add(edgeF);
+ edgePlannedChanges.put(edgeF,
+ Canonical.union(currentChanges,
+ changesToPass
+ )
+ );
}
}
}
// then apply all of the changes for each edge at once
Iterator itrMap = edgePlannedChanges.entrySet().iterator();
while( itrMap.hasNext() ) {
- Map.Entry me = (Map.Entry) itrMap.next();
- RefEdge e = (RefEdge) me.getKey();
+ Map.Entry me = (Map.Entry)itrMap.next();
+ RefEdge e = (RefEdge) me.getKey();
ChangeSet C = (ChangeSet) me.getValue();
// this propagation step is with respect to one change,
// so we capture the full change from the old beta:
ReachSet localDelta =
- Canonical.applyChangeSet( e.getBeta(),
- C,
- true
- );
+ Canonical.applyChangeSet(e.getBeta(),
+ C,
+ true
+ );
// but this propagation may be only one of many concurrent
// possible changes, so keep a running union with the edge's
// partially updated new beta set
- e.setBetaNew( Canonical.unionORpreds( e.getBetaNew(),
- localDelta
- )
- );
-
- edgesWithNewBeta.add( e );
+ e.setBetaNew(Canonical.unionORpreds(e.getBetaNew(),
+ localDelta
+ )
+ );
+
+ edgesWithNewBeta.add(e);
}
}
- public void taintInSetVars( FlatSESEEnterNode sese ) {
+ public void taintInSetVars(FlatSESEEnterNode sese) {
Iterator<TempDescriptor> isvItr = sese.getInVarSet().iterator();
while( isvItr.hasNext() ) {
TempDescriptor isv = isvItr.next();
-
+
// use this where defined flatnode to support RCR/DFJ
FlatNode whereDefined = null;
if( state.RCR ) {
- whereDefined = sese;
+ whereDefined = sese;
}
// in-set var taints should NOT propagate back into callers
// so give it FALSE(EMPTY) predicates
- taintTemp( sese,
- null,
- isv,
- whereDefined,
- predsEmpty
- );
+ taintTemp(sese,
+ null,
+ isv,
+ whereDefined,
+ predsEmpty
+ );
}
}
- public void taintStallSite( FlatNode stallSite,
- TempDescriptor var ) {
+ public void taintStallSite(FlatNode stallSite,
+ TempDescriptor var) {
// use this where defined flatnode to support RCR/DFJ
FlatNode whereDefined = null;
if( state.RCR ) {
whereDefined = stallSite;
}
-
+
// stall site taint should propagate back into callers
// so give it TRUE predicates
- taintTemp( null,
- stallSite,
- var,
- whereDefined,
- predsTrue
- );
+ taintTemp(null,
+ stallSite,
+ var,
+ whereDefined,
+ predsTrue
+ );
}
- protected void taintTemp( FlatSESEEnterNode sese,
- FlatNode stallSite,
- TempDescriptor var,
- FlatNode whereDefined,
- ExistPredSet preds
- ) {
-
- VariableNode vn = getVariableNodeFromTemp( var );
-
+ protected void taintTemp(FlatSESEEnterNode sese,
+ FlatNode stallSite,
+ TempDescriptor var,
+ FlatNode whereDefined,
+ ExistPredSet preds
+ ) {
+
+ VariableNode vn = getVariableNodeFromTemp(var);
+
Iterator<RefEdge> reItr = vn.iteratorToReferencees();
while( reItr.hasNext() ) {
RefEdge re = reItr.next();
-
- Taint taint = Taint.factory( sese,
- stallSite,
- var,
- re.getDst().getAllocSite(),
- whereDefined,
- preds
- );
-
- re.setTaints( Canonical.add( re.getTaints(),
- taint
- )
- );
+
+ Taint taint = Taint.factory(sese,
+ stallSite,
+ var,
+ re.getDst().getAllocSite(),
+ whereDefined,
+ preds
+ );
+
+ re.setTaints(Canonical.add(re.getTaints(),
+ taint
+ )
+ );
}
}
-
- public void removeInContextTaints( FlatSESEEnterNode sese ) {
+
+ public void removeInContextTaints(FlatSESEEnterNode sese) {
Iterator meItr = id2hrn.entrySet().iterator();
while( meItr.hasNext() ) {
- Map.Entry me = (Map.Entry) meItr.next();
- Integer id = (Integer) me.getKey();
+ Map.Entry me = (Map.Entry)meItr.next();
+ Integer id = (Integer) me.getKey();
HeapRegionNode hrn = (HeapRegionNode) me.getValue();
Iterator<RefEdge> reItr = hrn.iteratorToReferencers();
while( reItr.hasNext() ) {
- RefEdge re = reItr.next();
+ RefEdge re = reItr.next();
- re.setTaints( Canonical.removeInContextTaints( re.getTaints(),
- sese
- )
- );
+ re.setTaints(Canonical.removeInContextTaints(re.getTaints(),
+ sese
+ )
+ );
}
}
}
Iterator meItr = id2hrn.entrySet().iterator();
while( meItr.hasNext() ) {
- Map.Entry me = (Map.Entry) meItr.next();
- Integer id = (Integer) me.getKey();
+ Map.Entry me = (Map.Entry)meItr.next();
+ Integer id = (Integer) me.getKey();
HeapRegionNode hrn = (HeapRegionNode) me.getValue();
Iterator<RefEdge> reItr = hrn.iteratorToReferencers();
while( reItr.hasNext() ) {
- RefEdge re = reItr.next();
-
- re.setTaints( Canonical.removeStallSiteTaints( re.getTaints()
- )
- );
+ RefEdge re = reItr.next();
+
+ re.setTaints(Canonical.removeStallSiteTaints(re.getTaints()
+ )
+ );
}
}
}
// already an appropriate out-of-context edge in a callee
// view graph for merging, or null if a new one will be added
protected RefEdge
- getOutOfContextReferenceTo( HeapRegionNode hrn,
- TypeDescriptor srcType,
- TypeDescriptor refType,
- String refField ) {
- assert belongsToThis( hrn );
+ getOutOfContextReferenceTo(HeapRegionNode hrn,
+ TypeDescriptor srcType,
+ TypeDescriptor refType,
+ String refField) {
+ assert belongsToThis(hrn);
- HeapRegionNode hrnInContext = id2hrn.get( hrn.getID() );
+ HeapRegionNode hrnInContext = id2hrn.get(hrn.getID() );
if( hrnInContext == null ) {
return null;
}
while( refItr.hasNext() ) {
RefEdge re = refItr.next();
- assert belongsToThis( re.getSrc() );
- assert belongsToThis( re.getDst() );
+ assert belongsToThis(re.getSrc() );
+ assert belongsToThis(re.getDst() );
if( !(re.getSrc() instanceof HeapRegionNode) ) {
- continue;
+ continue;
}
HeapRegionNode hrnSrc = (HeapRegionNode) re.getSrc();
if( !hrnSrc.isOutOfContext() ) {
- continue;
+ continue;
}
-
+
if( srcType == null ) {
- if( hrnSrc.getType() != null ) {
- continue;
- }
+ if( hrnSrc.getType() != null ) {
+ continue;
+ }
} else {
- if( !srcType.equals( hrnSrc.getType() ) ) {
- continue;
- }
+ if( !srcType.equals(hrnSrc.getType() ) ) {
+ continue;
+ }
}
- if( !re.typeEquals( refType ) ) {
- continue;
+ if( !re.typeEquals(refType) ) {
+ continue;
}
- if( !re.fieldEquals( refField ) ) {
- continue;
+ if( !re.fieldEquals(refField) ) {
+ continue;
}
// tada! We found it!
return re;
}
-
+
return null;
}
// used below to convert a ReachSet to its callee-context
// equivalent with respect to allocation sites in this graph
- protected ReachSet toCalleeContext( ReachSet rs,
- ExistPredSet predsNodeOrEdge,
- Set<HrnIdOoc> oocHrnIdOoc2callee
- ) {
+ protected ReachSet toCalleeContext(ReachSet rs,
+ ExistPredSet predsNodeOrEdge,
+ Set<HrnIdOoc> oocHrnIdOoc2callee
+ ) {
ReachSet out = ReachSet.factory();
-
+
Iterator<ReachState> itr = rs.iterator();
while( itr.hasNext() ) {
ReachState stateCaller = itr.next();
-
+
ReachState stateCallee = stateCaller;
Iterator<AllocSite> asItr = allocSites.iterator();
while( asItr.hasNext() ) {
- AllocSite as = asItr.next();
+ AllocSite as = asItr.next();
- ReachState stateNew = ReachState.factory();
- Iterator<ReachTuple> rtItr = stateCallee.iterator();
- while( rtItr.hasNext() ) {
- ReachTuple rt = rtItr.next();
+ ReachState stateNew = ReachState.factory();
+ Iterator<ReachTuple> rtItr = stateCallee.iterator();
+ while( rtItr.hasNext() ) {
+ ReachTuple rt = rtItr.next();
+
+ // only translate this tuple if it is
+ // in the out-callee-context bag
+ HrnIdOoc hio = new HrnIdOoc(rt.getHrnID(),
+ rt.isOutOfContext()
+ );
+ if( !oocHrnIdOoc2callee.contains(hio) ) {
+ stateNew = Canonical.addUpArity(stateNew, rt);
+ continue;
+ }
- // only translate this tuple if it is
- // in the out-callee-context bag
- HrnIdOoc hio = new HrnIdOoc( rt.getHrnID(),
- rt.isOutOfContext()
- );
- if( !oocHrnIdOoc2callee.contains( hio ) ) {
- stateNew = Canonical.addUpArity( stateNew, rt );
- continue;
- }
-
- int age = as.getAgeCategory( rt.getHrnID() );
-
- // this is the current mapping, where 0, 1, 2S were allocated
- // in the current context, 0?, 1? and 2S? were allocated in a
- // previous context, and we're translating to a future context
- //
- // 0 -> 0?
- // 1 -> 1?
- // 2S -> 2S?
- // 2S* -> 2S?*
- //
- // 0? -> 2S?
- // 1? -> 2S?
- // 2S? -> 2S?
- // 2S?* -> 2S?*
-
- if( age == AllocSite.AGE_notInThisSite ) {
- // things not from the site just go back in
- stateNew = Canonical.addUpArity( stateNew, rt );
-
- } else if( age == AllocSite.AGE_summary ||
- rt.isOutOfContext()
- ) {
-
- stateNew = Canonical.addUpArity( stateNew,
- ReachTuple.factory( as.getSummary(),
- true, // multi
- rt.getArity(),
- true // out-of-context
- )
- );
-
- } else {
- // otherwise everything else just goes to an out-of-context
- // version, everything else the same
- Integer I = as.getAge( rt.getHrnID() );
- assert I != null;
-
- assert !rt.isMultiObject();
-
- stateNew = Canonical.addUpArity( stateNew,
- ReachTuple.factory( rt.getHrnID(),
- rt.isMultiObject(), // multi
- rt.getArity(),
- true // out-of-context
- )
- );
- }
- }
-
- stateCallee = stateNew;
+ int age = as.getAgeCategory(rt.getHrnID() );
+
+ // this is the current mapping, where 0, 1, 2S were allocated
+ // in the current context, 0?, 1? and 2S? were allocated in a
+ // previous context, and we're translating to a future context
+ //
+ // 0 -> 0?
+ // 1 -> 1?
+ // 2S -> 2S?
+ // 2S* -> 2S?*
+ //
+ // 0? -> 2S?
+ // 1? -> 2S?
+ // 2S? -> 2S?
+ // 2S?* -> 2S?*
+
+ if( age == AllocSite.AGE_notInThisSite ) {
+ // things not from the site just go back in
+ stateNew = Canonical.addUpArity(stateNew, rt);
+
+ } else if( age == AllocSite.AGE_summary ||
+ rt.isOutOfContext()
+ ) {
+
+ stateNew = Canonical.addUpArity(stateNew,
+ ReachTuple.factory(as.getSummary(),
+ true, // multi
+ rt.getArity(),
+ true // out-of-context
+ )
+ );
+
+ } else {
+ // otherwise everything else just goes to an out-of-context
+ // version, everything else the same
+ Integer I = as.getAge(rt.getHrnID() );
+ assert I != null;
+
+ assert !rt.isMultiObject();
+
+ stateNew = Canonical.addUpArity(stateNew,
+ ReachTuple.factory(rt.getHrnID(),
+ rt.isMultiObject(), // multi
+ rt.getArity(),
+ true // out-of-context
+ )
+ );
+ }
+ }
+
+ stateCallee = stateNew;
}
-
+
// make a predicate of the caller graph element
// and the caller state we just converted
ExistPredSet predsWithState = ExistPredSet.factory();
Iterator<ExistPred> predItr = predsNodeOrEdge.iterator();
while( predItr.hasNext() ) {
- ExistPred predNodeOrEdge = predItr.next();
-
- predsWithState =
- Canonical.add( predsWithState,
- ExistPred.factory( predNodeOrEdge.n_hrnID,
- predNodeOrEdge.e_tdSrc,
- predNodeOrEdge.e_hrnSrcID,
- predNodeOrEdge.e_hrnDstID,
- predNodeOrEdge.e_type,
- predNodeOrEdge.e_field,
- stateCallee,
- null,
- predNodeOrEdge.e_srcOutCalleeContext,
- predNodeOrEdge.e_srcOutCallerContext
- )
- );
+ ExistPred predNodeOrEdge = predItr.next();
+
+ predsWithState =
+ Canonical.add(predsWithState,
+ ExistPred.factory(predNodeOrEdge.n_hrnID,
+ predNodeOrEdge.e_tdSrc,
+ predNodeOrEdge.e_hrnSrcID,
+ predNodeOrEdge.e_hrnDstID,
+ predNodeOrEdge.e_type,
+ predNodeOrEdge.e_field,
+ stateCallee,
+ null,
+ predNodeOrEdge.e_srcOutCalleeContext,
+ predNodeOrEdge.e_srcOutCallerContext
+ )
+ );
}
- stateCallee = Canonical.changePredsTo( stateCallee,
- predsWithState );
-
- out = Canonical.add( out,
- stateCallee
- );
+ stateCallee = Canonical.changePredsTo(stateCallee,
+ predsWithState);
+
+ out = Canonical.add(out,
+ stateCallee
+ );
}
assert out.isCanonical();
return out;
// used below to convert a ReachSet to its caller-context
// equivalent with respect to allocation sites in this graph
- protected ReachSet
- toCallerContext( ReachSet rs,
- Hashtable<ReachState, ExistPredSet> calleeStatesSatisfied
- ) {
+ protected ReachSet
+ toCallerContext(ReachSet rs,
+ Hashtable<ReachState, ExistPredSet> calleeStatesSatisfied
+ ) {
ReachSet out = ReachSet.factory();
// when the mapping is null it means there were no
while( itr.hasNext() ) {
ReachState stateCallee = itr.next();
- if( calleeStatesSatisfied.containsKey( stateCallee ) ) {
-
- // starting from one callee state...
- ReachSet rsCaller = ReachSet.factory( stateCallee );
-
- // possibly branch it into many states, which any
- // allocation site might do, so lots of derived states
- Iterator<AllocSite> asItr = allocSites.iterator();
- while( asItr.hasNext() ) {
- AllocSite as = asItr.next();
- rsCaller = Canonical.toCallerContext( rsCaller, as );
- }
-
- // then before adding each derived, now caller-context
- // states to the output, attach the appropriate pred
- // based on the source callee state
- Iterator<ReachState> stateItr = rsCaller.iterator();
- while( stateItr.hasNext() ) {
- ReachState stateCaller = stateItr.next();
- stateCaller = Canonical.attach( stateCaller,
- calleeStatesSatisfied.get( stateCallee )
- );
- out = Canonical.add( out,
- stateCaller
- );
- }
+ if( calleeStatesSatisfied.containsKey(stateCallee) ) {
+
+ // starting from one callee state...
+ ReachSet rsCaller = ReachSet.factory(stateCallee);
+
+ // possibly branch it into many states, which any
+ // allocation site might do, so lots of derived states
+ Iterator<AllocSite> asItr = allocSites.iterator();
+ while( asItr.hasNext() ) {
+ AllocSite as = asItr.next();
+ rsCaller = Canonical.toCallerContext(rsCaller, as);
+ }
+
+ // then before adding each derived, now caller-context
+ // states to the output, attach the appropriate pred
+ // based on the source callee state
+ Iterator<ReachState> stateItr = rsCaller.iterator();
+ while( stateItr.hasNext() ) {
+ ReachState stateCaller = stateItr.next();
+ stateCaller = Canonical.attach(stateCaller,
+ calleeStatesSatisfied.get(stateCallee)
+ );
+ out = Canonical.add(out,
+ stateCaller
+ );
+ }
}
- }
+ }
assert out.isCanonical();
return out;
// used below to convert a ReachSet to an equivalent
// version with shadow IDs merged into unshadowed IDs
- protected ReachSet unshadow( ReachSet rs ) {
+ protected ReachSet unshadow(ReachSet rs) {
ReachSet out = rs;
Iterator<AllocSite> asItr = allocSites.iterator();
while( asItr.hasNext() ) {
AllocSite as = asItr.next();
- out = Canonical.unshadow( out, as );
+ out = Canonical.unshadow(out, as);
}
assert out.isCanonical();
return out;
// convert a caller taint set into a callee taint set
protected TaintSet
- toCalleeContext( TaintSet ts,
- ExistPredSet predsEdge ) {
-
+ toCalleeContext(TaintSet ts,
+ ExistPredSet predsEdge) {
+
TaintSet out = TaintSet.factory();
// the idea is easy, the taint identifier itself doesn't
Iterator<ExistPred> predItr = predsEdge.iterator();
while( predItr.hasNext() ) {
- ExistPred predEdge = predItr.next();
-
- predsWithTaint =
- Canonical.add( predsWithTaint,
- ExistPred.factory( predEdge.e_tdSrc,
- predEdge.e_hrnSrcID,
- predEdge.e_hrnDstID,
- predEdge.e_type,
- predEdge.e_field,
- null,
- tCaller,
- predEdge.e_srcOutCalleeContext,
- predEdge.e_srcOutCallerContext
- )
- );
+ ExistPred predEdge = predItr.next();
+
+ predsWithTaint =
+ Canonical.add(predsWithTaint,
+ ExistPred.factory(predEdge.e_tdSrc,
+ predEdge.e_hrnSrcID,
+ predEdge.e_hrnDstID,
+ predEdge.e_type,
+ predEdge.e_field,
+ null,
+ tCaller,
+ predEdge.e_srcOutCalleeContext,
+ predEdge.e_srcOutCallerContext
+ )
+ );
}
- Taint tCallee = Canonical.changePredsTo( tCaller,
- predsWithTaint );
+ Taint tCallee = Canonical.changePredsTo(tCaller,
+ predsWithTaint);
- out = Canonical.add( out,
- tCallee
- );
+ out = Canonical.add(out,
+ tCallee
+ );
}
assert out.isCanonical();
// used below to convert a TaintSet to its caller-context
// equivalent, just eliminate Taints with bad preds
- protected TaintSet
- toCallerContext( TaintSet ts,
- Hashtable<Taint, ExistPredSet> calleeTaintsSatisfied
- ) {
+ protected TaintSet
+ toCallerContext(TaintSet ts,
+ Hashtable<Taint, ExistPredSet> calleeTaintsSatisfied
+ ) {
TaintSet out = TaintSet.factory();
while( itr.hasNext() ) {
Taint tCallee = itr.next();
- if( calleeTaintsSatisfied.containsKey( tCallee ) ) {
-
- Taint tCaller =
- Canonical.attach( Taint.factory( tCallee.sese,
- tCallee.stallSite,
- tCallee.var,
- tCallee.allocSite,
- tCallee.fnDefined,
- ExistPredSet.factory() ),
- calleeTaintsSatisfied.get( tCallee )
- );
- out = Canonical.add( out,
- tCaller
- );
- }
- }
-
+ if( calleeTaintsSatisfied.containsKey(tCallee) ) {
+
+ Taint tCaller =
+ Canonical.attach(Taint.factory(tCallee.sese,
+ tCallee.stallSite,
+ tCallee.var,
+ tCallee.allocSite,
+ tCallee.fnDefined,
+ ExistPredSet.factory() ),
+ calleeTaintsSatisfied.get(tCallee)
+ );
+ out = Canonical.add(out,
+ tCaller
+ );
+ }
+ }
+
assert out.isCanonical();
return out;
}
// use this method to make a new reach graph that is
- // what heap the FlatMethod callee from the FlatCall
+ // what heap the FlatMethod callee from the FlatCall
// would start with reaching from its arguments in
// this reach graph
- public ReachGraph
- makeCalleeView( FlatCall fc,
- FlatMethod fmCallee,
- Set<Integer> callerNodeIDsCopiedToCallee,
- boolean writeDebugDOTs
- ) {
+ public ReachGraph
+ makeCalleeView(FlatCall fc,
+ FlatMethod fmCallee,
+ Set<Integer> callerNodeIDsCopiedToCallee,
+ boolean writeDebugDOTs
+ ) {
// first traverse this context to find nodes and edges
for( int i = 0; i < fmCallee.numParameters(); ++i ) {
- TempDescriptor tdArg = fc.getArgMatchingParamIndex( fmCallee, i );
- VariableNode vnArgCaller = this.getVariableNodeFromTemp( tdArg );
+ TempDescriptor tdArg = fc.getArgMatchingParamIndex(fmCallee, i);
+ VariableNode vnArgCaller = this.getVariableNodeFromTemp(tdArg);
Set<RefSrcNode> toVisitInCaller = new HashSet<RefSrcNode>();
Set<RefSrcNode> visitedInCaller = new HashSet<RefSrcNode>();
- toVisitInCaller.add( vnArgCaller );
-
+ toVisitInCaller.add(vnArgCaller);
+
while( !toVisitInCaller.isEmpty() ) {
- RefSrcNode rsnCaller = toVisitInCaller.iterator().next();
- toVisitInCaller.remove( rsnCaller );
- visitedInCaller.add( rsnCaller );
-
- Iterator<RefEdge> itrRefEdges = rsnCaller.iteratorToReferencees();
- while( itrRefEdges.hasNext() ) {
- RefEdge reCaller = itrRefEdges.next();
- HeapRegionNode hrnCaller = reCaller.getDst();
-
- callerNodeIDsCopiedToCallee.add( hrnCaller.getID() );
- reachableCallerNodes.add( hrnCaller );
-
- if( reCaller.getSrc() instanceof HeapRegionNode ) {
- reachableCallerEdges.add( reCaller );
- } else {
- if( rsnCaller.equals( vnArgCaller ) ) {
- reachableCallerArgEdges2paramIndex.put( reCaller, i );
- } else {
- oocCallerEdges.add( reCaller );
- }
- }
-
- if( !visitedInCaller.contains( hrnCaller ) ) {
- toVisitInCaller.add( hrnCaller );
- }
-
- } // end edge iteration
+ RefSrcNode rsnCaller = toVisitInCaller.iterator().next();
+ toVisitInCaller.remove(rsnCaller);
+ visitedInCaller.add(rsnCaller);
+
+ Iterator<RefEdge> itrRefEdges = rsnCaller.iteratorToReferencees();
+ while( itrRefEdges.hasNext() ) {
+ RefEdge reCaller = itrRefEdges.next();
+ HeapRegionNode hrnCaller = reCaller.getDst();
+
+ callerNodeIDsCopiedToCallee.add(hrnCaller.getID() );
+ reachableCallerNodes.add(hrnCaller);
+
+ if( reCaller.getSrc() instanceof HeapRegionNode ) {
+ reachableCallerEdges.add(reCaller);
+ } else {
+ if( rsnCaller.equals(vnArgCaller) ) {
+ reachableCallerArgEdges2paramIndex.put(reCaller, i);
+ } else {
+ oocCallerEdges.add(reCaller);
+ }
+ }
+
+ if( !visitedInCaller.contains(hrnCaller) ) {
+ toVisitInCaller.add(hrnCaller);
+ }
+
+ } // end edge iteration
} // end visiting heap nodes in caller
} // end iterating over parameters as starting points
- // now collect out-of-callee-context IDs and
+ // now collect out-of-callee-context IDs and
// map them to whether the ID is out of the caller
// context as well
Set<HrnIdOoc> oocHrnIdOoc2callee = new HashSet<HrnIdOoc>();
- Iterator<Integer> itrInContext =
+ Iterator<Integer> itrInContext =
callerNodeIDsCopiedToCallee.iterator();
while( itrInContext.hasNext() ) {
- Integer hrnID = itrInContext.next();
- HeapRegionNode hrnCallerAndInContext = id2hrn.get( hrnID );
-
+ Integer hrnID = itrInContext.next();
+ HeapRegionNode hrnCallerAndInContext = id2hrn.get(hrnID);
+
Iterator<RefEdge> itrMightCross =
hrnCallerAndInContext.iteratorToReferencers();
while( itrMightCross.hasNext() ) {
- RefEdge edgeMightCross = itrMightCross.next();
-
- RefSrcNode rsnCallerAndOutContext =
- edgeMightCross.getSrc();
-
- if( rsnCallerAndOutContext instanceof VariableNode ) {
- // variables do not have out-of-context reach states,
- // so jump out now
- oocCallerEdges.add( edgeMightCross );
- continue;
- }
-
- HeapRegionNode hrnCallerAndOutContext =
- (HeapRegionNode) rsnCallerAndOutContext;
-
- // is this source node out-of-context?
- if( callerNodeIDsCopiedToCallee.contains( hrnCallerAndOutContext.getID() ) ) {
- // no, skip this edge
- continue;
- }
-
- // okay, we got one
- oocCallerEdges.add( edgeMightCross );
-
- // add all reach tuples on the node to list
- // of things that are out-of-context: insight
- // if this node is reachable from someting that WAS
- // in-context, then this node should already be in-context
- Iterator<ReachState> stateItr = hrnCallerAndOutContext.getAlpha().iterator();
- while( stateItr.hasNext() ) {
- ReachState state = stateItr.next();
-
- Iterator<ReachTuple> rtItr = state.iterator();
- while( rtItr.hasNext() ) {
- ReachTuple rt = rtItr.next();
-
- oocHrnIdOoc2callee.add( new HrnIdOoc( rt.getHrnID(),
- rt.isOutOfContext()
- )
- );
- }
- }
+ RefEdge edgeMightCross = itrMightCross.next();
+
+ RefSrcNode rsnCallerAndOutContext =
+ edgeMightCross.getSrc();
+
+ if( rsnCallerAndOutContext instanceof VariableNode ) {
+ // variables do not have out-of-context reach states,
+ // so jump out now
+ oocCallerEdges.add(edgeMightCross);
+ continue;
+ }
+
+ HeapRegionNode hrnCallerAndOutContext =
+ (HeapRegionNode) rsnCallerAndOutContext;
+
+ // is this source node out-of-context?
+ if( callerNodeIDsCopiedToCallee.contains(hrnCallerAndOutContext.getID() ) ) {
+ // no, skip this edge
+ continue;
+ }
+
+ // okay, we got one
+ oocCallerEdges.add(edgeMightCross);
+
+ // add all reach tuples on the node to list
+ // of things that are out-of-context: insight
+      // if this node is reachable from something that WAS
+ // in-context, then this node should already be in-context
+ Iterator<ReachState> stateItr = hrnCallerAndOutContext.getAlpha().iterator();
+ while( stateItr.hasNext() ) {
+ ReachState state = stateItr.next();
+
+ Iterator<ReachTuple> rtItr = state.iterator();
+ while( rtItr.hasNext() ) {
+ ReachTuple rt = rtItr.next();
+
+ oocHrnIdOoc2callee.add(new HrnIdOoc(rt.getHrnID(),
+ rt.isOutOfContext()
+ )
+ );
+ }
+ }
}
}
while( hrnItr.hasNext() ) {
HeapRegionNode hrnCaller = hrnItr.next();
- assert callerNodeIDsCopiedToCallee.contains( hrnCaller.getID() );
- assert !rg.id2hrn.containsKey( hrnCaller.getID() );
-
- ExistPred pred = ExistPred.factory( hrnCaller.getID(), null );
- ExistPredSet preds = ExistPredSet.factory( pred );
-
- rg.createNewHeapRegionNode( hrnCaller.getID(),
- hrnCaller.isSingleObject(),
- hrnCaller.isNewSummary(),
- false, // out-of-context?
- hrnCaller.getType(),
- hrnCaller.getAllocSite(),
- toCalleeContext( hrnCaller.getInherent(),
- preds,
- oocHrnIdOoc2callee
- ),
- toCalleeContext( hrnCaller.getAlpha(),
- preds,
- oocHrnIdOoc2callee
- ),
- preds,
- hrnCaller.getDescription()
- );
+ assert callerNodeIDsCopiedToCallee.contains(hrnCaller.getID() );
+ assert !rg.id2hrn.containsKey(hrnCaller.getID() );
+
+ ExistPred pred = ExistPred.factory(hrnCaller.getID(), null);
+ ExistPredSet preds = ExistPredSet.factory(pred);
+
+ rg.createNewHeapRegionNode(hrnCaller.getID(),
+ hrnCaller.isSingleObject(),
+ hrnCaller.isNewSummary(),
+ false, // out-of-context?
+ hrnCaller.getType(),
+ hrnCaller.getAllocSite(),
+ toCalleeContext(hrnCaller.getInherent(),
+ preds,
+ oocHrnIdOoc2callee
+ ),
+ toCalleeContext(hrnCaller.getAlpha(),
+ preds,
+ oocHrnIdOoc2callee
+ ),
+ preds,
+ hrnCaller.getDescription()
+ );
}
// add param edges to callee graph
- Iterator argEdges =
+ Iterator argEdges =
reachableCallerArgEdges2paramIndex.entrySet().iterator();
while( argEdges.hasNext() ) {
- Map.Entry me = (Map.Entry) argEdges.next();
- RefEdge reArg = (RefEdge) me.getKey();
- Integer index = (Integer) me.getValue();
-
- VariableNode vnCaller = (VariableNode) reArg.getSrc();
+ Map.Entry me = (Map.Entry)argEdges.next();
+ RefEdge reArg = (RefEdge) me.getKey();
+ Integer index = (Integer) me.getValue();
+
+ VariableNode vnCaller = (VariableNode) reArg.getSrc();
TempDescriptor argCaller = vnCaller.getTempDescriptor();
-
- TempDescriptor paramCallee = fmCallee.getParameter( index );
- VariableNode vnCallee = rg.getVariableNodeFromTemp( paramCallee );
-
+
+ TempDescriptor paramCallee = fmCallee.getParameter(index);
+ VariableNode vnCallee = rg.getVariableNodeFromTemp(paramCallee);
+
HeapRegionNode hrnDstCaller = reArg.getDst();
- HeapRegionNode hrnDstCallee = rg.id2hrn.get( hrnDstCaller.getID() );
+ HeapRegionNode hrnDstCallee = rg.id2hrn.get(hrnDstCaller.getID() );
assert hrnDstCallee != null;
-
+
ExistPred pred =
- ExistPred.factory( argCaller,
- null,
- hrnDstCallee.getID(),
- reArg.getType(),
- reArg.getField(),
- null, // state
- null, // taint
- true, // out-of-callee-context
- false // out-of-caller-context
- );
-
- ExistPredSet preds =
- ExistPredSet.factory( pred );
-
- RefEdge reCallee =
- new RefEdge( vnCallee,
- hrnDstCallee,
- reArg.getType(),
- reArg.getField(),
- toCalleeContext( reArg.getBeta(),
- preds,
- oocHrnIdOoc2callee
- ),
- preds,
- toCalleeContext( reArg.getTaints(),
- preds )
- );
-
- rg.addRefEdge( vnCallee,
- hrnDstCallee,
- reCallee
- );
+ ExistPred.factory(argCaller,
+ null,
+ hrnDstCallee.getID(),
+ reArg.getType(),
+ reArg.getField(),
+ null, // state
+ null, // taint
+ true, // out-of-callee-context
+ false // out-of-caller-context
+ );
+
+ ExistPredSet preds =
+ ExistPredSet.factory(pred);
+
+ RefEdge reCallee =
+ new RefEdge(vnCallee,
+ hrnDstCallee,
+ reArg.getType(),
+ reArg.getField(),
+ toCalleeContext(reArg.getBeta(),
+ preds,
+ oocHrnIdOoc2callee
+ ),
+ preds,
+ toCalleeContext(reArg.getTaints(),
+ preds)
+ );
+
+ rg.addRefEdge(vnCallee,
+ hrnDstCallee,
+ reCallee
+ );
}
// add in-context edges to callee graph
Iterator<RefEdge> reItr = reachableCallerEdges.iterator();
while( reItr.hasNext() ) {
- RefEdge reCaller = reItr.next();
+ RefEdge reCaller = reItr.next();
RefSrcNode rsnCaller = reCaller.getSrc();
assert rsnCaller instanceof HeapRegionNode;
HeapRegionNode hrnSrcCaller = (HeapRegionNode) rsnCaller;
HeapRegionNode hrnDstCaller = reCaller.getDst();
- HeapRegionNode hrnSrcCallee = rg.id2hrn.get( hrnSrcCaller.getID() );
- HeapRegionNode hrnDstCallee = rg.id2hrn.get( hrnDstCaller.getID() );
+ HeapRegionNode hrnSrcCallee = rg.id2hrn.get(hrnSrcCaller.getID() );
+ HeapRegionNode hrnDstCallee = rg.id2hrn.get(hrnDstCaller.getID() );
assert hrnSrcCallee != null;
assert hrnDstCallee != null;
ExistPred pred =
- ExistPred.factory( null,
- hrnSrcCallee.getID(),
- hrnDstCallee.getID(),
- reCaller.getType(),
- reCaller.getField(),
- null, // state
- null, // taint
- false, // out-of-callee-context
- false // out-of-caller-context
- );
-
- ExistPredSet preds =
- ExistPredSet.factory( pred );
-
- RefEdge reCallee =
- new RefEdge( hrnSrcCallee,
- hrnDstCallee,
- reCaller.getType(),
- reCaller.getField(),
- toCalleeContext( reCaller.getBeta(),
- preds,
- oocHrnIdOoc2callee
- ),
- preds,
- toCalleeContext( reCaller.getTaints(),
- preds )
- );
-
- rg.addRefEdge( hrnSrcCallee,
- hrnDstCallee,
- reCallee
- );
+ ExistPred.factory(null,
+ hrnSrcCallee.getID(),
+ hrnDstCallee.getID(),
+ reCaller.getType(),
+ reCaller.getField(),
+ null, // state
+ null, // taint
+ false, // out-of-callee-context
+ false // out-of-caller-context
+ );
+
+ ExistPredSet preds =
+ ExistPredSet.factory(pred);
+
+ RefEdge reCallee =
+ new RefEdge(hrnSrcCallee,
+ hrnDstCallee,
+ reCaller.getType(),
+ reCaller.getField(),
+ toCalleeContext(reCaller.getBeta(),
+ preds,
+ oocHrnIdOoc2callee
+ ),
+ preds,
+ toCalleeContext(reCaller.getTaints(),
+ preds)
+ );
+
+ rg.addRefEdge(hrnSrcCallee,
+ hrnDstCallee,
+ reCallee
+ );
}
// add out-of-context edges to callee graph
reItr = oocCallerEdges.iterator();
while( reItr.hasNext() ) {
- RefEdge reCaller = reItr.next();
- RefSrcNode rsnCaller = reCaller.getSrc();
+ RefEdge reCaller = reItr.next();
+ RefSrcNode rsnCaller = reCaller.getSrc();
HeapRegionNode hrnDstCaller = reCaller.getDst();
- HeapRegionNode hrnDstCallee = rg.id2hrn.get( hrnDstCaller.getID() );
+ HeapRegionNode hrnDstCallee = rg.id2hrn.get(hrnDstCaller.getID() );
assert hrnDstCallee != null;
TypeDescriptor oocNodeType;
- ReachSet oocReach;
+ ReachSet oocReach;
TempDescriptor oocPredSrcTemp = null;
- Integer oocPredSrcID = null;
- boolean outOfCalleeContext;
- boolean outOfCallerContext;
+ Integer oocPredSrcID = null;
+ boolean outOfCalleeContext;
+ boolean outOfCallerContext;
if( rsnCaller instanceof VariableNode ) {
- VariableNode vnCaller = (VariableNode) rsnCaller;
- oocNodeType = null;
- oocReach = rsetEmpty;
- oocPredSrcTemp = vnCaller.getTempDescriptor();
- outOfCalleeContext = true;
- outOfCallerContext = false;
+ VariableNode vnCaller = (VariableNode) rsnCaller;
+ oocNodeType = null;
+ oocReach = rsetEmpty;
+ oocPredSrcTemp = vnCaller.getTempDescriptor();
+ outOfCalleeContext = true;
+ outOfCallerContext = false;
} else {
- HeapRegionNode hrnSrcCaller = (HeapRegionNode) rsnCaller;
- assert !callerNodeIDsCopiedToCallee.contains( hrnSrcCaller.getID() );
- oocNodeType = hrnSrcCaller.getType();
- oocReach = hrnSrcCaller.getAlpha();
- oocPredSrcID = hrnSrcCaller.getID();
- if( hrnSrcCaller.isOutOfContext() ) {
- outOfCalleeContext = false;
- outOfCallerContext = true;
- } else {
- outOfCalleeContext = true;
- outOfCallerContext = false;
- }
+ HeapRegionNode hrnSrcCaller = (HeapRegionNode) rsnCaller;
+ assert !callerNodeIDsCopiedToCallee.contains(hrnSrcCaller.getID() );
+ oocNodeType = hrnSrcCaller.getType();
+ oocReach = hrnSrcCaller.getAlpha();
+ oocPredSrcID = hrnSrcCaller.getID();
+ if( hrnSrcCaller.isOutOfContext() ) {
+ outOfCalleeContext = false;
+ outOfCallerContext = true;
+ } else {
+ outOfCalleeContext = true;
+ outOfCallerContext = false;
+ }
}
ExistPred pred =
- ExistPred.factory( oocPredSrcTemp,
- oocPredSrcID,
- hrnDstCallee.getID(),
- reCaller.getType(),
- reCaller.getField(),
- null,
- null,
- outOfCalleeContext,
- outOfCallerContext
- );
-
- ExistPredSet preds =
- ExistPredSet.factory( pred );
-
+ ExistPred.factory(oocPredSrcTemp,
+ oocPredSrcID,
+ hrnDstCallee.getID(),
+ reCaller.getType(),
+ reCaller.getField(),
+ null,
+ null,
+ outOfCalleeContext,
+ outOfCallerContext
+ );
+
+ ExistPredSet preds =
+ ExistPredSet.factory(pred);
+
RefEdge oocEdgeExisting =
- rg.getOutOfContextReferenceTo( hrnDstCallee,
- oocNodeType,
- reCaller.getType(),
- reCaller.getField()
- );
+ rg.getOutOfContextReferenceTo(hrnDstCallee,
+ oocNodeType,
+ reCaller.getType(),
+ reCaller.getField()
+ );
+
+ if( oocEdgeExisting == null ) {
+ // for consistency, map one out-of-context "identifier"
+ // to one heap region node id, otherwise no convergence
+ String oocid = "oocid"+
+ fmCallee+
+ hrnDstCallee.getIDString()+
+ oocNodeType+
+ reCaller.getType()+
+ reCaller.getField();
+
+ Integer oocHrnID = oocid2hrnid.get(oocid);
+
+ HeapRegionNode hrnCalleeAndOutContext;
+
+ if( oocHrnID == null ) {
+
+ hrnCalleeAndOutContext =
+ rg.createNewHeapRegionNode(null, // ID
+ false, // single object?
+ false, // new summary?
+ true, // out-of-context?
+ oocNodeType,
+ null, // alloc site, shouldn't be used
+ toCalleeContext(oocReach,
+ preds,
+ oocHrnIdOoc2callee
+ ),
+ toCalleeContext(oocReach,
+ preds,
+ oocHrnIdOoc2callee
+ ),
+ preds,
+ "out-of-context"
+ );
+
+ oocid2hrnid.put(oocid, hrnCalleeAndOutContext.getID() );
+
+ } else {
+
+ // the mapping already exists, so see if node is there
+ hrnCalleeAndOutContext = rg.id2hrn.get(oocHrnID);
+
+ if( hrnCalleeAndOutContext == null ) {
+ // nope, make it
+ hrnCalleeAndOutContext =
+ rg.createNewHeapRegionNode(oocHrnID, // ID
+ false, // single object?
+ false, // new summary?
+ true, // out-of-context?
+ oocNodeType,
+ null, // alloc site, shouldn't be used
+ toCalleeContext(oocReach,
+ preds,
+ oocHrnIdOoc2callee
+ ),
+ toCalleeContext(oocReach,
+ preds,
+ oocHrnIdOoc2callee
+ ),
+ preds,
+ "out-of-context"
+ );
+
+ } else {
+ // otherwise it is there, so merge reachability
+ hrnCalleeAndOutContext.setAlpha(Canonical.unionORpreds(hrnCalleeAndOutContext.getAlpha(),
+ toCalleeContext(oocReach,
+ preds,
+ oocHrnIdOoc2callee
+ )
+ )
+ );
+ }
+ }
- if( oocEdgeExisting == null ) {
- // for consistency, map one out-of-context "identifier"
- // to one heap region node id, otherwise no convergence
- String oocid = "oocid"+
- fmCallee+
- hrnDstCallee.getIDString()+
- oocNodeType+
- reCaller.getType()+
- reCaller.getField();
-
- Integer oocHrnID = oocid2hrnid.get( oocid );
-
- HeapRegionNode hrnCalleeAndOutContext;
-
- if( oocHrnID == null ) {
-
- hrnCalleeAndOutContext =
- rg.createNewHeapRegionNode( null, // ID
- false, // single object?
- false, // new summary?
- true, // out-of-context?
- oocNodeType,
- null, // alloc site, shouldn't be used
- toCalleeContext( oocReach,
- preds,
- oocHrnIdOoc2callee
- ),
- toCalleeContext( oocReach,
- preds,
- oocHrnIdOoc2callee
- ),
- preds,
- "out-of-context"
- );
-
- oocid2hrnid.put( oocid, hrnCalleeAndOutContext.getID() );
-
- } else {
-
- // the mapping already exists, so see if node is there
- hrnCalleeAndOutContext = rg.id2hrn.get( oocHrnID );
-
- if( hrnCalleeAndOutContext == null ) {
- // nope, make it
- hrnCalleeAndOutContext =
- rg.createNewHeapRegionNode( oocHrnID, // ID
- false, // single object?
- false, // new summary?
- true, // out-of-context?
- oocNodeType,
- null, // alloc site, shouldn't be used
- toCalleeContext( oocReach,
- preds,
- oocHrnIdOoc2callee
- ),
- toCalleeContext( oocReach,
- preds,
- oocHrnIdOoc2callee
- ),
- preds,
- "out-of-context"
- );
-
- } else {
- // otherwise it is there, so merge reachability
- hrnCalleeAndOutContext.setAlpha( Canonical.unionORpreds( hrnCalleeAndOutContext.getAlpha(),
- toCalleeContext( oocReach,
- preds,
- oocHrnIdOoc2callee
- )
- )
- );
- }
- }
-
- assert hrnCalleeAndOutContext.reachHasOnlyOOC();
-
- rg.addRefEdge( hrnCalleeAndOutContext,
- hrnDstCallee,
- new RefEdge( hrnCalleeAndOutContext,
- hrnDstCallee,
- reCaller.getType(),
- reCaller.getField(),
- toCalleeContext( reCaller.getBeta(),
- preds,
- oocHrnIdOoc2callee
- ),
- preds,
- toCalleeContext( reCaller.getTaints(),
- preds )
- )
- );
-
- } else {
- // the out-of-context edge already exists
- oocEdgeExisting.setBeta( Canonical.unionORpreds( oocEdgeExisting.getBeta(),
- toCalleeContext( reCaller.getBeta(),
- preds,
- oocHrnIdOoc2callee
- )
- )
- );
-
- oocEdgeExisting.setPreds( Canonical.join( oocEdgeExisting.getPreds(),
- preds
- )
- );
-
- oocEdgeExisting.setTaints( Canonical.unionORpreds( oocEdgeExisting.getTaints(),
- toCalleeContext( reCaller.getTaints(),
- preds
- )
- )
- );
-
- HeapRegionNode hrnCalleeAndOutContext =
- (HeapRegionNode) oocEdgeExisting.getSrc();
- hrnCalleeAndOutContext.setAlpha( Canonical.unionORpreds( hrnCalleeAndOutContext.getAlpha(),
- toCalleeContext( oocReach,
- preds,
- oocHrnIdOoc2callee
- )
- )
- );
-
- assert hrnCalleeAndOutContext.reachHasOnlyOOC();
- }
- }
-
-
- if( writeDebugDOTs ) {
- debugGraphPrefix = String.format( "call%03d", debugCallSiteVisitCounter );
- rg.writeGraph( debugGraphPrefix+"calleeview",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints );
+ assert hrnCalleeAndOutContext.reachHasOnlyOOC();
+
+ rg.addRefEdge(hrnCalleeAndOutContext,
+ hrnDstCallee,
+ new RefEdge(hrnCalleeAndOutContext,
+ hrnDstCallee,
+ reCaller.getType(),
+ reCaller.getField(),
+ toCalleeContext(reCaller.getBeta(),
+ preds,
+ oocHrnIdOoc2callee
+ ),
+ preds,
+ toCalleeContext(reCaller.getTaints(),
+ preds)
+ )
+ );
+
+ } else {
+ // the out-of-context edge already exists
+ oocEdgeExisting.setBeta(Canonical.unionORpreds(oocEdgeExisting.getBeta(),
+ toCalleeContext(reCaller.getBeta(),
+ preds,
+ oocHrnIdOoc2callee
+ )
+ )
+ );
+
+ oocEdgeExisting.setPreds(Canonical.join(oocEdgeExisting.getPreds(),
+ preds
+ )
+ );
+
+ oocEdgeExisting.setTaints(Canonical.unionORpreds(oocEdgeExisting.getTaints(),
+ toCalleeContext(reCaller.getTaints(),
+ preds
+ )
+ )
+ );
+
+ HeapRegionNode hrnCalleeAndOutContext =
+ (HeapRegionNode) oocEdgeExisting.getSrc();
+ hrnCalleeAndOutContext.setAlpha(Canonical.unionORpreds(hrnCalleeAndOutContext.getAlpha(),
+ toCalleeContext(oocReach,
+ preds,
+ oocHrnIdOoc2callee
+ )
+ )
+ );
+
+ assert hrnCalleeAndOutContext.reachHasOnlyOOC();
+ }
+ }
+
+
+ if( writeDebugDOTs ) {
+ debugGraphPrefix = String.format("call%03d", debugCallSiteVisitCounter);
+ rg.writeGraph(debugGraphPrefix+"calleeview",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints);
}
return rg;
- }
+ }
- private static Hashtable<String, Integer> oocid2hrnid =
+ private static Hashtable<String, Integer> oocid2hrnid =
new Hashtable<String, Integer>();
static int debugCallSiteVisitStartCapture;
static int debugCallSiteNumVisitsToCapture;
static boolean debugCallSiteStopAfter;
-
- public void
- resolveMethodCall( FlatCall fc,
- FlatMethod fmCallee,
- ReachGraph rgCallee,
- Set<Integer> callerNodeIDsCopiedToCallee,
- boolean writeDebugDOTs
- ) {
+
+ public void
+ resolveMethodCall(FlatCall fc,
+ FlatMethod fmCallee,
+ ReachGraph rgCallee,
+ Set<Integer> callerNodeIDsCopiedToCallee,
+ boolean writeDebugDOTs
+ ) {
if( writeDebugDOTs ) {
- System.out.println( " Writing out visit "+
- debugCallSiteVisitCounter+
- " to debug call site" );
-
- debugGraphPrefix = String.format( "call%03d",
- debugCallSiteVisitCounter );
-
- rgCallee.writeGraph( debugGraphPrefix+"callee",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints );
-
- writeGraph( debugGraphPrefix+"caller00In",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints,
- callerNodeIDsCopiedToCallee );
+ System.out.println(" Writing out visit "+
+ debugCallSiteVisitCounter+
+ " to debug call site");
+
+ debugGraphPrefix = String.format("call%03d",
+ debugCallSiteVisitCounter);
+
+ rgCallee.writeGraph(debugGraphPrefix+"callee",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints);
+
+ writeGraph(debugGraphPrefix+"caller00In",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints,
+ callerNodeIDsCopiedToCallee);
}
// method call transfer function steps:
- // 1. Use current callee-reachable heap (CRH) to test callee
+ // 1. Use current callee-reachable heap (CRH) to test callee
// predicates and mark what will be coming in.
// 2. Wipe CRH out of caller.
// 3. Transplant marked callee parts in:
// 1. mark what callee elements have satisfied predicates
Hashtable<HeapRegionNode, ExistPredSet> calleeNodesSatisfied =
new Hashtable<HeapRegionNode, ExistPredSet>();
-
+
Hashtable<RefEdge, ExistPredSet> calleeEdgesSatisfied =
new Hashtable<RefEdge, ExistPredSet>();
Hashtable< HeapRegionNode, Hashtable<ReachState, ExistPredSet> >
- calleeNode2calleeStatesSatisfied =
+ calleeNode2calleeStatesSatisfied =
new Hashtable< HeapRegionNode, Hashtable<ReachState, ExistPredSet> >();
Hashtable< RefEdge, Hashtable<ReachState, ExistPredSet> >
- calleeEdge2calleeStatesSatisfied =
+ calleeEdge2calleeStatesSatisfied =
new Hashtable< RefEdge, Hashtable<ReachState, ExistPredSet> >();
Hashtable< RefEdge, Hashtable<Taint, ExistPredSet> >
- calleeEdge2calleeTaintsSatisfied =
+ calleeEdge2calleeTaintsSatisfied =
new Hashtable< RefEdge, Hashtable<Taint, ExistPredSet> >();
Hashtable< RefEdge, Set<RefSrcNode> > calleeEdges2oocCallerSrcMatches =
Iterator meItr = rgCallee.id2hrn.entrySet().iterator();
while( meItr.hasNext() ) {
- Map.Entry me = (Map.Entry) meItr.next();
- Integer id = (Integer) me.getKey();
+ Map.Entry me = (Map.Entry)meItr.next();
+ Integer id = (Integer) me.getKey();
HeapRegionNode hrnCallee = (HeapRegionNode) me.getValue();
// if a callee element's predicates are satisfied then a set
// of CALLER predicates is returned: they are the predicates
// that the callee element moved into the caller context
// should have, and it is inefficient to find this again later
- ExistPredSet predsIfSatis =
- hrnCallee.getPreds().isSatisfiedBy( this,
- callerNodeIDsCopiedToCallee
- );
+ ExistPredSet predsIfSatis =
+ hrnCallee.getPreds().isSatisfiedBy(this,
+ callerNodeIDsCopiedToCallee
+ );
if( predsIfSatis != null ) {
- calleeNodesSatisfied.put( hrnCallee, predsIfSatis );
+ calleeNodesSatisfied.put(hrnCallee, predsIfSatis);
} else {
- // otherwise don't bother looking at edges to this node
- continue;
+ // otherwise don't bother looking at edges to this node
+ continue;
}
-
+
// since the node is coming over, find out which reach
// states on it should come over, too
- assert calleeNode2calleeStatesSatisfied.get( hrnCallee ) == null;
+ assert calleeNode2calleeStatesSatisfied.get(hrnCallee) == null;
Iterator<ReachState> stateItr = hrnCallee.getAlpha().iterator();
while( stateItr.hasNext() ) {
- ReachState stateCallee = stateItr.next();
-
- predsIfSatis =
- stateCallee.getPreds().isSatisfiedBy( this,
- callerNodeIDsCopiedToCallee
- );
- if( predsIfSatis != null ) {
-
- Hashtable<ReachState, ExistPredSet> calleeStatesSatisfied =
- calleeNode2calleeStatesSatisfied.get( hrnCallee );
-
- if( calleeStatesSatisfied == null ) {
- calleeStatesSatisfied =
- new Hashtable<ReachState, ExistPredSet>();
-
- calleeNode2calleeStatesSatisfied.put( hrnCallee, calleeStatesSatisfied );
- }
-
- calleeStatesSatisfied.put( stateCallee, predsIfSatis );
- }
+ ReachState stateCallee = stateItr.next();
+
+ predsIfSatis =
+ stateCallee.getPreds().isSatisfiedBy(this,
+ callerNodeIDsCopiedToCallee
+ );
+ if( predsIfSatis != null ) {
+
+ Hashtable<ReachState, ExistPredSet> calleeStatesSatisfied =
+ calleeNode2calleeStatesSatisfied.get(hrnCallee);
+
+ if( calleeStatesSatisfied == null ) {
+ calleeStatesSatisfied =
+ new Hashtable<ReachState, ExistPredSet>();
+
+ calleeNode2calleeStatesSatisfied.put(hrnCallee, calleeStatesSatisfied);
+ }
+
+ calleeStatesSatisfied.put(stateCallee, predsIfSatis);
+ }
}
// then look at edges to the node
Iterator<RefEdge> reItr = hrnCallee.iteratorToReferencers();
while( reItr.hasNext() ) {
- RefEdge reCallee = reItr.next();
- RefSrcNode rsnCallee = reCallee.getSrc();
-
- // (caller local variables to in-context heap regions)
- // have an (out-of-context heap region -> in-context heap region)
- // abstraction in the callEE, so its true we never need to
- // look at a (var node -> heap region) edge in callee to bring
- // those over for the call site transfer, except for the special
- // case of *RETURN var* -> heap region edges.
- // What about (param var->heap region)
- // edges in callee? They are dealt with below this loop.
-
- if( rsnCallee instanceof VariableNode ) {
-
- // looking for the return-value variable only
- VariableNode vnCallee = (VariableNode) rsnCallee;
- if( vnCallee.getTempDescriptor() != tdReturn ) {
- continue;
- }
-
- TempDescriptor returnTemp = fc.getReturnTemp();
- if( returnTemp == null ||
- !DisjointAnalysis.shouldAnalysisTrack( returnTemp.getType() )
- ) {
- continue;
- }
-
- // note that the assignment of the return value is to a
- // variable in the caller which is out-of-context with
- // respect to the callee
- VariableNode vnLhsCaller = getVariableNodeFromTemp( returnTemp );
- Set<RefSrcNode> rsnCallers = new HashSet<RefSrcNode>();
- rsnCallers.add( vnLhsCaller );
- calleeEdges2oocCallerSrcMatches.put( reCallee, rsnCallers );
-
-
- } else {
- // for HeapRegionNode callee sources...
-
- // first see if the source is out-of-context, and only
- // proceed with this edge if we find some caller-context
- // matches
- HeapRegionNode hrnSrcCallee = (HeapRegionNode) rsnCallee;
- boolean matchedOutOfContext = false;
-
- if( !hrnSrcCallee.isOutOfContext() ) {
-
- predsIfSatis =
- hrnSrcCallee.getPreds().isSatisfiedBy( this,
- callerNodeIDsCopiedToCallee
- );
- if( predsIfSatis != null ) {
- calleeNodesSatisfied.put( hrnSrcCallee, predsIfSatis );
- } else {
- // otherwise forget this edge
- continue;
- }
-
- } else {
- // hrnSrcCallee is out-of-context
-
- assert !calleeEdges2oocCallerSrcMatches.containsKey( reCallee );
-
- Set<RefSrcNode> rsnCallers = new HashSet<RefSrcNode>();
-
- // is the target node in the caller?
- HeapRegionNode hrnDstCaller = this.id2hrn.get( hrnCallee.getID() );
- if( hrnDstCaller == null ) {
- continue;
- }
-
- Iterator<RefEdge> reDstItr = hrnDstCaller.iteratorToReferencers();
- while( reDstItr.hasNext() ) {
- // the edge and field (either possibly null) must match
- RefEdge reCaller = reDstItr.next();
-
- if( !reCaller.typeEquals ( reCallee.getType() ) ||
- !reCaller.fieldEquals( reCallee.getField() )
- ) {
- continue;
- }
-
- RefSrcNode rsnCaller = reCaller.getSrc();
- if( rsnCaller instanceof VariableNode ) {
-
- // a variable node matches an OOC region with null type
- if( hrnSrcCallee.getType() != null ) {
- continue;
- }
-
- } else {
- // otherwise types should match
- HeapRegionNode hrnCallerSrc = (HeapRegionNode) rsnCaller;
- if( hrnSrcCallee.getType() == null ) {
- if( hrnCallerSrc.getType() != null ) {
- continue;
- }
- } else {
- if( !hrnSrcCallee.getType().equals( hrnCallerSrc.getType() ) ) {
- continue;
- }
- }
- }
-
- rsnCallers.add( rsnCaller );
- matchedOutOfContext = true;
- }
-
- if( !rsnCallers.isEmpty() ) {
- calleeEdges2oocCallerSrcMatches.put( reCallee, rsnCallers );
- }
- }
-
- if( hrnSrcCallee.isOutOfContext() &&
- !matchedOutOfContext ) {
- continue;
- }
- }
-
-
- predsIfSatis =
- reCallee.getPreds().isSatisfiedBy( this,
- callerNodeIDsCopiedToCallee
- );
-
- if( predsIfSatis != null ) {
- calleeEdgesSatisfied.put( reCallee, predsIfSatis );
-
- // since the edge is coming over, find out which reach
- // states on it should come over, too
- assert calleeEdge2calleeStatesSatisfied.get( reCallee ) == null;
-
- stateItr = reCallee.getBeta().iterator();
- while( stateItr.hasNext() ) {
- ReachState stateCallee = stateItr.next();
-
- predsIfSatis =
- stateCallee.getPreds().isSatisfiedBy( this,
- callerNodeIDsCopiedToCallee
- );
- if( predsIfSatis != null ) {
-
- Hashtable<ReachState, ExistPredSet> calleeStatesSatisfied =
- calleeEdge2calleeStatesSatisfied.get( reCallee );
-
- if( calleeStatesSatisfied == null ) {
- calleeStatesSatisfied =
- new Hashtable<ReachState, ExistPredSet>();
-
- calleeEdge2calleeStatesSatisfied.put( reCallee, calleeStatesSatisfied );
- }
-
- calleeStatesSatisfied.put( stateCallee, predsIfSatis );
- }
- }
-
- // since the edge is coming over, find out which taints
- // on it should come over, too
- assert calleeEdge2calleeTaintsSatisfied.get( reCallee ) == null;
-
- Iterator<Taint> tItr = reCallee.getTaints().iterator();
- while( tItr.hasNext() ) {
- Taint tCallee = tItr.next();
-
- predsIfSatis =
- tCallee.getPreds().isSatisfiedBy( this,
- callerNodeIDsCopiedToCallee
- );
- if( predsIfSatis != null ) {
-
- Hashtable<Taint, ExistPredSet> calleeTaintsSatisfied =
- calleeEdge2calleeTaintsSatisfied.get( reCallee );
-
- if( calleeTaintsSatisfied == null ) {
- calleeTaintsSatisfied =
- new Hashtable<Taint, ExistPredSet>();
-
- calleeEdge2calleeTaintsSatisfied.put( reCallee, calleeTaintsSatisfied );
- }
-
- calleeTaintsSatisfied.put( tCallee, predsIfSatis );
- }
- }
- }
+ RefEdge reCallee = reItr.next();
+ RefSrcNode rsnCallee = reCallee.getSrc();
+
+ // (caller local variables to in-context heap regions)
+ // have an (out-of-context heap region -> in-context heap region)
+ // abstraction in the callEE, so its true we never need to
+ // look at a (var node -> heap region) edge in callee to bring
+ // those over for the call site transfer, except for the special
+ // case of *RETURN var* -> heap region edges.
+ // What about (param var->heap region)
+ // edges in callee? They are dealt with below this loop.
+
+ if( rsnCallee instanceof VariableNode ) {
+
+ // looking for the return-value variable only
+ VariableNode vnCallee = (VariableNode) rsnCallee;
+ if( vnCallee.getTempDescriptor() != tdReturn ) {
+ continue;
+ }
+
+ TempDescriptor returnTemp = fc.getReturnTemp();
+ if( returnTemp == null ||
+ !DisjointAnalysis.shouldAnalysisTrack(returnTemp.getType() )
+ ) {
+ continue;
+ }
+
+ // note that the assignment of the return value is to a
+ // variable in the caller which is out-of-context with
+ // respect to the callee
+ VariableNode vnLhsCaller = getVariableNodeFromTemp(returnTemp);
+ Set<RefSrcNode> rsnCallers = new HashSet<RefSrcNode>();
+ rsnCallers.add(vnLhsCaller);
+ calleeEdges2oocCallerSrcMatches.put(reCallee, rsnCallers);
+
+
+ } else {
+ // for HeapRegionNode callee sources...
+
+ // first see if the source is out-of-context, and only
+ // proceed with this edge if we find some caller-context
+ // matches
+ HeapRegionNode hrnSrcCallee = (HeapRegionNode) rsnCallee;
+ boolean matchedOutOfContext = false;
+
+ if( !hrnSrcCallee.isOutOfContext() ) {
+
+ predsIfSatis =
+ hrnSrcCallee.getPreds().isSatisfiedBy(this,
+ callerNodeIDsCopiedToCallee
+ );
+ if( predsIfSatis != null ) {
+ calleeNodesSatisfied.put(hrnSrcCallee, predsIfSatis);
+ } else {
+ // otherwise forget this edge
+ continue;
+ }
+
+ } else {
+ // hrnSrcCallee is out-of-context
+
+ assert !calleeEdges2oocCallerSrcMatches.containsKey(reCallee);
+
+ Set<RefSrcNode> rsnCallers = new HashSet<RefSrcNode>();
+
+ // is the target node in the caller?
+ HeapRegionNode hrnDstCaller = this.id2hrn.get(hrnCallee.getID() );
+ if( hrnDstCaller == null ) {
+ continue;
+ }
+
+ Iterator<RefEdge> reDstItr = hrnDstCaller.iteratorToReferencers();
+ while( reDstItr.hasNext() ) {
+ // the edge and field (either possibly null) must match
+ RefEdge reCaller = reDstItr.next();
+
+ if( !reCaller.typeEquals(reCallee.getType() ) ||
+ !reCaller.fieldEquals(reCallee.getField() )
+ ) {
+ continue;
+ }
+
+ RefSrcNode rsnCaller = reCaller.getSrc();
+ if( rsnCaller instanceof VariableNode ) {
+
+ // a variable node matches an OOC region with null type
+ if( hrnSrcCallee.getType() != null ) {
+ continue;
+ }
+
+ } else {
+ // otherwise types should match
+ HeapRegionNode hrnCallerSrc = (HeapRegionNode) rsnCaller;
+ if( hrnSrcCallee.getType() == null ) {
+ if( hrnCallerSrc.getType() != null ) {
+ continue;
+ }
+ } else {
+ if( !hrnSrcCallee.getType().equals(hrnCallerSrc.getType() ) ) {
+ continue;
+ }
+ }
+ }
+
+ rsnCallers.add(rsnCaller);
+ matchedOutOfContext = true;
+ }
+
+ if( !rsnCallers.isEmpty() ) {
+ calleeEdges2oocCallerSrcMatches.put(reCallee, rsnCallers);
+ }
+ }
+
+ if( hrnSrcCallee.isOutOfContext() &&
+ !matchedOutOfContext ) {
+ continue;
+ }
+ }
+
+
+ predsIfSatis =
+ reCallee.getPreds().isSatisfiedBy(this,
+ callerNodeIDsCopiedToCallee
+ );
+
+ if( predsIfSatis != null ) {
+ calleeEdgesSatisfied.put(reCallee, predsIfSatis);
+
+ // since the edge is coming over, find out which reach
+ // states on it should come over, too
+ assert calleeEdge2calleeStatesSatisfied.get(reCallee) == null;
+
+ stateItr = reCallee.getBeta().iterator();
+ while( stateItr.hasNext() ) {
+ ReachState stateCallee = stateItr.next();
+
+ predsIfSatis =
+ stateCallee.getPreds().isSatisfiedBy(this,
+ callerNodeIDsCopiedToCallee
+ );
+ if( predsIfSatis != null ) {
+
+ Hashtable<ReachState, ExistPredSet> calleeStatesSatisfied =
+ calleeEdge2calleeStatesSatisfied.get(reCallee);
+
+ if( calleeStatesSatisfied == null ) {
+ calleeStatesSatisfied =
+ new Hashtable<ReachState, ExistPredSet>();
+
+ calleeEdge2calleeStatesSatisfied.put(reCallee, calleeStatesSatisfied);
+ }
+
+ calleeStatesSatisfied.put(stateCallee, predsIfSatis);
+ }
+ }
+
+ // since the edge is coming over, find out which taints
+ // on it should come over, too
+ assert calleeEdge2calleeTaintsSatisfied.get(reCallee) == null;
+
+ Iterator<Taint> tItr = reCallee.getTaints().iterator();
+ while( tItr.hasNext() ) {
+ Taint tCallee = tItr.next();
+
+ predsIfSatis =
+ tCallee.getPreds().isSatisfiedBy(this,
+ callerNodeIDsCopiedToCallee
+ );
+ if( predsIfSatis != null ) {
+
+ Hashtable<Taint, ExistPredSet> calleeTaintsSatisfied =
+ calleeEdge2calleeTaintsSatisfied.get(reCallee);
+
+ if( calleeTaintsSatisfied == null ) {
+ calleeTaintsSatisfied =
+ new Hashtable<Taint, ExistPredSet>();
+
+ calleeEdge2calleeTaintsSatisfied.put(reCallee, calleeTaintsSatisfied);
+ }
+
+ calleeTaintsSatisfied.put(tCallee, predsIfSatis);
+ }
+ }
+ }
}
}
if( writeDebugDOTs ) {
- writeGraph( debugGraphPrefix+"caller20BeforeWipe",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints );
+ writeGraph(debugGraphPrefix+"caller20BeforeWipe",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints);
}
// 2. predicates tested, ok to wipe out caller part
Iterator<Integer> hrnItr = callerNodeIDsCopiedToCallee.iterator();
while( hrnItr.hasNext() ) {
- Integer hrnID = hrnItr.next();
- HeapRegionNode hrnCaller = id2hrn.get( hrnID );
+ Integer hrnID = hrnItr.next();
+ HeapRegionNode hrnCaller = id2hrn.get(hrnID);
assert hrnCaller != null;
// when clearing off nodes, also eliminate variable
// references
- wipeOut( hrnCaller, true );
+ wipeOut(hrnCaller, true);
}
// if we are assigning the return value to something, clobber now
// as part of the wipe
TempDescriptor returnTemp = fc.getReturnTemp();
- if( returnTemp != null &&
- DisjointAnalysis.shouldAnalysisTrack( returnTemp.getType() )
+ if( returnTemp != null &&
+ DisjointAnalysis.shouldAnalysisTrack(returnTemp.getType() )
) {
-
- VariableNode vnLhsCaller = getVariableNodeFromTemp( returnTemp );
- clearRefEdgesFrom( vnLhsCaller, null, null, true );
+
+ VariableNode vnLhsCaller = getVariableNodeFromTemp(returnTemp);
+ clearRefEdgesFrom(vnLhsCaller, null, null, true);
}
if( writeDebugDOTs ) {
- writeGraph( debugGraphPrefix+"caller30BeforeAddingNodes",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints );
+ writeGraph(debugGraphPrefix+"caller30BeforeAddingNodes",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints);
}
// 3.a) nodes
Iterator satisItr = calleeNodesSatisfied.entrySet().iterator();
while( satisItr.hasNext() ) {
- Map.Entry me = (Map.Entry) satisItr.next();
+ Map.Entry me = (Map.Entry)satisItr.next();
HeapRegionNode hrnCallee = (HeapRegionNode) me.getKey();
- ExistPredSet preds = (ExistPredSet) me.getValue();
+ ExistPredSet preds = (ExistPredSet) me.getValue();
// TODO: I think its true that the current implementation uses
// the type of the OOC region and the predicates OF THE EDGE from
// it to link everything up in caller context, so that's why we're
// skipping this... maybe that's a sillier way to do it?
if( hrnCallee.isOutOfContext() ) {
- continue;
+ continue;
}
- AllocSite as = hrnCallee.getAllocSite();
- allocSites.add( as );
+ AllocSite as = hrnCallee.getAllocSite();
+ allocSites.add(as);
- Integer hrnIDshadow = as.getShadowIDfromID( hrnCallee.getID() );
+ Integer hrnIDshadow = as.getShadowIDfromID(hrnCallee.getID() );
- HeapRegionNode hrnCaller = id2hrn.get( hrnIDshadow );
+ HeapRegionNode hrnCaller = id2hrn.get(hrnIDshadow);
if( hrnCaller == null ) {
- hrnCaller =
- createNewHeapRegionNode( hrnIDshadow, // id or null to generate a new one
- hrnCallee.isSingleObject(), // single object?
- hrnCallee.isNewSummary(), // summary?
- false, // out-of-context?
- hrnCallee.getType(), // type
- hrnCallee.getAllocSite(), // allocation site
- toCallerContext( hrnCallee.getInherent(),
- calleeNode2calleeStatesSatisfied.get( hrnCallee ) ), // inherent reach
- null, // current reach
- predsEmpty, // predicates
- hrnCallee.getDescription() // description
- );
+ hrnCaller =
+ createNewHeapRegionNode(hrnIDshadow, // id or null to generate a new one
+ hrnCallee.isSingleObject(), // single object?
+ hrnCallee.isNewSummary(), // summary?
+ false, // out-of-context?
+ hrnCallee.getType(), // type
+ hrnCallee.getAllocSite(), // allocation site
+ toCallerContext(hrnCallee.getInherent(),
+ calleeNode2calleeStatesSatisfied.get(hrnCallee) ), // inherent reach
+ null, // current reach
+ predsEmpty, // predicates
+ hrnCallee.getDescription() // description
+ );
} else {
- assert hrnCaller.isWiped();
+ assert hrnCaller.isWiped();
}
- hrnCaller.setAlpha( toCallerContext( hrnCallee.getAlpha(),
- calleeNode2calleeStatesSatisfied.get( hrnCallee )
- )
- );
+ hrnCaller.setAlpha(toCallerContext(hrnCallee.getAlpha(),
+ calleeNode2calleeStatesSatisfied.get(hrnCallee)
+ )
+ );
- hrnCaller.setPreds( preds );
+ hrnCaller.setPreds(preds);
}
if( writeDebugDOTs ) {
- writeGraph( debugGraphPrefix+"caller31BeforeAddingEdges",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints );
+ writeGraph(debugGraphPrefix+"caller31BeforeAddingEdges",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints);
}
// which includes return temp -> callee edges now, too
satisItr = calleeEdgesSatisfied.entrySet().iterator();
while( satisItr.hasNext() ) {
- Map.Entry me = (Map.Entry) satisItr.next();
- RefEdge reCallee = (RefEdge) me.getKey();
+ Map.Entry me = (Map.Entry)satisItr.next();
+ RefEdge reCallee = (RefEdge) me.getKey();
ExistPredSet preds = (ExistPredSet) me.getValue();
HeapRegionNode hrnDstCallee = reCallee.getDst();
- AllocSite asDst = hrnDstCallee.getAllocSite();
- allocSites.add( asDst );
+ AllocSite asDst = hrnDstCallee.getAllocSite();
+ allocSites.add(asDst);
+
+ Integer hrnIDDstShadow =
+ asDst.getShadowIDfromID(hrnDstCallee.getID() );
+
+ HeapRegionNode hrnDstCaller = id2hrn.get(hrnIDDstShadow);
+ assert hrnDstCaller != null;
+
+
+ RefSrcNode rsnCallee = reCallee.getSrc();
+
+ Set<RefSrcNode> rsnCallers =
+ new HashSet<RefSrcNode>();
+
+ Set<RefSrcNode> oocCallers =
+ calleeEdges2oocCallerSrcMatches.get(reCallee);
+
+ if( rsnCallee instanceof HeapRegionNode ) {
+ HeapRegionNode hrnCalleeSrc = (HeapRegionNode) rsnCallee;
+ if( hrnCalleeSrc.isOutOfContext() ) {
+ assert oocCallers != null;
+ }
+ }
+
+
+ if( oocCallers == null ) {
+ // there are no out-of-context matches, so it's
+ // either a param/arg var or one in-context heap region
+ if( rsnCallee instanceof VariableNode ) {
+ // variable -> node in the callee should only
+ // come into the caller if it's from a param var
+ VariableNode vnCallee = (VariableNode) rsnCallee;
+ TempDescriptor tdParam = vnCallee.getTempDescriptor();
+ TempDescriptor tdArg = fc.getArgMatchingParam(fmCallee,
+ tdParam);
+ if( tdArg == null ) {
+ // this means the variable isn't a parameter, it's local
+ // to the callee so we ignore it in call site transfer
+ // shouldn't this NEVER HAPPEN?
+ assert false;
+ }
+
+ rsnCallers.add(this.getVariableNodeFromTemp(tdArg) );
+
+ } else {
+ // otherwise source is in context, one region
+
+ HeapRegionNode hrnSrcCallee = (HeapRegionNode) rsnCallee;
+
+ // translate an in-context node to shadow
+ AllocSite asSrc = hrnSrcCallee.getAllocSite();
+ allocSites.add(asSrc);
- Integer hrnIDDstShadow =
- asDst.getShadowIDfromID( hrnDstCallee.getID() );
-
- HeapRegionNode hrnDstCaller = id2hrn.get( hrnIDDstShadow );
- assert hrnDstCaller != null;
-
-
- RefSrcNode rsnCallee = reCallee.getSrc();
+ Integer hrnIDSrcShadow =
+ asSrc.getShadowIDfromID(hrnSrcCallee.getID() );
- Set<RefSrcNode> rsnCallers =
- new HashSet<RefSrcNode>();
-
- Set<RefSrcNode> oocCallers =
- calleeEdges2oocCallerSrcMatches.get( reCallee );
+ HeapRegionNode hrnSrcCallerShadow =
+ this.id2hrn.get(hrnIDSrcShadow);
- if( rsnCallee instanceof HeapRegionNode ) {
- HeapRegionNode hrnCalleeSrc = (HeapRegionNode) rsnCallee;
- if( hrnCalleeSrc.isOutOfContext() ) {
- assert oocCallers != null;
- }
- }
+ assert hrnSrcCallerShadow != null;
-
- if( oocCallers == null ) {
- // there are no out-of-context matches, so it's
- // either a param/arg var or one in-context heap region
- if( rsnCallee instanceof VariableNode ) {
- // variable -> node in the callee should only
- // come into the caller if its from a param var
- VariableNode vnCallee = (VariableNode) rsnCallee;
- TempDescriptor tdParam = vnCallee.getTempDescriptor();
- TempDescriptor tdArg = fc.getArgMatchingParam( fmCallee,
- tdParam );
- if( tdArg == null ) {
- // this means the variable isn't a parameter, its local
- // to the callee so we ignore it in call site transfer
- // shouldn't this NEVER HAPPEN?
- assert false;
- }
-
- rsnCallers.add( this.getVariableNodeFromTemp( tdArg ) );
-
- } else {
- // otherwise source is in context, one region
-
- HeapRegionNode hrnSrcCallee = (HeapRegionNode) rsnCallee;
-
- // translate an in-context node to shadow
- AllocSite asSrc = hrnSrcCallee.getAllocSite();
- allocSites.add( asSrc );
-
- Integer hrnIDSrcShadow =
- asSrc.getShadowIDfromID( hrnSrcCallee.getID() );
-
- HeapRegionNode hrnSrcCallerShadow =
- this.id2hrn.get( hrnIDSrcShadow );
-
- assert hrnSrcCallerShadow != null;
-
- rsnCallers.add( hrnSrcCallerShadow );
- }
+ rsnCallers.add(hrnSrcCallerShadow);
+ }
} else {
- // otherwise we have a set of out-of-context srcs
- // that should NOT be translated to shadow nodes
- assert !oocCallers.isEmpty();
- rsnCallers.addAll( oocCallers );
+ // otherwise we have a set of out-of-context srcs
+ // that should NOT be translated to shadow nodes
+ assert !oocCallers.isEmpty();
+ rsnCallers.addAll(oocCallers);
}
// now make all caller edges we've identified from
assert !rsnCallers.isEmpty();
Iterator<RefSrcNode> rsnItr = rsnCallers.iterator();
while( rsnItr.hasNext() ) {
- RefSrcNode rsnCaller = rsnItr.next();
-
- RefEdge reCaller = new RefEdge( rsnCaller,
- hrnDstCaller,
- reCallee.getType(),
- reCallee.getField(),
- toCallerContext( reCallee.getBeta(),
- calleeEdge2calleeStatesSatisfied.get( reCallee ) ),
- preds,
- toCallerContext( reCallee.getTaints(),
- calleeEdge2calleeTaintsSatisfied.get( reCallee ) )
- );
-
- ChangeSet cs = ChangeSet.factory();
- Iterator<ReachState> rsItr = reCaller.getBeta().iterator();
- while( rsItr.hasNext() ) {
- ReachState state = rsItr.next();
- ExistPredSet predsPreCallee = state.getPreds();
-
- if( state.isEmpty() ) {
- continue;
- }
-
- Iterator<ExistPred> predItr = predsPreCallee.iterator();
- while( predItr.hasNext() ) {
- ExistPred pred = predItr.next();
- ReachState old = pred.ne_state;
-
- if( old == null ) {
- old = rstateEmpty;
- }
-
- cs = Canonical.add( cs,
- ChangeTuple.factory( old,
- state
- )
- );
- }
- }
-
- // we're just going to use the convenient "merge-if-exists"
- // edge call below, but still take a separate look if there
- // is an existing caller edge to build change sets properly
- if( !cs.isEmpty() ) {
- RefEdge edgeExisting = rsnCaller.getReferenceTo( hrnDstCaller,
- reCallee.getType(),
- reCallee.getField()
- );
- if( edgeExisting != null ) {
- ChangeSet csExisting = edgePlannedChanges.get( edgeExisting );
- if( csExisting == null ) {
- csExisting = ChangeSet.factory();
- }
- edgePlannedChanges.put( edgeExisting,
- Canonical.union( csExisting,
- cs
- )
- );
- } else {
- edgesForPropagation.add( reCaller );
- assert !edgePlannedChanges.containsKey( reCaller );
- edgePlannedChanges.put( reCaller, cs );
- }
- }
-
- // then add new caller edge or merge
- addEdgeOrMergeWithExisting( reCaller );
+ RefSrcNode rsnCaller = rsnItr.next();
+
+ RefEdge reCaller = new RefEdge(rsnCaller,
+ hrnDstCaller,
+ reCallee.getType(),
+ reCallee.getField(),
+ toCallerContext(reCallee.getBeta(),
+ calleeEdge2calleeStatesSatisfied.get(reCallee) ),
+ preds,
+ toCallerContext(reCallee.getTaints(),
+ calleeEdge2calleeTaintsSatisfied.get(reCallee) )
+ );
+
+ ChangeSet cs = ChangeSet.factory();
+ Iterator<ReachState> rsItr = reCaller.getBeta().iterator();
+ while( rsItr.hasNext() ) {
+ ReachState state = rsItr.next();
+ ExistPredSet predsPreCallee = state.getPreds();
+
+ if( state.isEmpty() ) {
+ continue;
+ }
+
+ Iterator<ExistPred> predItr = predsPreCallee.iterator();
+ while( predItr.hasNext() ) {
+ ExistPred pred = predItr.next();
+ ReachState old = pred.ne_state;
+
+ if( old == null ) {
+ old = rstateEmpty;
+ }
+
+ cs = Canonical.add(cs,
+ ChangeTuple.factory(old,
+ state
+ )
+ );
+ }
+ }
+
+ // we're just going to use the convenient "merge-if-exists"
+ // edge call below, but still take a separate look if there
+ // is an existing caller edge to build change sets properly
+ if( !cs.isEmpty() ) {
+ RefEdge edgeExisting = rsnCaller.getReferenceTo(hrnDstCaller,
+ reCallee.getType(),
+ reCallee.getField()
+ );
+ if( edgeExisting != null ) {
+ ChangeSet csExisting = edgePlannedChanges.get(edgeExisting);
+ if( csExisting == null ) {
+ csExisting = ChangeSet.factory();
+ }
+ edgePlannedChanges.put(edgeExisting,
+ Canonical.union(csExisting,
+ cs
+ )
+ );
+ } else {
+ edgesForPropagation.add(reCaller);
+ assert !edgePlannedChanges.containsKey(reCaller);
+ edgePlannedChanges.put(reCaller, cs);
+ }
+ }
+
+ // then add new caller edge or merge
+ addEdgeOrMergeWithExisting(reCaller);
}
}
if( writeDebugDOTs ) {
- writeGraph( debugGraphPrefix+"caller38propagateReach",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints );
+ writeGraph(debugGraphPrefix+"caller38propagateReach",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints);
}
// propagate callee reachability changes to the rest
// of the caller graph edges
HashSet<RefEdge> edgesUpdated = new HashSet<RefEdge>();
-
- propagateTokensOverEdges( edgesForPropagation, // source edges
- edgePlannedChanges, // map src edge to change set
- edgesUpdated ); // list of updated edges
-
+
+ propagateTokensOverEdges(edgesForPropagation, // source edges
+ edgePlannedChanges, // map src edge to change set
+ edgesUpdated); // list of updated edges
+
// commit beta' (beta<-betaNew)
Iterator<RefEdge> edgeItr = edgesUpdated.iterator();
while( edgeItr.hasNext() ) {
if( writeDebugDOTs ) {
- writeGraph( debugGraphPrefix+"caller40BeforeShadowMerge",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints );
+ writeGraph(debugGraphPrefix+"caller40BeforeShadowMerge",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints);
}
-
+
// 4) merge shadow nodes so alloc sites are back to k
Iterator<AllocSite> asItr = rgCallee.allocSites.iterator();
while( ageNorm < allocationDepth &&
ageShad < allocationDepth ) {
- // first, are there any normal nodes left?
- Integer idNorm = as.getIthOldest( ageNorm );
- HeapRegionNode hrnNorm = id2hrn.get( idNorm );
- if( hrnNorm == null ) {
- // no, this age of normal node not in the caller graph
- ageNorm++;
- continue;
- }
-
- // yes, a normal node exists, is there an empty shadow
- // "slot" to transfer it onto?
- HeapRegionNode hrnShad = getIthNode( as, ageShad, true );
- if( !hrnShad.isWiped() ) {
- // no, this age of shadow node is not empty
- ageShad++;
- continue;
- }
-
- // yes, this shadow node is empty
- transferOnto( hrnNorm, hrnShad );
- ageNorm++;
- ageShad++;
+ // first, are there any normal nodes left?
+ Integer idNorm = as.getIthOldest(ageNorm);
+ HeapRegionNode hrnNorm = id2hrn.get(idNorm);
+ if( hrnNorm == null ) {
+ // no, this age of normal node not in the caller graph
+ ageNorm++;
+ continue;
+ }
+
+ // yes, a normal node exists, is there an empty shadow
+ // "slot" to transfer it onto?
+ HeapRegionNode hrnShad = getIthNode(as, ageShad, true);
+ if( !hrnShad.isWiped() ) {
+ // no, this age of shadow node is not empty
+ ageShad++;
+ continue;
+ }
+
+ // yes, this shadow node is empty
+ transferOnto(hrnNorm, hrnShad);
+ ageNorm++;
+ ageShad++;
}
// now, while there are still normal nodes but no shadow
// slots, merge normal nodes into the shadow summary
while( ageNorm < allocationDepth ) {
- // first, are there any normal nodes left?
- Integer idNorm = as.getIthOldest( ageNorm );
- HeapRegionNode hrnNorm = id2hrn.get( idNorm );
- if( hrnNorm == null ) {
- // no, this age of normal node not in the caller graph
- ageNorm++;
- continue;
- }
-
- // yes, a normal node exists, so get the shadow summary
- HeapRegionNode summShad = getSummaryNode( as, true );
- mergeIntoSummary( hrnNorm, summShad );
-
- // now tokens in reachability sets need to age also
- Iterator itrAllHRNodes = id2hrn.entrySet().iterator();
- while( itrAllHRNodes.hasNext() ) {
- Map.Entry me = (Map.Entry) itrAllHRNodes.next();
- HeapRegionNode hrnToAge = (HeapRegionNode) me.getValue();
-
- ageTuplesFrom( as, hrnToAge );
-
- Iterator<RefEdge> itrEdges = hrnToAge.iteratorToReferencers();
- while( itrEdges.hasNext() ) {
- ageTuplesFrom( as, itrEdges.next() );
- }
- }
-
- ageNorm++;
+ // first, are there any normal nodes left?
+ Integer idNorm = as.getIthOldest(ageNorm);
+ HeapRegionNode hrnNorm = id2hrn.get(idNorm);
+ if( hrnNorm == null ) {
+ // no, this age of normal node not in the caller graph
+ ageNorm++;
+ continue;
+ }
+
+ // yes, a normal node exists, so get the shadow summary
+ HeapRegionNode summShad = getSummaryNode(as, true);
+ mergeIntoSummary(hrnNorm, summShad);
+
+ // now tokens in reachability sets need to age also
+ Iterator itrAllHRNodes = id2hrn.entrySet().iterator();
+ while( itrAllHRNodes.hasNext() ) {
+ Map.Entry me = (Map.Entry)itrAllHRNodes.next();
+ HeapRegionNode hrnToAge = (HeapRegionNode) me.getValue();
+
+ ageTuplesFrom(as, hrnToAge);
+
+ Iterator<RefEdge> itrEdges = hrnToAge.iteratorToReferencers();
+ while( itrEdges.hasNext() ) {
+ ageTuplesFrom(as, itrEdges.next() );
+ }
+ }
+
+ ageNorm++;
}
// if there is a normal summary, merge it into shadow summary
- Integer idNorm = as.getSummary();
- HeapRegionNode summNorm = id2hrn.get( idNorm );
+ Integer idNorm = as.getSummary();
+ HeapRegionNode summNorm = id2hrn.get(idNorm);
if( summNorm != null ) {
- HeapRegionNode summShad = getSummaryNode( as, true );
- mergeIntoSummary( summNorm, summShad );
+ HeapRegionNode summShad = getSummaryNode(as, true);
+ mergeIntoSummary(summNorm, summShad);
}
-
+
// finally, flip all existing shadow nodes onto the normal
for( int i = 0; i < allocationDepth; ++i ) {
- Integer idShad = as.getIthOldestShadow( i );
- HeapRegionNode hrnShad = id2hrn.get( idShad );
- if( hrnShad != null ) {
- // flip it
- HeapRegionNode hrnNorm = getIthNode( as, i, false );
- assert hrnNorm.isWiped();
- transferOnto( hrnShad, hrnNorm );
- }
+ Integer idShad = as.getIthOldestShadow(i);
+ HeapRegionNode hrnShad = id2hrn.get(idShad);
+ if( hrnShad != null ) {
+ // flip it
+ HeapRegionNode hrnNorm = getIthNode(as, i, false);
+ assert hrnNorm.isWiped();
+ transferOnto(hrnShad, hrnNorm);
+ }
}
-
- Integer idShad = as.getSummaryShadow();
- HeapRegionNode summShad = id2hrn.get( idShad );
+
+ Integer idShad = as.getSummaryShadow();
+ HeapRegionNode summShad = id2hrn.get(idShad);
if( summShad != null ) {
- summNorm = getSummaryNode( as, false );
- transferOnto( summShad, summNorm );
- }
+ summNorm = getSummaryNode(as, false);
+ transferOnto(summShad, summNorm);
+ }
}
if( writeDebugDOTs ) {
- writeGraph( debugGraphPrefix+"caller45BeforeUnshadow",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints );
- }
-
-
+ writeGraph(debugGraphPrefix+"caller45BeforeUnshadow",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints);
+ }
+
+
Iterator itrAllHRNodes = id2hrn.entrySet().iterator();
while( itrAllHRNodes.hasNext() ) {
- Map.Entry me = (Map.Entry) itrAllHRNodes.next();
+ Map.Entry me = (Map.Entry)itrAllHRNodes.next();
HeapRegionNode hrn = (HeapRegionNode) me.getValue();
-
- hrn.setAlpha( unshadow( hrn.getAlpha() ) );
-
+
+ hrn.setAlpha(unshadow(hrn.getAlpha() ) );
+
Iterator<RefEdge> itrEdges = hrn.iteratorToReferencers();
while( itrEdges.hasNext() ) {
- RefEdge re = itrEdges.next();
- re.setBeta( unshadow( re.getBeta() ) );
+ RefEdge re = itrEdges.next();
+ re.setBeta(unshadow(re.getBeta() ) );
}
}
-
+
if( writeDebugDOTs ) {
- writeGraph( debugGraphPrefix+"caller50BeforeGlobalSweep",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints );
+ writeGraph(debugGraphPrefix+"caller50BeforeGlobalSweep",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints);
}
if( !DISABLE_GLOBAL_SWEEP ) {
globalSweep();
}
-
+
if( writeDebugDOTs ) {
- writeGraph( debugGraphPrefix+"caller90AfterTransfer",
- resolveMethodDebugDOTwriteLabels,
- resolveMethodDebugDOTselectTemps,
- resolveMethodDebugDOTpruneGarbage,
- resolveMethodDebugDOThideReach,
- resolveMethodDebugDOThideSubsetReach,
- resolveMethodDebugDOThidePreds,
- resolveMethodDebugDOThideEdgeTaints );
+ writeGraph(debugGraphPrefix+"caller90AfterTransfer",
+ resolveMethodDebugDOTwriteLabels,
+ resolveMethodDebugDOTselectTemps,
+ resolveMethodDebugDOTpruneGarbage,
+ resolveMethodDebugDOThideReach,
+ resolveMethodDebugDOThideSubsetReach,
+ resolveMethodDebugDOThidePreds,
+ resolveMethodDebugDOThideEdgeTaints);
}
- }
+ }
+
-
////////////////////////////////////////////////////
//
// predicates efficiently
//
////////////////////////////////////////////////////
- public void abstractGarbageCollect( Set<TempDescriptor> liveSet ) {
+ public void abstractGarbageCollect(Set<TempDescriptor> liveSet) {
// calculate a root set, will be different for Java
// version of analysis versus Bamboo version
// set, and do iterating on a copy, so we can remove
// dead variables while we're at this
Iterator makeCopyItr = td2vn.entrySet().iterator();
- Set entrysCopy = new HashSet();
+ Set entrysCopy = new HashSet();
while( makeCopyItr.hasNext() ) {
- entrysCopy.add( makeCopyItr.next() );
+ entrysCopy.add(makeCopyItr.next() );
}
-
+
Iterator eItr = entrysCopy.iterator();
while( eItr.hasNext() ) {
- Map.Entry me = (Map.Entry) eItr.next();
+ Map.Entry me = (Map.Entry)eItr.next();
TempDescriptor td = (TempDescriptor) me.getKey();
- VariableNode vn = (VariableNode) me.getValue();
+ VariableNode vn = (VariableNode) me.getValue();
- if( liveSet.contains( td ) ) {
- toVisit.add( vn );
+ if( liveSet.contains(td) ) {
+ toVisit.add(vn);
} else {
- // dead var, remove completely from graph
- td2vn.remove( td );
- clearRefEdgesFrom( vn, null, null, true );
+ // dead var, remove completely from graph
+ td2vn.remove(td);
+ clearRefEdgesFrom(vn, null, null, true);
}
}
// everything visited in a traversal is
// considered abstractly live
Set<RefSrcNode> visited = new HashSet<RefSrcNode>();
-
+
while( !toVisit.isEmpty() ) {
RefSrcNode rsn = toVisit.iterator().next();
- toVisit.remove( rsn );
- visited.add( rsn );
-
+ toVisit.remove(rsn);
+ visited.add(rsn);
+
Iterator<RefEdge> hrnItr = rsn.iteratorToReferencees();
while( hrnItr.hasNext() ) {
- RefEdge edge = hrnItr.next();
- HeapRegionNode hrn = edge.getDst();
-
- if( !visited.contains( hrn ) ) {
- toVisit.add( hrn );
- }
+ RefEdge edge = hrnItr.next();
+ HeapRegionNode hrn = edge.getDst();
+
+ if( !visited.contains(hrn) ) {
+ toVisit.add(hrn);
+ }
}
}
Set<HeapRegionNode> hrnAllPrior = new HashSet<HeapRegionNode>();
Iterator<HeapRegionNode> hrnItr = id2hrn.values().iterator();
while( hrnItr.hasNext() ) {
- hrnAllPrior.add( hrnItr.next() );
+ hrnAllPrior.add(hrnItr.next() );
}
Iterator<HeapRegionNode> hrnAllItr = hrnAllPrior.iterator();
while( hrnAllItr.hasNext() ) {
HeapRegionNode hrn = hrnAllItr.next();
- if( !visited.contains( hrn ) ) {
-
- // heap region nodes are compared across ReachGraph
- // objects by their integer ID, so when discarding
- // garbage nodes we must also discard entries in
- // the ID -> heap region hashtable.
- id2hrn.remove( hrn.getID() );
-
- // RefEdge objects are two-way linked between
- // nodes, so when a node is identified as garbage,
- // actively clear references to and from it so
- // live nodes won't have dangling RefEdge's
- wipeOut( hrn, true );
-
- // if we just removed the last node from an allocation
- // site, it should be taken out of the ReachGraph's list
- AllocSite as = hrn.getAllocSite();
- if( !hasNodesOf( as ) ) {
- allocSites.remove( as );
- }
+ if( !visited.contains(hrn) ) {
+
+ // heap region nodes are compared across ReachGraph
+ // objects by their integer ID, so when discarding
+ // garbage nodes we must also discard entries in
+ // the ID -> heap region hashtable.
+ id2hrn.remove(hrn.getID() );
+
+ // RefEdge objects are two-way linked between
+ // nodes, so when a node is identified as garbage,
+ // actively clear references to and from it so
+ // live nodes won't have dangling RefEdge's
+ wipeOut(hrn, true);
+
+ // if we just removed the last node from an allocation
+ // site, it should be taken out of the ReachGraph's list
+ AllocSite as = hrn.getAllocSite();
+ if( !hasNodesOf(as) ) {
+ allocSites.remove(as);
+ }
}
}
}
- protected boolean hasNodesOf( AllocSite as ) {
- if( id2hrn.containsKey( as.getSummary() ) ) {
+ protected boolean hasNodesOf(AllocSite as) {
+ if( id2hrn.containsKey(as.getSummary() ) ) {
return true;
}
for( int i = 0; i < allocationDepth; ++i ) {
- if( id2hrn.containsKey( as.getIthOldest( i ) ) ) {
- return true;
- }
+ if( id2hrn.containsKey(as.getIthOldest(i) ) ) {
+ return true;
+ }
}
return false;
}
// boldB is part of the phase 1 sweep
// it has an in-context table and an out-of-context table
Hashtable< Integer, Hashtable<RefEdge, ReachSet> > boldBic =
- new Hashtable< Integer, Hashtable<RefEdge, ReachSet> >();
+ new Hashtable< Integer, Hashtable<RefEdge, ReachSet> >();
Hashtable< Integer, Hashtable<RefEdge, ReachSet> > boldBooc =
- new Hashtable< Integer, Hashtable<RefEdge, ReachSet> >();
+ new Hashtable< Integer, Hashtable<RefEdge, ReachSet> >();
// visit every heap region to initialize alphaNew and betaNew,
// and make a map of every hrnID to the source nodes it should
// propagate forward from. In-context flagged hrnID's propagate
// from only the in-context node they name, but out-of-context
// ID's may propagate from several out-of-context nodes
- Hashtable< Integer, Set<HeapRegionNode> > icID2srcs =
+ Hashtable< Integer, Set<HeapRegionNode> > icID2srcs =
new Hashtable< Integer, Set<HeapRegionNode> >();
Hashtable< Integer, Set<HeapRegionNode> > oocID2srcs =
Iterator itrHrns = id2hrn.entrySet().iterator();
while( itrHrns.hasNext() ) {
- Map.Entry me = (Map.Entry) itrHrns.next();
- Integer hrnID = (Integer) me.getKey();
+ Map.Entry me = (Map.Entry)itrHrns.next();
+ Integer hrnID = (Integer) me.getKey();
HeapRegionNode hrn = (HeapRegionNode) me.getValue();
-
+
// assert that this node and incoming edges have clean alphaNew
// and betaNew sets, respectively
- assert rsetEmpty.equals( hrn.getAlphaNew() );
+ assert rsetEmpty.equals(hrn.getAlphaNew() );
Iterator<RefEdge> itrRers = hrn.iteratorToReferencers();
while( itrRers.hasNext() ) {
RefEdge edge = itrRers.next();
- assert rsetEmpty.equals( edge.getBetaNew() );
- }
+ assert rsetEmpty.equals(edge.getBetaNew() );
+ }
// make a mapping of IDs to heap regions they propagate from
if( hrn.isFlagged() ) {
- assert !hrn.isOutOfContext();
- assert !icID2srcs.containsKey( hrn.getID() );
-
- // in-context flagged node IDs simply propagate from the
- // node they name
- Set<HeapRegionNode> srcs = new HashSet<HeapRegionNode>();
- srcs.add( hrn );
- icID2srcs.put( hrn.getID(), srcs );
+ assert !hrn.isOutOfContext();
+ assert !icID2srcs.containsKey(hrn.getID() );
+
+ // in-context flagged node IDs simply propagate from the
+ // node they name
+ Set<HeapRegionNode> srcs = new HashSet<HeapRegionNode>();
+ srcs.add(hrn);
+ icID2srcs.put(hrn.getID(), srcs);
}
if( hrn.isOutOfContext() ) {
assert !hrn.isFlagged();
- // the reachability states on an out-of-context
- // node are not really important (combinations of
- // IDs or arity)--what matters is that the states
- // specify which nodes this out-of-context node
- // stands in for. For example, if the state [17?, 19*]
- // appears on the ooc node, it may serve as a source
- // for node 17? and a source for node 19.
- Iterator<ReachState> stateItr = hrn.getAlpha().iterator();
- while( stateItr.hasNext() ) {
- ReachState state = stateItr.next();
-
- Iterator<ReachTuple> rtItr = state.iterator();
- while( rtItr.hasNext() ) {
- ReachTuple rt = rtItr.next();
- assert rt.isOutOfContext();
-
- Set<HeapRegionNode> srcs = oocID2srcs.get( rt.getHrnID() );
- if( srcs == null ) {
- srcs = new HashSet<HeapRegionNode>();
- }
- srcs.add( hrn );
- oocID2srcs.put( rt.getHrnID(), srcs );
- }
- }
+ // the reachability states on an out-of-context
+ // node are not really important (combinations of
+ // IDs or arity)--what matters is that the states
+ // specify which nodes this out-of-context node
+ // stands in for. For example, if the state [17?, 19*]
+ // appears on the ooc node, it may serve as a source
+ // for node 17? and a source for node 19.
+ Iterator<ReachState> stateItr = hrn.getAlpha().iterator();
+ while( stateItr.hasNext() ) {
+ ReachState state = stateItr.next();
+
+ Iterator<ReachTuple> rtItr = state.iterator();
+ while( rtItr.hasNext() ) {
+ ReachTuple rt = rtItr.next();
+ assert rt.isOutOfContext();
+
+ Set<HeapRegionNode> srcs = oocID2srcs.get(rt.getHrnID() );
+ if( srcs == null ) {
+ srcs = new HashSet<HeapRegionNode>();
+ }
+ srcs.add(hrn);
+ oocID2srcs.put(rt.getHrnID(), srcs);
+ }
+ }
}
}
// node traversal, propagating from every source
while( !icID2srcs.isEmpty() || !oocID2srcs.isEmpty() ) {
- Integer hrnID;
+ Integer hrnID;
Set<HeapRegionNode> srcs;
- boolean inContext;
+ boolean inContext;
if( !icID2srcs.isEmpty() ) {
- Map.Entry me = (Map.Entry) icID2srcs.entrySet().iterator().next();
- hrnID = (Integer) me.getKey();
- srcs = (Set<HeapRegionNode>) me.getValue();
- inContext = true;
- icID2srcs.remove( hrnID );
+ Map.Entry me = (Map.Entry)icID2srcs.entrySet().iterator().next();
+ hrnID = (Integer) me.getKey();
+ srcs = (Set<HeapRegionNode>)me.getValue();
+ inContext = true;
+ icID2srcs.remove(hrnID);
} else {
- assert !oocID2srcs.isEmpty();
+ assert !oocID2srcs.isEmpty();
- Map.Entry me = (Map.Entry) oocID2srcs.entrySet().iterator().next();
- hrnID = (Integer) me.getKey();
- srcs = (Set<HeapRegionNode>) me.getValue();
- inContext = false;
- oocID2srcs.remove( hrnID );
+ Map.Entry me = (Map.Entry)oocID2srcs.entrySet().iterator().next();
+ hrnID = (Integer) me.getKey();
+ srcs = (Set<HeapRegionNode>)me.getValue();
+ inContext = false;
+ oocID2srcs.remove(hrnID);
}
Hashtable<RefEdge, ReachSet> boldB_f =
new Hashtable<RefEdge, ReachSet>();
-
+
Set<RefEdge> workSetEdges = new HashSet<RefEdge>();
Iterator<HeapRegionNode> hrnItr = srcs.iterator();
while( hrnItr.hasNext() ) {
- HeapRegionNode hrn = hrnItr.next();
-
- assert workSetEdges.isEmpty();
-
- // initial boldB_f constraints
- Iterator<RefEdge> itrRees = hrn.iteratorToReferencees();
- while( itrRees.hasNext() ) {
- RefEdge edge = itrRees.next();
-
- assert !boldB_f.containsKey( edge );
- boldB_f.put( edge, edge.getBeta() );
-
- assert !workSetEdges.contains( edge );
- workSetEdges.add( edge );
- }
-
- // enforce the boldB_f constraint at edges until we reach a fixed point
- while( !workSetEdges.isEmpty() ) {
- RefEdge edge = workSetEdges.iterator().next();
- workSetEdges.remove( edge );
-
- Iterator<RefEdge> itrPrime = edge.getDst().iteratorToReferencees();
- while( itrPrime.hasNext() ) {
- RefEdge edgePrime = itrPrime.next();
-
- ReachSet prevResult = boldB_f.get( edgePrime );
- ReachSet intersection = Canonical.intersection( boldB_f.get( edge ),
- edgePrime.getBeta()
- );
-
- if( prevResult == null ||
- Canonical.unionORpreds( prevResult,
- intersection ).size()
- > prevResult.size()
- ) {
-
- if( prevResult == null ) {
- boldB_f.put( edgePrime,
- Canonical.unionORpreds( edgePrime.getBeta(),
- intersection
- )
- );
- } else {
- boldB_f.put( edgePrime,
- Canonical.unionORpreds( prevResult,
- intersection
- )
- );
- }
- workSetEdges.add( edgePrime );
- }
- }
- }
+ HeapRegionNode hrn = hrnItr.next();
+
+ assert workSetEdges.isEmpty();
+
+ // initial boldB_f constraints
+ Iterator<RefEdge> itrRees = hrn.iteratorToReferencees();
+ while( itrRees.hasNext() ) {
+ RefEdge edge = itrRees.next();
+
+ assert !boldB_f.containsKey(edge);
+ boldB_f.put(edge, edge.getBeta() );
+
+ assert !workSetEdges.contains(edge);
+ workSetEdges.add(edge);
+ }
+
+ // enforce the boldB_f constraint at edges until we reach a fixed point
+ while( !workSetEdges.isEmpty() ) {
+ RefEdge edge = workSetEdges.iterator().next();
+ workSetEdges.remove(edge);
+
+ Iterator<RefEdge> itrPrime = edge.getDst().iteratorToReferencees();
+ while( itrPrime.hasNext() ) {
+ RefEdge edgePrime = itrPrime.next();
+
+ ReachSet prevResult = boldB_f.get(edgePrime);
+ ReachSet intersection = Canonical.intersection(boldB_f.get(edge),
+ edgePrime.getBeta()
+ );
+
+ if( prevResult == null ||
+ Canonical.unionORpreds(prevResult,
+ intersection).size()
+ > prevResult.size()
+ ) {
+
+ if( prevResult == null ) {
+ boldB_f.put(edgePrime,
+ Canonical.unionORpreds(edgePrime.getBeta(),
+ intersection
+ )
+ );
+ } else {
+ boldB_f.put(edgePrime,
+ Canonical.unionORpreds(prevResult,
+ intersection
+ )
+ );
+ }
+ workSetEdges.add(edgePrime);
+ }
+ }
+ }
}
-
+
if( inContext ) {
- boldBic.put( hrnID, boldB_f );
+ boldBic.put(hrnID, boldB_f);
} else {
- boldBooc.put( hrnID, boldB_f );
+ boldBooc.put(hrnID, boldB_f);
}
}
itrHrns = id2hrn.entrySet().iterator();
while( itrHrns.hasNext() ) {
- Map.Entry me = (Map.Entry) itrHrns.next();
- Integer hrnID = (Integer) me.getKey();
+ Map.Entry me = (Map.Entry)itrHrns.next();
+ Integer hrnID = (Integer) me.getKey();
HeapRegionNode hrn = (HeapRegionNode) me.getValue();
-
- // out-of-context nodes don't participate in the
+
+ // out-of-context nodes don't participate in the
// global sweep, they serve as sources for the pass
// performed above
if( hrn.isOutOfContext() ) {
- continue;
+ continue;
}
// the inherent states of a region are the exception
// to removal as the global sweep prunes
- ReachTuple rtException = ReachTuple.factory( hrnID,
- !hrn.isSingleObject(),
- ReachTuple.ARITY_ONE,
- false // out-of-context
- );
+ ReachTuple rtException = ReachTuple.factory(hrnID,
+ !hrn.isSingleObject(),
+ ReachTuple.ARITY_ONE,
+ false // out-of-context
+ );
ChangeSet cts = ChangeSet.factory();
// never remove the inherent hrnID from a flagged region
// because it is trivially satisfied
- if( hrn.isFlagged() ) {
+ if( hrn.isFlagged() ) {
if( rtOld == rtException ) {
continue;
}
while( incidentEdgeItr.hasNext() ) {
RefEdge incidentEdge = incidentEdgeItr.next();
- Hashtable<RefEdge, ReachSet> B;
- if( rtOld.isOutOfContext() ) {
- B = boldBooc.get( rtOld.getHrnID() );
- } else {
+ Hashtable<RefEdge, ReachSet> B;
+ if( rtOld.isOutOfContext() ) {
+ B = boldBooc.get(rtOld.getHrnID() );
+ } else {
- if( !id2hrn.containsKey( rtOld.getHrnID() ) ) {
- // let symbols not in the graph get pruned
- break;
- }
+ if( !id2hrn.containsKey(rtOld.getHrnID() ) ) {
+ // let symbols not in the graph get pruned
+ break;
+ }
- B = boldBic.get( rtOld.getHrnID() );
- }
+ B = boldBic.get(rtOld.getHrnID() );
+ }
- if( B != null ) {
- ReachSet boldB_rtOld_incident = B.get( incidentEdge );
- if( boldB_rtOld_incident != null &&
- boldB_rtOld_incident.containsIgnorePreds( stateOld ) != null
- ) {
- foundState = true;
- }
- }
+ if( B != null ) {
+ ReachSet boldB_rtOld_incident = B.get(incidentEdge);
+ if( boldB_rtOld_incident != null &&
+ boldB_rtOld_incident.containsIgnorePreds(stateOld) != null
+ ) {
+ foundState = true;
+ }
+ }
}
-
+
if( !foundState ) {
- markedHrnIDs = Canonical.addUpArity( markedHrnIDs, rtOld );
+ markedHrnIDs = Canonical.addUpArity(markedHrnIDs, rtOld);
}
}
// if there is nothing marked, just move on
if( markedHrnIDs.isEmpty() ) {
- hrn.setAlphaNew( Canonical.add( hrn.getAlphaNew(),
- stateOld
- )
- );
+ hrn.setAlphaNew(Canonical.add(hrn.getAlphaNew(),
+ stateOld
+ )
+ );
continue;
}
while( rtItr.hasNext() ) {
ReachTuple rtOld = rtItr.next();
- if( !markedHrnIDs.containsTuple( rtOld ) ) {
- statePruned = Canonical.addUpArity( statePruned, rtOld );
+ if( !markedHrnIDs.containsTuple(rtOld) ) {
+ statePruned = Canonical.addUpArity(statePruned, rtOld);
}
}
- assert !stateOld.equals( statePruned );
-
- hrn.setAlphaNew( Canonical.add( hrn.getAlphaNew(),
- statePruned
- )
- );
- ChangeTuple ct = ChangeTuple.factory( stateOld,
- statePruned
- );
- cts = Canonical.add( cts, ct );
+ assert !stateOld.equals(statePruned);
+
+ hrn.setAlphaNew(Canonical.add(hrn.getAlphaNew(),
+ statePruned
+ )
+ );
+ ChangeTuple ct = ChangeTuple.factory(stateOld,
+ statePruned
+ );
+ cts = Canonical.add(cts, ct);
}
// throw change tuple set on all incident edges
Iterator<RefEdge> incidentEdgeItr = hrn.iteratorToReferencers();
while( incidentEdgeItr.hasNext() ) {
RefEdge incidentEdge = incidentEdgeItr.next();
-
- edgesForPropagation.add( incidentEdge );
-
- if( edgePlannedChanges.get( incidentEdge ) == null ) {
- edgePlannedChanges.put( incidentEdge, cts );
- } else {
- edgePlannedChanges.put(
- incidentEdge,
- Canonical.union( edgePlannedChanges.get( incidentEdge ),
- cts
- )
- );
+
+ edgesForPropagation.add(incidentEdge);
+
+ if( edgePlannedChanges.get(incidentEdge) == null ) {
+ edgePlannedChanges.put(incidentEdge, cts);
+ } else {
+ edgePlannedChanges.put(
+ incidentEdge,
+ Canonical.union(edgePlannedChanges.get(incidentEdge),
+ cts
+ )
+ );
}
}
}
}
-
+
HashSet<RefEdge> edgesUpdated = new HashSet<RefEdge>();
- propagateTokensOverEdges( edgesForPropagation,
- edgePlannedChanges,
- edgesUpdated );
+ propagateTokensOverEdges(edgesForPropagation,
+ edgePlannedChanges,
+ edgesUpdated);
// at the end of the 1st phase reference edges have
// beta, betaNew that correspond to beta and betaR
// as sources of reach states for the sweep, not part
// of the changes
if( hrn.isOutOfContext() ) {
- assert hrn.getAlphaNew().equals( rsetEmpty );
+ assert hrn.getAlphaNew().equals(rsetEmpty);
} else {
- hrn.applyAlphaNew();
+ hrn.applyAlphaNew();
}
Iterator<RefEdge> itrRes = hrn.iteratorToReferencers();
while( itrRes.hasNext() ) {
- res.add( itrRes.next() );
+ res.add(itrRes.next() );
}
}
- // 2nd phase
+ // 2nd phase
Iterator<RefEdge> edgeItr = res.iterator();
while( edgeItr.hasNext() ) {
- RefEdge edge = edgeItr.next();
+ RefEdge edge = edgeItr.next();
HeapRegionNode hrn = edge.getDst();
// commit results of last phase
- if( edgesUpdated.contains( edge ) ) {
+ if( edgesUpdated.contains(edge) ) {
edge.applyBetaNew();
}
// compute intial condition of 2nd phase
- edge.setBetaNew( Canonical.intersection( edge.getBeta(),
- hrn.getAlpha()
- )
- );
+ edge.setBetaNew(Canonical.intersection(edge.getBeta(),
+ hrn.getAlpha()
+ )
+ );
}
-
+
// every edge in the graph is the initial workset
Set<RefEdge> edgeWorkSet = (Set) res.clone();
while( !edgeWorkSet.isEmpty() ) {
RefEdge edgePrime = edgeWorkSet.iterator().next();
- edgeWorkSet.remove( edgePrime );
+ edgeWorkSet.remove(edgePrime);
RefSrcNode rsn = edgePrime.getSrc();
if( !(rsn instanceof HeapRegionNode) ) {
Iterator<RefEdge> itrEdge = hrn.iteratorToReferencers();
while( itrEdge.hasNext() ) {
- RefEdge edge = itrEdge.next();
+ RefEdge edge = itrEdge.next();
ReachSet prevResult = edge.getBetaNew();
assert prevResult != null;
- ReachSet intersection =
- Canonical.intersection( edge.getBeta(),
- edgePrime.getBetaNew()
- );
-
- if( Canonical.unionORpreds( prevResult,
- intersection
- ).size()
- > prevResult.size()
- ) {
-
- edge.setBetaNew(
- Canonical.unionORpreds( prevResult,
- intersection
- )
- );
- edgeWorkSet.add( edge );
- }
- }
+ ReachSet intersection =
+ Canonical.intersection(edge.getBeta(),
+ edgePrime.getBetaNew()
+ );
+
+ if( Canonical.unionORpreds(prevResult,
+ intersection
+ ).size()
+ > prevResult.size()
+ ) {
+
+ edge.setBetaNew(
+ Canonical.unionORpreds(prevResult,
+ intersection
+ )
+ );
+ edgeWorkSet.add(edge);
+ }
+ }
}
// commit beta' (beta<-betaNew)
edgeItr = res.iterator();
while( edgeItr.hasNext() ) {
edgeItr.next().applyBetaNew();
- }
- }
+ }
+ }
// a useful assertion for debugging:
Iterator hrnItr = id2hrn.entrySet().iterator();
while( hrnItr.hasNext() ) {
- Map.Entry me = (Map.Entry) hrnItr.next();
+ Map.Entry me = (Map.Entry)hrnItr.next();
HeapRegionNode hrn = (HeapRegionNode) me.getValue();
{
- Iterator<ReachState> stateItr = hrn.getAlpha().iterator();
- while( stateItr.hasNext() ) {
- ReachState state = stateItr.next();
-
- Iterator<ReachTuple> rtItr = state.iterator();
- while( rtItr.hasNext() ) {
- ReachTuple rt = rtItr.next();
-
- if( !rt.isOutOfContext() ) {
- if( !id2hrn.containsKey( rt.getHrnID() ) ) {
- System.out.println( rt.getHrnID()+" is missing" );
- return false;
- }
- }
- }
- }
+ Iterator<ReachState> stateItr = hrn.getAlpha().iterator();
+ while( stateItr.hasNext() ) {
+ ReachState state = stateItr.next();
+
+ Iterator<ReachTuple> rtItr = state.iterator();
+ while( rtItr.hasNext() ) {
+ ReachTuple rt = rtItr.next();
+
+ if( !rt.isOutOfContext() ) {
+ if( !id2hrn.containsKey(rt.getHrnID() ) ) {
+ System.out.println(rt.getHrnID()+" is missing");
+ return false;
+ }
+ }
+ }
+ }
}
Iterator<RefEdge> edgeItr = hrn.iteratorToReferencers();
while( edgeItr.hasNext() ) {
- RefEdge edge = edgeItr.next();
-
- Iterator<ReachState> stateItr = edge.getBeta().iterator();
- while( stateItr.hasNext() ) {
- ReachState state = stateItr.next();
-
- Iterator<ReachTuple> rtItr = state.iterator();
- while( rtItr.hasNext() ) {
- ReachTuple rt = rtItr.next();
-
- if( !rt.isOutOfContext() ) {
- if( !id2hrn.containsKey( rt.getHrnID() ) ) {
- System.out.println( rt.getHrnID()+" is missing" );
- return false;
- }
- }
- }
- }
+ RefEdge edge = edgeItr.next();
+
+ Iterator<ReachState> stateItr = edge.getBeta().iterator();
+ while( stateItr.hasNext() ) {
+ ReachState state = stateItr.next();
+
+ Iterator<ReachTuple> rtItr = state.iterator();
+ while( rtItr.hasNext() ) {
+ ReachTuple rt = rtItr.next();
+
+ if( !rt.isOutOfContext() ) {
+ if( !id2hrn.containsKey(rt.getHrnID() ) ) {
+ System.out.println(rt.getHrnID()+" is missing");
+ return false;
+ }
+ }
+ }
+ }
}
}
// another useful assertion for debugging
public boolean noEmptyReachSetsInGraph() {
-
+
Iterator hrnItr = id2hrn.entrySet().iterator();
while( hrnItr.hasNext() ) {
- Map.Entry me = (Map.Entry) hrnItr.next();
+ Map.Entry me = (Map.Entry)hrnItr.next();
HeapRegionNode hrn = (HeapRegionNode) me.getValue();
- if( !hrn.isOutOfContext() &&
+ if( !hrn.isOutOfContext() &&
!hrn.isWiped() &&
- hrn.getAlpha().isEmpty()
+ hrn.getAlpha().isEmpty()
) {
- System.out.println( "!!! "+hrn+" has an empty ReachSet !!!" );
- return false;
+ System.out.println("!!! "+hrn+" has an empty ReachSet !!!");
+ return false;
}
Iterator<RefEdge> edgeItr = hrn.iteratorToReferencers();
while( edgeItr.hasNext() ) {
- RefEdge edge = edgeItr.next();
+ RefEdge edge = edgeItr.next();
- if( edge.getBeta().isEmpty() ) {
- System.out.println( "!!! "+edge+" has an empty ReachSet !!!" );
- return false;
- }
+ if( edge.getBeta().isEmpty() ) {
+ System.out.println("!!! "+edge+" has an empty ReachSet !!!");
+ return false;
+ }
}
}
-
+
return true;
}
Iterator hrnItr = id2hrn.entrySet().iterator();
while( hrnItr.hasNext() ) {
- Map.Entry me = (Map.Entry) hrnItr.next();
+ Map.Entry me = (Map.Entry)hrnItr.next();
HeapRegionNode hrn = (HeapRegionNode) me.getValue();
{
- Iterator<ReachState> stateItr = hrn.getAlpha().iterator();
- while( stateItr.hasNext() ) {
- ReachState state = stateItr.next();
-
- if( !state.getPreds().equals( predsTrue ) ) {
- return false;
- }
- }
+ Iterator<ReachState> stateItr = hrn.getAlpha().iterator();
+ while( stateItr.hasNext() ) {
+ ReachState state = stateItr.next();
+
+ if( !state.getPreds().equals(predsTrue) ) {
+ return false;
+ }
+ }
}
Iterator<RefEdge> edgeItr = hrn.iteratorToReferencers();
while( edgeItr.hasNext() ) {
- RefEdge edge = edgeItr.next();
+ RefEdge edge = edgeItr.next();
- Iterator<ReachState> stateItr = edge.getBeta().iterator();
- while( stateItr.hasNext() ) {
- ReachState state = stateItr.next();
+ Iterator<ReachState> stateItr = edge.getBeta().iterator();
+ while( stateItr.hasNext() ) {
+ ReachState state = stateItr.next();
- if( !state.getPreds().equals( predsTrue ) ) {
- return false;
- }
- }
+ if( !state.getPreds().equals(predsTrue) ) {
+ return false;
+ }
+ }
}
}
return true;
}
-
+
// merge it into B, so after the operation graph B
// is the final result.
////////////////////////////////////////////////////
- protected void merge( ReachGraph rg ) {
+ protected void merge(ReachGraph rg) {
if( rg == null ) {
return;
}
- mergeNodes ( rg );
- mergeRefEdges ( rg );
- mergeAllocSites ( rg );
- mergeInaccessibleVars( rg );
+ mergeNodes(rg);
+ mergeRefEdges(rg);
+ mergeAllocSites(rg);
+ mergeInaccessibleVars(rg);
}
-
- protected void mergeNodes( ReachGraph rg ) {
+
+ protected void mergeNodes(ReachGraph rg) {
// start with heap region nodes
- Set sA = rg.id2hrn.entrySet();
+ Set sA = rg.id2hrn.entrySet();
Iterator iA = sA.iterator();
while( iA.hasNext() ) {
- Map.Entry meA = (Map.Entry) iA.next();
- Integer idA = (Integer) meA.getKey();
+ Map.Entry meA = (Map.Entry)iA.next();
+ Integer idA = (Integer) meA.getKey();
HeapRegionNode hrnA = (HeapRegionNode) meA.getValue();
// if this graph doesn't have a node the
// incoming graph has, allocate it
- if( !id2hrn.containsKey( idA ) ) {
+ if( !id2hrn.containsKey(idA) ) {
HeapRegionNode hrnB = hrnA.copy();
- id2hrn.put( idA, hrnB );
+ id2hrn.put(idA, hrnB);
} else {
// otherwise this is a node present in both graphs
// so make the new reachability set a union of the
// nodes' reachability sets
- HeapRegionNode hrnB = id2hrn.get( idA );
- hrnB.setAlpha( Canonical.unionORpreds( hrnB.getAlpha(),
- hrnA.getAlpha()
- )
- );
+ HeapRegionNode hrnB = id2hrn.get(idA);
+ hrnB.setAlpha(Canonical.unionORpreds(hrnB.getAlpha(),
+ hrnA.getAlpha()
+ )
+ );
- hrnB.setPreds( Canonical.join( hrnB.getPreds(),
- hrnA.getPreds()
- )
- );
+ hrnB.setPreds(Canonical.join(hrnB.getPreds(),
+ hrnA.getPreds()
+ )
+ );
- if( !hrnA.equals( hrnB ) ) {
- rg.writeGraph( "graphA" );
- this.writeGraph( "graphB" );
- throw new Error( "flagged not matching" );
- }
+ if( !hrnA.equals(hrnB) ) {
+ rg.writeGraph("graphA");
+ this.writeGraph("graphB");
+ throw new Error("flagged not matching");
+ }
sA = rg.td2vn.entrySet();
iA = sA.iterator();
while( iA.hasNext() ) {
- Map.Entry meA = (Map.Entry) iA.next();
+ Map.Entry meA = (Map.Entry)iA.next();
TempDescriptor tdA = (TempDescriptor) meA.getKey();
- VariableNode lnA = (VariableNode) meA.getValue();
+ VariableNode lnA = (VariableNode) meA.getValue();
// if the variable doesn't exist in B, allocate and add it
- VariableNode lnB = getVariableNodeFromTemp( tdA );
+ VariableNode lnB = getVariableNodeFromTemp(tdA);
}
}
- protected void mergeRefEdges( ReachGraph rg ) {
+ protected void mergeRefEdges(ReachGraph rg) {
// between heap regions
- Set sA = rg.id2hrn.entrySet();
+ Set sA = rg.id2hrn.entrySet();
Iterator iA = sA.iterator();
while( iA.hasNext() ) {
- Map.Entry meA = (Map.Entry) iA.next();
- Integer idA = (Integer) meA.getKey();
+ Map.Entry meA = (Map.Entry)iA.next();
+ Integer idA = (Integer) meA.getKey();
HeapRegionNode hrnA = (HeapRegionNode) meA.getValue();
Iterator<RefEdge> heapRegionsItrA = hrnA.iteratorToReferencees();
while( heapRegionsItrA.hasNext() ) {
- RefEdge edgeA = heapRegionsItrA.next();
+ RefEdge edgeA = heapRegionsItrA.next();
HeapRegionNode hrnChildA = edgeA.getDst();
- Integer idChildA = hrnChildA.getID();
+ Integer idChildA = hrnChildA.getID();
// at this point we know an edge in graph A exists
// idA -> idChildA, does this exist in B?
- assert id2hrn.containsKey( idA );
- HeapRegionNode hrnB = id2hrn.get( idA );
- RefEdge edgeToMerge = null;
+ assert id2hrn.containsKey(idA);
+ HeapRegionNode hrnB = id2hrn.get(idA);
+ RefEdge edgeToMerge = null;
Iterator<RefEdge> heapRegionsItrB = hrnB.iteratorToReferencees();
while( heapRegionsItrB.hasNext() &&
edgeToMerge == null ) {
- RefEdge edgeB = heapRegionsItrB.next();
+ RefEdge edgeB = heapRegionsItrB.next();
HeapRegionNode hrnChildB = edgeB.getDst();
- Integer idChildB = hrnChildB.getID();
+ Integer idChildB = hrnChildB.getID();
// don't use the RefEdge.equals() here because
// we're talking about existence between graphs,
- // not intragraph equal
- if( idChildB.equals( idChildA ) &&
- edgeB.typeAndFieldEquals( edgeA ) ) {
+ // not intragraph equal
+ if( idChildB.equals(idChildA) &&
+ edgeB.typeAndFieldEquals(edgeA) ) {
edgeToMerge = edgeB;
}
// if the edge from A was not found in B,
// add it to B.
if( edgeToMerge == null ) {
- assert id2hrn.containsKey( idChildA );
- HeapRegionNode hrnChildB = id2hrn.get( idChildA );
+ assert id2hrn.containsKey(idChildA);
+ HeapRegionNode hrnChildB = id2hrn.get(idChildA);
edgeToMerge = edgeA.copy();
- edgeToMerge.setSrc( hrnB );
- edgeToMerge.setDst( hrnChildB );
- addRefEdge( hrnB, hrnChildB, edgeToMerge );
+ edgeToMerge.setSrc(hrnB);
+ edgeToMerge.setDst(hrnChildB);
+ addRefEdge(hrnB, hrnChildB, edgeToMerge);
}
// otherwise, the edge already existed in both graphs
// so merge their reachability sets
// just replace this beta set with the union
assert edgeToMerge != null;
edgeToMerge.setBeta(
- Canonical.unionORpreds( edgeToMerge.getBeta(),
- edgeA.getBeta()
- )
- );
- edgeToMerge.setPreds(
- Canonical.join( edgeToMerge.getPreds(),
- edgeA.getPreds()
- )
- );
- edgeToMerge.setTaints(
- Canonical.union( edgeToMerge.getTaints(),
- edgeA.getTaints()
- )
- );
+ Canonical.unionORpreds(edgeToMerge.getBeta(),
+ edgeA.getBeta()
+ )
+ );
+ edgeToMerge.setPreds(
+ Canonical.join(edgeToMerge.getPreds(),
+ edgeA.getPreds()
+ )
+ );
+ edgeToMerge.setTaints(
+ Canonical.union(edgeToMerge.getTaints(),
+ edgeA.getTaints()
+ )
+ );
}
}
}
sA = rg.td2vn.entrySet();
iA = sA.iterator();
while( iA.hasNext() ) {
- Map.Entry meA = (Map.Entry) iA.next();
+ Map.Entry meA = (Map.Entry)iA.next();
TempDescriptor tdA = (TempDescriptor) meA.getKey();
- VariableNode vnA = (VariableNode) meA.getValue();
+ VariableNode vnA = (VariableNode) meA.getValue();
Iterator<RefEdge> heapRegionsItrA = vnA.iteratorToReferencees();
while( heapRegionsItrA.hasNext() ) {
- RefEdge edgeA = heapRegionsItrA.next();
+ RefEdge edgeA = heapRegionsItrA.next();
HeapRegionNode hrnChildA = edgeA.getDst();
- Integer idChildA = hrnChildA.getID();
+ Integer idChildA = hrnChildA.getID();
// at this point we know an edge in graph A exists
// tdA -> idChildA, does this exist in B?
- assert td2vn.containsKey( tdA );
- VariableNode vnB = td2vn.get( tdA );
- RefEdge edgeToMerge = null;
+ assert td2vn.containsKey(tdA);
+ VariableNode vnB = td2vn.get(tdA);
+ RefEdge edgeToMerge = null;
Iterator<RefEdge> heapRegionsItrB = vnB.iteratorToReferencees();
while( heapRegionsItrB.hasNext() &&
edgeToMerge == null ) {
- RefEdge edgeB = heapRegionsItrB.next();
+ RefEdge edgeB = heapRegionsItrB.next();
HeapRegionNode hrnChildB = edgeB.getDst();
- Integer idChildB = hrnChildB.getID();
+ Integer idChildB = hrnChildB.getID();
// don't use the RefEdge.equals() here because
// we're talking about existence between graphs
- if( idChildB.equals( idChildA ) &&
- edgeB.typeAndFieldEquals( edgeA ) ) {
+ if( idChildB.equals(idChildA) &&
+ edgeB.typeAndFieldEquals(edgeA) ) {
edgeToMerge = edgeB;
}
// if the edge from A was not found in B,
// add it to B.
if( edgeToMerge == null ) {
- assert id2hrn.containsKey( idChildA );
- HeapRegionNode hrnChildB = id2hrn.get( idChildA );
+ assert id2hrn.containsKey(idChildA);
+ HeapRegionNode hrnChildB = id2hrn.get(idChildA);
edgeToMerge = edgeA.copy();
- edgeToMerge.setSrc( vnB );
- edgeToMerge.setDst( hrnChildB );
- addRefEdge( vnB, hrnChildB, edgeToMerge );
+ edgeToMerge.setSrc(vnB);
+ edgeToMerge.setDst(hrnChildB);
+ addRefEdge(vnB, hrnChildB, edgeToMerge);
}
// otherwise, the edge already existed in both graphs
// so merge their reachability sets
else {
// just replace this beta set with the union
- edgeToMerge.setBeta( Canonical.unionORpreds( edgeToMerge.getBeta(),
- edgeA.getBeta()
- )
- );
- edgeToMerge.setPreds( Canonical.join( edgeToMerge.getPreds(),
- edgeA.getPreds()
- )
- );
- edgeToMerge.setTaints(
- Canonical.union( edgeToMerge.getTaints(),
- edgeA.getTaints()
- )
- );
+ edgeToMerge.setBeta(Canonical.unionORpreds(edgeToMerge.getBeta(),
+ edgeA.getBeta()
+ )
+ );
+ edgeToMerge.setPreds(Canonical.join(edgeToMerge.getPreds(),
+ edgeA.getPreds()
+ )
+ );
+ edgeToMerge.setTaints(
+ Canonical.union(edgeToMerge.getTaints(),
+ edgeA.getTaints()
+ )
+ );
}
}
}
}
- protected void mergeAllocSites( ReachGraph rg ) {
- allocSites.addAll( rg.allocSites );
+ protected void mergeAllocSites(ReachGraph rg) {
+ allocSites.addAll(rg.allocSites);
}
-
- protected void mergeInaccessibleVars( ReachGraph rg ){
+
+ protected void mergeInaccessibleVars(ReachGraph rg) {
inaccessibleVars.addAll(rg.inaccessibleVars);
}
// the only way to know that all edges in both graphs
// are equally present is to iterate over both data
// structures and compare against the other graph.
- public boolean equals( ReachGraph rg ) {
+ public boolean equals(ReachGraph rg) {
if( rg == null ) {
if( dbgEquals ) {
- System.out.println( "rg is null" );
+ System.out.println("rg is null");
}
return false;
}
-
- if( !areHeapRegionNodesEqual( rg ) ) {
+
+ if( !areHeapRegionNodesEqual(rg) ) {
if( dbgEquals ) {
- System.out.println( "hrn not equal" );
+ System.out.println("hrn not equal");
}
return false;
}
- if( !areVariableNodesEqual( rg ) ) {
+ if( !areVariableNodesEqual(rg) ) {
if( dbgEquals ) {
- System.out.println( "vars not equal" );
+ System.out.println("vars not equal");
}
return false;
}
- if( !areRefEdgesEqual( rg ) ) {
+ if( !areRefEdgesEqual(rg) ) {
if( dbgEquals ) {
- System.out.println( "edges not equal" );
+ System.out.println("edges not equal");
}
return false;
}
-
- if( !inaccessibleVars.equals(rg.inaccessibleVars) ){
+
+ if( !inaccessibleVars.equals(rg.inaccessibleVars) ) {
return false;
}
// if everything is equal up to this point,
// assert that allocSites is also equal--
// this data is redundant but kept for efficiency
- assert allocSites.equals( rg.allocSites );
+ assert allocSites.equals(rg.allocSites);
return true;
}
-
- protected boolean areHeapRegionNodesEqual( ReachGraph rg ) {
- if( !areallHRNinAalsoinBandequal( this, rg ) ) {
+ protected boolean areHeapRegionNodesEqual(ReachGraph rg) {
+
+ if( !areallHRNinAalsoinBandequal(this, rg) ) {
return false;
}
- if( !areallHRNinAalsoinBandequal( rg, this ) ) {
+ if( !areallHRNinAalsoinBandequal(rg, this) ) {
return false;
}
return true;
}
- static protected boolean areallHRNinAalsoinBandequal( ReachGraph rgA,
- ReachGraph rgB ) {
- Set sA = rgA.id2hrn.entrySet();
+ static protected boolean areallHRNinAalsoinBandequal(ReachGraph rgA,
+ ReachGraph rgB) {
+ Set sA = rgA.id2hrn.entrySet();
Iterator iA = sA.iterator();
while( iA.hasNext() ) {
- Map.Entry meA = (Map.Entry) iA.next();
- Integer idA = (Integer) meA.getKey();
+ Map.Entry meA = (Map.Entry)iA.next();
+ Integer idA = (Integer) meA.getKey();
HeapRegionNode hrnA = (HeapRegionNode) meA.getValue();
- if( !rgB.id2hrn.containsKey( idA ) ) {
+ if( !rgB.id2hrn.containsKey(idA) ) {
return false;
}
- HeapRegionNode hrnB = rgB.id2hrn.get( idA );
- if( !hrnA.equalsIncludingAlphaAndPreds( hrnB ) ) {
+ HeapRegionNode hrnB = rgB.id2hrn.get(idA);
+ if( !hrnA.equalsIncludingAlphaAndPreds(hrnB) ) {
return false;
}
}
-
+
return true;
}
- protected boolean areVariableNodesEqual( ReachGraph rg ) {
+ protected boolean areVariableNodesEqual(ReachGraph rg) {
- if( !areallVNinAalsoinBandequal( this, rg ) ) {
+ if( !areallVNinAalsoinBandequal(this, rg) ) {
return false;
}
- if( !areallVNinAalsoinBandequal( rg, this ) ) {
+ if( !areallVNinAalsoinBandequal(rg, this) ) {
return false;
}
return true;
}
- static protected boolean areallVNinAalsoinBandequal( ReachGraph rgA,
- ReachGraph rgB ) {
- Set sA = rgA.td2vn.entrySet();
+ static protected boolean areallVNinAalsoinBandequal(ReachGraph rgA,
+ ReachGraph rgB) {
+ Set sA = rgA.td2vn.entrySet();
Iterator iA = sA.iterator();
while( iA.hasNext() ) {
- Map.Entry meA = (Map.Entry) iA.next();
+ Map.Entry meA = (Map.Entry)iA.next();
TempDescriptor tdA = (TempDescriptor) meA.getKey();
- if( !rgB.td2vn.containsKey( tdA ) ) {
+ if( !rgB.td2vn.containsKey(tdA) ) {
return false;
}
}
}
- protected boolean areRefEdgesEqual( ReachGraph rg ) {
- if( !areallREinAandBequal( this, rg ) ) {
+ protected boolean areRefEdgesEqual(ReachGraph rg) {
+ if( !areallREinAandBequal(this, rg) ) {
return false;
}
- if( !areallREinAandBequal( rg, this ) ) {
+ if( !areallREinAandBequal(rg, this) ) {
return false;
- }
+ }
return true;
}
- static protected boolean areallREinAandBequal( ReachGraph rgA,
- ReachGraph rgB ) {
+ static protected boolean areallREinAandBequal(ReachGraph rgA,
+ ReachGraph rgB) {
// check all the heap region->heap region edges
- Set sA = rgA.id2hrn.entrySet();
+ Set sA = rgA.id2hrn.entrySet();
Iterator iA = sA.iterator();
while( iA.hasNext() ) {
- Map.Entry meA = (Map.Entry) iA.next();
- Integer idA = (Integer) meA.getKey();
+ Map.Entry meA = (Map.Entry)iA.next();
+ Integer idA = (Integer) meA.getKey();
HeapRegionNode hrnA = (HeapRegionNode) meA.getValue();
// we should have already checked that the same
// heap regions exist in both graphs
- assert rgB.id2hrn.containsKey( idA );
+ assert rgB.id2hrn.containsKey(idA);
- if( !areallREfromAequaltoB( rgA, hrnA, rgB ) ) {
+ if( !areallREfromAequaltoB(rgA, hrnA, rgB) ) {
return false;
}
// then check every edge in B for presence in A, starting
// from the same parent HeapRegionNode
- HeapRegionNode hrnB = rgB.id2hrn.get( idA );
+ HeapRegionNode hrnB = rgB.id2hrn.get(idA);
- if( !areallREfromAequaltoB( rgB, hrnB, rgA ) ) {
+ if( !areallREfromAequaltoB(rgB, hrnB, rgA) ) {
return false;
}
}
sA = rgA.td2vn.entrySet();
iA = sA.iterator();
while( iA.hasNext() ) {
- Map.Entry meA = (Map.Entry) iA.next();
+ Map.Entry meA = (Map.Entry)iA.next();
TempDescriptor tdA = (TempDescriptor) meA.getKey();
- VariableNode vnA = (VariableNode) meA.getValue();
+ VariableNode vnA = (VariableNode) meA.getValue();
// we should have already checked that the same
// label nodes exist in both graphs
- assert rgB.td2vn.containsKey( tdA );
+ assert rgB.td2vn.containsKey(tdA);
- if( !areallREfromAequaltoB( rgA, vnA, rgB ) ) {
+ if( !areallREfromAequaltoB(rgA, vnA, rgB) ) {
return false;
}
// then check every edge in B for presence in A, starting
// from the same parent VariableNode
- VariableNode vnB = rgB.td2vn.get( tdA );
+ VariableNode vnB = rgB.td2vn.get(tdA);
- if( !areallREfromAequaltoB( rgB, vnB, rgA ) ) {
+ if( !areallREfromAequaltoB(rgB, vnB, rgA) ) {
return false;
}
}
}
- static protected boolean areallREfromAequaltoB( ReachGraph rgA,
- RefSrcNode rnA,
- ReachGraph rgB ) {
+ static protected boolean areallREfromAequaltoB(ReachGraph rgA,
+ RefSrcNode rnA,
+ ReachGraph rgB) {
Iterator<RefEdge> itrA = rnA.iteratorToReferencees();
while( itrA.hasNext() ) {
- RefEdge edgeA = itrA.next();
+ RefEdge edgeA = itrA.next();
HeapRegionNode hrnChildA = edgeA.getDst();
- Integer idChildA = hrnChildA.getID();
+ Integer idChildA = hrnChildA.getID();
- assert rgB.id2hrn.containsKey( idChildA );
+ assert rgB.id2hrn.containsKey(idChildA);
// at this point we know an edge in graph A exists
// rnA -> idChildA, does this exact edge exist in B?
RefSrcNode rnB = null;
if( rnA instanceof HeapRegionNode ) {
HeapRegionNode hrnA = (HeapRegionNode) rnA;
- rnB = rgB.id2hrn.get( hrnA.getID() );
+ rnB = rgB.id2hrn.get(hrnA.getID() );
} else {
VariableNode vnA = (VariableNode) rnA;
- rnB = rgB.td2vn.get( vnA.getTempDescriptor() );
+ rnB = rgB.td2vn.get(vnA.getTempDescriptor() );
}
Iterator<RefEdge> itrB = rnB.iteratorToReferencees();
while( itrB.hasNext() ) {
- RefEdge edgeB = itrB.next();
+ RefEdge edgeB = itrB.next();
HeapRegionNode hrnChildB = edgeB.getDst();
- Integer idChildB = hrnChildB.getID();
+ Integer idChildB = hrnChildB.getID();
- if( idChildA.equals( idChildB ) &&
- edgeA.typeAndFieldEquals( edgeB ) ) {
+ if( idChildA.equals(idChildB) &&
+ edgeA.typeAndFieldEquals(edgeB) ) {
// there is an edge in the right place with the right field,
// but do they have the same attributes?
- if( edgeA.getBeta().equals( edgeB.getBeta() ) &&
- edgeA.equalsPreds( edgeB )
- ) {
+ if( edgeA.getBeta().equals(edgeB.getBeta() ) &&
+ edgeA.equalsPreds(edgeB)
+ ) {
edgeFound = true;
}
}
}
-
+
if( !edgeFound ) {
return false;
}
// can be used to assert monotonicity
- static public boolean isNoSmallerThan( ReachGraph rgA,
- ReachGraph rgB ) {
+ static public boolean isNoSmallerThan(ReachGraph rgA,
+ ReachGraph rgB) {
//System.out.println( "*** Asking if A is no smaller than B ***" );
Iterator iA = rgA.id2hrn.entrySet().iterator();
while( iA.hasNext() ) {
- Map.Entry meA = (Map.Entry) iA.next();
- Integer idA = (Integer) meA.getKey();
+ Map.Entry meA = (Map.Entry)iA.next();
+ Integer idA = (Integer) meA.getKey();
HeapRegionNode hrnA = (HeapRegionNode) meA.getValue();
- if( !rgB.id2hrn.containsKey( idA ) ) {
- System.out.println( " regions smaller" );
+ if( !rgB.id2hrn.containsKey(idA) ) {
+ System.out.println(" regions smaller");
return false;
}
//HeapRegionNode hrnB = rgB.id2hrn.get( idA );
/* NOT EQUALS, NO SMALLER THAN!
- if( !hrnA.equalsIncludingAlphaAndPreds( hrnB ) ) {
- System.out.println( " regions smaller" );
- return false;
- }
- */
+ if( !hrnA.equalsIncludingAlphaAndPreds( hrnB ) ) {
+ System.out.println( " regions smaller" );
+ return false;
+ }
+ */
}
-
+
// this works just fine, no smaller than
- if( !areallVNinAalsoinBandequal( rgA, rgB ) ) {
- System.out.println( " vars smaller:" );
- System.out.println( " A:"+rgA.td2vn.keySet() );
- System.out.println( " B:"+rgB.td2vn.keySet() );
+ if( !areallVNinAalsoinBandequal(rgA, rgB) ) {
+ System.out.println(" vars smaller:");
+ System.out.println(" A:"+rgA.td2vn.keySet() );
+ System.out.println(" B:"+rgB.td2vn.keySet() );
return false;
}
iA = rgA.id2hrn.entrySet().iterator();
while( iA.hasNext() ) {
- Map.Entry meA = (Map.Entry) iA.next();
- Integer idA = (Integer) meA.getKey();
+ Map.Entry meA = (Map.Entry)iA.next();
+ Integer idA = (Integer) meA.getKey();
HeapRegionNode hrnA = (HeapRegionNode) meA.getValue();
Iterator<RefEdge> reItr = hrnA.iteratorToReferencers();
while( reItr.hasNext() ) {
- RefEdge edgeA = reItr.next();
- RefSrcNode rsnA = edgeA.getSrc();
-
- // we already checked that nodes were present
- HeapRegionNode hrnB = rgB.id2hrn.get( hrnA.getID() );
- assert hrnB != null;
-
- RefSrcNode rsnB;
- if( rsnA instanceof VariableNode ) {
- VariableNode vnA = (VariableNode) rsnA;
- rsnB = rgB.td2vn.get( vnA.getTempDescriptor() );
-
- } else {
- HeapRegionNode hrnSrcA = (HeapRegionNode) rsnA;
- rsnB = rgB.id2hrn.get( hrnSrcA.getID() );
- }
- assert rsnB != null;
-
- RefEdge edgeB = rsnB.getReferenceTo( hrnB,
- edgeA.getType(),
- edgeA.getField()
- );
- if( edgeB == null ) {
- System.out.println( " edges smaller:" );
- return false;
- }
-
- // REMEMBER, IS NO SMALLER THAN
- /*
- System.out.println( " edges smaller" );
- return false;
- }
- */
+ RefEdge edgeA = reItr.next();
+ RefSrcNode rsnA = edgeA.getSrc();
+
+ // we already checked that nodes were present
+ HeapRegionNode hrnB = rgB.id2hrn.get(hrnA.getID() );
+ assert hrnB != null;
+
+ RefSrcNode rsnB;
+ if( rsnA instanceof VariableNode ) {
+ VariableNode vnA = (VariableNode) rsnA;
+ rsnB = rgB.td2vn.get(vnA.getTempDescriptor() );
+
+ } else {
+ HeapRegionNode hrnSrcA = (HeapRegionNode) rsnA;
+ rsnB = rgB.id2hrn.get(hrnSrcA.getID() );
+ }
+ assert rsnB != null;
+
+ RefEdge edgeB = rsnB.getReferenceTo(hrnB,
+ edgeA.getType(),
+ edgeA.getField()
+ );
+ if( edgeB == null ) {
+ System.out.println(" edges smaller:");
+ return false;
+ }
+
+ // REMEMBER, IS NO SMALLER THAN
+ /*
+ System.out.println( " edges smaller" );
+ return false;
+ }
+ */
}
}
-
+
return true;
}
// this analysis no longer has the "match anything"
// type which was represented by null
- protected TypeDescriptor mostSpecificType( TypeDescriptor td1,
- TypeDescriptor td2 ) {
+ protected TypeDescriptor mostSpecificType(TypeDescriptor td1,
+ TypeDescriptor td2) {
assert td1 != null;
assert td2 != null;
if( td2.isNull() ) {
return td1;
}
- return typeUtil.mostSpecific( td1, td2 );
+ return typeUtil.mostSpecific(td1, td2);
+ }
+
+ protected TypeDescriptor mostSpecificType(TypeDescriptor td1,
+ TypeDescriptor td2,
+ TypeDescriptor td3) {
+
+ return mostSpecificType(td1,
+ mostSpecificType(td2, td3)
+ );
+ }
+
+ protected TypeDescriptor mostSpecificType(TypeDescriptor td1,
+ TypeDescriptor td2,
+ TypeDescriptor td3,
+ TypeDescriptor td4) {
+
+ return mostSpecificType(mostSpecificType(td1, td2),
+ mostSpecificType(td3, td4)
+ );
}
-
- protected TypeDescriptor mostSpecificType( TypeDescriptor td1,
- TypeDescriptor td2,
- TypeDescriptor td3 ) {
-
- return mostSpecificType( td1,
- mostSpecificType( td2, td3 )
- );
- }
-
- protected TypeDescriptor mostSpecificType( TypeDescriptor td1,
- TypeDescriptor td2,
- TypeDescriptor td3,
- TypeDescriptor td4 ) {
-
- return mostSpecificType( mostSpecificType( td1, td2 ),
- mostSpecificType( td3, td4 )
- );
- }
-
- protected boolean isSuperiorType( TypeDescriptor possibleSuper,
- TypeDescriptor possibleChild ) {
+
+ protected boolean isSuperiorType(TypeDescriptor possibleSuper,
+ TypeDescriptor possibleChild) {
assert possibleSuper != null;
assert possibleChild != null;
-
+
if( possibleSuper.isNull() ||
- possibleChild.isNull() ) {
+ possibleChild.isNull() ) {
return true;
}
- return typeUtil.isSuperorType( possibleSuper, possibleChild );
+ return typeUtil.isSuperorType(possibleSuper, possibleChild);
}
- protected boolean hasMatchingField( HeapRegionNode src,
- RefEdge edge ) {
+ protected boolean hasMatchingField(HeapRegionNode src,
+ RefEdge edge) {
- TypeDescriptor tdSrc = src.getType();
+ TypeDescriptor tdSrc = src.getType();
assert tdSrc != null;
if( tdSrc.isArray() ) {
TypeDescriptor tdSrcDeref = tdSrc.dereference();
assert tdSrcDeref != null;
- if( !typeUtil.isSuperorType( tdSrcDeref, td ) ) {
+ if( !typeUtil.isSuperorType(tdSrcDeref, td) ) {
return false;
}
- return edge.getField().equals( DisjointAnalysis.arrayElementFieldName );
+ return edge.getField().equals(DisjointAnalysis.arrayElementFieldName);
}
// if it's not a class, it doesn't have any fields to match
}
ClassDescriptor cd = tdSrc.getClassDesc();
- while( cd != null ) {
+ while( cd != null ) {
Iterator fieldItr = cd.getFields();
- while( fieldItr.hasNext() ) {
+ while( fieldItr.hasNext() ) {
FieldDescriptor fd = (FieldDescriptor) fieldItr.next();
- if( fd.getType().equals( edge.getType() ) &&
- fd.getSymbol().equals( edge.getField() ) ) {
+ if( fd.getType().equals(edge.getType() ) &&
+ fd.getSymbol().equals(edge.getField() ) ) {
return true;
}
}
-
+
cd = cd.getSuperDesc();
}
-
+
// otherwise it is a class with fields
// but we didn't find a match
return false;
}
- protected boolean hasMatchingType( RefEdge edge,
- HeapRegionNode dst ) {
-
+ protected boolean hasMatchingType(RefEdge edge,
+ HeapRegionNode dst) {
+
// if the region has no type, matches everything
TypeDescriptor tdDst = dst.getType();
assert tdDst != null;
-
+
// if the type is not a class or an array, don't
// match because primitives are copied, no aliases
ClassDescriptor cdDst = tdDst.getClassDesc();
if( cdDst == null && !tdDst.isArray() ) {
return false;
}
-
+
// if the edge type is null, it matches everything
TypeDescriptor tdEdge = edge.getType();
assert tdEdge != null;
-
- return typeUtil.isSuperorType( tdEdge, tdDst );
+
+ return typeUtil.isSuperorType(tdEdge, tdDst);
}
-
+
// the default signature for quick-and-dirty debugging
- public void writeGraph( String graphName ) {
- writeGraph( graphName,
- true, // write labels
- true, // label select
- true, // prune garbage
- false, // hide reachability
- true, // hide subset reachability
- true, // hide predicates
- false, // hide edge taints
- null // in-context boundary
- );
+ public void writeGraph(String graphName) {
+ writeGraph(graphName,
+ true, // write labels
+ true, // label select
+ true, // prune garbage
+ false, // hide reachability
+ true, // hide subset reachability
+ true, // hide predicates
+ false, // hide edge taints
+ null // in-context boundary
+ );
}
- public void writeGraph( String graphName,
- boolean writeLabels,
- boolean labelSelect,
- boolean pruneGarbage,
- boolean hideReachability,
- boolean hideSubsetReachability,
- boolean hidePredicates,
- boolean hideEdgeTaints
- ) {
- writeGraph( graphName,
- writeLabels,
- labelSelect,
- pruneGarbage,
- hideReachability,
- hideSubsetReachability,
- hidePredicates,
- hideEdgeTaints,
- null );
+ public void writeGraph(String graphName,
+ boolean writeLabels,
+ boolean labelSelect,
+ boolean pruneGarbage,
+ boolean hideReachability,
+ boolean hideSubsetReachability,
+ boolean hidePredicates,
+ boolean hideEdgeTaints
+ ) {
+ writeGraph(graphName,
+ writeLabels,
+ labelSelect,
+ pruneGarbage,
+ hideReachability,
+ hideSubsetReachability,
+ hidePredicates,
+ hideEdgeTaints,
+ null);
}
- public void writeGraph( String graphName,
- boolean writeLabels,
- boolean labelSelect,
- boolean pruneGarbage,
- boolean hideReachability,
- boolean hideSubsetReachability,
- boolean hidePredicates,
- boolean hideEdgeTaints,
- Set<Integer> callerNodeIDsCopiedToCallee
- ) {
+ public void writeGraph(String graphName,
+ boolean writeLabels,
+ boolean labelSelect,
+ boolean pruneGarbage,
+ boolean hideReachability,
+ boolean hideSubsetReachability,
+ boolean hidePredicates,
+ boolean hideEdgeTaints,
+ Set<Integer> callerNodeIDsCopiedToCallee
+ ) {
try {
// remove all non-word characters from the graph name so
// the filename and identifier in dot don't cause errors
- graphName = graphName.replaceAll( "[\\W]", "" );
+ graphName = graphName.replaceAll("[\\W]", "");
- BufferedWriter bw =
- new BufferedWriter( new FileWriter( graphName+".dot" ) );
+ BufferedWriter bw =
+ new BufferedWriter(new FileWriter(graphName+".dot") );
+
+ bw.write("digraph "+graphName+" {\n");
- bw.write( "digraph "+graphName+" {\n" );
-
// this is an optional step to form the callee-reachable
// "cut-out" into a DOT cluster for visualization
if( callerNodeIDsCopiedToCallee != null ) {
-
- bw.write( " subgraph cluster0 {\n" );
- bw.write( " color=blue;\n" );
-
- Iterator i = id2hrn.entrySet().iterator();
- while( i.hasNext() ) {
- Map.Entry me = (Map.Entry) i.next();
- HeapRegionNode hrn = (HeapRegionNode) me.getValue();
-
- if( callerNodeIDsCopiedToCallee.contains( hrn.getID() ) ) {
- bw.write( " "+
- hrn.toString()+
- hrn.toStringDOT( hideReachability,
- hideSubsetReachability,
- hidePredicates )+
- ";\n" );
- }
- }
-
- bw.write( " }\n" );
+
+ bw.write(" subgraph cluster0 {\n");
+ bw.write(" color=blue;\n");
+
+ Iterator i = id2hrn.entrySet().iterator();
+ while( i.hasNext() ) {
+ Map.Entry me = (Map.Entry)i.next();
+ HeapRegionNode hrn = (HeapRegionNode) me.getValue();
+
+ if( callerNodeIDsCopiedToCallee.contains(hrn.getID() ) ) {
+ bw.write(" "+
+ hrn.toString()+
+ hrn.toStringDOT(hideReachability,
+ hideSubsetReachability,
+ hidePredicates)+
+ ";\n");
+ }
+ }
+
+ bw.write(" }\n");
}
-
-
+
+
Set<HeapRegionNode> visited = new HashSet<HeapRegionNode>();
-
- // then visit every heap region node
+
+ // then visit every heap region node
Iterator i = id2hrn.entrySet().iterator();
while( i.hasNext() ) {
- Map.Entry me = (Map.Entry) i.next();
- HeapRegionNode hrn = (HeapRegionNode) me.getValue();
-
- // only visit nodes worth writing out--for instance
- // not every node at an allocation is referenced
- // (think of it as garbage-collected), etc.
- if( !pruneGarbage ||
- hrn.isOutOfContext() ||
- (hrn.isFlagged() && hrn.getID() > 0 && !hrn.isWiped()) // a non-shadow flagged node
- ) {
-
- if( !visited.contains( hrn ) ) {
- traverseHeapRegionNodes( hrn,
- bw,
- null,
- visited,
- hideReachability,
- hideSubsetReachability,
- hidePredicates,
- hideEdgeTaints,
- callerNodeIDsCopiedToCallee );
- }
- }
+ Map.Entry me = (Map.Entry)i.next();
+ HeapRegionNode hrn = (HeapRegionNode) me.getValue();
+
+ // only visit nodes worth writing out--for instance
+ // not every node at an allocation is referenced
+ // (think of it as garbage-collected), etc.
+ if( !pruneGarbage ||
+ hrn.isOutOfContext() ||
+ (hrn.isFlagged() && hrn.getID() > 0 && !hrn.isWiped()) // a non-shadow flagged node
+ ) {
+
+ if( !visited.contains(hrn) ) {
+ traverseHeapRegionNodes(hrn,
+ bw,
+ null,
+ visited,
+ hideReachability,
+ hideSubsetReachability,
+ hidePredicates,
+ hideEdgeTaints,
+ callerNodeIDsCopiedToCallee);
+ }
+ }
}
-
- bw.write( " graphTitle[label=\""+graphName+"\",shape=box];\n" );
-
-
+
+ bw.write(" graphTitle[label=\""+graphName+"\",shape=box];\n");
+
+
// then visit every label node, useful for debugging
if( writeLabels ) {
- i = td2vn.entrySet().iterator();
- while( i.hasNext() ) {
- Map.Entry me = (Map.Entry) i.next();
- VariableNode vn = (VariableNode) me.getValue();
-
- if( labelSelect ) {
- String labelStr = vn.getTempDescriptorString();
- if( labelStr.startsWith( "___temp" ) ||
- labelStr.startsWith( "___dst" ) ||
- labelStr.startsWith( "___srctmp" ) ||
- labelStr.startsWith( "___neverused" )
- ) {
- continue;
- }
- }
-
- Iterator<RefEdge> heapRegionsItr = vn.iteratorToReferencees();
- while( heapRegionsItr.hasNext() ) {
- RefEdge edge = heapRegionsItr.next();
- HeapRegionNode hrn = edge.getDst();
-
- if( !visited.contains( hrn ) ) {
- traverseHeapRegionNodes( hrn,
- bw,
- null,
- visited,
- hideReachability,
- hideSubsetReachability,
- hidePredicates,
- hideEdgeTaints,
- callerNodeIDsCopiedToCallee );
- }
-
- bw.write( " "+vn.toString()+
- " -> "+hrn.toString()+
- edge.toStringDOT( hideReachability,
- hideSubsetReachability,
- hidePredicates,
- hideEdgeTaints,
- "" )+
- ";\n" );
- }
- }
+ i = td2vn.entrySet().iterator();
+ while( i.hasNext() ) {
+ Map.Entry me = (Map.Entry)i.next();
+ VariableNode vn = (VariableNode) me.getValue();
+
+ if( labelSelect ) {
+ String labelStr = vn.getTempDescriptorString();
+ if( labelStr.startsWith("___temp") ||
+ labelStr.startsWith("___dst") ||
+ labelStr.startsWith("___srctmp") ||
+ labelStr.startsWith("___neverused")
+ ) {
+ continue;
+ }
+ }
+
+ Iterator<RefEdge> heapRegionsItr = vn.iteratorToReferencees();
+ while( heapRegionsItr.hasNext() ) {
+ RefEdge edge = heapRegionsItr.next();
+ HeapRegionNode hrn = edge.getDst();
+
+ if( !visited.contains(hrn) ) {
+ traverseHeapRegionNodes(hrn,
+ bw,
+ null,
+ visited,
+ hideReachability,
+ hideSubsetReachability,
+ hidePredicates,
+ hideEdgeTaints,
+ callerNodeIDsCopiedToCallee);
+ }
+
+ bw.write(" "+vn.toString()+
+ " -> "+hrn.toString()+
+ edge.toStringDOT(hideReachability,
+ hideSubsetReachability,
+ hidePredicates,
+ hideEdgeTaints,
+ "")+
+ ";\n");
+ }
+ }
}
-
- bw.write( "}\n" );
+
+ bw.write("}\n");
bw.close();
} catch( IOException e ) {
- throw new Error( "Error writing out DOT graph "+graphName );
+ throw new Error("Error writing out DOT graph "+graphName);
}
}
- protected void
- traverseHeapRegionNodes( HeapRegionNode hrn,
- BufferedWriter bw,
- TempDescriptor td,
- Set<HeapRegionNode> visited,
- boolean hideReachability,
- boolean hideSubsetReachability,
- boolean hidePredicates,
- boolean hideEdgeTaints,
- Set<Integer> callerNodeIDsCopiedToCallee
- ) throws java.io.IOException {
-
- if( visited.contains( hrn ) ) {
+ protected void
+ traverseHeapRegionNodes(HeapRegionNode hrn,
+ BufferedWriter bw,
+ TempDescriptor td,
+ Set<HeapRegionNode> visited,
+ boolean hideReachability,
+ boolean hideSubsetReachability,
+ boolean hidePredicates,
+ boolean hideEdgeTaints,
+ Set<Integer> callerNodeIDsCopiedToCallee
+ ) throws java.io.IOException {
+
+ if( visited.contains(hrn) ) {
return;
}
- visited.add( hrn );
+ visited.add(hrn);
// if we're drawing the callee-view subgraph, only
// write out the node info if it hasn't already been
// written
if( callerNodeIDsCopiedToCallee == null ||
- !callerNodeIDsCopiedToCallee.contains( hrn.getID() )
+ !callerNodeIDsCopiedToCallee.contains(hrn.getID() )
) {
- bw.write( " "+
- hrn.toString()+
- hrn.toStringDOT( hideReachability,
- hideSubsetReachability,
- hidePredicates )+
- ";\n" );
+ bw.write(" "+
+ hrn.toString()+
+ hrn.toStringDOT(hideReachability,
+ hideSubsetReachability,
+ hidePredicates)+
+ ";\n");
}
Iterator<RefEdge> childRegionsItr = hrn.iteratorToReferencees();
while( childRegionsItr.hasNext() ) {
- RefEdge edge = childRegionsItr.next();
+ RefEdge edge = childRegionsItr.next();
HeapRegionNode hrnChild = edge.getDst();
if( callerNodeIDsCopiedToCallee != null &&
(edge.getSrc() instanceof HeapRegionNode) ) {
- HeapRegionNode hrnSrc = (HeapRegionNode) edge.getSrc();
- if( callerNodeIDsCopiedToCallee.contains( hrnSrc.getID() ) &&
- callerNodeIDsCopiedToCallee.contains( edge.getDst().getID() )
- ) {
- bw.write( " "+hrn.toString()+
- " -> "+hrnChild.toString()+
- edge.toStringDOT( hideReachability,
- hideSubsetReachability,
- hidePredicates,
- hideEdgeTaints,
- ",color=blue" )+
- ";\n");
- } else if( !callerNodeIDsCopiedToCallee.contains( hrnSrc.getID() ) &&
- callerNodeIDsCopiedToCallee.contains( edge.getDst().getID() )
- ) {
- bw.write( " "+hrn.toString()+
- " -> "+hrnChild.toString()+
- edge.toStringDOT( hideReachability,
- hideSubsetReachability,
- hidePredicates,
- hideEdgeTaints,
- ",color=blue,style=dashed" )+
- ";\n");
- } else {
- bw.write( " "+hrn.toString()+
- " -> "+hrnChild.toString()+
- edge.toStringDOT( hideReachability,
- hideSubsetReachability,
- hidePredicates,
- hideEdgeTaints,
- "" )+
- ";\n");
- }
+ HeapRegionNode hrnSrc = (HeapRegionNode) edge.getSrc();
+ if( callerNodeIDsCopiedToCallee.contains(hrnSrc.getID() ) &&
+ callerNodeIDsCopiedToCallee.contains(edge.getDst().getID() )
+ ) {
+ bw.write(" "+hrn.toString()+
+ " -> "+hrnChild.toString()+
+ edge.toStringDOT(hideReachability,
+ hideSubsetReachability,
+ hidePredicates,
+ hideEdgeTaints,
+ ",color=blue")+
+ ";\n");
+ } else if( !callerNodeIDsCopiedToCallee.contains(hrnSrc.getID() ) &&
+ callerNodeIDsCopiedToCallee.contains(edge.getDst().getID() )
+ ) {
+ bw.write(" "+hrn.toString()+
+ " -> "+hrnChild.toString()+
+ edge.toStringDOT(hideReachability,
+ hideSubsetReachability,
+ hidePredicates,
+ hideEdgeTaints,
+ ",color=blue,style=dashed")+
+ ";\n");
+ } else {
+ bw.write(" "+hrn.toString()+
+ " -> "+hrnChild.toString()+
+ edge.toStringDOT(hideReachability,
+ hideSubsetReachability,
+ hidePredicates,
+ hideEdgeTaints,
+ "")+
+ ";\n");
+ }
} else {
- bw.write( " "+hrn.toString()+
- " -> "+hrnChild.toString()+
- edge.toStringDOT( hideReachability,
- hideSubsetReachability,
- hidePredicates,
- hideEdgeTaints,
- "" )+
- ";\n");
+ bw.write(" "+hrn.toString()+
+ " -> "+hrnChild.toString()+
+ edge.toStringDOT(hideReachability,
+ hideSubsetReachability,
+ hidePredicates,
+ hideEdgeTaints,
+ "")+
+ ";\n");
}
-
- traverseHeapRegionNodes( hrnChild,
- bw,
- td,
- visited,
- hideReachability,
- hideSubsetReachability,
- hidePredicates,
- hideEdgeTaints,
- callerNodeIDsCopiedToCallee );
+
+ traverseHeapRegionNodes(hrnChild,
+ bw,
+ td,
+ visited,
+ hideReachability,
+ hideSubsetReachability,
+ hidePredicates,
+ hideEdgeTaints,
+ callerNodeIDsCopiedToCallee);
}
- }
+ }
+
-
// return the set of heap regions from the given allocation
// site, if any, that exist in this graph
- protected Set<HeapRegionNode> getAnyExisting( AllocSite as ) {
-
+ protected Set<HeapRegionNode> getAnyExisting(AllocSite as) {
+
Set<HeapRegionNode> out = new HashSet<HeapRegionNode>();
Integer idSum = as.getSummary();
- if( id2hrn.containsKey( idSum ) ) {
- out.add( id2hrn.get( idSum ) );
+ if( id2hrn.containsKey(idSum) ) {
+ out.add(id2hrn.get(idSum) );
}
for( int i = 0; i < as.getAllocationDepth(); ++i ) {
- Integer idI = as.getIthOldest( i );
- if( id2hrn.containsKey( idI ) ) {
- out.add( id2hrn.get( idI ) );
+ Integer idI = as.getIthOldest(i);
+ if( id2hrn.containsKey(idI) ) {
+ out.add(id2hrn.get(idI) );
}
}
// return the set of reach tuples (NOT A REACH STATE! JUST A SET!)
// from the given allocation site, if any, from regions for that
// site that exist in this graph
- protected Set<ReachTuple> getAnyExisting( AllocSite as,
- boolean includeARITY_ZEROORMORE,
- boolean includeARITY_ONE ) {
-
+ protected Set<ReachTuple> getAnyExisting(AllocSite as,
+ boolean includeARITY_ZEROORMORE,
+ boolean includeARITY_ONE) {
+
Set<ReachTuple> out = new HashSet<ReachTuple>();
Integer idSum = as.getSummary();
- if( id2hrn.containsKey( idSum ) ) {
+ if( id2hrn.containsKey(idSum) ) {
- HeapRegionNode hrn = id2hrn.get( idSum );
+ HeapRegionNode hrn = id2hrn.get(idSum);
assert !hrn.isOutOfContext();
if( !includeARITY_ZEROORMORE ) {
- out.add( ReachTuple.factory( hrn.getID(),
- true, // multi-obj region
- ReachTuple.ARITY_ZEROORMORE,
- false ) // ooc?
- );
+ out.add(ReachTuple.factory(hrn.getID(),
+ true, // multi-obj region
+ ReachTuple.ARITY_ZEROORMORE,
+ false) // ooc?
+ );
}
if( includeARITY_ONE ) {
- out.add( ReachTuple.factory( hrn.getID(),
- true, // multi-object region
- ReachTuple.ARITY_ONE,
- false ) // ooc?
- );
+ out.add(ReachTuple.factory(hrn.getID(),
+ true, // multi-object region
+ ReachTuple.ARITY_ONE,
+ false) // ooc?
+ );
}
}
-
+
if( !includeARITY_ONE ) {
// no need to do the single-object regions that
// only have an ARITY ONE possible
}
for( int i = 0; i < as.getAllocationDepth(); ++i ) {
-
- Integer idI = as.getIthOldest( i );
- if( id2hrn.containsKey( idI ) ) {
-
- HeapRegionNode hrn = id2hrn.get( idI );
- assert !hrn.isOutOfContext();
-
- out.add( ReachTuple.factory( hrn.getID(),
- false, // multi-object region
- ReachTuple.ARITY_ONE,
- false ) // ooc?
- );
+
+ Integer idI = as.getIthOldest(i);
+ if( id2hrn.containsKey(idI) ) {
+
+ HeapRegionNode hrn = id2hrn.get(idI);
+ assert !hrn.isOutOfContext();
+
+ out.add(ReachTuple.factory(hrn.getID(),
+ false, // multi-object region
+ ReachTuple.ARITY_ONE,
+ false) // ooc?
+ );
}
}
// if an object allocated at the target site may be
// reachable from both an object from root1 and an
// object allocated at root2, return TRUE
- public boolean mayBothReachTarget( AllocSite asRoot1,
- AllocSite asRoot2,
- AllocSite asTarget ) {
+ public boolean mayBothReachTarget(AllocSite asRoot1,
+ AllocSite asRoot2,
+ AllocSite asTarget) {
// consider all heap regions of the target and look
// for a reach state that indicates regions of root1
// and root2 might be able to reach same object
- Set<HeapRegionNode> hrnSetTarget = getAnyExisting( asTarget );
+ Set<HeapRegionNode> hrnSetTarget = getAnyExisting(asTarget);
// get relevant reach tuples, include ARITY_ZEROORMORE and ARITY_ONE
- Set<ReachTuple> rtSet1 = getAnyExisting( asRoot1, true, true );
- Set<ReachTuple> rtSet2 = getAnyExisting( asRoot2, true, true );
+ Set<ReachTuple> rtSet1 = getAnyExisting(asRoot1, true, true);
+ Set<ReachTuple> rtSet2 = getAnyExisting(asRoot2, true, true);
Iterator<HeapRegionNode> hrnItr = hrnSetTarget.iterator();
while( hrnItr.hasNext() ) {
HeapRegionNode hrn = hrnItr.next();
-
+
Iterator<ReachTuple> rtItr1 = rtSet1.iterator();
while( rtItr1.hasNext() ) {
- ReachTuple rt1 = rtItr1.next();
+ ReachTuple rt1 = rtItr1.next();
- Iterator<ReachTuple> rtItr2 = rtSet2.iterator();
- while( rtItr2.hasNext() ) {
- ReachTuple rt2 = rtItr2.next();
+ Iterator<ReachTuple> rtItr2 = rtSet2.iterator();
+ while( rtItr2.hasNext() ) {
+ ReachTuple rt2 = rtItr2.next();
- if( !hrn.getAlpha().getStatesWithBoth( rt1, rt2 ).isEmpty() ) {
- return true;
- }
- }
+ if( !hrn.getAlpha().getStatesWithBoth(rt1, rt2).isEmpty() ) {
+ return true;
+ }
+ }
}
}
// similar to the method above, return TRUE if ever
// more than one object from the root allocation site
// may reach an object from the target site
- public boolean mayManyReachTarget( AllocSite asRoot,
- AllocSite asTarget ) {
+ public boolean mayManyReachTarget(AllocSite asRoot,
+ AllocSite asTarget) {
// consider all heap regions of the target and look
// for a reach state that multiple objects of root
// might be able to reach the same object
- Set<HeapRegionNode> hrnSetTarget = getAnyExisting( asTarget );
+ Set<HeapRegionNode> hrnSetTarget = getAnyExisting(asTarget);
// get relevant reach tuples
- Set<ReachTuple> rtSetZOM = getAnyExisting( asRoot, true, false );
- Set<ReachTuple> rtSetONE = getAnyExisting( asRoot, false, true );
+ Set<ReachTuple> rtSetZOM = getAnyExisting(asRoot, true, false);
+ Set<ReachTuple> rtSetONE = getAnyExisting(asRoot, false, true);
Iterator<HeapRegionNode> hrnItr = hrnSetTarget.iterator();
while( hrnItr.hasNext() ) {
// if any ZERORMORE tuples are here, TRUE
Iterator<ReachTuple> rtItr = rtSetZOM.iterator();
while( rtItr.hasNext() ) {
- ReachTuple rtZOM = rtItr.next();
+ ReachTuple rtZOM = rtItr.next();
- if( hrn.getAlpha().containsTuple( rtZOM ) ) {
- return true;
- }
+ if( hrn.getAlpha().containsTuple(rtZOM) ) {
+ return true;
+ }
}
- // otherwise, look for any pair of ONE tuples
+ // otherwise, look for any pair of ONE tuples
Iterator<ReachTuple> rtItr1 = rtSetONE.iterator();
while( rtItr1.hasNext() ) {
- ReachTuple rt1 = rtItr1.next();
+ ReachTuple rt1 = rtItr1.next();
- Iterator<ReachTuple> rtItr2 = rtSetONE.iterator();
- while( rtItr2.hasNext() ) {
- ReachTuple rt2 = rtItr2.next();
+ Iterator<ReachTuple> rtItr2 = rtSetONE.iterator();
+ while( rtItr2.hasNext() ) {
+ ReachTuple rt2 = rtItr2.next();
- if( rt1 == rt2 ) {
- continue;
- }
+ if( rt1 == rt2 ) {
+ continue;
+ }
- if( !hrn.getAlpha().getStatesWithBoth( rt1, rt2 ).isEmpty() ) {
- return true;
- }
- }
+ if( !hrn.getAlpha().getStatesWithBoth(rt1, rt2).isEmpty() ) {
+ return true;
+ }
+ }
}
}
-
+
return false;
}
- public Set<HeapRegionNode> findCommonReachableNodes( ReachSet proofOfSharing ) {
+ public Set<HeapRegionNode> findCommonReachableNodes(ReachSet proofOfSharing) {
Set<HeapRegionNode> exhibitProofState =
new HashSet<HeapRegionNode>();
Iterator hrnItr = id2hrn.entrySet().iterator();
while( hrnItr.hasNext() ) {
- Map.Entry me = (Map.Entry) hrnItr.next();
+ Map.Entry me = (Map.Entry)hrnItr.next();
HeapRegionNode hrn = (HeapRegionNode) me.getValue();
-
+
ReachSet intersection =
- Canonical.intersection( proofOfSharing,
- hrn.getAlpha()
- );
+ Canonical.intersection(proofOfSharing,
+ hrn.getAlpha()
+ );
if( !intersection.isEmpty() ) {
- assert !hrn.isOutOfContext();
- exhibitProofState.add( hrn );
+ assert !hrn.isOutOfContext();
+ exhibitProofState.add(hrn);
}
}
-
+
return exhibitProofState;
}
-
+
public Set<HeapRegionNode> mayReachSharedObjects(HeapRegionNode hrn1,
HeapRegionNode hrn2) {
assert hrn1 != null;
assert !hrn1.isOutOfContext();
assert !hrn2.isOutOfContext();
- assert belongsToThis( hrn1 );
- assert belongsToThis( hrn2 );
+ assert belongsToThis(hrn1);
+ assert belongsToThis(hrn2);
- assert !hrn1.getID().equals( hrn2.getID() );
+ assert !hrn1.getID().equals(hrn2.getID() );
// then get the various tokens for these heap regions
- ReachTuple h1 =
- ReachTuple.factory( hrn1.getID(),
- !hrn1.isSingleObject(), // multi?
- ReachTuple.ARITY_ONE,
- false ); // ooc?
-
+ ReachTuple h1 =
+ ReachTuple.factory(hrn1.getID(),
+ !hrn1.isSingleObject(), // multi?
+ ReachTuple.ARITY_ONE,
+ false); // ooc?
+
ReachTuple h1star = null;
if( !hrn1.isSingleObject() ) {
- h1star =
- ReachTuple.factory( hrn1.getID(),
- !hrn1.isSingleObject(),
- ReachTuple.ARITY_ZEROORMORE,
- false );
- }
-
- ReachTuple h2 =
- ReachTuple.factory( hrn2.getID(),
- !hrn2.isSingleObject(),
- ReachTuple.ARITY_ONE,
- false );
+ h1star =
+ ReachTuple.factory(hrn1.getID(),
+ !hrn1.isSingleObject(),
+ ReachTuple.ARITY_ZEROORMORE,
+ false);
+ }
+
+ ReachTuple h2 =
+ ReachTuple.factory(hrn2.getID(),
+ !hrn2.isSingleObject(),
+ ReachTuple.ARITY_ONE,
+ false);
ReachTuple h2star = null;
- if( !hrn2.isSingleObject() ) {
+ if( !hrn2.isSingleObject() ) {
h2star =
- ReachTuple.factory( hrn2.getID(),
- !hrn2.isSingleObject(),
- ReachTuple.ARITY_ZEROORMORE,
- false );
+ ReachTuple.factory(hrn2.getID(),
+ !hrn2.isSingleObject(),
+ ReachTuple.ARITY_ZEROORMORE,
+ false);
}
// then get the merged beta of all out-going edges from these heap
ReachSet proofOfSharing = ReachSet.factory();
- proofOfSharing =
- Canonical.unionORpreds( proofOfSharing,
- beta1.getStatesWithBoth( h1, h2 )
- );
- proofOfSharing =
- Canonical.unionORpreds( proofOfSharing,
- beta2.getStatesWithBoth( h1, h2 )
- );
-
- if( !hrn1.isSingleObject() ) {
- proofOfSharing =
- Canonical.unionORpreds( proofOfSharing,
- beta1.getStatesWithBoth( h1star, h2 )
- );
- proofOfSharing =
- Canonical.unionORpreds( proofOfSharing,
- beta2.getStatesWithBoth( h1star, h2 )
- );
+ proofOfSharing =
+ Canonical.unionORpreds(proofOfSharing,
+ beta1.getStatesWithBoth(h1, h2)
+ );
+ proofOfSharing =
+ Canonical.unionORpreds(proofOfSharing,
+ beta2.getStatesWithBoth(h1, h2)
+ );
+
+ if( !hrn1.isSingleObject() ) {
+ proofOfSharing =
+ Canonical.unionORpreds(proofOfSharing,
+ beta1.getStatesWithBoth(h1star, h2)
+ );
+ proofOfSharing =
+ Canonical.unionORpreds(proofOfSharing,
+ beta2.getStatesWithBoth(h1star, h2)
+ );
}
- if( !hrn2.isSingleObject() ) {
- proofOfSharing =
- Canonical.unionORpreds( proofOfSharing,
- beta1.getStatesWithBoth( h1, h2star )
- );
- proofOfSharing =
- Canonical.unionORpreds( proofOfSharing,
- beta2.getStatesWithBoth( h1, h2star )
- );
+ if( !hrn2.isSingleObject() ) {
+ proofOfSharing =
+ Canonical.unionORpreds(proofOfSharing,
+ beta1.getStatesWithBoth(h1, h2star)
+ );
+ proofOfSharing =
+ Canonical.unionORpreds(proofOfSharing,
+ beta2.getStatesWithBoth(h1, h2star)
+ );
}
if( !hrn1.isSingleObject() &&
!hrn2.isSingleObject()
- ) {
- proofOfSharing =
- Canonical.unionORpreds( proofOfSharing,
- beta1.getStatesWithBoth( h1star, h2star )
- );
- proofOfSharing =
- Canonical.unionORpreds( proofOfSharing,
- beta2.getStatesWithBoth( h1star, h2star )
- );
+ ) {
+ proofOfSharing =
+ Canonical.unionORpreds(proofOfSharing,
+ beta1.getStatesWithBoth(h1star, h2star)
+ );
+ proofOfSharing =
+ Canonical.unionORpreds(proofOfSharing,
+ beta2.getStatesWithBoth(h1star, h2star)
+ );
}
-
+
Set<HeapRegionNode> common = new HashSet<HeapRegionNode>();
if( !proofOfSharing.isEmpty() ) {
- common = findCommonReachableNodes( proofOfSharing );
+ common = findCommonReachableNodes(proofOfSharing);
if( !DISABLE_STRONG_UPDATES &&
!DISABLE_GLOBAL_SWEEP
- ) {
- assert !common.isEmpty();
+ ) {
+ assert !common.isEmpty();
}
}
assert hrn != null;
assert hrn.isNewSummary();
assert !hrn.isOutOfContext();
- assert belongsToThis( hrn );
+ assert belongsToThis(hrn);
- ReachTuple hstar =
- ReachTuple.factory( hrn.getID(),
- true, // multi
- ReachTuple.ARITY_ZEROORMORE,
- false ); // ooc
+ ReachTuple hstar =
+ ReachTuple.factory(hrn.getID(),
+ true, // multi
+ ReachTuple.ARITY_ZEROORMORE,
+ false); // ooc
- // then get the merged beta of all out-going edges from
+ // then get the merged beta of all out-going edges from
// this heap region
ReachSet beta = ReachSet.factory();
RefEdge edge = itrEdge.next();
beta = Canonical.unionORpreds(beta, edge.getBeta());
}
-
+
ReachSet proofOfSharing = ReachSet.factory();
- proofOfSharing =
- Canonical.unionORpreds( proofOfSharing,
- beta.getStatesWithBoth( hstar, hstar )
- );
-
+ proofOfSharing =
+ Canonical.unionORpreds(proofOfSharing,
+ beta.getStatesWithBoth(hstar, hstar)
+ );
+
Set<HeapRegionNode> common = new HashSet<HeapRegionNode>();
if( !proofOfSharing.isEmpty() ) {
- common = findCommonReachableNodes( proofOfSharing );
+ common = findCommonReachableNodes(proofOfSharing);
if( !DISABLE_STRONG_UPDATES &&
!DISABLE_GLOBAL_SWEEP
- ) {
- assert !common.isEmpty();
+ ) {
+ assert !common.isEmpty();
}
}
-
+
return common;
}
public Set<HeapRegionNode> mayReachSharedObjects(FlatMethod fm,
- Integer paramIndex1,
+ Integer paramIndex1,
Integer paramIndex2) {
// get parameter's heap regions
TempDescriptor paramTemp1 = fm.getParameter(paramIndex1.intValue());
- assert this.hasVariable( paramTemp1 );
+ assert this.hasVariable(paramTemp1);
VariableNode paramVar1 = getVariableNodeFromTemp(paramTemp1);
if( !(paramVar1.getNumReferencees() == 1) ) {
- System.out.println( "\n fm="+fm+"\n param="+paramTemp1 );
- writeGraph( "whatup" );
+ System.out.println("\n fm="+fm+"\n param="+paramTemp1);
+ writeGraph("whatup");
}
HeapRegionNode hrnParam1 = paramEdge1.getDst();
TempDescriptor paramTemp2 = fm.getParameter(paramIndex2.intValue());
- assert this.hasVariable( paramTemp2 );
+ assert this.hasVariable(paramTemp2);
VariableNode paramVar2 = getVariableNodeFromTemp(paramTemp2);
if( !(paramVar2.getNumReferencees() == 1) ) {
- System.out.println( "\n fm="+fm+"\n param="+paramTemp2 );
- writeGraph( "whatup" );
+ System.out.println("\n fm="+fm+"\n param="+paramTemp2);
+ writeGraph("whatup");
}
assert paramVar2.getNumReferencees() == 1;
}
public Set<HeapRegionNode> mayReachSharedObjects(FlatMethod fm,
- Integer paramIndex,
+ Integer paramIndex,
AllocSite as) {
// get parameter's heap regions
TempDescriptor paramTemp = fm.getParameter(paramIndex.intValue());
- assert this.hasVariable( paramTemp );
+ assert this.hasVariable(paramTemp);
VariableNode paramVar = getVariableNodeFromTemp(paramTemp);
assert paramVar.getNumReferencees() == 1;
RefEdge paramEdge = paramVar.iteratorToReferencees().next();
// get summary node
HeapRegionNode hrnSummary=null;
- if(id2hrn.containsKey(as.getSummary())){
+ if(id2hrn.containsKey(as.getSummary())) {
// if summary node doesn't exist, ignore this case
hrnSummary = id2hrn.get(as.getSummary());
assert hrnSummary != null;
}
Set<HeapRegionNode> common = new HashSet<HeapRegionNode>();
- if(hrnSummary!=null){
- common.addAll( mayReachSharedObjects(hrnParam, hrnSummary) );
+ if(hrnSummary!=null) {
+ common.addAll(mayReachSharedObjects(hrnParam, hrnSummary) );
}
// check for other nodes
// get summary node 1's alpha
Integer idSum1 = as1.getSummary();
HeapRegionNode hrnSum1=null;
- if(id2hrn.containsKey(idSum1)){
+ if(id2hrn.containsKey(idSum1)) {
hrnSum1 = id2hrn.get(idSum1);
}
// get summary node 2's alpha
Integer idSum2 = as2.getSummary();
HeapRegionNode hrnSum2=null;
- if(id2hrn.containsKey(idSum2)){
+ if(id2hrn.containsKey(idSum2)) {
hrnSum2 = id2hrn.get(idSum2);
}
-
+
Set<HeapRegionNode> common = new HashSet<HeapRegionNode>();
- if(hrnSum1!=null && hrnSum2!=null && hrnSum1!=hrnSum2){
+ if(hrnSum1!=null && hrnSum2!=null && hrnSum1!=hrnSum2) {
common.addAll(mayReachSharedObjects(hrnSum1, hrnSum2));
}
- if(hrnSum1!=null){
+ if(hrnSum1!=null) {
// ask if objects from this summary share among each other
common.addAll(mayReachSharedObjects(hrnSum1));
}
// check sum2 against alloc1 nodes
- if(hrnSum2!=null){
+ if(hrnSum2!=null) {
for (int i = 0; i < as1.getAllocationDepth(); ++i) {
- Integer idI1 = as1.getIthOldest(i);
- assert id2hrn.containsKey(idI1);
- HeapRegionNode hrnI1 = id2hrn.get(idI1);
- assert hrnI1 != null;
- common.addAll(mayReachSharedObjects(hrnI1, hrnSum2));
+ Integer idI1 = as1.getIthOldest(i);
+ assert id2hrn.containsKey(idI1);
+ HeapRegionNode hrnI1 = id2hrn.get(idI1);
+ assert hrnI1 != null;
+ common.addAll(mayReachSharedObjects(hrnI1, hrnSum2));
}
// also ask if objects from this summary share among each other
HeapRegionNode hrnI2 = id2hrn.get(idI2);
assert hrnI2 != null;
- if(hrnSum1!=null){
- common.addAll(mayReachSharedObjects(hrnSum1, hrnI2));
+ if(hrnSum1!=null) {
+ common.addAll(mayReachSharedObjects(hrnSum1, hrnI2));
}
// while we're at it, do an inner loop for alloc2 vs alloc1 nodes
for (int j = 0; j < as1.getAllocationDepth(); ++j) {
- Integer idI1 = as1.getIthOldest(j);
+ Integer idI1 = as1.getIthOldest(j);
- // if these are the same site, don't look for the same token, no
- // alias.
- // different tokens of the same site could alias together though
- if (idI1.equals(idI2)) {
- continue;
- }
+ // if these are the same site, don't look for the same token, no
+ // alias.
+ // different tokens of the same site could alias together though
+ if (idI1.equals(idI2)) {
+ continue;
+ }
- HeapRegionNode hrnI1 = id2hrn.get(idI1);
+ HeapRegionNode hrnI1 = id2hrn.get(idI1);
- common.addAll(mayReachSharedObjects(hrnI1, hrnI2));
+ common.addAll(mayReachSharedObjects(hrnI1, hrnI2));
}
}
return common;
}
-
- public void makeInaccessible( Set<TempDescriptor> vars ) {
- inaccessibleVars.addAll( vars );
+
+ public void makeInaccessible(Set<TempDescriptor> vars) {
+ inaccessibleVars.addAll(vars);
}
- public void makeInaccessible( TempDescriptor td ) {
- inaccessibleVars.add( td );
+ public void makeInaccessible(TempDescriptor td) {
+ inaccessibleVars.add(td);
}
- public void makeAccessible( TempDescriptor td ) {
- inaccessibleVars.remove( td );
+ public void makeAccessible(TempDescriptor td) {
+ inaccessibleVars.remove(td);
}
-
+
public boolean isAccessible(TempDescriptor td) {
return !inaccessibleVars.contains(td);
}
public static ReachSet factory() {
ReachSet out = new ReachSet();
- out = (ReachSet) Canonical.makeCanonical( out );
+ out = (ReachSet) Canonical.makeCanonical(out);
return out;
}
- public static ReachSet factory( ReachState state ) {
+ public static ReachSet factory(ReachState state) {
assert state != null;
assert state.isCanonical();
ReachSet out = new ReachSet();
- out.reachStates.add( state );
- out = (ReachSet) Canonical.makeCanonical( out );
+ out.reachStates.add(state);
+ out = (ReachSet) Canonical.makeCanonical(out);
return out;
}
}
// this should be a hash table so we can do this by key
- public ReachState containsIgnorePreds( ReachState state ) {
+ public ReachState containsIgnorePreds(ReachState state) {
assert state != null;
Iterator<ReachState> stateItr = this.reachStates.iterator();
while( stateItr.hasNext() ) {
ReachState stateThis = stateItr.next();
- if( stateThis.equalsIgnorePreds( state ) ) {
+ if( stateThis.equalsIgnorePreds(state) ) {
return stateThis;
}
}
-
+
return null;
}
- public boolean containsSuperSet( ReachState state ) {
- return containsSuperSet( state, false );
+ public boolean containsSuperSet(ReachState state) {
+ return containsSuperSet(state, false);
}
- public boolean containsStrictSuperSet( ReachState state ) {
- return containsSuperSet( state, true );
+ public boolean containsStrictSuperSet(ReachState state) {
+ return containsSuperSet(state, true);
}
- public boolean containsSuperSet( ReachState state,
- boolean strict ) {
+ public boolean containsSuperSet(ReachState state,
+ boolean strict) {
assert state != null;
- if( !strict && reachStates.contains( state ) ) {
+ if( !strict && reachStates.contains(state) ) {
return true;
}
while( itr.hasNext() ) {
ReachState stateThis = itr.next();
if( strict ) {
- if( !state.equals( stateThis ) &&
- state.isSubset( stateThis ) ) {
- return true;
- }
+ if( !state.equals(stateThis) &&
+ state.isSubset(stateThis) ) {
+ return true;
+ }
} else {
- if( state.isSubset( stateThis ) ) {
- return true;
- }
+ if( state.isSubset(stateThis) ) {
+ return true;
+ }
}
}
-
- return false;
+
+ return false;
}
- public boolean containsTuple( ReachTuple rt ) {
+ public boolean containsTuple(ReachTuple rt) {
Iterator<ReachState> itr = iterator();
while( itr.hasNext() ) {
ReachState state = itr.next();
- if( state.containsTuple( rt ) ) {
+ if( state.containsTuple(rt) ) {
return true;
}
}
return false;
}
- public ReachSet getStatesWithBoth( ReachTuple rt1,
- ReachTuple rt2 ) {
+ public ReachSet getStatesWithBoth(ReachTuple rt1,
+ ReachTuple rt2) {
ReachSet out = new ReachSet();
Iterator<ReachState> itr = iterator();
while( itr.hasNext() ) {
ReachState state = itr.next();
- if( state.containsTuple( rt1 ) &&
- state.containsTuple( rt2 ) ) {
- out.reachStates.add( state );
+ if( state.containsTuple(rt1) &&
+ state.containsTuple(rt2) ) {
+ out.reachStates.add(state);
}
}
- out = (ReachSet) Canonical.makeCanonical( out );
+ out = (ReachSet) Canonical.makeCanonical(out);
return out;
}
// used to assert each state in the set is
// unique
public boolean containsNoDuplicates() {
- Vector<ReachState> v = new Vector( reachStates );
+ Vector<ReachState> v = new Vector(reachStates);
for( int i = 0; i < v.size(); ++i ) {
- ReachState s1 = v.get( i );
+ ReachState s1 = v.get(i);
for( int j = i+1; j < v.size(); ++j ) {
- ReachState s2 = v.get( j );
- if( s1.equals( s2 ) ) {
- assert s1.isCanonical();
- assert s2.isCanonical();
- return false;
- }
+ ReachState s2 = v.get(j);
+ if( s1.equals(s2) ) {
+ assert s1.isCanonical();
+ assert s2.isCanonical();
+ return false;
+ }
}
}
return true;
}
- public boolean equalsSpecific( Object o ) {
+ public boolean equalsSpecific(Object o) {
if( o == null ) {
return false;
}
-
+
if( !(o instanceof ReachSet) ) {
return false;
}
ReachSet rs = (ReachSet) o;
- return reachStates.equals( rs.reachStates );
+ return reachStates.equals(rs.reachStates);
}
}
- public String toStringEscNewline( boolean hideSubsetReachability ) {
+ public String toStringEscNewline(boolean hideSubsetReachability) {
String s = "[";
Iterator<ReachState> i = this.iterator();
// skip this if there is a superset already
if( hideSubsetReachability &&
- containsStrictSuperSet( state ) ) {
- continue;
+ containsStrictSuperSet(state) ) {
+ continue;
}
s += state;
s += "]";
return s;
}
-
+
public String toString() {
- return toString( false );
+ return toString(false);
}
- public String toString( boolean hideSubsetReachability ) {
+ public String toString(boolean hideSubsetReachability) {
ReachSet toPrint = this;
-
+
if( hideSubsetReachability ) {
// make a new reach set with subset states removed
toPrint = ReachSet.factory();
Iterator<ReachState> i = this.iterator();
while( i.hasNext() ) {
- ReachState state = i.next();
+ ReachState state = i.next();
- if( containsStrictSuperSet( state ) ) {
- continue;
- }
+ if( containsStrictSuperSet(state) ) {
+ continue;
+ }
- toPrint = Canonical.add( toPrint, state );
+ toPrint = Canonical.add(toPrint, state);
}
}
public static ReachState factory() {
ReachState out = new ReachState();
- out = (ReachState) Canonical.makeCanonical( out );
+ out = (ReachState) Canonical.makeCanonical(out);
return out;
}
- public static ReachState factory( ReachTuple rt ) {
+ public static ReachState factory(ReachTuple rt) {
assert rt != null;
assert rt.isCanonical();
- ReachState out = new ReachState();
- out.reachTuples.add( rt );
- out = (ReachState) Canonical.makeCanonical( out );
+ ReachState out = new ReachState();
+ out.reachTuples.add(rt);
+ out = (ReachState) Canonical.makeCanonical(out);
return out;
}
- public static ReachState factory( HashSet<ReachTuple> reachTuples,
- ExistPredSet preds
- ) {
+ public static ReachState factory(HashSet<ReachTuple> reachTuples,
+ ExistPredSet preds
+ ) {
assert reachTuples != null;
assert preds != null;
assert preds.isCanonical();
ReachState out = new ReachState();
- out.reachTuples.addAll( reachTuples );
+ out.reachTuples.addAll(reachTuples);
out.preds = preds;
- out = (ReachState) Canonical.makeCanonical( out );
+ out = (ReachState) Canonical.makeCanonical(out);
return out;
}
return reachTuples.isEmpty();
}
- public boolean isSubset( ReachState rsIn ) {
+ public boolean isSubset(ReachState rsIn) {
assert rsIn != null;
- return rsIn.reachTuples.containsAll( this.reachTuples );
+ return rsIn.reachTuples.containsAll(this.reachTuples);
}
- public boolean containsTuple( ReachTuple rt ) {
+ public boolean containsTuple(ReachTuple rt) {
assert rt != null;
- return reachTuples.contains( rt );
+ return reachTuples.contains(rt);
}
// this should be a hash table so we can do this by key
- public ReachTuple containsHrnID( Integer hrnID,
- boolean isOutOfContext ) {
+ public ReachTuple containsHrnID(Integer hrnID,
+ boolean isOutOfContext) {
assert hrnID != null;
Iterator<ReachTuple> rtItr = reachTuples.iterator();
while( rtItr.hasNext() ) {
ReachTuple rt = rtItr.next();
- if( hrnID.equals( rt.getHrnID() ) &&
+ if( hrnID.equals(rt.getHrnID() ) &&
isOutOfContext == rt.isOutOfContext()
) {
return rt;
}
}
-
+
return null;
}
}
- public boolean equalsSpecific( Object o ) {
+ public boolean equalsSpecific(Object o) {
if( o == null ) {
return false;
}
}
ReachState rs = (ReachState) o;
- return
- reachTuples.equals( rs.reachTuples ) &&
- preds.equals( rs.preds );
+ return
+ reachTuples.equals(rs.reachTuples) &&
+ preds.equals(rs.preds);
}
public int hashCodeSpecific() {
- return
- reachTuples.hashCode() ^
+ return
+ reachTuples.hashCode() ^
preds.hashCode();
}
- public boolean equalsIgnorePreds( Object o ) {
+ public boolean equalsIgnorePreds(Object o) {
if( o == null ) {
return false;
}
}
ReachState rs = (ReachState) o;
- return
- reachTuples.equals( rs.reachTuples );
+ return
+ reachTuples.equals(rs.reachTuples);
}
protected boolean isOutOfContext;
- public static ReachTuple factory( Integer hrnID,
- boolean isMultiObject,
- int arity,
- boolean ooc ) {
- ReachTuple out = new ReachTuple( hrnID,
- isMultiObject,
- arity,
- ooc );
- out = (ReachTuple) Canonical.makeCanonical( out );
+ public static ReachTuple factory(Integer hrnID,
+ boolean isMultiObject,
+ int arity,
+ boolean ooc) {
+ ReachTuple out = new ReachTuple(hrnID,
+ isMultiObject,
+ arity,
+ ooc);
+ out = (ReachTuple) Canonical.makeCanonical(out);
return out;
}
-
- public static ReachTuple factory( HeapRegionNode hrn ) {
- ReachTuple out = new ReachTuple( hrn.getID(),
- !hrn.isSingleObject(),
- ARITY_ONE,
- false );
- out = (ReachTuple) Canonical.makeCanonical( out );
+
+ public static ReachTuple factory(HeapRegionNode hrn) {
+ ReachTuple out = new ReachTuple(hrn.getID(),
+ !hrn.isSingleObject(),
+ ARITY_ONE,
+ false);
+ out = (ReachTuple) Canonical.makeCanonical(out);
return out;
}
- protected ReachTuple( Integer hrnID,
- boolean isMultiObject,
- int arity,
- boolean ooc ) {
+ protected ReachTuple(Integer hrnID,
+ boolean isMultiObject,
+ int arity,
+ boolean ooc) {
assert hrnID != null;
this.hrnID = hrnID;
}
- public boolean equalsSpecific( Object o ) {
+ public boolean equalsSpecific(Object o) {
if( o == null ) {
return false;
}
ReachTuple rt = (ReachTuple) o;
- return hrnID.equals( rt.hrnID ) &&
- arity == rt.arity &&
- isOutOfContext == rt.isOutOfContext;
+ return hrnID.equals(rt.hrnID) &&
+ arity == rt.arity &&
+ isOutOfContext == rt.isOutOfContext;
}
public int hashCodeSpecific() {
protected ReachSet beta;
protected ReachSet betaNew;
- protected RefSrcNode src;
+ protected RefSrcNode src;
protected HeapRegionNode dst;
// existence predicates must be true in a caller
// read or write through this edge
protected TaintSet taints;
-
- public RefEdge( RefSrcNode src,
- HeapRegionNode dst,
- TypeDescriptor type,
- String field,
- ReachSet beta,
- ExistPredSet preds,
- TaintSet taints ) {
+
+ public RefEdge(RefSrcNode src,
+ HeapRegionNode dst,
+ TypeDescriptor type,
+ String field,
+ ReachSet beta,
+ ExistPredSet preds,
+ TaintSet taints) {
assert src != null;
assert dst != null;
public RefEdge copy() {
- RefEdge copy = new RefEdge( src,
- dst,
- type,
- field,
- beta,
- preds,
- taints );
+ RefEdge copy = new RefEdge(src,
+ dst,
+ type,
+ field,
+ beta,
+ preds,
+ taints);
return copy;
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
if( !(o instanceof RefEdge) ) {
return false;
}
-
+
RefEdge edge = (RefEdge) o;
-
- if( !typeEquals( edge.type ) ) {
+
+ if( !typeEquals(edge.type) ) {
return false;
}
- if( !fieldEquals( edge.field ) ) {
+ if( !fieldEquals(edge.field) ) {
return false;
}
// fixed point, so use this method to determine if
// an edge is "equal" to some previous visit, basically
// and taints!
- public boolean equalsIncludingBetaPredsTaints( RefEdge edge ) {
- return equals( edge ) &&
- beta.equals( edge.beta ) &&
- preds.equals( edge.preds ) &&
- taints.equals( edge.taints );
+ public boolean equalsIncludingBetaPredsTaints(RefEdge edge) {
+ return equals(edge) &&
+ beta.equals(edge.beta) &&
+ preds.equals(edge.preds) &&
+ taints.equals(edge.taints);
}
- public boolean equalsPreds( RefEdge edge ) {
- return preds.equals( edge.preds );
+ public boolean equalsPreds(RefEdge edge) {
+ return preds.equals(edge.preds);
}
if( field != null ) {
hash += field.hashCode()*7;
}
-
+
hash += src.hashCode()*11;
hash += dst.hashCode();
return src;
}
- public void setSrc( RefSrcNode rsn ) {
+ public void setSrc(RefSrcNode rsn) {
assert rsn != null;
src = rsn;
}
return dst;
}
- public void setDst( HeapRegionNode hrn ) {
+ public void setDst(HeapRegionNode hrn) {
assert hrn != null;
dst = hrn;
}
return type;
}
- public void setType( TypeDescriptor td ) {
+ public void setType(TypeDescriptor td) {
assert td != null;
type = td;
}
return field;
}
- public void setField( String s ) {
+ public void setField(String s) {
field = s;
}
- public boolean typeEquals( TypeDescriptor td ) {
- return type.equals( td );
+ public boolean typeEquals(TypeDescriptor td) {
+ return type.equals(td);
}
- public boolean fieldEquals( String s ) {
+ public boolean fieldEquals(String s) {
if( field == null && s == null ) {
return true;
}
if( field == null ) {
return false;
}
- return field.equals( s );
+ return field.equals(s);
}
- public boolean typeAndFieldEquals( RefEdge e ) {
- return typeEquals ( e.getType() ) &&
- fieldEquals( e.getField() );
+ public boolean typeAndFieldEquals(RefEdge e) {
+ return typeEquals(e.getType() ) &&
+ fieldEquals(e.getField() );
}
return beta;
}
- public void setBeta( ReachSet beta ) {
+ public void setBeta(ReachSet beta) {
assert beta != null;
this.beta = beta;
}
return betaNew;
}
- public void setBetaNew( ReachSet beta ) {
+ public void setBetaNew(ReachSet beta) {
assert beta != null;
this.betaNew = beta;
}
-
+
public void applyBetaNew() {
assert betaNew != null;
beta = betaNew;
return preds;
}
- public void setPreds( ExistPredSet preds ) {
+ public void setPreds(ExistPredSet preds) {
this.preds = preds;
}
return taints;
}
- public void setTaints( TaintSet taints ) {
+ public void setTaints(TaintSet taints) {
this.taints = taints;
}
-
-
- public String toStringDOT( boolean hideReach,
- boolean hideSubsetReach,
- boolean hidePreds,
- boolean hideEdgeTaints,
- String otherAttributes ) {
- String s =
+
+
+ public String toStringDOT(boolean hideReach,
+ boolean hideSubsetReach,
+ boolean hidePreds,
+ boolean hideEdgeTaints,
+ String otherAttributes) {
+ String s =
"[label=\""+
type.toPrettyString()+"\\n"+
field;
if( !hideReach ) {
- s += "\\n"+beta.toStringEscNewline( hideSubsetReach );
+ s += "\\n"+beta.toStringEscNewline(hideSubsetReach);
}
if( !hidePreds ) {
s += "\\n"+preds.toStringEscNewline();
}
- if( !hideEdgeTaints ) {
+ if( !hideEdgeTaints ) {
if( !taints.isEmpty() ) {
- s += "\\n"+taints.toStringEscNewline();
+ s += "\\n"+taints.toStringEscNewline();
}
}
}
public String toString() {
- return new String( "("+src+
- "->"+type.toPrettyString()+
- " "+field+
- "->"+dst+")"
- );
- }
+ return new String("("+src+
+ "->"+type.toPrettyString()+
+ " "+field+
+ "->"+dst+")"
+ );
+ }
public String toStringAndBeta() {
return toString()+beta.toString();
return referencees.size();
}
- public void addReferencee( RefEdge edge ) {
+ public void addReferencee(RefEdge edge) {
assert edge != null;
- referencees.add( edge );
+ referencees.add(edge);
}
- public void removeReferencee( RefEdge edge ) {
+ public void removeReferencee(RefEdge edge) {
assert edge != null;
- assert referencees.contains( edge );
- referencees.remove( edge );
+ assert referencees.contains(edge);
+ referencees.remove(edge);
}
- public RefEdge getReferenceTo( HeapRegionNode hrn,
- TypeDescriptor type,
- String field
- ) {
+ public RefEdge getReferenceTo(HeapRegionNode hrn,
+ TypeDescriptor type,
+ String field
+ ) {
assert hrn != null;
Iterator<RefEdge> itrEdge = referencees.iterator();
while( itrEdge.hasNext() ) {
RefEdge edge = itrEdge.next();
- if( edge.getDst().equals( hrn ) &&
- edge.typeEquals( type ) &&
- edge.fieldEquals( field )
+ if( edge.getDst().equals(hrn) &&
+ edge.typeEquals(type) &&
+ edge.fieldEquals(field)
) {
return edge;
}
//////////////////////////////////////////////
//
-// SMFEState is part of a
+// SMFEState is part of a
// (S)tate (M)achine (F)or (E)ffects.
//
// StateMachineForEffects describes an intial
// optimizations and whatnot, we need an alternate
// system of unique IDs
- // uniquely identifies this state
+ // uniquely identifies this state
protected int id;
protected int iHashCode;
// all possible effects in this state
protected Set<Effect> effects;
-
+
//TODO Jim! get me the list of conflicts!
protected Set<Effect> conflicts;
protected int refCount;
protected FlatNode whereDefined;
-
- public SMFEState( FlatNode fnWhereDefined, int id ) {
+
+ public SMFEState(FlatNode fnWhereDefined, int id) {
this.id = id;
this.iHashCode = fnWhereDefined.hashCode();
this.whereDefined=fnWhereDefined;
-
+
effects = new HashSet<Effect>();
conflicts = new HashSet<Effect>();
e2states = new Hashtable< Effect, Set<SMFEState> >();
refCount = 0;
}
- public void addEffect( Effect e ) {
- effects.add( e );
+ public void addEffect(Effect e) {
+ effects.add(e);
}
// the given effect allows the transition to the new state
- public void addTransition( Effect effect,
- SMFEState stateTo
- ) {
+ public void addTransition(Effect effect,
+ SMFEState stateTo
+ ) {
- Set<SMFEState> states = e2states.get( effect );
+ Set<SMFEState> states = e2states.get(effect);
if( states == null ) {
states = new HashSet<SMFEState>();
- e2states.put( effect, states );
+ e2states.put(effect, states);
}
if (!states.contains(stateTo)) {
- states.add( stateTo );
+ states.add(stateTo);
stateTo.refCount++;
}
}
public Set<Effect> getEffectsAllowed() {
return effects;
}
-
+
public void addConflict(Effect e) {
conflicts.add(e);
}
public Set<Effect> getConflicts() {
return conflicts;
}
-
+
public Set<Effect> getTransitionEffects() {
return this.e2states.keySet();
}
// some subset of the above effects may transition to
// other states
- public Set<SMFEState> transitionsTo( Effect e ) {
- Set<SMFEState> statesOut = e2states.get( e );
+ public Set<SMFEState> transitionsTo(Effect e) {
+ Set<SMFEState> statesOut = e2states.get(e);
if( statesOut == null ) {
statesOut = new HashSet<SMFEState>();
}
// other states
public Set<SMFEState> transitionsTo() {
Set<SMFEState> statesOut = new HashSet<SMFEState>();
- for(Map.Entry<Effect, Set<SMFEState>> entry:e2states.entrySet()) {
+ for(Map.Entry<Effect, Set<SMFEState>> entry : e2states.entrySet()) {
statesOut.addAll(entry.getValue());
}
return statesOut;
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
public String toStringDOT() {
-
+
// first create the state as a node in DOT graph
String s = " "+id+"[shape=box,";
if (conflicts.size()>0 ) {
Iterator<Effect> eItr = effects.iterator();
while( eItr.hasNext() ) {
- Effect e = eItr.next();
+ Effect e = eItr.next();
if (conflicts.contains(e)) {
s += "["+e.toString()+"]";
} else {
s += e.toString();
}
- if( eItr.hasNext() ) {
- s += "\\n";
- }
+ if( eItr.hasNext() ) {
+ s += "\\n";
+ }
}
}
// then each transition is an edge
Iterator<Effect> eItr = e2states.keySet().iterator();
while( eItr.hasNext() ) {
- Effect e = eItr.next();
- Set<SMFEState> states = e2states.get( e );
+ Effect e = eItr.next();
+ Set<SMFEState> states = e2states.get(e);
Iterator<SMFEState> sItr = states.iterator();
while( sItr.hasNext() ) {
- SMFEState state = sItr.next();
+ SMFEState state = sItr.next();
- s += "\n "+
- id+" -> "+state.id+
- "[label=\""+e+", RC="+refCount+"\"";
+ s += "\n "+
+ id+" -> "+state.id+
+ "[label=\""+e+", RC="+refCount+"\"";
if (conflicts.contains(e))
s+=",style=dashed";
s+="];";
public class StallSite {
- private HashSet<Effect> effectSet;
- private HashSet<AllocSite> allocSiteSet;
+ private HashSet<Effect> effectSet;
+ private HashSet<AllocSite> allocSiteSet;
- public StallSite(Set<AllocSite> allocSet) {
- effectSet = new HashSet<Effect>();
- allocSiteSet = new HashSet<AllocSite>();
- allocSiteSet.addAll(allocSet);
- }
+ public StallSite(Set<AllocSite> allocSet) {
+ effectSet = new HashSet<Effect>();
+ allocSiteSet = new HashSet<AllocSite>();
+ allocSiteSet.addAll(allocSet);
+ }
- public void addEffect(Effect e) {
- effectSet.add(e);
- }
+ public void addEffect(Effect e) {
+ effectSet.add(e);
+ }
- public HashSet<Effect> getEffectSet() {
- return effectSet;
- }
+ public HashSet<Effect> getEffectSet() {
+ return effectSet;
+ }
- public Set<AllocSite> getAllocSiteSet(){
- return allocSiteSet;
- }
+ public Set<AllocSite> getAllocSiteSet() {
+ return allocSiteSet;
+ }
- public boolean equals(Object o) {
+ public boolean equals(Object o) {
- if (o == null) {
- return false;
- }
+ if (o == null) {
+ return false;
+ }
- if (!(o instanceof StallSite)) {
- return false;
- }
+ if (!(o instanceof StallSite)) {
+ return false;
+ }
- StallSite in = (StallSite) o;
+ StallSite in = (StallSite) o;
- if (allocSiteSet.equals(in.getAllocSiteSet())
- && effectSet.equals(in.getEffectSet()) ){
- return true;
- } else {
- return false;
- }
+ if (allocSiteSet.equals(in.getAllocSiteSet())
+ && effectSet.equals(in.getEffectSet()) ) {
+ return true;
+ } else {
+ return false;
+ }
- }
+ }
- @Override
- public String toString() {
- return "StallSite [allocationSiteSet=" + allocSiteSet
- + ", effectSet=" + effectSet + "]";
- }
+ @Override
+ public String toString() {
+ return "StallSite [allocationSiteSet=" + allocSiteSet
+ + ", effectSet=" + effectSet + "]";
+ }
}
public final static FlatNode startNode=new FlatNop();
protected HashMap<Pair<Alloc, FieldDescriptor>, Integer> effectsMap;
- // states in the machine are uniquely identified
+ // states in the machine are uniquely identified
// by a flat node (program point)
protected Hashtable<FlatNode, SMFEState> fn2state;
-
+
//TODO Jim! Jim! Give me the weakly connected group number here!
protected Hashtable<FlatNode, Integer> fn2weaklyConnectedGroupID;
protected Set<Effect> possiblyEvilEffects;
- public StateMachineForEffects( FlatNode fnInitial ) {
+ public StateMachineForEffects(FlatNode fnInitial) {
fn2state = new Hashtable<FlatNode, SMFEState>();
effectsMap = new HashMap<Pair<Alloc, FieldDescriptor>, Integer>();
- initialState = getState( startNode );
+ initialState = getState(startNode);
this.fn=fnInitial;
possiblyEvilEffects = new HashSet<Effect>();
}
}
public boolean isEmpty() {
- for(FlatNode fn:fn2state.keySet()) {
+ for(FlatNode fn : fn2state.keySet()) {
SMFEState state=fn2state.get(fn);
if (!state.getConflicts().isEmpty())
return false;
return type.intValue();
}
- public void addEffect( FlatNode fnState, Effect e ) {
+ public void addEffect(FlatNode fnState, Effect e) {
if (fnState==null)
fnState=startNode;
- SMFEState state = getState( fnState );
- state.addEffect( e );
+ SMFEState state = getState(fnState);
+ state.addEffect(e);
Pair<Alloc, FieldDescriptor> p=new Pair<Alloc, FieldDescriptor>(e.getAffectedAllocSite(), e.getField());
int type=e.getType();
if (!effectsMap.containsKey(p))
effectsMap.put(p, new Integer(type|effectsMap.get(p).intValue()));
}
- public void addTransition( FlatNode fnFrom,
- FlatNode fnTo,
- Effect e ) {
+ public void addTransition(FlatNode fnFrom,
+ FlatNode fnTo,
+ Effect e) {
if (fnFrom==null)
fnFrom=startNode;
-
- assert fn2state.containsKey( fnFrom );
- SMFEState stateFrom = getState( fnFrom );
- SMFEState stateTo = getState( fnTo );
-
- stateFrom.addTransition( e, stateTo );
+
+ assert fn2state.containsKey(fnFrom);
+ SMFEState stateFrom = getState(fnFrom);
+ SMFEState stateTo = getState(fnTo);
+
+ stateFrom.addTransition(e, stateTo);
}
public SMFEState getInitialState() {
}
- protected SMFEState getState( FlatNode fn ) {
- SMFEState state = fn2state.get( fn );
+ protected SMFEState getState(FlatNode fn) {
+ SMFEState state = fn2state.get(fn);
if( state == null ) {
- state = new SMFEState( fn ,id++ );
- fn2state.put( fn, state );
+ state = new SMFEState(fn,id++);
+ fn2state.put(fn, state);
}
return state;
}
-
+
public Integer getWeaklyConnectedGroupID(FlatNode fn) {
//TODO stubby stubby!
return 0;
}
- public void addPossiblyEvilEffect( Effect e ) {
- possiblyEvilEffects.add( e );
+ public void addPossiblyEvilEffect(Effect e) {
+ possiblyEvilEffects.add(e);
}
public Set<Effect> getPossiblyEvilEffects() {
}
- public void writeAsDOT( String graphName ) {
- graphName = graphName.replaceAll( "[\\W]", "" );
+ public void writeAsDOT(String graphName) {
+ graphName = graphName.replaceAll("[\\W]", "");
try {
- BufferedWriter bw =
- new BufferedWriter( new FileWriter( graphName+".dot" ) );
+ BufferedWriter bw =
+ new BufferedWriter(new FileWriter(graphName+".dot") );
- bw.write( "digraph "+graphName+" {\n" );
+ bw.write("digraph "+graphName+" {\n");
Iterator<FlatNode> fnItr = fn2state.keySet().iterator();
while( fnItr.hasNext() ) {
- SMFEState state = fn2state.get( fnItr.next() );
- bw.write( state.toStringDOT()+"\n" );
+ SMFEState state = fn2state.get(fnItr.next() );
+ bw.write(state.toStringDOT()+"\n");
}
- bw.write( "}\n" );
+ bw.write("}\n");
bw.close();
-
+
} catch( IOException e ) {
- throw new Error( "Error writing out DOT graph "+graphName );
+ throw new Error("Error writing out DOT graph "+graphName);
}
}
// a stall site and live variable or
// an sese (rblock) and an in-set var
// only one identifer will be non-null
-
+
// identify an sese (rblock) + inset var
protected FlatSESEEnterNode sese;
// either type of taint includes a var
// and allocation site
protected TempDescriptor var;
- protected Alloc allocSite;
+ protected Alloc allocSite;
// taints have a new, possibly null element which is
// the FlatNode at which the tainted reference was
public Taint reTaint(FlatNode fn) {
Taint out=new Taint(sese, stallSite, var, allocSite, fn, preds);
- out = (Taint) Canonical.makeCanonical( out );
+ out = (Taint) Canonical.makeCanonical(out);
return out;
}
- public static Taint factory( FlatSESEEnterNode sese,
- TempDescriptor insetVar,
- Alloc as,
- FlatNode whereDefined,
- ExistPredSet eps ) {
- Taint out = new Taint( sese, null, insetVar, as, whereDefined, eps );
- out = (Taint) Canonical.makeCanonical( out );
+ public static Taint factory(FlatSESEEnterNode sese,
+ TempDescriptor insetVar,
+ Alloc as,
+ FlatNode whereDefined,
+ ExistPredSet eps) {
+ Taint out = new Taint(sese, null, insetVar, as, whereDefined, eps);
+ out = (Taint) Canonical.makeCanonical(out);
return out;
}
- public static Taint factory( FlatNode stallSite,
- TempDescriptor var,
- Alloc as,
- FlatNode whereDefined,
- ExistPredSet eps ) {
- Taint out = new Taint( null, stallSite, var, as, whereDefined, eps );
- out = (Taint) Canonical.makeCanonical( out );
+ public static Taint factory(FlatNode stallSite,
+ TempDescriptor var,
+ Alloc as,
+ FlatNode whereDefined,
+ ExistPredSet eps) {
+ Taint out = new Taint(null, stallSite, var, as, whereDefined, eps);
+ out = (Taint) Canonical.makeCanonical(out);
return out;
}
- public static Taint factory( FlatSESEEnterNode sese,
- FlatNode stallSite,
- TempDescriptor var,
- Alloc as,
- FlatNode whereDefined,
- ExistPredSet eps ) {
- Taint out = new Taint( sese, stallSite, var, as, whereDefined, eps );
- out = (Taint) Canonical.makeCanonical( out );
+ public static Taint factory(FlatSESEEnterNode sese,
+ FlatNode stallSite,
+ TempDescriptor var,
+ Alloc as,
+ FlatNode whereDefined,
+ ExistPredSet eps) {
+ Taint out = new Taint(sese, stallSite, var, as, whereDefined, eps);
+ out = (Taint) Canonical.makeCanonical(out);
return out;
}
- protected Taint( FlatSESEEnterNode sese,
- FlatNode stallSite,
- TempDescriptor v,
- Alloc as,
- FlatNode fnDefined,
- ExistPredSet eps ) {
- assert
+ protected Taint(FlatSESEEnterNode sese,
+ FlatNode stallSite,
+ TempDescriptor v,
+ Alloc as,
+ FlatNode fnDefined,
+ ExistPredSet eps) {
+ assert
(sese == null && stallSite != null) ||
- (sese != null && stallSite == null);
-
+ (sese != null && stallSite == null);
+
assert v != null;
assert as != null;
assert eps != null;
-
+
this.sese = sese;
this.stallSite = stallSite;
this.var = v;
this.preds = eps;
}
- protected Taint( Taint t ) {
- this( t.sese,
- t.stallSite,
- t.var,
- t.allocSite,
+ protected Taint(Taint t) {
+ this( t.sese,
+ t.stallSite,
+ t.var,
+ t.allocSite,
t.fnDefined,
t.preds );
}
return preds;
}
- public boolean equalsSpecific( Object o ) {
- if( !equalsIgnorePreds( o ) ) {
+ public boolean equalsSpecific(Object o) {
+ if( !equalsIgnorePreds(o) ) {
return false;
}
-
+
Taint t = (Taint) o;
- return preds.equals( t.preds );
+ return preds.equals(t.preds);
}
- public boolean equalsIgnorePreds( Object o ) {
+ public boolean equalsIgnorePreds(Object o) {
if( o == null ) {
return false;
}
boolean seseEqual;
if( sese == null ) {
- seseEqual = (t.sese == null);
+ seseEqual = (t.sese == null);
} else {
- seseEqual = sese.equals( t.sese );
+ seseEqual = sese.equals(t.sese);
}
boolean stallSiteEqual;
if( stallSite == null ) {
stallSiteEqual = (t.stallSite == null);
} else {
- stallSiteEqual = stallSite.equals( t.stallSite );
+ stallSiteEqual = stallSite.equals(t.stallSite);
}
boolean fnDefinedEqual;
if( fnDefined == null ) {
fnDefinedEqual = (t.fnDefined == null);
} else {
- fnDefinedEqual = fnDefined.equals( t.fnDefined );
+ fnDefinedEqual = fnDefined.equals(t.fnDefined);
}
- return
- seseEqual &&
+ return
+ seseEqual &&
stallSiteEqual &&
fnDefinedEqual &&
- var .equals( t.var ) &&
- allocSite.equals( t.allocSite );
+ var.equals(t.var) &&
+ allocSite.equals(t.allocSite);
}
public int hashCodeSpecific() {
int hash = allocSite.hashCode();
hash = hash ^ var.hashCode();
-
+
if( sese != null ) {
hash = hash ^ sese.hashCode();
}
f += ", "+fnDefined;
}
- return
+ return
"("+s+
"-"+var+
", "+allocSite.toStringBrief()+
public static TaintSet factory(HashSet<Taint> taints) {
TaintSet out = new TaintSet(taints);
- out = (TaintSet) Canonical.makeCanonical( out );
+ out = (TaintSet) Canonical.makeCanonical(out);
return out;
}
public TaintSet reTaint(FlatNode fn) {
HashSet<Taint> taintset=new HashSet<Taint>();
- for(Taint t:taints) {
+ for(Taint t : taints) {
if (t.getWhereDefined()!=fn) {
t=t.reTaint(fn);
}
taintset.add(t);
}
-
+
TaintSet out=new TaintSet(taintset);
- out = (TaintSet) Canonical.makeCanonical( out );
+ out = (TaintSet) Canonical.makeCanonical(out);
return out;
}
public static TaintSet factory() {
TaintSet out = new TaintSet();
- out = (TaintSet) Canonical.makeCanonical( out );
+ out = (TaintSet) Canonical.makeCanonical(out);
return out;
}
- public static TaintSet factory( Taint t ) {
+ public static TaintSet factory(Taint t) {
assert t != null;
assert t.isCanonical();
- TaintSet out = new TaintSet();
- out.taints.add( t );
- out = (TaintSet) Canonical.makeCanonical( out );
+ TaintSet out = new TaintSet();
+ out.taints.add(t);
+ out = (TaintSet) Canonical.makeCanonical(out);
return out;
}
- public static TaintSet factory( TaintSet ts,
- ExistPredSet preds ) {
+ public static TaintSet factory(TaintSet ts,
+ ExistPredSet preds) {
assert ts != null;
assert ts.isCanonical();
Iterator<Taint> tItr = ts.iterator();
while( tItr.hasNext() ) {
Taint t = tItr.next();
- Taint tOut = Taint.factory( t.sese,
- t.stallSite,
- t.var,
- t.allocSite,
- t.fnDefined,
- preds );
- out.taints.add( tOut );
+ Taint tOut = Taint.factory(t.sese,
+ t.stallSite,
+ t.var,
+ t.allocSite,
+ t.fnDefined,
+ preds);
+ out.taints.add(tOut);
}
- out = (TaintSet) Canonical.makeCanonical( out );
+ out = (TaintSet) Canonical.makeCanonical(out);
return out;
}
public TaintSet add(Taint t) {
return Canonical.addPTR(this, t);
/* TaintSet newt=new TaintSet();
- newt.taints.addAll(taints);
- newt.taints.add(t);
- return (TaintSet) Canonical.makeCanonical(newt);*/
+ newt.taints.addAll(taints);
+ newt.taints.add(t);
+ return (TaintSet) Canonical.makeCanonical(newt);*/
}
public TaintSet merge(TaintSet ts) {
return Canonical.unionPTR(this, ts);
/* TaintSet newt=new TaintSet();
- newt.taints.addAll(taints);
- newt.taints.addAll(ts.taints);
- return (TaintSet) Canonical.makeCanonical(newt);*/
+ newt.taints.addAll(taints);
+ newt.taints.addAll(ts.taints);
+ return (TaintSet) Canonical.makeCanonical(newt);*/
}
protected TaintSet() {
return taints.isEmpty();
}
- public Taint containsIgnorePreds( Taint t ) {
+ public Taint containsIgnorePreds(Taint t) {
assert t != null;
Iterator<Taint> tItr = taints.iterator();
while( tItr.hasNext() ) {
Taint tThis = tItr.next();
- if( tThis.equalsIgnorePreds( t ) ) {
- return tThis;
+ if( tThis.equalsIgnorePreds(t) ) {
+ return tThis;
}
}
return null;
}
- public boolean equalsSpecific( Object o ) {
+ public boolean equalsSpecific(Object o) {
if( o == null ) {
return false;
}
}
TaintSet ts = (TaintSet) o;
- return taints.equals( ts.taints );
+ return taints.equals(ts.taints);
}
-
+
public int hashCodeSpecific() {
return taints.hashCode();
}
s += t.toString();
if( tItr.hasNext() ) {
- s += ",\\n";
+ s += ",\\n";
}
}
s += "]";
return s;
}
-
+
public String toString() {
return taints.toString();
}
public class VariableNode extends RefSrcNode {
protected TempDescriptor td;
- public VariableNode( TempDescriptor td ) {
+ public VariableNode(TempDescriptor td) {
this.td = td;
}
return td;
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
}
private void graphTasks() throws java.io.IOException {
- for(Iterator it_tasks=state.getTaskSymbolTable().getDescriptorsIterator(); it_tasks.hasNext();) {
+ for(Iterator it_tasks=state.getTaskSymbolTable().getDescriptorsIterator(); it_tasks.hasNext(); ) {
TaskDescriptor td = (TaskDescriptor)it_tasks.next();
FlatMethod fm = state.getMethodFlat(td);
writeFlatIRGraph(fm,"task"+td.getSymbol());
}
private void graphMethods() throws java.io.IOException {
- for(Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator(); it_classes.hasNext();) {
+ for(Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator(); it_classes.hasNext(); ) {
ClassDescriptor cd = (ClassDescriptor)it_classes.next();
- for(Iterator it_methods=cd.getMethods(); it_methods.hasNext();) {
+ for(Iterator it_methods=cd.getMethods(); it_methods.hasNext(); ) {
MethodDescriptor md = (MethodDescriptor)it_methods.next();
FlatMethod fm = state.getMethodFlat(md);
writeFlatIRGraph(fm,cd.getSymbol()+"."+md.getSymbol());
public Set<FlatNode> getTargets() {
HashSet<FlatNode> targets=new HashSet<FlatNode>();
Collection<Set<FlatNode>> groups=groupmap.values();
- for(Iterator<Set<FlatNode>> setit=groups.iterator();setit.hasNext();) {
+ for(Iterator<Set<FlatNode>> setit=groups.iterator(); setit.hasNext(); ) {
Set<FlatNode> group=setit.next();
targets.addAll(table.get(group));
}
} else {
Vector<FlatNode> exits=getJumps(fn);
output.println("switch(RESTOREBRANCH()) {");
- for(int i=0;i<exits.size();i++) {
+ for(int i=0; i<exits.size(); i++) {
output.println("case "+i+":");
output.println("goto L"+nodetolabels.get(exits.get(i))+";");
}
fnmap=computeMap(transset, nodeset, storeset);
groupmap=new Hashtable<FlatNode, Set<FlatNode>>();
- for(Iterator<FlatNode> fnit=transset.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=transset.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
if ((fn.numNext()>1&&storeset.contains(fn))||fn.kind()==FKind.FlatBackEdge||fn.kind()==FKind.FlatNop) {
FlatNode[] children=fnmap.get(fn);
groupmap.put(fn, new HashSet<FlatNode>());
groupmap.get(fn).add(fn);
}
- for(int i=0;i<children.length;i++) {
+ for(int i=0; i<children.length; i++) {
FlatNode child=children[i];
if ((child.numNext()>1&&storeset.contains(child))||child.kind()==FKind.FlatBackEdge||child.kind()==FKind.FlatNop) {
mergegroups(fn, child, groupmap);
}
//now we have groupings...
Collection<Set<FlatNode>> groups=groupmap.values();
- for(Iterator<Set<FlatNode>> setit=groups.iterator();setit.hasNext();) {
+ for(Iterator<Set<FlatNode>> setit=groups.iterator(); setit.hasNext(); ) {
Set<FlatNode> group=setit.next();
Vector<FlatNode> exits=new Vector<FlatNode>();
table.put(group, exits);
- for(Iterator<FlatNode> fnit=group.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=group.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
FlatNode[] nextnodes=fnmap.get(fn);
- for(int i=0;i<nextnodes.length;i++) {
+ for(int i=0; i<nextnodes.length; i++) {
FlatNode nextnode=nextnodes[i];
if (!group.contains(nextnode)) {
//outside edge
}
if (groupmap.get(fn1)!=groupmap.get(fn2)) {
groupmap.get(fn1).addAll(groupmap.get(fn2));
- for(Iterator<FlatNode> fnit=groupmap.get(fn2).iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=groupmap.get(fn2).iterator(); fnit.hasNext(); ) {
FlatNode fn3=fnit.next();
groupmap.put(fn3, groupmap.get(fn1));
}
toprocess.remove(fn);
Set<Object[]> incomingtuples=new HashSet<Object[]>();
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
FlatNode fprev=fn.getPrev(i);
if (nodeset.contains(fprev)||storeset.contains(fprev)) {
- for(int j=0;j<fprev.numNext();j++) {
+ for(int j=0; j<fprev.numNext(); j++) {
if (fprev.getNext(j)==fn) {
Object[] pair=new Object[2];
- pair[0]=new Integer(j);pair[1]=fprev;
+ pair[0]=new Integer(j); pair[1]=fprev;
incomingtuples.add(pair);
}
}
if (nodeset.contains(fn)||storeset.contains(fn)||fn.kind()==FKind.FlatAtomicExitNode) {
//nodeset contains this node
- for(Iterator<Object[]> it=incomingtuples.iterator();it.hasNext();) {
+ for(Iterator<Object[]> it=incomingtuples.iterator(); it.hasNext(); ) {
Object[] pair=it.next();
int index=((Integer)pair[0]).intValue();
FlatNode node=(FlatNode)pair[1];
//add if we need to update
if (!fntotuple.containsKey(fn)||
- !fntotuple.get(fn).equals(incomingtuples)) {
+ !fntotuple.get(fn).equals(incomingtuples)) {
fntotuple.put(fn,incomingtuples);
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
if (transset.contains(fn.getNext(i)))
toprocess.add(fn.getNext(i));
}
this.typeanalysis=typeanalysis;
this.gft=gft;
Set<LocalityBinding> localityset=locality.getLocalityBindings();
- for(Iterator<LocalityBinding> lbit=localityset.iterator();lbit.hasNext();) {
+ for(Iterator<LocalityBinding> lbit=localityset.iterator(); lbit.hasNext(); ) {
processlb(lbit.next());
}
}
-
+
Hashtable<LocalityBinding, Set<FlatNode>> transmap=new Hashtable<LocalityBinding, Set<FlatNode>>();
Hashtable<LocalityBinding, Set<FlatNode>> recordmap=new Hashtable<LocalityBinding, Set<FlatNode>>();
Hashtable<LocalityBinding, Set<FlatNode>> othermap=new Hashtable<LocalityBinding, Set<FlatNode>>();
Hashtable<LocalityBinding, HashSet<FlatNode>> cannotdelaymap=new Hashtable<LocalityBinding, HashSet<FlatNode>>();
Hashtable<LocalityBinding, Set<FlatNode>> derefmap=new Hashtable<LocalityBinding, Set<FlatNode>>();
Hashtable<LocalityBinding, Set<FlatNode>> convmap=new Hashtable<LocalityBinding, Set<FlatNode>>();
-
+
/**
 * Builds and runs a fresh {@code DiscoverConflicts} pass over the current
 * locality/type information, constrained by the cannot-delay map computed
 * by this analysis.
 *
 * @return the completed conflict-discovery analysis (doAnalysis already run)
 */
public DiscoverConflicts getConflicts() {
  // NOTE(review): the two false flags and the READSET-gated gft argument
  // mirror the DiscoverConflicts constructor contract — confirm against
  // its declaration if that signature changes.
  DiscoverConflicts analysis =
    new DiscoverConflicts(locality, state, typeanalysis, cannotdelaymap,
                          false, false, state.READSET ? gft : null);
  analysis.doAnalysis();
  return analysis;
}
-
+
/**
 * Accessor for the per-locality-binding set of flat nodes that cannot be
 * delayed past the transaction commit point.
 *
 * @return the cannot-delay map owned by this analysis (live reference,
 *         not a copy)
 */
public Hashtable<LocalityBinding, HashSet<FlatNode>> getCannotDelayMap() {
  return this.cannotdelaymap;
}
convmap.put(lb, convset);
if (lb.isAtomic()||!lb.getHasAtomic())
return;
-
+
Set<FlatNode> recordset=delaycomp.livecode(lb);
Set<FlatNode> cannotdelay=delaycomp.getCannotDelay(lb);
Set<FlatNode> otherset=delaycomp.getOther(lb);
FlatMethod fm=state.getMethodFlat(lb.getMethod());
- for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
if (fn.kind()==FKind.FlatAtomicEnterNode&&
- locality.getAtomic(lb).get(fn.getPrev(0)).intValue()==0) {
+ locality.getAtomic(lb).get(fn.getPrev(0)).intValue()==0) {
Set<FlatNode> transSet=computeTrans(lb, fn);
Set<FlatNode> tCheckSet=intersect(checkset, transSet);
Set<FlatNode> tRecordSet=intersect(recordset, transSet);
Set<FlatNode> tNotReadySet=intersect(notreadyset, transSet);
HashSet<FlatNode> tCannotDelay=intersect(cannotdelay, transSet);
Set<FlatNode> tderef=(state.STMARRAY&&!state.DUALVIEW)?intersect(derefset, transSet):null;
-
+
if (checkSet(fn, tCheckSet, tRecordSet, lb)) {
//We will convert this one
nrecordset.addAll(tRecordSet);
return false;
DiscoverConflicts dc=delaycomp.getConflicts();
- for(Iterator<FlatNode> fnit=checkset.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=checkset.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
//needs transread
if (!state.READSET&&dc.getNeedTrans(lb, fn)||state.READSET&&dc.getNeedWriteTrans(lb, fn)||fn.kind()==FKind.FlatCall) {
transSet.add(fn);
if (locality.getAtomic(lb).get(fn).intValue()==0)
continue;
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
if (!transSet.contains(fn.getNext(i)))
toProcess.add(fn.getNext(i));
}
//compute cannotdelaymap
Set<LocalityBinding> localityset=locality.getLocalityBindings();
- for(Iterator<LocalityBinding> lbit=localityset.iterator();lbit.hasNext();) {
+ for(Iterator<LocalityBinding> lbit=localityset.iterator(); lbit.hasNext(); ) {
analyzeMethod(lbit.next());
}
dcopts.doAnalysis();
- for(Iterator<LocalityBinding> lbit=localityset.iterator();lbit.hasNext();) {
+ for(Iterator<LocalityBinding> lbit=localityset.iterator(); lbit.hasNext(); ) {
LocalityBinding lb=lbit.next();
MethodDescriptor md=lb.getMethod();
FlatMethod fm=state.getMethodFlat(md);
if (lb.isAtomic())
continue;
-
+
if (lb.getHasAtomic()) {
HashSet<FlatNode> cannotdelay=cannotdelaymap.get(lb);
HashSet<FlatNode> notreadyset=computeNotReadySet(lb, cannotdelay);
otherset.removeAll(cannotdelay);
if (state.MINIMIZE) {
Hashtable<FlatNode, Integer> atomicmap=locality.getAtomic(lb);
- for(Iterator<FlatNode> fnit=otherset.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=otherset.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
if (atomicmap.get(fn).intValue()>0&&
- fn.kind()!=FKind.FlatAtomicEnterNode&&
- fn.kind()!=FKind.FlatGlobalConvNode) {
+ fn.kind()!=FKind.FlatAtomicEnterNode&&
+ fn.kind()!=FKind.FlatGlobalConvNode) {
//remove non-atomic flatnodes
fnit.remove();
notreadyset.add(fn);
}
}
}
-
+
notreadymap.put(lb, notreadyset);
othermap.put(lb, otherset);
}
-
+
//We now have:
//(1) Cannot delay set -- stuff that must be done before commit
//(2) Not ready set -- stuff that must wait until commit
Set<FlatNode> storeset=livecode(lb);
HashSet<FlatNode> delayedset=getNotReady(lb);
Hashtable<FlatNode, Hashtable<TempDescriptor, Set<TempFlatPair>>> fnmap=dcopts.getMap(lb);
- for(Iterator<FlatNode> fnit=delayedset.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=delayedset.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
Hashtable<TempDescriptor, Set<TempFlatPair>> tempmap=fnmap.get(fn);
if (fn.kind()==FKind.FlatSetElementNode) {
FlatSetElementNode fsen=(FlatSetElementNode) fn;
Set<TempFlatPair> tfpset=tempmap.get(fsen.getDst());
if (tfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (storeset.contains(tfp.f))
writeset.add(tfp.f);
FlatSetFieldNode fsfn=(FlatSetFieldNode) fn;
Set<TempFlatPair> tfpset=tempmap.get(fsfn.getDst());
if (tfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (storeset.contains(tfp.f))
writeset.add(tfp.f);
//make it just this transaction
secondpart.retainAll(atomicnodes);
-
+
HashSet<TempDescriptor> tempset=new HashSet<TempDescriptor>();
-
- for(Iterator<FlatNode> fnit=secondpart.iterator();fnit.hasNext();) {
+
+ for(Iterator<FlatNode> fnit=secondpart.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
List<TempDescriptor> writes=Arrays.asList(fn.writesTemps());
tempset.addAll(writes);
tempset.addAll(reads);
}
}
-
+
return tempset;
}
secondpart.retainAll(atomicnodes);
Set<TempDescriptor> liveinto=new HashSet<TempDescriptor>();
-
+
Hashtable<FlatNode, Hashtable<TempDescriptor, Set<FlatNode>>> reachingdefs=ReachingDefs.computeReachingDefs(fm, Liveness.computeLiveTemps(fm), true);
-
- for(Iterator<FlatNode> fnit=secondpart.iterator();fnit.hasNext();) {
+
+ for(Iterator<FlatNode> fnit=secondpart.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
if (recordset.contains(fn))
continue;
TempDescriptor readset[]=fn.readsTemps();
- for(int i=0;i<readset.length;i++) {
+ for(int i=0; i<readset.length; i++) {
TempDescriptor rtmp=readset[i];
Set<FlatNode> fnset=reachingdefs.get(fn).get(rtmp);
- for(Iterator<FlatNode> fnit2=fnset.iterator();fnit2.hasNext();) {
+ for(Iterator<FlatNode> fnit2=fnset.iterator(); fnit2.hasNext(); ) {
FlatNode fn2=fnit2.next();
if (secondpart.contains(fn2))
continue;
return liveinto;
}
- //This method computes which temps are live out of the second part
+ //This method computes which temps are live out of the second part
public Set<TempDescriptor> liveoutvirtualread(LocalityBinding lb, FlatAtomicEnterNode faen) {
MethodDescriptor md=lb.getMethod();
FlatMethod fm=state.getMethodFlat(md);
Set<TempDescriptor> liveset=new HashSet<TempDescriptor>();
//Have list of all live temps
- for(Iterator<FlatNode> fnit=exits.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=exits.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
Set<TempDescriptor> tempset=livemap.get(fn);
Hashtable<TempDescriptor, Set<FlatNode>> reachmap=reachingdefs.get(fn);
//Look for reaching defs for all live variables that are in the secondpart
- for(Iterator<TempDescriptor> tmpit=tempset.iterator();tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=tempset.iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
Set<FlatNode> fnset=reachmap.get(tmp);
boolean outsidenode=false;
boolean insidenode=false;
- for(Iterator<FlatNode> fnit2=fnset.iterator();fnit2.hasNext();) {
+ for(Iterator<FlatNode> fnit2=fnset.iterator(); fnit2.hasNext(); ) {
FlatNode fn2=fnit2.next();
if (secondpart.contains(fn2)) {
insidenode=true;
Set<FlatNode> exits=faen.getExits();
Hashtable<FlatNode, Set<TempDescriptor>> livemap=Liveness.computeLiveTemps(fm);
Hashtable<FlatNode, Hashtable<TempDescriptor, Set<FlatNode>>> reachingdefs=ReachingDefs.computeReachingDefs(fm, livemap, true);
-
+
Set<FlatNode> atomicnodes=faen.getReachableSet(faen.getExits());
Set<FlatNode> secondpart=new HashSet<FlatNode>(getNotReady(lb));
Set<TempDescriptor> liveset=new HashSet<TempDescriptor>();
//Have list of all live temps
- for(Iterator<FlatNode> fnit=exits.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=exits.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
Set<TempDescriptor> tempset=livemap.get(fn);
Hashtable<TempDescriptor, Set<FlatNode>> reachmap=reachingdefs.get(fn);
//Look for reaching defs for all live variables that are in the secondpart
- for(Iterator<TempDescriptor> tmpit=tempset.iterator();tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=tempset.iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
Set<FlatNode> fnset=reachmap.get(tmp);
if (fnset==null) {
System.out.println("null temp set for"+fn+" tmp="+tmp);
System.out.println(fm.printMethod());
}
- for(Iterator<FlatNode> fnit2=fnset.iterator();fnit2.hasNext();) {
+ for(Iterator<FlatNode> fnit2=fnset.iterator(); fnit2.hasNext(); ) {
FlatNode fn2=fnit2.next();
if (secondpart.contains(fn2)) {
liveset.add(tmp);
HashSet<FlatNode> delayedset=notreadymap.get(lb);
HashSet<FlatNode> derefset=null;
- if (state.STMARRAY&&!state.DUALVIEW)
+ if (state.STMARRAY&&!state.DUALVIEW)
derefset=derefmap.get(lb);
HashSet<FlatNode> otherset=othermap.get(lb);
HashSet<FlatNode> cannotdelayset=cannotdelaymap.get(lb);
//If both parts can contribute to the temp, then we need to do
//reads to make sure that liveout set has the right values
- for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
if (fn.kind()==FKind.FlatAtomicExitNode) {
Set<TempDescriptor> livetemps=livemap.get(fn);
Hashtable<TempDescriptor, Set<FlatNode>> tempmap=reachingdefsmap.get(fn);
//Iterate over the temps that are live into this node
- for(Iterator<TempDescriptor> tmpit=livetemps.iterator();tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=livetemps.iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
Set<FlatNode> fnset=tempmap.get(tmp);
boolean inpart1=false;
boolean inpart2=false;
//iterate over the reaching definitions for the temp
- for(Iterator<FlatNode> fnit2=fnset.iterator();fnit2.hasNext();) {
+ for(Iterator<FlatNode> fnit2=fnset.iterator(); fnit2.hasNext(); ) {
FlatNode fn2=fnit2.next();
if (delayedset.contains(fn2)) {
inpart2=true;
}
}
if (inpart1&&inpart2) {
- for(Iterator<FlatNode> fnit2=fnset.iterator();fnit2.hasNext();) {
+ for(Iterator<FlatNode> fnit2=fnset.iterator(); fnit2.hasNext(); ) {
FlatNode fn2=fnit2.next();
if ((otherset.contains(fn2)||cannotdelayset.contains(fn2))&&
- locality.getAtomic(lb).get(fn2).intValue()>0) {
+ locality.getAtomic(lb).get(fn2).intValue()>0) {
unionset.add(fn2);
livenodes.add(fn2);
}
}
}
}
-
+
HashSet<FlatNode> toanalyze=new HashSet<FlatNode>();
toanalyze.add(fm);
if (!map.containsKey(fn)) {
map.put(fn, new Hashtable<TempDescriptor, HashSet<FlatNode>>());
//enqueue next nodes
- for(int i=0;i<fn.numNext();i++)
+ for(int i=0; i<fn.numNext(); i++)
toanalyze.add(fn.getNext(i));
}
continue;
Set<TempDescriptor> liveset=livemap.get(fn);
//Do merge on incoming edges
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
FlatNode fnprev=fn.getPrev(i);
Hashtable<TempDescriptor, HashSet<FlatNode>> prevmap=map.get(fnprev);
if (prevmap!=null)
- for(Iterator<TempDescriptor> tmpit=prevmap.keySet().iterator();tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=prevmap.keySet().iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
if (!liveset.contains(tmp)) //skip dead temps
continue;
} else {
//If the node is in the second set, check our readset
TempDescriptor readset[]=fn.readsTemps();
- for(int i=0;i<readset.length;i++) {
+ for(int i=0; i<readset.length; i++) {
TempDescriptor tmp=readset[i];
if (tmptofn.containsKey(tmp)) {
livenodes.addAll(tmptofn.get(tmp)); //Add live nodes
}
//Do kills
TempDescriptor writeset[]=fn.writesTemps();
- for(int i=0;i<writeset.length;i++) {
+ for(int i=0; i<writeset.length; i++) {
TempDescriptor tmp=writeset[i];
tmptofn.remove(tmp);
}
//If the node is in the first set, search over what we write
//We write -- our reads are done
TempDescriptor writeset[]=fn.writesTemps();
- for(int i=0;i<writeset.length;i++) {
+ for(int i=0; i<writeset.length; i++) {
TempDescriptor tmp=writeset[i];
HashSet<FlatNode> set=new HashSet<FlatNode>();
tmptofn.put(tmp,set);
}
if (fn.numNext()>1) {
Set<FlatNode> branchset=branchmap.get((FlatCondBranch)fn);
- for(Iterator<FlatNode> brit=branchset.iterator();brit.hasNext();) {
+ for(Iterator<FlatNode> brit=branchset.iterator(); brit.hasNext(); ) {
FlatNode brfn=brit.next();
if (unionset.contains(brfn)) {
//This branch is important--need to remember how it goes
livenodes.add(fn);
- unionset.add(fn);
+ unionset.add(fn);
}
}
}
if (!map.containsKey(fn)||!map.get(fn).equals(tmptofn)) {
map.put(fn, tmptofn);
//enqueue next ndoes
- for(int i=0;i<fn.numNext();i++)
+ for(int i=0; i<fn.numNext(); i++)
toanalyze.add(fn.getNext(i));
}
}
public static Set<FlatNode> getNext(FlatNode fn, int i, Set<FlatNode> delayset, LocalityBinding lb, LocalityAnalysis locality, boolean contpastnode) {
Hashtable<FlatNode, Integer> atomictable=locality.getAtomic(lb);
FlatNode fnnext=fn.getNext(i);
- HashSet<FlatNode> reachable=new HashSet<FlatNode>();
+ HashSet<FlatNode> reachable=new HashSet<FlatNode>();
if (delayset.contains(fnnext)||atomictable.get(fnnext).intValue()==0) {
reachable.add(fnnext);
if (visited.contains(fn2))
continue;
visited.add(fn2);
- for (int j=0;j<fn2.numNext();j++) {
+ for (int j=0; j<fn2.numNext(); j++) {
FlatNode fn2next=fn2.getNext(j);
if (delayset.contains(fn2next)||atomictable.get(fn2next).intValue()==0) {
reachable.add(fn2next);
Hashtable<FlatNode, HashSet<TypeDescriptor>> nodelayarrayswr=new Hashtable<FlatNode, HashSet<TypeDescriptor>>();
Hashtable<FlatNode, HashSet<FieldDescriptor>> nodelayfieldsrd=new Hashtable<FlatNode, HashSet<FieldDescriptor>>();
Hashtable<FlatNode, HashSet<TypeDescriptor>> nodelayarraysrd=new Hashtable<FlatNode, HashSet<TypeDescriptor>>();
-
- Hashtable<FlatCondBranch, Set<FlatNode>> revbranchmap=revGetBranchSet(lb);
+
+ Hashtable<FlatCondBranch, Set<FlatNode>> revbranchmap=revGetBranchSet(lb);
Hashtable<FlatNode, Set<FlatCondBranch>> branchmap=getBranchSet(lb);
//Effect of adding something to nodelay set is to move it up past everything in delay set
//Have to make sure we can do this commute
while(!toanalyze.isEmpty()) {
FlatNode fn=toanalyze.iterator().next();
toanalyze.remove(fn);
-
+
boolean isatomic=atomictable.get(fn).intValue()>0;
if (!isatomic)
HashSet<TypeDescriptor> nodelayarraywrset=new HashSet<TypeDescriptor>();
HashSet<FieldDescriptor> nodelayfieldrdset=new HashSet<FieldDescriptor>();
HashSet<TypeDescriptor> nodelayarrayrdset=new HashSet<TypeDescriptor>();
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
if (nodelaytemps.containsKey(fn.getNext(i)))
nodelaytempset.addAll(nodelaytemps.get(fn.getNext(i)));
//do field/array write sets
if (nodelayfieldswr.containsKey(fn.getNext(i)))
- nodelayfieldwrset.addAll(nodelayfieldswr.get(fn.getNext(i)));
+ nodelayfieldwrset.addAll(nodelayfieldswr.get(fn.getNext(i)));
if (nodelayarrayswr.containsKey(fn.getNext(i)))
- nodelayarraywrset.addAll(nodelayarrayswr.get(fn.getNext(i)));
+ nodelayarraywrset.addAll(nodelayarrayswr.get(fn.getNext(i)));
//do read sets
if (nodelayfieldsrd.containsKey(fn.getNext(i)))
- nodelayfieldrdset.addAll(nodelayfieldsrd.get(fn.getNext(i)));
+ nodelayfieldrdset.addAll(nodelayfieldsrd.get(fn.getNext(i)));
if (nodelayarraysrd.containsKey(fn.getNext(i)))
- nodelayarrayrdset.addAll(nodelayarraysrd.get(fn.getNext(i)));
+ nodelayarrayrdset.addAll(nodelayarraysrd.get(fn.getNext(i)));
}
-
+
/* Check our temp write set */
TempDescriptor writeset[]=fn.writesTemps();
- for(int i=0;i<writeset.length;i++) {
+ for(int i=0; i<writeset.length; i++) {
TempDescriptor tmp=writeset[i];
if (nodelaytempset.contains(tmp)) {
//We are writing to a nodelay temp
nodelaytempset.remove(tmp);
}
}
-
+
//See if flatnode is definitely no delay
if (fn.kind()==FKind.FlatCall) {
FlatCall fcall=(FlatCall)fn;
(!mdcall.getSymbol().equals("println")&&!mdcall.getSymbol().equals("printString")))
isnodelay=true;
}
-
+
//Delay branches if possible
if (fn.kind()==FKind.FlatCondBranch) {
Set<FlatNode> branchset=revbranchmap.get((FlatCondBranch)fn);
- for(Iterator<FlatNode> brit=branchset.iterator();brit.hasNext();) {
+ for(Iterator<FlatNode> brit=branchset.iterator(); brit.hasNext(); ) {
FlatNode branchnode=brit.next();
if (cannotdelay.contains(branchnode)||(state.STMARRAY&&!state.DUALVIEW&&derefset.contains(branchnode))) {
isnodelay=true;
//write conflicts
if (nodelayfieldwrset.contains(fd))
isnodelay=true;
- //read
+ //read
if (nodelayfieldrdset.contains(fd))
isnodelay=true;
}
if (nodelayarraywrset.contains(td))
isnodelay=true;
}
-
+
//If we are no delay, then the temps we read are no delay
if (isnodelay) {
/* Add our read set */
TempDescriptor readset[]=fn.readsTemps();
- for(int i=0;i<readset.length;i++) {
+ for(int i=0; i<readset.length; i++) {
TempDescriptor tmp=readset[i];
nodelaytempset.add(tmp);
}
if (branchmap.containsKey(fn)) {
Set<FlatCondBranch> fcbset=branchmap.get(fn);
- for(Iterator<FlatCondBranch> fcbit=fcbset.iterator();fcbit.hasNext();) {
+ for(Iterator<FlatCondBranch> fcbit=fcbset.iterator(); fcbit.hasNext(); ) {
FlatCondBranch fcb=fcbit.next();
//enqueue flatcondbranch node for reanalysis
if (!cannotdelay.contains(fcb)) {
/* Do we write to arrays */
if (fn.kind()==FKind.FlatSetElementNode) {
//have to do expansion
- nodelayarraywrset.addAll(typeanalysis.expand(((FlatSetElementNode)fn).getDst().getType()));
+ nodelayarraywrset.addAll(typeanalysis.expand(((FlatSetElementNode)fn).getDst().getType()));
}
/* Do we read from arrays */
if (fn.kind()==FKind.FlatElementNode) {
}
break;
}
+
case FKind.FlatSetElementNode: {
FlatSetElementNode fsen=(FlatSetElementNode)fn;
if (oldtemps.contains(fsen.getDst())) {
}
break;
}
+
case FKind.FlatFieldNode: {
FlatFieldNode ffn=(FlatFieldNode)fn;
if (oldtemps.contains(ffn.getSrc())&&
}
break;
}
+
case FKind.FlatElementNode: {
FlatElementNode fen=(FlatElementNode)fn;
if (oldtemps.contains(fen.getSrc())&&
}
}
}
-
+
boolean changed=false;
//See if we need to propagate changes
if (!nodelaytemps.containsKey(fn)||
- !nodelaytemps.get(fn).equals(nodelaytempset)) {
+ !nodelaytemps.get(fn).equals(nodelaytempset)) {
nodelaytemps.put(fn, nodelaytempset);
changed=true;
}
//See if we need to propagate changes
if (!nodelayfieldswr.containsKey(fn)||
- !nodelayfieldswr.get(fn).equals(nodelayfieldwrset)) {
+ !nodelayfieldswr.get(fn).equals(nodelayfieldwrset)) {
nodelayfieldswr.put(fn, nodelayfieldwrset);
changed=true;
}
//See if we need to propagate changes
if (!nodelayfieldsrd.containsKey(fn)||
- !nodelayfieldsrd.get(fn).equals(nodelayfieldrdset)) {
+ !nodelayfieldsrd.get(fn).equals(nodelayfieldrdset)) {
nodelayfieldsrd.put(fn, nodelayfieldrdset);
changed=true;
}
//See if we need to propagate changes
if (!nodelayarrayswr.containsKey(fn)||
- !nodelayarrayswr.get(fn).equals(nodelayarraywrset)) {
+ !nodelayarrayswr.get(fn).equals(nodelayarraywrset)) {
nodelayarrayswr.put(fn, nodelayarraywrset);
changed=true;
}
//See if we need to propagate changes
if (!nodelayarraysrd.containsKey(fn)||
- !nodelayarraysrd.get(fn).equals(nodelayarrayrdset)) {
+ !nodelayarraysrd.get(fn).equals(nodelayarrayrdset)) {
nodelayarraysrd.put(fn, nodelayarrayrdset);
changed=true;
}
if (changed)
- for(int i=0;i<fn.numPrev();i++)
+ for(int i=0; i<fn.numPrev(); i++)
toanalyze.add(fn.getPrev(i));
- }//end of while loop
+ } //end of while loop
if (lb.getHasAtomic()) {
if (state.STMARRAY&&!state.DUALVIEW)
//Compute initial notready set
HashSet<TempDescriptor> notreadyset=new HashSet<TempDescriptor>();
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
if (notreadymap.containsKey(fn.getPrev(i)))
notreadyset.addAll(notreadymap.get(fn.getPrev(i)));
}
-
+
//Are we ready
boolean notready=false;
//Test our read set first
TempDescriptor readset[]=fn.readsTemps();
- for(int i=0;i<readset.length;i++) {
+ for(int i=0; i<readset.length; i++) {
TempDescriptor tmp=readset[i];
if (notreadyset.contains(tmp)) {
notready=true;
TempDescriptor tmp=ffn.getSrc();
Set<TempFlatPair> tfpset=dcopts.getMap(lb).get(fn).get(tmp);
if (tfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (!dcopts.getNeedSrcTrans(lb, tfp.f)) {
//if a source didn't need a translation and we are
}
break;
}
+
case FKind.FlatSetFieldNode: {
FlatSetFieldNode fsfn=(FlatSetFieldNode)fn;
TempDescriptor tmp=fsfn.getDst();
Set<TempFlatPair> tfpset=tmpmap!=null?tmpmap.get(tmp):null;
if (tfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (!dcopts.getNeedSrcTrans(lb, tfp.f)) {
//if a source didn't need a translation and we are
}
break;
}
+
case FKind.FlatElementNode: {
FlatElementNode fen=(FlatElementNode)fn;
if (!dcopts.getArrays().contains(fen.getSrc().getType())) {
TempDescriptor tmp=fen.getSrc();
Set<TempFlatPair> tfpset=dcopts.getMap(lb).get(fn).get(tmp);
if (tfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (!dcopts.getNeedSrcTrans(lb, tfp.f)) {
//if a source didn't need a translation and we are
}
break;
}
+
case FKind.FlatSetElementNode: {
FlatSetElementNode fsen=(FlatSetElementNode)fn;
TempDescriptor tmp=fsen.getDst();
Set<TempFlatPair> tfpset=dcopts.getMap(lb).get(fn).get(tmp);
if (tfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (!dcopts.getNeedSrcTrans(lb, tfp.f)) {
//if a source didn't need a translation and we are
//See if we depend on a conditional branch that is not ready
Set<FlatCondBranch> branchset=branchmap.get(fn);
if (branchset!=null)
- for(Iterator<FlatCondBranch> branchit=branchset.iterator();branchit.hasNext();) {
+ for(Iterator<FlatCondBranch> branchit=branchset.iterator(); branchit.hasNext(); ) {
FlatCondBranch fcb=branchit.next();
if (notreadynodes.contains(fcb)) {
//if we depend on a branch that isn't ready, we aren't ready
//Add our writes
TempDescriptor writeset[]=fn.writesTemps();
- for(int i=0;i<writeset.length;i++) {
+ for(int i=0; i<writeset.length; i++) {
TempDescriptor tmp=writeset[i];
notreadyset.add(tmp);
}
} else {
//Kill our writes
TempDescriptor writeset[]=fn.writesTemps();
- for(int i=0;i<writeset.length;i++) {
+ for(int i=0; i<writeset.length; i++) {
TempDescriptor tmp=writeset[i];
notreadyset.remove(tmp);
}
}
-
+
//See if we need to propagate changes
if (!notreadymap.containsKey(fn)||
- !notreadymap.get(fn).equals(notreadyset)) {
+ !notreadymap.get(fn).equals(notreadyset)) {
notreadymap.put(fn, notreadyset);
- for(int i=0;i<fn.numNext();i++)
+ for(int i=0; i<fn.numNext(); i++)
toanalyze.add(fn.getNext(i));
}
} //end of while
FlatMethod fm=state.getMethodFlat(md);
Hashtable<FlatCondBranch, Set<FlatNode>> condmap=new Hashtable<FlatCondBranch, Set<FlatNode>>();
DomTree postdt=new DomTree(fm, true);
- for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
if (fn.kind()!=FKind.FlatCondBranch)
continue;
FlatMethod fm=state.getMethodFlat(md);
Hashtable<FlatNode, Set<FlatCondBranch>> condmap=new Hashtable<FlatNode, Set<FlatCondBranch>>();
DomTree postdt=new DomTree(fm, true);
- for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
if (fn.kind()!=FKind.FlatCondBranch)
continue;
//Reverse the mapping
Set<FlatNode> fnset=computeBranchSet(lb, fcb, postdom);
- for(Iterator<FlatNode>fnit2=fnset.iterator();fnit2.hasNext();) {
+ for(Iterator<FlatNode>fnit2=fnset.iterator(); fnit2.hasNext(); ) {
FlatNode fn2=fnit2.next();
if (!condmap.containsKey(fn2))
condmap.put(fn2,new HashSet<FlatCondBranch>());
//out of transaction
if (locality.getAtomic(lb).get(fn).intValue()==0)
continue;
-
- visited.add(fn);
- for(int i=0;i<fn.numNext();i++) {
+
+ visited.add(fn);
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode fnext=fn.getNext(i);
toanalyze.add(fnext);
}
/**
 * Accessor for the set of array types this analysis has recorded.
 *
 * @return the array-type set (live reference, not a defensive copy)
 */
public Set<TypeDescriptor> getArrays() {
  return this.arrays;
}
-
+
public void doAnalysis() {
//Compute fields and arrays for all transactions. Note that we
//only look at changes to old objects
Set<LocalityBinding> localityset=locality.getLocalityBindings();
- for(Iterator<LocalityBinding> lb=localityset.iterator();lb.hasNext();) {
+ for(Iterator<LocalityBinding> lb=localityset.iterator(); lb.hasNext(); ) {
computeModified(lb.next());
}
expandTypes();
//Compute set of nodes that need transread
- for(Iterator<LocalityBinding> lb=localityset.iterator();lb.hasNext();) {
+ for(Iterator<LocalityBinding> lb=localityset.iterator(); lb.hasNext(); ) {
LocalityBinding l=lb.next();
analyzeLocality(l);
}
private void setNeedReadTrans(LocalityBinding lb) {
HashSet<FlatNode> set=new HashSet<FlatNode>();
- for(Iterator<TempFlatPair> it=transreadmap.get(lb).iterator();it.hasNext();) {
+ for(Iterator<TempFlatPair> it=transreadmap.get(lb).iterator(); it.hasNext(); ) {
TempFlatPair tfp=it.next();
set.add(tfp.f);
}
if (gft!=null) {
//need to translate write map set
set=new HashSet<FlatNode>();
- for(Iterator<TempFlatPair> it=writemap.get(lb).iterator();it.hasNext();) {
+ for(Iterator<TempFlatPair> it=writemap.get(lb).iterator(); it.hasNext(); ) {
TempFlatPair tfp=it.next();
set.add(tfp.f);
}
twritemap.put(lb, set);
}
}
-
+
private void computeneedsarrayget(LocalityBinding lb, Hashtable<FlatNode, Hashtable<TempDescriptor, Set<TempFlatPair>>> fnmap) {
// Set<FlatNode> gwriteset=(state.READSET&&gft!=null)?twritemap.get(lb):treadmap.get(lb);
if (state.READSET&&gft!=null) {
if (twritemap.get(lb).size()==0) {
getmap.put(lb, new HashSet<FlatNode>());
- return;
+ return;
}
}
Set<FlatNode> gwriteset=treadmap.get(lb);
FlatMethod fm=state.getMethodFlat(lb.getMethod());
HashSet<FlatNode> needsget=new HashSet<FlatNode>();
- for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
Hashtable<FlatNode, Integer> atomictable=locality.getAtomic(lb);
if (atomictable.get(fn).intValue()>0&&fn.kind()==FKind.FlatElementNode) {
FlatElementNode fen=(FlatElementNode)fn;
Set<TempFlatPair> tfpset=fnmap.get(fen).get(fen.getSrc());
if (tfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (gwriteset.contains(tfp.f)) {
needsget.add(fen);
//could effect.
public void expandTypes() {
Set<TypeDescriptor> expandedarrays=new HashSet<TypeDescriptor>();
- for(Iterator<TypeDescriptor> it=arrays.iterator();it.hasNext();) {
+ for(Iterator<TypeDescriptor> it=arrays.iterator(); it.hasNext(); ) {
TypeDescriptor td=it.next();
expandedarrays.addAll(typeanalysis.expand(td));
}
Hashtable<TempDescriptor, Set<TempFlatPair>> doMerge(FlatNode fn, Hashtable<FlatNode, Hashtable<TempDescriptor, Set<TempFlatPair>>> tmptofnset) {
Hashtable<TempDescriptor, Set<TempFlatPair>> table=new Hashtable<TempDescriptor, Set<TempFlatPair>>();
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
FlatNode fprev=fn.getPrev(i);
Hashtable<TempDescriptor, Set<TempFlatPair>> tabset=tmptofnset.get(fprev);
if (tabset!=null) {
- for(Iterator<TempDescriptor> tmpit=tabset.keySet().iterator();tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=tabset.keySet().iterator(); tmpit.hasNext(); ) {
TempDescriptor td=tmpit.next();
Set<TempFlatPair> fnset=tabset.get(td);
if (!table.containsKey(td))
}
return table;
}
-
+
/**
 * Looks up the flat nodes whose source operand needs a translation for the
 * given locality binding.
 *
 * @param lb the locality binding to query
 * @return the node set recorded for {@code lb}, or {@code null} if none
 *         was computed — callers should be prepared for a null result
 */
public Set<FlatNode> getNeedSrcTrans(LocalityBinding lb) {
  return this.srcmap.get(lb);
}
rightsrcmap.put(lb,rightsrctrans);
//compute writes that need translation on source
- for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
Hashtable<FlatNode, Integer> atomictable=locality.getAtomic(lb);
if (atomictable.get(fn).intValue()>0) {
Hashtable<TempDescriptor, Set<TempFlatPair>> tmap=fnmap.get(fn);
switch(fn.kind()) {
- //We might need to translate arguments to pointer comparison
-
- case FKind.FlatOpNode: {
+ //We might need to translate arguments to pointer comparison
+
+ case FKind.FlatOpNode: {
FlatOpNode fon=(FlatOpNode)fn;
if (fon.getOp().getOp()==Operation.EQUAL||
fon.getOp().getOp()==Operation.NOTEQUAL) {
Set<TempFlatPair> righttfpset=tmap.get(fon.getRight());
//handle left operand
if (lefttfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=lefttfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=lefttfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (tfset.contains(tfp)||outofscope(tfp)) {
leftsrctrans.add(fon);
}
//handle right operand
if (righttfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=righttfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=righttfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (tfset.contains(tfp)||outofscope(tfp)) {
rightsrctrans.add(fon);
break;
}
- case FKind.FlatGlobalConvNode: {
+ case FKind.FlatGlobalConvNode: {
//need to translate these if the value we read from may be a
//shadow... check this by seeing if any of the values we
//may read are in the transread set or came from our caller
Set<TempFlatPair> tfpset=tmap.get(fgcn.getSrc());
if (tfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (tfset.contains(tfp)||outofscope(tfp)) {
srctrans.add(fgcn);
break;
}
- case FKind.FlatSetFieldNode: {
+ case FKind.FlatSetFieldNode: {
//need to translate these if the value we read from may be a
//shadow... check this by seeing if any of the values we
//may read are in the transread set or came from our caller
break;
Set<TempFlatPair> tfpset=tmap.get(fsfn.getSrc());
if (tfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (tfset.contains(tfp)||outofscope(tfp)) {
srctrans.add(fsfn);
}
break;
}
- case FKind.FlatSetElementNode: {
+
+ case FKind.FlatSetElementNode: {
//need to translate these if the value we read from may be a
//shadow... check this by seeing if any of the values we
//may read are in the transread set or came from our caller
break;
Set<TempFlatPair> tfpset=tmap.get(fsen.getSrc());
if (tfpset!=null) {
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (tfset.contains(tfp)||outofscope(tfp)) {
srctrans.add(fsen);
}
break;
}
+
default:
}
}
HashSet<FlatNode> toanalyze=new HashSet<FlatNode>();
toanalyze.addAll(fm.getNodeSet());
-
+
while(!toanalyze.isEmpty()) {
FlatNode fn=toanalyze.iterator().next();
toanalyze.remove(fn);
HashSet<TypeDescriptor> updatetypeset=new HashSet<TypeDescriptor>();
HashSet<FieldDescriptor> updatefieldset=new HashSet<FieldDescriptor>();
-
+
//Stop if we aren't in a transaction
if (atomictable.get(fn).intValue()==0)
continue;
-
+
//Do merge of all exits
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode fnnext=fn.getNext(i);
if (updatedtypemap.containsKey(fnnext)) {
updatetypeset.addAll(updatedtypemap.get(fnnext));
updatefieldset.addAll(updatedfieldmap.get(fnnext));
}
}
-
+
//process this node
if (cannotdelaymap!=null&&cannotdelaymap.containsKey(lb)&&cannotdelaymap.get(lb).contains(fn)!=inclusive) {
switch(fn.kind()) {
updatefieldset.add(fsfn.getField());
break;
}
+
case FKind.FlatSetElementNode: {
FlatSetElementNode fsen=(FlatSetElementNode)fn;
updatetypeset.addAll(typeanalysis.expand(fsen.getDst().getType()));
break;
}
+
case FKind.FlatCall: {
FlatCall fcall=(FlatCall)fn;
MethodDescriptor mdfc=fcall.getMethod();
-
+
//get modified fields
Set<FieldDescriptor> fields=gft.getFieldsAll(mdfc);
updatefieldset.addAll(fields);
-
+
//get modified arrays
Set<TypeDescriptor> arrays=gft.getArraysAll(mdfc);
updatetypeset.addAll(typeanalysis.expandSet(arrays));
}
}
}
-
+
if (!updatedtypemap.containsKey(fn)||!updatedfieldmap.containsKey(fn)||
- !updatedtypemap.get(fn).equals(updatetypeset)||!updatedfieldmap.get(fn).equals(updatefieldset)) {
+ !updatedtypemap.get(fn).equals(updatetypeset)||!updatedfieldmap.get(fn).equals(updatefieldset)) {
updatedtypemap.put(fn, updatetypeset);
updatedfieldmap.put(fn, updatefieldset);
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
toanalyze.add(fn.getPrev(i));
}
}
/** Need to figure out which nodes need a transread to make local
- copies. Transread conceptually tracks conflicts. This depends on
- what fields/elements are accessed We iterate over all flatnodes that
- access fields...If these accesses could conflict, we mark the source
- tempflat pair as needing a transread */
+ copies. Transread conceptually tracks conflicts. This depends on
+ what fields/elements are accessed We iterate over all flatnodes that
+ access fields...If these accesses could conflict, we mark the source
+ tempflat pair as needing a transread */
+
-
HashSet<TempFlatPair> computeTranslationSet(LocalityBinding lb, FlatMethod fm, Hashtable<FlatNode, Hashtable<TempDescriptor, Set<TempFlatPair>>> fnmap, Set<TempFlatPair> writeset) {
HashSet<TempFlatPair> tfset=new HashSet<TempFlatPair>();
computeReadOnly(lb, updatedtypemap, updatedfieldmap);
}
- for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
//Check whether this node matters for cannot delayed computation
if (cannotdelaymap!=null&&cannotdelaymap.containsKey(lb)&&cannotdelaymap.get(lb).contains(fn)==inclusive)
}
break;
}
- case FKind.FlatFieldNode: {
+
+ case FKind.FlatFieldNode: {
FlatFieldNode ffn=(FlatFieldNode)fn;
if (fields.contains(ffn.getField())) {
//this could cause conflict...figure out conflict set
}
break;
}
- case FKind.FlatSetFieldNode: {
+
+ case FKind.FlatSetFieldNode: {
//definitely need to translate these
FlatSetFieldNode fsfn=(FlatSetFieldNode)fn;
Set<TempFlatPair> tfpset=tmap.get(fsfn.getDst());
}
break;
}
- case FKind.FlatSetElementNode: {
+
+ case FKind.FlatSetElementNode: {
//definitely need to translate these
FlatSetElementNode fsen=(FlatSetElementNode)fn;
Set<TempFlatPair> tfpset=tmap.get(fsen.getDst());
}
break;
}
+
case FKind.FlatCall: //assume pessimistically that calls do bad things
case FKind.FlatReturnNode: {
- TempDescriptor []readarray=fn.readsTemps();
- for(int i=0;i<readarray.length;i++) {
+ TempDescriptor [] readarray=fn.readsTemps();
+ for(int i=0; i<readarray.length; i++) {
TempDescriptor rtmp=readarray[i];
Set<TempFlatPair> tfpset=tmap.get(rtmp);
if (tfpset!=null)
}
break;
}
+
default:
//do nothing
}
}
- }
+ }
return tfset;
}
Hashtable<FlatNode, Set<TempDescriptor>> livetemps=Liveness.computeLiveTemps(fm);
tovisit.add(fm);
discovered.add(fm);
-
+
while(!tovisit.isEmpty()) {
FlatNode fn=tovisit.iterator().next();
tovisit.remove(fn);
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode fnext=fn.getNext(i);
if (!discovered.contains(fnext)) {
discovered.add(fnext);
case FKind.FlatGlobalConvNode: {
FlatGlobalConvNode fgcn=(FlatGlobalConvNode)fn;
if (lb==fgcn.getLocality()&&
- fgcn.getMakePtr()) {
+ fgcn.getMakePtr()) {
TempDescriptor[] writes=fn.writesTemps();
- for(int i=0;i<writes.length;i++) {
+ for(int i=0; i<writes.length; i++) {
TempDescriptor wtmp=writes[i];
HashSet<TempFlatPair> set=new HashSet<TempFlatPair>();
set.add(new TempFlatPair(wtmp, fn));
}
break;
}
+
case FKind.FlatFieldNode:
case FKind.FlatElementNode: {
TempDescriptor[] writes=fn.writesTemps();
- for(int i=0;i<writes.length;i++) {
+ for(int i=0; i<writes.length; i++) {
TempDescriptor wtmp=writes[i];
HashSet<TempFlatPair> set=new HashSet<TempFlatPair>();
set.add(new TempFlatPair(wtmp, fn));
}
break;
}
+
case FKind.FlatCall:
case FKind.FlatMethod: {
TempDescriptor[] writes=fn.writesTemps();
- for(int i=0;i<writes.length;i++) {
+ for(int i=0; i<writes.length; i++) {
TempDescriptor wtmp=writes[i];
HashSet<TempFlatPair> set=new HashSet<TempFlatPair>();
set.add(new TempFlatPair(wtmp, fn));
}
break;
}
+
case FKind.FlatCastNode:
- case FKind.FlatOpNode:
+ case FKind.FlatOpNode:
if (fn.kind()==FKind.FlatCastNode) {
FlatCastNode fcn=(FlatCastNode)fn;
if (fcn.getDst().getType().isPtr()) {
break;
}
}
+
default:
//Do kill computation
TempDescriptor[] writes=fn.writesTemps();
- for(int i=0;i<writes.length;i++) {
+ for(int i=0; i<writes.length; i++) {
TempDescriptor wtmp=writes[i];
ttofn.remove(writes[i]);
}
!tmptofnset.get(fn).equals(ttofn)) {
//enqueue nodes to process
tmptofnset.put(fn, ttofn);
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode fnext=fn.getNext(i);
tovisit.add(fnext);
}
}
return tmptofnset;
}
-
+
/* See what fields and arrays transactions might modify. We only
* look at changes to old objects. */
MethodDescriptor md=lb.getMethod();
FlatMethod fm=state.getMethodFlat(md);
Hashtable<FlatNode, Set<TempDescriptor>> oldtemps=computeOldTemps(lb);
- for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
Hashtable<FlatNode, Integer> atomictable=locality.getAtomic(lb);
if (atomictable.get(fn).intValue()>0) {
if (oldtemp.contains(fsfn.getDst()))
fields.add(fsfn.getField());
break;
+
case FKind.FlatSetElementNode:
FlatSetElementNode fsen=(FlatSetElementNode) fn;
if (oldtemp.contains(fsen.getDst()))
arrays.add(fsen.getDst().getType());
break;
+
default:
}
}
}
}
-
+
//Returns a table that maps a flatnode to a set of temporaries
//This set of temporaries is old (meaning they may point to object
Hashtable<FlatNode, Set<TempDescriptor>> livetemps=Liveness.computeLiveTemps(fm);
tovisit.add(fm);
discovered.add(fm);
-
+
while(!tovisit.isEmpty()) {
FlatNode fn=tovisit.iterator().next();
tovisit.remove(fn);
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode fnext=fn.getNext(i);
if (!discovered.contains(fnext)) {
discovered.add(fnext);
//Everything live is old
Set<TempDescriptor> lives=livetemps.get(fn);
oldtemps=new HashSet<TempDescriptor>();
-
- for(Iterator<TempDescriptor> it=lives.iterator();it.hasNext();) {
+
+ for(Iterator<TempDescriptor> it=lives.iterator(); it.hasNext(); ) {
TempDescriptor tmp=it.next();
if (tmp.getType().isPtr()) {
oldtemps.add(tmp);
} else {
oldtemps=new HashSet<TempDescriptor>();
//Compute union of old temporaries
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
Set<TempDescriptor> pset=fntooldtmp.get(fn.getPrev(i));
if (pset!=null)
oldtemps.addAll(pset);
}
-
+
switch (fn.kind()) {
case FKind.FlatNew:
oldtemps.removeAll(Arrays.asList(fn.readsTemps()));
break;
+
case FKind.FlatOpNode:
- case FKind.FlatCastNode:
+ case FKind.FlatCastNode:
if (fn.kind()==FKind.FlatCastNode) {
FlatCastNode fcn=(FlatCastNode)fn;
if (fcn.getDst().getType().isPtr()) {
break;
}
}
+
default: {
TempDescriptor[] writes=fn.writesTemps();
- for(int i=0;i<writes.length;i++) {
+ for(int i=0; i<writes.length; i++) {
TempDescriptor wtemp=writes[i];
if (wtemp.getType().isPtr())
oldtemps.add(wtemp);
}
}
}
-
+
if (oldtemps!=null) {
if (!fntooldtmp.containsKey(fn)||!fntooldtmp.get(fn).equals(oldtemps)) {
fntooldtmp.put(fn, oldtemps);
//propagate changes
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode fnext=fn.getNext(i);
tovisit.add(fnext);
}
this.t=t;
this.f=f;
}
-
+
public int hashCode() {
return f.hashCode()^t.hashCode();
}
continue;
Set<TempNodePair> prevset=nodetotnpair.get(fnprev);
- for(Iterator<TempNodePair> it=prevset.iterator(); it.hasNext();) {
+ for(Iterator<TempNodePair> it=prevset.iterator(); it.hasNext(); ) {
TempNodePair tnp=it.next();
if (fn.kind()==FKind.FlatGlobalConvNode&&
((FlatGlobalConvNode)fn).getLocality()!=lb) {
((FlatGlobalConvNode)fn).getLocality()==lb) {
/*If globalconvnode, make sure we have the right
* locality. */
- for(Iterator<TempDescriptor> writeit=writes.iterator(); writeit.hasNext();) {
+ for(Iterator<TempDescriptor> writeit=writes.iterator(); writeit.hasNext(); ) {
TempDescriptor wrtmp=writeit.next();
if (state.SINGLETM) {
if (wrtmp.getType().isPtr()&&
- (nodetemptab.get(wrtmp)!=LocalityAnalysis.SCRATCH)) {
+ (nodetemptab.get(wrtmp)!=LocalityAnalysis.SCRATCH)) {
TempNodePair tnp=new TempNodePair(wrtmp);
tempset.add(tnp);
}
}
//Place Convert to Oid nodes
toprocess=fm.getNodeSet();
- for(Iterator<FlatNode> it=toprocess.iterator(); it.hasNext();) {
+ for(Iterator<FlatNode> it=toprocess.iterator(); it.hasNext(); ) {
FlatNode fn=it.next();
if (atomictab.get(fn).intValue()==0&&fn.numPrev()>0&&
atomictab.get(fn.getPrev(0)).intValue()>0) {
assert(fn.kind()==FKind.FlatAtomicExitNode);
//insert calls here...
Set<TempDescriptor> tempset=nodetoconvs2.get(fn);
- for(Iterator<TempDescriptor> tempit=tempset.iterator(); tempit.hasNext();) {
+ for(Iterator<TempDescriptor> tempit=tempset.iterator(); tempit.hasNext(); ) {
TempDescriptor tmpd=tempit.next();
FlatGlobalConvNode fgcn=new FlatGlobalConvNode(tmpd, lb, false, nodetoconvs.get(fn).contains(tmpd));
//This loop makes sure that we have accurate atomic information for the new node
- for(Iterator<LocalityBinding> lbit=locality.getMethodBindings(lb.getMethod()).iterator();lbit.hasNext();) {
+ for(Iterator<LocalityBinding> lbit=locality.getMethodBindings(lb.getMethod()).iterator(); lbit.hasNext(); ) {
LocalityBinding fixlb=lbit.next();
locality.getAtomic(fixlb).put(fgcn, locality.getAtomic(fixlb).get(fn.getPrev(0)));
locality.getNodeTempInfo(fixlb).put(fgcn, (Hashtable<TempDescriptor, Integer>)locality.getNodeTempInfo(fixlb).get(fn).clone());
}
-
+
fgcn.setAtomicEnter(((FlatAtomicExitNode)fn).getAtomicEnter());
FlatNode[] prevarray=new FlatNode[fn.numPrev()];
for(int i=0; i<readtemps.length; i++) {
TempDescriptor tmp=readtemps[i];
if (tmp.getType().isPtr()&&
- pretemptab.get(tmp).intValue()!=LocalityAnalysis.SCRATCH) {
+ pretemptab.get(tmp).intValue()!=LocalityAnalysis.SCRATCH) {
transtemps.add(tmp);
}
}
}
}
toprocess=fm.getNodeSet();
- for(Iterator<FlatNode> it=toprocess.iterator(); it.hasNext();) {
+ for(Iterator<FlatNode> it=toprocess.iterator(); it.hasNext(); ) {
FlatNode fn=it.next();
if (atomictab.get(fn).intValue()>0&&
atomictab.get(fn.getPrev(0)).intValue()==0) {
//insert calls here...
Set<TempDescriptor> tempset=nodetotranstemps.get(fn);
- for(Iterator<TempDescriptor> tempit=tempset.iterator(); tempit.hasNext();) {
+ for(Iterator<TempDescriptor> tempit=tempset.iterator(); tempit.hasNext(); ) {
FlatGlobalConvNode fgcn=new FlatGlobalConvNode(tempit.next(), lb, true);
fgcn.setAtomicEnter((FlatAtomicEnterNode)fn);
//This loop makes sure that we have accurate atomic information for the new node
- for(Iterator<LocalityBinding> lbit=locality.getMethodBindings(lb.getMethod()).iterator();lbit.hasNext();) {
+ for(Iterator<LocalityBinding> lbit=locality.getMethodBindings(lb.getMethod()).iterator(); lbit.hasNext(); ) {
LocalityBinding fixlb=lbit.next();
locality.getAtomic(fixlb).put(fgcn, locality.getAtomic(fixlb).get(fn));
locality.getNodeTempInfo(fixlb).put(fgcn, (Hashtable<TempDescriptor, Integer>)locality.getNodeTempInfo(fixlb).get(fn).clone());
lb.setGlobalThis(thistype);
}
// else
- // lb.setGlobalThis(EITHER);//default value
+ // lb.setGlobalThis(EITHER);//default value
if (discovered.containsKey(lb))
lb=discovered.get(lb);
else throw new Error();
for(int i=0; i<fn.numPrev(); i++) {
FlatNode prevnode=fn.getPrev(i);
Hashtable<TempDescriptor, Integer> prevtable=temptable.get(prevnode);
- for(Iterator<TempDescriptor> tempit=prevtable.keySet().iterator(); tempit.hasNext();) {
+ for(Iterator<TempDescriptor> tempit=prevtable.keySet().iterator(); tempit.hasNext(); ) {
TempDescriptor temp=tempit.next();
Integer tmpint=prevtable.get(temp);
- Integer oldint=currtable.containsKey(temp) ? currtable.get(temp) : (state.DSM?EITHER:STMEITHER);
+ Integer oldint=currtable.containsKey(temp)?currtable.get(temp):(state.DSM?EITHER:STMEITHER);
Integer newint=state.DSM?merge(tmpint, oldint):mergestm(tmpint, oldint);
currtable.put(temp, newint);
}
HashSet<TempDescriptor> set=new HashSet<TempDescriptor>();
Hashtable<FlatAtomicEnterNode, Set<TempDescriptor>> table=getTemps(lb);
if (table!=null)
- for(Iterator<FlatAtomicEnterNode> faenit=table.keySet().iterator(); faenit.hasNext();) {
+ for(Iterator<FlatAtomicEnterNode> faenit=table.keySet().iterator(); faenit.hasNext(); ) {
FlatAtomicEnterNode faen=faenit.next();
set.addAll(table.get(faen));
}
lbset.addAll(set);
}
}
- for(Iterator<LocalityBinding> lbit=discovered.keySet().iterator(); lbit.hasNext();) {
+ for(Iterator<LocalityBinding> lbit=discovered.keySet().iterator(); lbit.hasNext(); ) {
LocalityBinding lb=lbit.next();
if (!lbset.contains(lb)) {
lbit.remove();
public MethodDescriptor getStart() {
ClassDescriptor cd=typeutil.getClass(TypeUtil.ThreadClass);
for(Iterator methodit=cd.getMethodTable().getSet("staticStart").iterator(); methodit
-.hasNext();) {
+ .hasNext(); ) {
MethodDescriptor md=(MethodDescriptor) methodit.next();
if (md.numParameters()!=1||!md.getModifiers().isStatic()||!md.getParamType(0).getSymbol().equals(TypeUtil.ThreadClass))
- continue;
+ continue;
return md;
}
throw new Error("Can't find Thread.run");
}
-
-
+
+
private void computeLocalityBindingsSTM() {
lbmain=new LocalityBinding(typeutil.getMain(), false);
lbmain.setGlobalReturn(STMEITHER);
// Build table for initial node
Hashtable<TempDescriptor,Integer> table=new Hashtable<TempDescriptor,Integer>();
temptable.put(fm, table);
- atomictable.put(fm, lb.isAtomic() ? 1 : 0);
- int offset=md.isStatic() ? 0 : 1;
+ atomictable.put(fm, lb.isAtomic()?1:0);
+ int offset=md.isStatic()?0:1;
if (!md.isStatic()) {
table.put(fm.getParameter(0), lb.getGlobalThis());
}
TempDescriptor temp=fm.getParameter(i);
Integer b=lb.isGlobal(i-offset);
if (b!=null)
- table.put(temp,b);
+ table.put(temp,b);
}
}
Hashtable<FlatNode, Set<TempDescriptor>> livemap=Liveness.computeLiveTemps(fm);
-
+
while(!tovisit.isEmpty()) {
FlatNode fn=tovisit.iterator().next();
tovisit.remove(fn);
if (!temptable.containsKey(prevnode))
continue;
Hashtable<TempDescriptor, Integer> prevtable=temptable.get(prevnode);
- for(Iterator<TempDescriptor> tempit=prevtable.keySet().iterator(); tempit.hasNext();) {
+ for(Iterator<TempDescriptor> tempit=prevtable.keySet().iterator(); tempit.hasNext(); ) {
TempDescriptor temp=tempit.next();
if (!liveset.contains(temp))
continue;
Integer tmpint=prevtable.get(temp);
- Integer oldint=currtable.containsKey(temp) ? currtable.get(temp) : STMEITHER;
+ Integer oldint=currtable.containsKey(temp)?currtable.get(temp):STMEITHER;
Integer newint=mergestm(tmpint, oldint);
currtable.put(temp, newint);
}
}
-
+
Hashtable<TempDescriptor,Integer> oldtable=temptable.get(fn);
if (oldtable==null||!oldtable.equals(currtable)) {
// Update table for this node
assert(nodemd.getModifiers().isNative());
MethodDescriptor runmd=null;
- for(Iterator methodit=nodemd.getClassDesc().getMethodTable().getSet("staticStart").iterator(); methodit.hasNext();) {
+ for(Iterator methodit=nodemd.getClassDesc().getMethodTable().getSet("staticStart").iterator(); methodit.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor) methodit.next();
if (md.numParameters()!=1||!md.getModifiers().isStatic()||!md.getParamType(0).getSymbol().equals(TypeUtil.ThreadClass))
continue;
Integer currreturnval=STMEITHER; //Start off with the either value
if (oldtable!=null&&fc.getReturnTemp()!=null&&
- oldtable.get(fc.getReturnTemp())!=null) {
+ oldtable.get(fc.getReturnTemp())!=null) {
//ensure termination
currreturnval=mergestm(currreturnval, oldtable.get(fc.getReturnTemp()));
}
- for(Iterator methodit=methodset.iterator(); methodit.hasNext();) {
+ for(Iterator methodit=methodset.iterator(); methodit.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor) methodit.next();
boolean isnative=md.getModifiers().isNative();
Integer thistype=currtable.get(fc.getThis());
if (thistype==null)
thistype=STMEITHER;
-
+
if(thistype.equals(STMCONFLICT))
throw new Error("Using type that can be either normal or scratch in context:\n"+currlb.getExplanation());
lb.setGlobalThis(thistype);
return;
Integer srcvalue=currtable.get(fon.getLeft());
-
+
if (srcvalue==null) {
System.out.println(fon);
MethodDescriptor md=lb.getMethod();
}
currtable.put(fon.getDest(), srcvalue);
}
-
+
void processCastNodeSTM(FlatCastNode fcn, Hashtable<TempDescriptor, Integer> currtable) {
if (currtable.containsKey(fcn.getSrc()))
- currtable.put(fcn.getDst(), currtable.get(fcn.getSrc()));
+ currtable.put(fcn.getDst(), currtable.get(fcn.getSrc()));
}
void processReturnNodeSTM(LocalityBinding lb, FlatReturnNode frn, Hashtable<TempDescriptor, Integer> currtable) {
lb.setGlobalReturn(mergestm(returntype, lb.getGlobalReturn()));
}
}
-
- void processLiteralNodeSTM(FlatLiteralNode fln, Hashtable<TempDescriptor, Integer> currtable) {
+
+ void processLiteralNodeSTM(FlatLiteralNode fln, Hashtable<TempDescriptor, Integer> currtable) {
//null is either
- if (fln.getType().isNull())
- currtable.put(fln.getDst(), STMEITHER);
- else if (fln.getType().isPtr())
- currtable.put(fln.getDst(), NORMAL);
+ if (fln.getType().isNull())
+ currtable.put(fln.getDst(), STMEITHER);
+ else if (fln.getType().isPtr())
+ currtable.put(fln.getDst(), NORMAL);
}
void processElementNodeSTM(LocalityBinding lb, FlatElementNode fen, Hashtable<TempDescriptor, Integer> currtable) {
lbtovisit.add(lbexecute);
discovered.put(lbexecute, lbexecute);
if (!classtolb.containsKey(lbexecute.getMethod().getClassDesc()))
- classtolb.put(lbexecute.getMethod().getClassDesc(), new HashSet<LocalityBinding>());
+ classtolb.put(lbexecute.getMethod().getClassDesc(), new HashSet<LocalityBinding>());
classtolb.get(lbexecute.getMethod().getClassDesc()).add(lbexecute);
if (!methodtolb.containsKey(lbexecute.getMethod()))
- methodtolb.put(lbexecute.getMethod(), new HashSet<LocalityBinding>());
+ methodtolb.put(lbexecute.getMethod(), new HashSet<LocalityBinding>());
methodtolb.get(lbexecute.getMethod()).add(lbexecute);
}
// Build table for initial node
Hashtable<TempDescriptor,Integer> table=new Hashtable<TempDescriptor,Integer>();
temptable.put(fm, table);
- atomictable.put(fm, lb.isAtomic() ? 1 : 0);
- int offset=md.isStatic() ? 0 : 1;
+ atomictable.put(fm, lb.isAtomic()?1:0);
+ int offset=md.isStatic()?0:1;
if (!md.isStatic()) {
table.put(fm.getParameter(0), lb.getGlobalThis());
}
if (!temptable.containsKey(prevnode))
continue;
Hashtable<TempDescriptor, Integer> prevtable=temptable.get(prevnode);
- for(Iterator<TempDescriptor> tempit=prevtable.keySet().iterator(); tempit.hasNext();) {
+ for(Iterator<TempDescriptor> tempit=prevtable.keySet().iterator(); tempit.hasNext(); ) {
TempDescriptor temp=tempit.next();
Integer tmpint=prevtable.get(temp);
- Integer oldint=currtable.containsKey(temp) ? currtable.get(temp) : EITHER;
+ Integer oldint=currtable.containsKey(temp)?currtable.get(temp):EITHER;
Integer newint=merge(tmpint, oldint);
currtable.put(temp, newint);
}
break;
case FKind.FlatCall:
- processCallNode(lb, (FlatCall)fn, currtable, isAtomic(atomictable, fn), temptable.get(fn));
+ processCallNode(lb, (FlatCall)fn, currtable, isAtomic(atomictable, fn), temptable.get(fn));
break;
case FKind.FlatFieldNode:
return CONFLICT;
}
- void processCallNode(LocalityBinding currlb, FlatCall fc, Hashtable<TempDescriptor, Integer> currtable, boolean isatomic, Hashtable<TempDescriptor,Integer> oldtable) {
+ void processCallNode(LocalityBinding currlb, FlatCall fc, Hashtable<TempDescriptor, Integer> currtable, boolean isatomic, Hashtable<TempDescriptor,Integer> oldtable) {
MethodDescriptor nodemd=fc.getMethod();
Set methodset=null;
Set runmethodset=null;
if (nodemd.getClassDesc().getSymbol().equals(TypeUtil.ThreadClass)&&
nodemd.getSymbol().equals("start")&&!nodemd.getModifiers().isStatic()&&
nodemd.numParameters()==1&&nodemd.getParamType(0).isInt()) {
- assert(nodemd.getModifiers().isNative());
-
- MethodDescriptor runmd=null;
-
- for(Iterator methodit=nodemd.getClassDesc().getMethodTable().getSet("run").iterator(); methodit.hasNext();) {
- MethodDescriptor md=(MethodDescriptor) methodit.next();
-
- if (md.numParameters()!=0||md.getModifiers().isStatic())
- continue;
- runmd=md;
- break;
- }
- if (runmd!=null) {
- runmethodset=callgraph.getMethods(runmd,fc.getThis().getType());
- methodset.addAll(runmethodset);
- } else throw new Error("Can't find run method");
+ assert(nodemd.getModifiers().isNative());
+
+ MethodDescriptor runmd=null;
+
+ for(Iterator methodit=nodemd.getClassDesc().getMethodTable().getSet("run").iterator(); methodit.hasNext(); ) {
+ MethodDescriptor md=(MethodDescriptor) methodit.next();
+
+ if (md.numParameters()!=0||md.getModifiers().isStatic())
+ continue;
+ runmd=md;
+ break;
+ }
+ if (runmd!=null) {
+ runmethodset=callgraph.getMethods(runmd,fc.getThis().getType());
+ methodset.addAll(runmethodset);
+ } else throw new Error("Can't find run method");
}
if(state.DSMTASK) {
- if (nodemd.getClassDesc().getSymbol().equals(TypeUtil.TaskClass) &&
- nodemd.getSymbol().equals("execution") && !nodemd.getModifiers().isStatic() &&
- nodemd.numParameters() == 0) {
-
- assert(nodemd.getModifiers().isNative());
- MethodDescriptor exemd = null;
-
- for(Iterator methodit=nodemd.getClassDesc().getMethodTable().getSet("execute").iterator(); methodit.hasNext();) {
- MethodDescriptor md = (MethodDescriptor) methodit.next();
-
- if (md.numParameters() != 0 || md.getModifiers().isStatic())
- continue;
- exemd = md;
- break;
- }
-
- if (exemd != null) {
- executemethodset = callgraph.getMethods(exemd, fc.getThis().getType());
- methodset.addAll(executemethodset);
- } else throw new Error("Can't find execute method");
- }
+ if (nodemd.getClassDesc().getSymbol().equals(TypeUtil.TaskClass) &&
+ nodemd.getSymbol().equals("execution") && !nodemd.getModifiers().isStatic() &&
+ nodemd.numParameters() == 0) {
+
+ assert(nodemd.getModifiers().isNative());
+ MethodDescriptor exemd = null;
+
+ for(Iterator methodit=nodemd.getClassDesc().getMethodTable().getSet("execute").iterator(); methodit.hasNext(); ) {
+ MethodDescriptor md = (MethodDescriptor) methodit.next();
+
+ if (md.numParameters() != 0 || md.getModifiers().isStatic())
+ continue;
+ exemd = md;
+ break;
+ }
+
+ if (exemd != null) {
+ executemethodset = callgraph.getMethods(exemd, fc.getThis().getType());
+ methodset.addAll(executemethodset);
+ } else throw new Error("Can't find execute method");
+ }
}
}
Integer currreturnval=EITHER; //Start off with the either value
if (oldtable!=null&&fc.getReturnTemp()!=null&&
oldtable.get(fc.getReturnTemp())!=null) {
- //ensure termination
- currreturnval=merge(currreturnval, oldtable.get(fc.getReturnTemp()));
+ //ensure termination
+ currreturnval=merge(currreturnval, oldtable.get(fc.getReturnTemp()));
}
- for(Iterator methodit=methodset.iterator(); methodit.hasNext();) {
+ for(Iterator methodit=methodset.iterator(); methodit.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor) methodit.next();
boolean isnative=md.getModifiers().isNative();
System.out.println("Don't call native methods in atomic blocks!"+currlb.getMethod());
}
- if ((runmethodset==null||!runmethodset.contains(md)) &&( executemethodset == null || !executemethodset.contains(md))) {
+ if ((runmethodset==null||!runmethodset.contains(md)) &&( executemethodset == null || !executemethodset.contains(md))) {
//Skip this part if it is a run method or execute method
for(int i=0; i<fc.numArgs(); i++) {
TempDescriptor arg=fc.getArg(i);
throw new Error("Using type that can be either local or global in context:\n"+currlb.getExplanation());
if(runmethodset==null&&thistype.equals(GLOBAL)&&!isatomic && !isjoin && executemethodset == null) {
throw new Error("Using global object outside of transaction in context:\n"+currlb.getExplanation());
- }
+ }
if (runmethodset==null&&isnative&&thistype.equals(GLOBAL) && !isjoin && executemethodset == null && !isObjectgetType && !isObjecthashCode)
throw new Error("Potential call to native method "+md+" on global objects:\n"+currlb.getExplanation());
lb.setGlobalThis(thistype);
} else {
if (!(srctype.equals(LOCAL)||srctype.equals(EITHER))) {
throw new Error("Writing possible global reference to local object in context: \n"+lb.getExplanation());
- }
+ }
}
} else if (dsttype.equals(GLOBAL)) {
if (!transaction)
int atomic=atomictable.get(fen).intValue();
atomictable.put(fen, new Integer(atomic-1));
}
-
+
private void computeTempstoSave() {
- for(Iterator<LocalityBinding> lbit=getLocalityBindings().iterator(); lbit.hasNext();) {
+ for(Iterator<LocalityBinding> lbit=getLocalityBindings().iterator(); lbit.hasNext(); ) {
LocalityBinding lb=lbit.next();
computeTempstoSave(lb);
}
List<TempDescriptor> reads=Arrays.asList(fn.readsTemps());
List<TempDescriptor> writes=Arrays.asList(fn.writesTemps());
- for(Iterator<TempDescriptor> tempit=livetemps.iterator(); tempit.hasNext();) {
+ for(Iterator<TempDescriptor> tempit=livetemps.iterator(); tempit.hasNext(); ) {
TempDescriptor tmp=tempit.next();
if (writes.contains(tmp)) {
nodetosavetemps.get(atomicnode).add(tmp);
} else if (state.DSM) {
if (reads.contains(tmp)&&temptab.get(fn).get(tmp)==GLOBAL) {
nodetosavetemps.get(atomicnode).add(tmp);
- }
+ }
} else if (state.SINGLETM) {
if (reads.contains(tmp)&&tmp.getType().isPtr()&&temptab.get(fn).get(tmp)==NORMAL) {
nodetosavetemps.get(atomicnode).add(tmp);
- }
+ }
}
}
}
st+="[either] ";
else if (isglobalthis.equals(LocalityAnalysis.CONFLICT))
st+="[conflict] ";
- else
+ else
st+="[this="+isglobalthis+"]";
}
for(int i=0; i<isglobal.length; i++)
if (isglobal[i]!=null)
hashcode=hashcode*31+(isglobal[i].intValue());
}
- hashcode=hashcode*31+(isatomic ? 1 : 0);
+ hashcode=hashcode*31+(isatomic?1:0);
return hashcode;
}
}
HashSet<TypeDescriptor> roottypes;
Hashtable<TypeDescriptor, Set<TypeDescriptor>> transmap;
Hashtable<TypeDescriptor, Set<TypeDescriptor>> namemap;
-
+
public TypeAnalysis(LocalityAnalysis locality, State state, TypeUtil typeutil, CallGraph cg) {
this.state=state;
this.locality=locality;
roottypes=new HashSet<TypeDescriptor>();
doAnalysis();
}
-
+
/* We use locality bindings to get calleable methods. This could be
* changed to use the callgraph starting from the main method. */
void doAnalysis() {
Set<LocalityBinding> localityset=locality.getLocalityBindings();
- for(Iterator<LocalityBinding> lb=localityset.iterator();lb.hasNext();) {
+ for(Iterator<LocalityBinding> lb=localityset.iterator(); lb.hasNext(); ) {
computeTypes(lb.next().getMethod());
}
computeTrans();
computeOtherNames();
}
-
+
void computeOtherNames() {
- for(Iterator<TypeDescriptor> it=transmap.keySet().iterator();it.hasNext();) {
+ for(Iterator<TypeDescriptor> it=transmap.keySet().iterator(); it.hasNext(); ) {
TypeDescriptor td=it.next();
Set<TypeDescriptor> set=transmap.get(td);
- for(Iterator<TypeDescriptor> it2=set.iterator();it2.hasNext();) {
+ for(Iterator<TypeDescriptor> it2=set.iterator(); it2.hasNext(); ) {
TypeDescriptor type=it2.next();
if (!namemap.containsKey(type))
namemap.put(type, new HashSet<TypeDescriptor>());
}
}
}
-
+
void computeTrans() {
//Idea: for each type we want to know all of the possible types it could be called
- for(Iterator<TypeDescriptor> it=roottypes.iterator();it.hasNext();) {
+ for(Iterator<TypeDescriptor> it=roottypes.iterator(); it.hasNext(); ) {
TypeDescriptor td=it.next();
HashSet<TypeDescriptor> tovisit=new HashSet<TypeDescriptor>();
transmap.put(td, new HashSet<TypeDescriptor>());
tovisit.add(td);
transmap.get(td).add(td);
-
+
while(!tovisit.isEmpty()) {
TypeDescriptor type=tovisit.iterator().next();
tovisit.remove(type);
}
}
}
-
+
public Set<TypeDescriptor> expand(TypeDescriptor td) {
Set<TypeDescriptor> expandset=namemap.get(td);
return expandset;
public Set<TypeDescriptor> expandSet(Set<TypeDescriptor> tdset) {
HashSet<TypeDescriptor> expandedSet=new HashSet<TypeDescriptor>();
- for(Iterator<TypeDescriptor> it=tdset.iterator();it.hasNext();) {
+ for(Iterator<TypeDescriptor> it=tdset.iterator(); it.hasNext(); ) {
TypeDescriptor td=it.next();
Set<TypeDescriptor> etdset=expand(td);
if (etdset==null)
void computeTypes(MethodDescriptor md) {
FlatMethod fm=state.getMethodFlat(md);
- for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
switch(fn.kind()) {
case FKind.FlatOpNode: {
}
break;
}
+
case FKind.FlatNew: {
FlatNew fnew=(FlatNew)fn;
roottypes.add(fnew.getType());
break;
}
+
case FKind.FlatCastNode: {
FlatCastNode fcn=(FlatCastNode)fn;
addMapping(fcn.getSrc().getType(), fcn.getDst().getType());
break;
}
+
case FKind.FlatFieldNode: {
FlatFieldNode ffn=(FlatFieldNode)fn;
addMapping(ffn.getField().getType(), ffn.getDst().getType());
break;
}
+
case FKind.FlatSetFieldNode: {
FlatSetFieldNode fsfn=(FlatSetFieldNode) fn;
addMapping(fsfn.getSrc().getType(), fsfn.getField().getType());
break;
}
+
case FKind.FlatElementNode: {
FlatElementNode fen=(FlatElementNode)fn;
addMapping(fen.getSrc().getType().dereference(), fen.getDst().getType());
break;
}
+
case FKind.FlatSetElementNode: {
FlatSetElementNode fsen=(FlatSetElementNode)fn;
addMapping(fsen.getSrc().getType(), fsen.getDst().getType().dereference());
break;
}
+
case FKind.FlatCall: {
FlatCall fc=(FlatCall)fn;
if (fc.getReturnTemp()!=null) {
if (fc.getThis()!=null) {
//complicated...need to deal with virtual dispatch here
Set methods=cg.getMethods(callmd);
- for(Iterator mdit=methods.iterator();mdit.hasNext();) {
+ for(Iterator mdit=methods.iterator(); mdit.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)mdit.next();
if (fc.getThis()!=null) {
TypeDescriptor ttype=new TypeDescriptor(md2.getClassDesc());
if (!typeutil.isSuperorType(fc.getThis().getType(),ttype)&&
- !typeutil.isSuperorType(ttype,fc.getThis().getType()))
+ !typeutil.isSuperorType(ttype,fc.getThis().getType()))
continue;
addMapping(fc.getThis().getType(), ttype);
}
}
}
- for(int i=0;i<fc.numArgs();i++) {
+ for(int i=0; i<fc.numArgs(); i++) {
TempDescriptor arg=fc.getArg(i);
TypeDescriptor ptype=callmd.getParamType(i);
addMapping(arg.getType(), ptype);
}
break;
}
- //both inputs and output
+
+ //both inputs and output
case FKind.FlatReturnNode: {
FlatReturnNode frn=(FlatReturnNode) fn;
if (frn.getReturnTemp()!=null)
while(!toprocess.isEmpty()) {
FlatNode fn=(FlatNode)toprocess.iterator().next();
toprocess.remove(fn);
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode nnext=fn.getNext(i);
if (!discovered.contains(nnext)) {
toprocess.add(nnext);
//Do kills of expression/variable mappings
TempDescriptor[] write=fn.writesTemps();
- for(int i=0;i<write.length;i++) {
+ for(int i=0; i<write.length; i++) {
if (tab.containsKey(write[i]))
tab.remove(write[i]);
}
-
+
switch(fn.kind()) {
case FKind.FlatAtomicEnterNode:
- {
- killexpressions(tab, null, null, true);
- break;
- }
+ {
+ killexpressions(tab, null, null, true);
+ break;
+ }
+
case FKind.FlatCall:
- {
- FlatCall fc=(FlatCall) fn;
- MethodDescriptor md=fc.getMethod();
- Set<FieldDescriptor> fields=gft.getFieldsAll(md);
- Set<TypeDescriptor> arrays=gft.getArraysAll(md);
- killexpressions(tab, fields, arrays, gft.containsAtomicAll(md)||gft.containsBarrierAll(md));
- break;
- }
+ {
+ FlatCall fc=(FlatCall) fn;
+ MethodDescriptor md=fc.getMethod();
+ Set<FieldDescriptor> fields=gft.getFieldsAll(md);
+ Set<TypeDescriptor> arrays=gft.getArraysAll(md);
+ killexpressions(tab, fields, arrays, gft.containsAtomicAll(md)||gft.containsBarrierAll(md));
+ break;
+ }
+
case FKind.FlatOpNode:
- {
- FlatOpNode fon=(FlatOpNode) fn;
- Expression e=new Expression(fon.getLeft(), fon.getRight(), fon.getOp());
- tab.put(e, fon.getDest());
- break;
- }
+ {
+ FlatOpNode fon=(FlatOpNode) fn;
+ Expression e=new Expression(fon.getLeft(), fon.getRight(), fon.getOp());
+ tab.put(e, fon.getDest());
+ break;
+ }
+
case FKind.FlatSetFieldNode:
- {
- FlatSetFieldNode fsfn=(FlatSetFieldNode)fn;
- Set<FieldDescriptor> fields=new HashSet<FieldDescriptor>();
- fields.add(fsfn.getField());
- killexpressions(tab, fields, null, false);
- Expression e=new Expression(fsfn.getDst(), fsfn.getField());
- tab.put(e, fsfn.getSrc());
- break;
- }
+ {
+ FlatSetFieldNode fsfn=(FlatSetFieldNode)fn;
+ Set<FieldDescriptor> fields=new HashSet<FieldDescriptor>();
+ fields.add(fsfn.getField());
+ killexpressions(tab, fields, null, false);
+ Expression e=new Expression(fsfn.getDst(), fsfn.getField());
+ tab.put(e, fsfn.getSrc());
+ break;
+ }
+
case FKind.FlatFieldNode:
- {
- FlatFieldNode ffn=(FlatFieldNode)fn;
- Expression e=new Expression(ffn.getSrc(), ffn.getField());
- tab.put(e, ffn.getDst());
- break;
- }
+ {
+ FlatFieldNode ffn=(FlatFieldNode)fn;
+ Expression e=new Expression(ffn.getSrc(), ffn.getField());
+ tab.put(e, ffn.getDst());
+ break;
+ }
+
case FKind.FlatSetElementNode:
- {
- FlatSetElementNode fsen=(FlatSetElementNode)fn;
- Expression e=new Expression(fsen.getDst(),fsen.getIndex());
- tab.put(e, fsen.getSrc());
- break;
- }
+ {
+ FlatSetElementNode fsen=(FlatSetElementNode)fn;
+ Expression e=new Expression(fsen.getDst(),fsen.getIndex());
+ tab.put(e, fsen.getSrc());
+ break;
+ }
+
case FKind.FlatElementNode:
- {
- FlatElementNode fen=(FlatElementNode)fn;
- Expression e=new Expression(fen.getSrc(),fen.getIndex());
- tab.put(e, fen.getDst());
- break;
- }
+ {
+ FlatElementNode fen=(FlatElementNode)fn;
+ Expression e=new Expression(fen.getSrc(),fen.getIndex());
+ tab.put(e, fen.getDst());
+ break;
+ }
+
default:
}
-
+
if (write.length==1) {
TempDescriptor w=write[0];
- for(Iterator it=tab.entrySet().iterator();it.hasNext();) {
+ for(Iterator it=tab.entrySet().iterator(); it.hasNext(); ) {
Map.Entry m=(Map.Entry)it.next();
Expression e=(Expression)m.getKey();
if (e.a==w||e.b==w)
}
if (!availexpr.containsKey(fn)||!availexpr.get(fn).equals(tab)) {
availexpr.put(fn, tab);
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode nnext=fn.getNext(i);
toprocess.add(nnext);
}
doOptimize(fm, availexpr);
}
-
+
public void doOptimize(FlatMethod fm, Hashtable<FlatNode,Hashtable<Expression, TempDescriptor>> availexpr) {
Hashtable<FlatNode, FlatNode> replacetable=new Hashtable<FlatNode, FlatNode>();
- for(Iterator<FlatNode> it=fm.getNodeSet().iterator();it.hasNext();) {
+ for(Iterator<FlatNode> it=fm.getNodeSet().iterator(); it.hasNext(); ) {
FlatNode fn=it.next();
Hashtable<Expression, TempDescriptor> tab=computeIntersection(fn, availexpr);
switch(fn.kind()) {
case FKind.FlatOpNode:
- {
- FlatOpNode fon=(FlatOpNode) fn;
- Expression e=new Expression(fon.getLeft(), fon.getRight(),fon.getOp());
- if (tab.containsKey(e)) {
- TempDescriptor t=tab.get(e);
- FlatNode newfon=new FlatOpNode(fon.getDest(),t,null,new Operation(Operation.ASSIGN));
- replacetable.put(fon,newfon);
- }
- break;
+ {
+ FlatOpNode fon=(FlatOpNode) fn;
+ Expression e=new Expression(fon.getLeft(), fon.getRight(),fon.getOp());
+ if (tab.containsKey(e)) {
+ TempDescriptor t=tab.get(e);
+ FlatNode newfon=new FlatOpNode(fon.getDest(),t,null,new Operation(Operation.ASSIGN));
+ replacetable.put(fon,newfon);
}
+ break;
+ }
+
case FKind.FlatFieldNode:
- {
- FlatFieldNode ffn=(FlatFieldNode)fn;
- Expression e=new Expression(ffn.getSrc(), ffn.getField());
- if (tab.containsKey(e)) {
- TempDescriptor t=tab.get(e);
- FlatNode newfon=new FlatOpNode(ffn.getDst(),t,null,new Operation(Operation.ASSIGN));
- replacetable.put(ffn,newfon);
- }
- break;
+ {
+ FlatFieldNode ffn=(FlatFieldNode)fn;
+ Expression e=new Expression(ffn.getSrc(), ffn.getField());
+ if (tab.containsKey(e)) {
+ TempDescriptor t=tab.get(e);
+ FlatNode newfon=new FlatOpNode(ffn.getDst(),t,null,new Operation(Operation.ASSIGN));
+ replacetable.put(ffn,newfon);
}
+ break;
+ }
+
case FKind.FlatElementNode:
- {
- FlatElementNode fen=(FlatElementNode)fn;
- Expression e=new Expression(fen.getSrc(),fen.getIndex());
- if (tab.containsKey(e)) {
- TempDescriptor t=tab.get(e);
- FlatNode newfon=new FlatOpNode(fen.getDst(),t,null,new Operation(Operation.ASSIGN));
- replacetable.put(fen,newfon);
- }
- break;
+ {
+ FlatElementNode fen=(FlatElementNode)fn;
+ Expression e=new Expression(fen.getSrc(),fen.getIndex());
+ if (tab.containsKey(e)) {
+ TempDescriptor t=tab.get(e);
+ FlatNode newfon=new FlatOpNode(fen.getDst(),t,null,new Operation(Operation.ASSIGN));
+ replacetable.put(fen,newfon);
}
- default:
+ break;
+ }
+
+ default:
}
}
- for(Iterator<FlatNode> it=replacetable.keySet().iterator();it.hasNext();) {
+ for(Iterator<FlatNode> it=replacetable.keySet().iterator(); it.hasNext(); ) {
FlatNode fn=it.next();
FlatNode newfn=replacetable.get(fn);
fn.replace(newfn);
}
}
-
+
public Hashtable<Expression, TempDescriptor> computeIntersection(FlatNode fn, Hashtable<FlatNode,Hashtable<Expression, TempDescriptor>> availexpr) {
Hashtable<Expression, TempDescriptor> tab=new Hashtable<Expression, TempDescriptor>();
boolean first=true;
-
+
//compute intersection
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
FlatNode prev=fn.getPrev(i);
if (first) {
if (availexpr.containsKey(prev)) {
} else {
if (availexpr.containsKey(prev)) {
Hashtable<Expression, TempDescriptor> table=availexpr.get(prev);
- for(Iterator mapit=tab.entrySet().iterator();mapit.hasNext();) {
+ for(Iterator mapit=tab.entrySet().iterator(); mapit.hasNext(); ) {
Object entry=mapit.next();
if (!table.contains(entry))
mapit.remove();
}
public void killexpressions(Hashtable<Expression, TempDescriptor> tab, Set<FieldDescriptor> fields, Set<TypeDescriptor> arrays, boolean killall) {
- for(Iterator it=tab.entrySet().iterator();it.hasNext();) {
+ for(Iterator it=tab.entrySet().iterator(); it.hasNext(); ) {
Map.Entry m=(Map.Entry)it.next();
Expression e=(Expression)m.getKey();
if (killall&&(e.f!=null||e.a!=null))
it.remove();
- else if (e.f!=null&&fields!=null&&fields.contains(e.f))
+ else if (e.f!=null&&fields!=null&&fields.contains(e.f))
it.remove();
else if ((e.a!=null)&&(arrays!=null)) {
- for(Iterator<TypeDescriptor> arit=arrays.iterator();arit.hasNext();) {
+ for(Iterator<TypeDescriptor> arit=arrays.iterator(); arit.hasNext(); ) {
TypeDescriptor artd=arit.next();
if (typeutil.isSuperorType(artd,e.a.getType())||
typeutil.isSuperorType(e.a.getType(),artd)) {
//Compute intersection
Set<TempDescriptor> liveset=livetemps.get(fn);
- for(int i=1;i<fn.numPrev();i++) {
+ for(int i=1; i<fn.numPrev(); i++) {
Hashtable<TempDescriptor, TempDescriptor> tp=table.get(fn.getPrev(i));
if (tp==null)
continue;
- for(Iterator tmpit=tp.entrySet().iterator();tmpit.hasNext();) {
+ for(Iterator tmpit=tp.entrySet().iterator(); tmpit.hasNext(); ) {
Map.Entry t=(Map.Entry)tmpit.next();
TempDescriptor tmp=(TempDescriptor)t.getKey();
if (!liveset.contains(tmp))
}
HashSet<TempDescriptor> toremove=new HashSet<TempDescriptor>();
- TempDescriptor[]writes=fn.writesTemps();
- for(int i=0;i<writes.length;i++) {
+ TempDescriptor[] writes=fn.writesTemps();
+ for(int i=0; i<writes.length; i++) {
TempDescriptor tmp=writes[i];
toremove.add(tmp);
- for(Iterator<TempDescriptor> tmpit=tab.keySet().iterator();tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=tab.keySet().iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp2=tmpit.next();
if (tmp==tab.get(tmp2))
toremove.add(tmp2);
}
}
- for(Iterator<TempDescriptor> tmpit=toremove.iterator();tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=toremove.iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
tab.put(tmp, bogustd);
}
if (!table.containsKey(fn)||!table.get(fn).equals(tab)) {
table.put(fn,tab);
changed=true;
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode nnext=fn.getNext(i);
tovisit.add(nnext);
}
Set<FlatNode> nodeset=fm.getNodeSet();
- for(Iterator<FlatNode> it=fm.getNodeSet().iterator();it.hasNext();) {
+ for(Iterator<FlatNode> it=fm.getNodeSet().iterator(); it.hasNext(); ) {
FlatNode fn=it.next();
if (fn.numPrev()==0)
continue;
Hashtable<TempDescriptor, TempDescriptor> tab=new Hashtable<TempDescriptor, TempDescriptor>();
-
- for(int i=0;i<fn.numPrev();i++) {
+
+ for(int i=0; i<fn.numPrev(); i++) {
Hashtable<TempDescriptor, TempDescriptor> tp=table.get(fn.getPrev(i));
- for(Iterator tmpit=tp.entrySet().iterator();tmpit.hasNext();) {
+ for(Iterator tmpit=tp.entrySet().iterator(); tmpit.hasNext(); ) {
Map.Entry t=(Map.Entry)tmpit.next();
TempDescriptor tmp=(TempDescriptor)t.getKey();
-
+
if (!tab.containsKey(tmp))
tab.put(tmp, tp.get(tmp));
else if (tab.get(tmp)!=tp.get(tmp)) {
}
TempMap tmap=null;
- TempDescriptor[]reads=fn.readsTemps();
- for(int i=0;i<reads.length;i++) {
+ TempDescriptor[] reads=fn.readsTemps();
+ for(int i=0; i<reads.length; i++) {
TempDescriptor tmp=reads[i];
if (tab.containsKey(tmp)&&tab.get(tmp)!=bogustd) {
if (tmap==null)
boolean changed=true;
while(changed) {
changed=false;
- nextfn:
- for(Iterator<FlatNode> it=fm.getNodeSet().iterator();it.hasNext();) {
+nextfn:
+ for(Iterator<FlatNode> it=fm.getNodeSet().iterator(); it.hasNext(); ) {
FlatNode fn=it.next();
switch(fn.kind()) {
case FKind.FlatCall:
case FKind.FlatPrefetchNode:
case FKind.FlatSESEEnterNode:
case FKind.FlatSESEExitNode:
- case FKind.FlatGenReachNode:
+ case FKind.FlatGenReachNode:
if (!useful.contains(fn)) {
useful.add(fn);
changed=true;
- }
+ }
break;
+
case FKind.FlatOpNode:
FlatOpNode fon=(FlatOpNode)fn;
if (fon.getOp().getOp()==Operation.DIV||
}
break;
}
+
default:
TempDescriptor[] writes=fn.writesTemps();
if (!useful.contains(fn))
- for(int i=0;i<writes.length;i++) {
- for(Iterator<FlatNode> uit=ud.useMap(fn,writes[i]).iterator();uit.hasNext();) {
+ for(int i=0; i<writes.length; i++) {
+ for(Iterator<FlatNode> uit=ud.useMap(fn,writes[i]).iterator(); uit.hasNext(); ) {
FlatNode ufn=uit.next();
if (useful.contains(ufn)) {
//we are useful
}
}
//get rid of useless nodes
- for(Iterator<FlatNode> it=fm.getNodeSet().iterator();it.hasNext();) {
+ for(Iterator<FlatNode> it=fm.getNodeSet().iterator(); it.hasNext(); ) {
FlatNode fn=it.next();
if (!useful.contains(fn)||isuseless(fn)) {
//We have a useless node
FlatNode fnnext=fn.getNext(0);
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
FlatNode nprev=fn.getPrev(i);
-
- for(int j=0;j<nprev.numNext();j++) {
+
+ for(int j=0; j<nprev.numNext(); j++) {
if (nprev.getNext(j)==fn) {
nprev.setnext(j, fnnext);
fnnext.addPrev(nprev);
if (postdominator) {
Set<FlatNode> fnodes=fm.getNodeSet();
Vector<FlatNode> v=new Vector<FlatNode>();
- for(Iterator<FlatNode> fit=fnodes.iterator();fit.hasNext();) {
+ for(Iterator<FlatNode> fit=fnodes.iterator(); fit.hasNext(); ) {
FlatNode fn=fit.next();
if (fn.numNext()==0) {
v.add(fn);
}
}
FlatNode[] fnarray=new FlatNode[v.size()];
- for(int i=0;i<v.size();i++) {
+ for(int i=0; i<v.size(); i++) {
fnarray[i]=v.elementAt(i);
domtable.put(fnarray[i],fnarray[i]);
HashSet<FlatNode> set=new HashSet<FlatNode> ();
boolean changed=true;
while(changed) {
changed=false;
- for(int i=vec.size()-2;i>=0;i--) {
+ for(int i=vec.size()-2; i>=0; i--) {
FlatNode fn=vec.elementAt(i);
FlatNode dom=null;
- for(int j=0;j<(postdominator?fn.numNext():fn.numPrev());j++) {
+ for(int j=0; j<(postdominator?fn.numNext():fn.numPrev()); j++) {
FlatNode np=postdominator?fn.getNext(j):fn.getPrev(j);
FlatNode ndom=domtable.get(np);
if (ndom!=null) {
vecindex=new Hashtable<FlatNode,Integer>();
HashSet visited=new HashSet();
Stack<FlatNode> stack=new Stack<FlatNode>();
- for(int i=0;i<fm.length;i++) {
+ for(int i=0; i<fm.length; i++) {
stack.push(fm[i]);
visited.add(fm[i]);
}
- mainloop:
+mainloop:
while(!stack.isEmpty()) {
FlatNode fn=stack.peek();
- for(int i=0;i<(postdominator?fn.numPrev():fn.numNext());i++) {
+ for(int i=0; i<(postdominator?fn.numPrev():fn.numNext()); i++) {
FlatNode next=postdominator?fn.getPrev(i):fn.getNext(i);
if (!visited.contains(next)) {
visited.add(next);
Hashtable<MethodDescriptor, Set<TypeDescriptor>> arraysrd;
HashSet<MethodDescriptor> containsAtomic;
HashSet<MethodDescriptor> containsBarrier;
-
+
public GlobalFieldType(CallGraph cg, State st, MethodDescriptor root) {
this.cg=cg;
this.st=st;
toprocess.remove(md);
analyzeMethod(md);
Set callees=cg.getCalleeSet(md);
- for(Iterator it=callees.iterator();it.hasNext();) {
+ for(Iterator it=callees.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
if (!discovered.contains(md2)) {
discovered.add(md2);
if (md.getClassDesc().getSymbol().equals(TypeUtil.ThreadClass)&&
md.getSymbol().equals("start")&&!md.getModifiers().isStatic()&&
md.numParameters()==0) {
- //start -> run link
+ //start -> run link
MethodDescriptor runmd=null;
- for(Iterator methodit=md.getClassDesc().getMethodTable().getSet("run").iterator(); methodit.hasNext();) {
+ for(Iterator methodit=md.getClassDesc().getMethodTable().getSet("run").iterator(); methodit.hasNext(); ) {
MethodDescriptor mdrun=(MethodDescriptor) methodit.next();
if (mdrun.numParameters()!=0||mdrun.getModifiers().isStatic())
continue;
}
if (runmd!=null) {
Set runmethodset=cg.getMethods(runmd);
- for(Iterator it=runmethodset.iterator();it.hasNext();) {
+ for(Iterator it=runmethodset.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
if (!discovered.contains(md2)) {
discovered.add(md2);
boolean changed=true;
while(changed) {
changed=false;
- for(Iterator it=discovered.iterator();it.hasNext();) {
+ for(Iterator it=discovered.iterator(); it.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor)it.next();
Set callees=cg.getCalleeSet(md);
- for(Iterator cit=callees.iterator();cit.hasNext();) {
+ for(Iterator cit=callees.iterator(); cit.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)cit.next();
if (fields.get(md).addAll(fields.get(md2)))
changed=true;
public boolean containsAtomicAll(MethodDescriptor md) {
Set methodset=cg.getMethods(md);
- for(Iterator it=methodset.iterator();it.hasNext();) {
+ for(Iterator it=methodset.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
if (containsAtomic.contains(md2))
return true;
+ // True if md -- or any method this call may actually dispatch to at
+ // runtime -- was flagged during analysis as containing a barrier.
+ // NOTE(review): assumes cg.getMethods(md) returns the set of possible
+ // implementations of md (virtual-dispatch expansion); confirm in CallGraph.
public boolean containsBarrierAll(MethodDescriptor md) {
Set methodset=cg.getMethods(md);
- for(Iterator it=methodset.iterator();it.hasNext();) {
+ for(Iterator it=methodset.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
if (containsBarrier.contains(md2))
return true;
}
return false;
}
-
+
public Set<FieldDescriptor> getFieldsAll(MethodDescriptor md) {
HashSet<FieldDescriptor> s=new HashSet<FieldDescriptor>();
Set methodset=cg.getMethods(md);
- for(Iterator it=methodset.iterator();it.hasNext();) {
+ for(Iterator it=methodset.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
if (fields.containsKey(md2))
s.addAll(fields.get(md2));
public Set<TypeDescriptor> getArraysAll(MethodDescriptor md) {
HashSet<TypeDescriptor> s=new HashSet<TypeDescriptor>();
Set methodset=cg.getMethods(md);
- for(Iterator it=methodset.iterator();it.hasNext();) {
+ for(Iterator it=methodset.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
if (arrays.containsKey(md2))
s.addAll(arrays.get(md2));
public Set<FieldDescriptor> getFieldsRdAll(MethodDescriptor md) {
HashSet<FieldDescriptor> s=new HashSet<FieldDescriptor>();
Set methodset=cg.getMethods(md);
- for(Iterator it=methodset.iterator();it.hasNext();) {
+ for(Iterator it=methodset.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
if (fieldsrd.containsKey(md2))
s.addAll(fieldsrd.get(md2));
public Set<TypeDescriptor> getArraysRdAll(MethodDescriptor md) {
HashSet<TypeDescriptor> s=new HashSet<TypeDescriptor>();
Set methodset=cg.getMethods(md);
- for(Iterator it=methodset.iterator();it.hasNext();) {
+ for(Iterator it=methodset.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
if (arraysrd.containsKey(md2))
s.addAll(arraysrd.get(md2));
arrays.put(md, new HashSet<TypeDescriptor>());
fieldsrd.put(md, new HashSet<FieldDescriptor>());
arraysrd.put(md, new HashSet<TypeDescriptor>());
-
+
FlatMethod fm=st.getMethodFlat(md);
- for(Iterator it=fm.getNodeSet().iterator();it.hasNext();) {
+ for(Iterator it=fm.getNodeSet().iterator(); it.hasNext(); ) {
FlatNode fn=(FlatNode)it.next();
if (fn.kind()==FKind.FlatSetElementNode) {
FlatSetElementNode fsen=(FlatSetElementNode)fn;
import java.util.Iterator;
/**
* <code>LoopFinder</code> implements Dominator Tree Loop detection.
- *
+ *
* @author Brian Demsky <bdemsky@mit.edu>
- * @version $Id: LoopFinder.java,v 1.3 2009/04/03 09:06:12 bdemsky Exp $
+ * @version $Id: LoopFinder.java,v 1.4 2011/04/27 20:34:22 bdemsky Exp $
*/
public class LoopFinder implements Loops {
HashSet setofloops;
Loop root;
Loop ptr;
-
-
- /** Creates a new LoopFinder object.
+
+
+ /** Creates a new LoopFinder object.
* This call takes an HCode and a CFGrapher
* and returns a LoopFinder object
- * at the root level.
+ * at the root level.
*/
-
+
public LoopFinder(FlatMethod hc) {
- this.hc=hc;
- this.dominator=new DomTree(hc,false);
- analyze();
- this.ptr=root;
- }
+ this.hc=hc;
+ // NOTE(review): the boolean presumably selects forward dominators
+ // (postdominator=false) rather than postdominators -- confirm in DomTree.
+ this.dominator=new DomTree(hc,false);
+ analyze();
+ // Point the cursor at the root pseudo-loop, which covers the whole method.
+ this.ptr=root;
+ }
/**This method is for internal use only.
*It returns a Loopfinder object at any level,
*but it doesn't regenerate the internal tree
*so any external calls would result in garbage.*/
-
+
private LoopFinder(FlatMethod hc, DomTree dt, Loop root, Loop ptr) {
this.lasthc=hc;
this.hc=hc;
this.root=root;
this.ptr=ptr;
}
-
+
/*-----------------------------*/
-
+
/** This method returns the Root level loop for a given <code>HCode</code>.
* Does the same thing as the constructor call, but for an existing
* LoopFinder object.*/
-
+
public Loops getRootloop(FlatMethod hc) {
this.hc=hc;
+ // analyze() is cached per method (hc!=lasthc guard), so this is cheap
+ // when hc has already been analyzed.
analyze();
return new LoopFinder(hc,dominator,root,root);
}
-
+
/** This method returns the entry point of the loop.
- * For the natural loops we consider, that is simply the header.
+ * For the natural loops we consider, that is simply the header.
* It returns a <code>Set</code> of <code>HCodeElement</code>s.*/
-
+
public Set loopEntrances() {
HashSet entries=new HashSet();
analyze();
+ // A natural loop has exactly one entry point: its header node.
entries.add(ptr.header);
return entries;
}
-
-
+
+
/**Returns a <code>Set</code> with all of the <code>HCodeElement</code>s of the loop and
*loops included entirely within this loop. */
-
+
public Set loopIncElements() {
analyze();
+ // Defensive copy: callers must not be able to mutate the loop's
+ // internal entry set.
HashSet A=new HashSet(ptr.entries);
return A;
}
-
+
/** Returns all of the <code>HCodeElement</code>s of this loop that aren't in a nested
* loop. This returns a <code>Set</code> of <code>HCodeElement</code>s.*/
-
+
public Set loopExcElements() {
analyze();
HashSet A=new HashSet(ptr.entries);
}
return A;
}
-
+
/** Returns a <code>Set</code> of loops that are nested inside of this loop.*/
-
+
public Set nestedLoops() {
analyze();
HashSet L=new HashSet();
L.add(new LoopFinder(hc,dominator,root,(Loop) iterate.next()));
return L;
}
-
+
/** Returns the <code>Loops</code> that contains this loop.
* If this is the top level loop, this call returns a null pointer.*/
-
+
public Loops parentLoop() {
analyze();
if (ptr.parent!=null)
return new LoopFinder(hc,dominator,root,ptr.parent);
+ // Top-level (root) loop has no parent, per the contract documented above.
else return null;
}
-
+
/*---------------------------*/
// public information accessor methods.
-
+
/*---------------------------*/
// Analysis code.
-
-
+
+
/** Main analysis method. */
-
+
void analyze() {
//Have we analyzed this set before?
//If so, don't do it again!!!
if (hc!=lasthc) {
-
+
//Did the caller hand us a bogus object?
//If so, throw it something
-
+
lasthc=hc;
-
+
//Set up the top level loop, so we can fill it with HCodeElements
//as we go along
root=new Loop();
+ // The root pseudo-loop's header is the method entry itself; every node
+ // visited by findloopheaders() ends up in root.entries.
root.header=hc;
-
+
//Set up a WorkSet for storing loops before we build the
//nested loop tree
setofloops=new HashSet();
//Find loops
findloopheaders(hc);
-
+
//Build the nested loop tree
buildtree();
}
- }
+ }
// end analysis.
-
+
void buildtree() {
//go through set of generated loops
while(!setofloops.isEmpty()) {
//Pull out one
Loop A=(Loop) setofloops.iterator().next();
setofloops.remove(A);
-
+
//Add it to the tree, complain if oddness
- if (addnode(A, root)!=1)
+ // addnode()==1 means A was inserted cleanly; any other result at the
+ // top level indicates an inconsistent loop set.
+ if (addnode(A, root)!=1)
System.out.println("Evil Error in LoopFinder while building tree.");
}
}
-
- //Adds a node to the tree...Its recursive
-
- int addnode(Loop A, Loop treenode) {
- //Only need to go deeper if the header is contained in this loop
- if (treenode.entries.contains(A.header))
-
- //Do we share headers?
- if (treenode.header!=A.header) {
-
- //No... Loop through our children to see if they want this
- //node.
-
- //Use integers for tri-state:
- //0=not stored here, 1=stored and everything is good
- //2=combined 2 natural loops with same header...need cleanup
-
- int stored=0;
- Iterator iterate=treenode.children.iterator();
- Loop temp=new Loop();
- while (iterate.hasNext()) {
- temp=(Loop) iterate.next();
- stored=addnode(A,temp);
- if (stored!=0) break;
- }
-
- //See what our children did for us
-
- if (stored==0) {
- //We get a new child...
- treenode.children.add(A);
- temp=A;
- }
-
- //Need to do cleanup for case 0 or 2
- //temp points to the new child
-
- if (stored!=1) {
-
- //Have to make sure that none of the nodes under this one
- //are children of the new node
-
- Iterator iterate2=treenode.children.iterator();
- temp.parent=treenode;
-
- //Loop through the children
- while (iterate2.hasNext()) {
- Loop temp2=(Loop)iterate2.next();
-
- //Don't look at the new node...otherwise we will create
- //a unreachable subtree
-
- if (temp2!=temp)
- //If the new node has a childs header
- //give the child up to it...
-
- if (temp.entries.contains(temp2.header)) {
- temp.children.add(temp2);
- iterate2.remove();
- }
- }
- }
-
- //We fixed everything...let our parents know
- return 1;
- } else {
- //need to combine loops
- while (!A.entries.isEmpty()) {
- FlatNode node=(FlatNode)A.entries.iterator().next();
- A.entries.remove(node);
- treenode.entries.add(node);
- }
- //let the previous caller know that they have stuff todo
- return 2;
- }
- //We aren't adopting the new node
- else return 0;
- }
- void findloopheaders(FlatNode current_nodeOrig) {
- Stack stk = new Stack();
- stk.push( current_nodeOrig );
- while( ! stk.isEmpty() ){
- FlatNode current_node = (FlatNode) stk.pop();
- //look at the current node
- visit(current_node);
-
- //add it to the all inclusive root loop
- root.entries.add(current_node);
-
- //See if those we dominate are backedges
- Set<FlatNode> children=dominator.children(current_node);
-
- if (children!=null) {
- for(Iterator<FlatNode> it=children.iterator();it.hasNext();) {
- FlatNode fn=it.next();
- if (fn!=current_node)
- stk.push(fn);
+ //Adds a node to the tree... It's recursive.
+
+ int addnode(Loop A, Loop treenode) {
+ //Only need to go deeper if the header is contained in this loop
+ if (treenode.entries.contains(A.header))
+
+ //Do we share headers?
+ if (treenode.header!=A.header) {
+
+ //No... Loop through our children to see if they want this
+ //node.
+
+ //Use integers for tri-state:
+ //0=not stored here, 1=stored and everything is good
+ //2=combined 2 natural loops with same header...need cleanup
+
+ int stored=0;
+ Iterator iterate=treenode.children.iterator();
+ Loop temp=new Loop();
+ while (iterate.hasNext()) {
+ temp=(Loop) iterate.next();
+ stored=addnode(A,temp);
+ if (stored!=0) break;
+ }
+
+ //See what our children did for us
+
+ if (stored==0) {
+ //We get a new child...
+ treenode.children.add(A);
+ temp=A;
+ }
+
+ //Need to do cleanup for case 0 or 2
+ //temp points to the new child
+
+ if (stored!=1) {
+
+ //Have to make sure that none of the nodes under this one
+ //are children of the new node
+
+ Iterator iterate2=treenode.children.iterator();
+ temp.parent=treenode;
+
+ //Loop through the children
+ while (iterate2.hasNext()) {
+ Loop temp2=(Loop)iterate2.next();
+
+ //Don't look at the new node...otherwise we will create
+ //an unreachable subtree
+
+ if (temp2!=temp)
+ //If the new node has a child's header
+ //give the child up to it...
+
+ if (temp.entries.contains(temp2.header)) {
+ temp.children.add(temp2);
+ iterate2.remove();
}
- }
+ }
+ }
+
+ //We fixed everything...let our parents know
+ return 1;
+ } else {
+ //need to combine loops
+ while (!A.entries.isEmpty()) {
+ FlatNode node=(FlatNode)A.entries.iterator().next();
+ A.entries.remove(node);
+ treenode.entries.add(node);
+ }
+ //let the previous caller know that they have stuff todo
+ return 2;
+ }
+ //We aren't adopting the new node
+ else return 0;
+ }
+
+ // Explicit-stack DFS over the dominator tree: visit() checks each node
+ // for backedges, and every node is recorded in the all-inclusive root
+ // loop along the way.
+ void findloopheaders(FlatNode current_nodeOrig) {
+ Stack stk = new Stack();
+ stk.push(current_nodeOrig);
+ while( !stk.isEmpty() ) {
+ FlatNode current_node = (FlatNode) stk.pop();
+ //look at the current node
+ visit(current_node);
+
+ //add it to the all inclusive root loop
+ root.entries.add(current_node);
+
+ //See if those we dominate are backedges
+ Set<FlatNode> children=dominator.children(current_node);
+
+ if (children!=null) {
+ for(Iterator<FlatNode> it=children.iterator(); it.hasNext(); ) {
+ FlatNode fn=it.next();
+ if (fn!=current_node)
+ stk.push(fn);
}
+ }
+ }
+ }
+ // Detects whether q closes a natural loop: an out-edge q->h is a
+ // backedge iff h dominates q (checked by walking q's idom chain up to
+ // the method root). On a hit, the loop body is grown backwards from q
+ // along incoming edges until the header is reached, and the resulting
+ // Loop is saved in setofloops.
void visit(FlatNode q) {
Loop A=new Loop();
+ // NOTE(review): A and B are shared across all out-edges of q, so two
+ // backedges from the same q would be merged into one Loop -- confirm
+ // this is the intended behavior.
HashSet B=new HashSet();
-
+
//Loop through all of our outgoing edges
- for (int i=0;i<q.numNext();i++) {
+ for (int i=0; i<q.numNext(); i++) {
FlatNode temp=q;
FlatNode temp_to=q.getNext(i);
//we hit the root element or we
//find the node we jump back too
while ((temp!=hc)&&
- (temp_to!=temp)) {
+ (temp_to!=temp)) {
temp=dominator.idom(temp);
}
-
+
//If we found the node we jumped back to
//then build loop
-
+
if (temp_to==temp) {
-
+
//found a loop
A.entries.add(temp); //Push the header
A.header=temp;
B.add(q); //Put the backedge in the todo list
-
+
//Starting with the backedge, work on the incoming edges
//until we get back to the loop header...
//Then we have the entire natural loop
-
+
while(!B.isEmpty()) {
FlatNode newnode=(FlatNode)B.iterator().next();
B.remove(newnode);
-
+
//Add all of the new incoming edges that we haven't already
//visited
- for (int j=0;j<newnode.numPrev();j++) {
+ for (int j=0; j<newnode.numPrev(); j++) {
FlatNode from=newnode.getPrev(j);
if (!A.entries.contains(from))
B.add(from);
}
-
+
//push the new node on our list of nodes in the loop
A.entries.add(newnode);
}
-
+
//save our new loop
setofloops.add(A);
}
}
}
-
+
//Structure for building internal trees...
-
+
class Loop {
public HashSet entries=new HashSet();
public FlatNode header;
}
public void recurse(Loops parent) {
- for(Iterator lpit=parent.nestedLoops().iterator();lpit.hasNext();) {
+ for(Iterator lpit=parent.nestedLoops().iterator(); lpit.hasNext(); ) {
Loops child=(Loops)lpit.next();
processLoop(child, child.nestedLoops().size()==0);
recurse(child);
HashSet<FieldDescriptor> fields=new HashSet<FieldDescriptor>();
HashSet<TypeDescriptor> types=new HashSet<TypeDescriptor>();
-
+
if (!isLeaf) {
- unsafe=true;
+ unsafe=true;
} else {
/* Check whether it is safe to reuse values. */
- for(Iterator elit=elements.iterator();elit.hasNext();) {
+ for(Iterator elit=elements.iterator(); elit.hasNext(); ) {
FlatNode fn=(FlatNode)elit.next();
if (fn.kind()==FKind.FlatAtomicEnterNode||
fn.kind()==FKind.FlatAtomicExitNode) {
}
}
}
-
+
HashSet dominatorset=unsafe?null:computeAlways(l);
/* Compute loop invariants */
table.put(entrance, new Vector<FlatNode>());
while(changed) {
changed=false;
- nextfn:
- for(Iterator tpit=toprocess.iterator();tpit.hasNext();) {
+nextfn:
+ for(Iterator tpit=toprocess.iterator(); tpit.hasNext(); ) {
FlatNode fn=(FlatNode)tpit.next();
switch(fn.kind()) {
case FKind.FlatOpNode:
checkNode(fn,elements))
continue nextfn;
TypeDescriptor td=((FlatElementNode)fn).getSrc().getType();
- for(Iterator<TypeDescriptor> tdit=types.iterator();tdit.hasNext();) {
+ for(Iterator<TypeDescriptor> tdit=types.iterator(); tdit.hasNext(); ) {
TypeDescriptor td2=tdit.next();
if (typeutil.isSuperorType(td,td2)||
- typeutil.isSuperorType(td2,td)) {
+ typeutil.isSuperorType(td2,td)) {
continue nextfn;
}
}
}
//mark to hoist
if (hoisted.add(fn))
- changed=true;
+ changed=true;
table.get(entrance).add(fn);
}
}
assert entrances.size()==1;
FlatNode entrance=(FlatNode)entrances.iterator().next();
boolean first=true;
- for (int i=0;i<entrance.numPrev();i++) {
+ for (int i=0; i<entrance.numPrev(); i++) {
FlatNode incoming=entrance.getPrev(i);
if (elements.contains(incoming)) {
HashSet domset=new HashSet();
dominatorset=domset;
first=false;
} else {
- for(Iterator it=dominatorset.iterator();it.hasNext();) {
+ for(Iterator it=dominatorset.iterator(); it.hasNext(); ) {
FlatNode fn=(FlatNode)it.next();
if (!domset.contains(fn))
it.remove();
public boolean checkNode(FlatNode fn, Set elements) {
//Can hoist if all variables are loop invariant
- TempDescriptor[]uses=fn.readsTemps();
- for(int i=0;i<uses.length;i++) {
+ TempDescriptor[] uses=fn.readsTemps();
+ for(int i=0; i<uses.length; i++) {
TempDescriptor t=uses[i];
Set<FlatNode> defset=usedef.defMap(fn, t);
- for(Iterator<FlatNode> defit=defset.iterator();defit.hasNext();) {
+ for(Iterator<FlatNode> defit=defset.iterator(); defit.hasNext(); ) {
FlatNode def=defit.next();
if (elements.contains(def)&&defset.size()>1)
return true;
map=new Hashtable<FlatNode, FlatNode>();
clonemap=new Hashtable<FlatNode, FlatNode>();
dooptimize(fm);
- }
+ }
private FlatNode ntooremap(FlatNode fn) {
while(ntoomap.containsKey(fn)) {
recurse(fm, root);
}
private void recurse(FlatMethod fm, Loops parent) {
- for(Iterator lpit=parent.nestedLoops().iterator();lpit.hasNext();) {
+ for(Iterator lpit=parent.nestedLoops().iterator(); lpit.hasNext(); ) {
Loops child=(Loops)lpit.next();
processLoop(fm, child);
recurse(fm, child);
if (tohoist.size()==0)
return;
- for(int i=0;i<tohoist.size();i++) {
+ for(int i=0; i<tohoist.size(); i++) {
FlatNode fn=tohoist.elementAt(i);
TempDescriptor[] writes=fn.writesTemps();
FlatNode fnnew=fn.clone(tnone);
fnnew.rewriteUse(t);
- for(int j=0;j<writes.length;j++) {
+ for(int j=0; j<writes.length; j++) {
if (writes[j]!=null) {
TempDescriptor cp=writes[j].createNew();
t.addPair(writes[j],cp);
/* The chain is built at this point. */
FlatNode[] prevarray=new FlatNode[entrance.numPrev()];
- for(int i=0;i<entrance.numPrev();i++) {
+ for(int i=0; i<entrance.numPrev(); i++) {
prevarray[i]=entrance.getPrev(i);
}
- for(int i=0;i<prevarray.length;i++) {
+ for(int i=0; i<prevarray.length; i++) {
FlatNode prev=prevarray[i];
if (!lelements.contains(ntooremap(prev))) {
//need to fix this edge
- for(int j=0;j<prev.numNext();j++) {
+ for(int j=0; j<prev.numNext(); j++) {
if (prev.getNext(j)==entrance)
prev.setNext(j, first);
}
TempMap t=new TempMap();
/* Copy the nodes */
- for(Iterator it=lelements.iterator();it.hasNext();) {
+ for(Iterator it=lelements.iterator(); it.hasNext(); ) {
FlatNode fn=(FlatNode)it.next();
FlatNode nfn=otonremap(fn);
/* Store initial in set for loop header */
FlatNode[] prevarray=new FlatNode[entrance.numPrev()];
- for(int i=0;i<entrance.numPrev();i++) {
+ for(int i=0; i<entrance.numPrev(); i++) {
prevarray[i]=entrance.getPrev(i);
}
FlatNode first=copytable.get(entrance);
/* Copy the internal edges */
- for(Iterator it=lelements.iterator();it.hasNext();) {
+ for(Iterator it=lelements.iterator(); it.hasNext(); ) {
FlatNode fn=(FlatNode)it.next();
fn=otonremap(fn);
FlatNode copyend=copyendtable.get(fn);
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode nnext=fn.getNext(i);
if (nnext==entrance) {
/* Back to loop header...point to old graph */
}
/* Splice header in using original in set */
- for(int i=0;i<prevarray.length;i++) {
+ for(int i=0; i<prevarray.length; i++) {
FlatNode prev=prevarray[i];
if (!lelements.contains(ntooremap(prev))) {
//need to fix this edge
- for(int j=0;j<prev.numNext();j++) {
+ for(int j=0; j<prev.numNext(); j++) {
if (prev.getNext(j)==entrance) {
prev.setNext(j, first);
}
}
/* Splice out loop invariant stuff */
- for(Iterator it=lelements.iterator();it.hasNext();) {
+ for(Iterator it=lelements.iterator(); it.hasNext(); ) {
FlatNode fn=(FlatNode)it.next();
FlatNode nfn=otonremap(fn);
if (tohoist.contains(fn)) {
*
*
* @author Brian Demsky <bdemsky@mit.edu>
- * @version $Id: Loops.java,v 1.1 2009/03/27 00:59:36 bdemsky Exp $
+ * @version $Id: Loops.java,v 1.2 2011/04/27 20:34:22 bdemsky Exp $
*/
public interface Loops {
- /** Returns entrances to the Loop.
- * This is a <code>Set</code> of <code>HCodeElement</code>s.*/
- public Set loopEntrances();
-
- /** Returns elements of this loops and all nested loop.
- * This is a <code>Set</code> of <code>HCodeElement</code>s.*/
- public Set loopIncElements();
-
- /** Returns elements of this loop not in any nested loop.
- * This is a <code>Set</code> of <code>HCodeElement</code>s.*/
- public Set loopExcElements();
-
- /** Returns a <code>Set</code> containing <code>Loops</code> that are
- * nested.*/
- public Set nestedLoops();
-
- /** Returns the loop immediately nesting this loop.
- * If this is the highest level loop, returns a null pointer.*/
- public Loops parentLoop();
+ /** Returns entrances to the Loop.
+ * This is a <code>Set</code> of <code>HCodeElement</code>s.*/
+ public Set loopEntrances();
+
+ /** Returns elements of this loops and all nested loop.
+ * This is a <code>Set</code> of <code>HCodeElement</code>s.*/
+ public Set loopIncElements();
+
+ /** Returns elements of this loop not in any nested loop.
+ * This is a <code>Set</code> of <code>HCodeElement</code>s.*/
+ public Set loopExcElements();
+
+ /** Returns a <code>Set</code> containing <code>Loops</code> that are
+ * nested.*/
+ public Set nestedLoops();
+
+ /** Returns the loop immediately nesting this loop.
+ * If this is the highest level loop, returns a null pointer.*/
+ public Loops parentLoop();
}
import java.util.Iterator;
import Analysis.Liveness;
-public class UseDef{
+public class UseDef {
Hashtable<TempFlatPair, Set<FlatNode>> defs;
Hashtable<TempFlatPair, Set<FlatNode>> uses;
toanalyze.remove(fn);
HashSet<TempFlatPair> s=new HashSet<TempFlatPair>();
Set<TempDescriptor> liveset=livemap.get(fn);
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
FlatNode prev=fn.getPrev(i);
Set<TempFlatPair> prevs=tmp.get(prev);
if (prevs!=null) {
- nexttfp:
- for(Iterator<TempFlatPair> tfit=prevs.iterator();tfit.hasNext();) {
+nexttfp:
+ for(Iterator<TempFlatPair> tfit=prevs.iterator(); tfit.hasNext(); ) {
TempFlatPair tfp=tfit.next();
if (!liveset.contains(tfp.t))
continue;
- for(int j=0;j<fnwrites.length;j++) {
+ for(int j=0; j<fnwrites.length; j++) {
if (tfp.t==fnwrites[j])
continue nexttfp;
}
s.add(tfp);
}
}
- for(int j=0;j<fnwrites.length;j++) {
+ for(int j=0; j<fnwrites.length; j++) {
TempFlatPair tfp=new TempFlatPair(fnwrites[j], fn);
s.add(tfp);
}
}
if (!tmp.containsKey(fn)||
- !tmp.get(fn).equals(s)) {
+ !tmp.get(fn).equals(s)) {
tmp.put(fn,s);
- for(int i=0;i<fn.numNext();i++)
+ for(int i=0; i<fn.numNext(); i++)
toanalyze.add(fn.getNext(i));
}
}
Set<FlatNode> fset=fm.getNodeSet();
defs=new Hashtable<TempFlatPair, Set<FlatNode>>();
uses=new Hashtable<TempFlatPair, Set<FlatNode>>();
- for(Iterator<FlatNode> fnit=fset.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fset.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
TempDescriptor[] fnreads=fn.readsTemps();
Set<TempFlatPair> tfpset=tmp.get(fn);
-
- for(int i=0;i<fnreads.length;i++) {
+
+ for(int i=0; i<fnreads.length; i++) {
TempDescriptor readt=fnreads[i];
- for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator(); tfpit.hasNext(); ) {
TempFlatPair tfp=tfpit.next();
if (tfp.t==readt) {
//have use
public void turnon() {
turnoff=false;
}
-
+
public boolean needBarrier(FlatNode fn) {
if (turnoff)
return false;
HashSet<TempDescriptor> nb=computeIntersection(fn);
switch(fn.kind()) {
case FKind.FlatSetElementNode:
- {
- FlatSetElementNode fsen=(FlatSetElementNode)fn;
- return !nb.contains(fsen.getDst());
- }
+ {
+ FlatSetElementNode fsen=(FlatSetElementNode)fn;
+ return !nb.contains(fsen.getDst());
+ }
+
case FKind.FlatElementNode:
- {
- FlatElementNode fen=(FlatElementNode)fn;
- return !nb.contains(fen.getSrc());
- }
+ {
+ FlatElementNode fen=(FlatElementNode)fn;
+ return !nb.contains(fen.getSrc());
+ }
+
case FKind.FlatSetFieldNode:
- {
- FlatSetFieldNode fsfn=(FlatSetFieldNode)fn;
- return !nb.contains(fsfn.getDst());
- }
+ {
+ FlatSetFieldNode fsfn=(FlatSetFieldNode)fn;
+ return !nb.contains(fsfn.getDst());
+ }
+
default:
return true;
}
}
-
+
Hashtable<FlatNode,HashSet<TempDescriptor>> needbarrier;
public void analyze(LocalityBinding lb) {
toprocess.add(fm.getNext(0));
discovered.add(fm.getNext(0));
Hashtable<FlatNode, Integer> atomic=la.getAtomic(lb);
-
+
while(!toprocess.isEmpty()) {
FlatNode fn=(FlatNode)toprocess.iterator().next();
toprocess.remove(fn);
- for(int i=0;i<fn.numNext();i++) {
- FlatNode nnext=fn.getNext(i);
- if (!discovered.contains(nnext)) {
- toprocess.add(nnext);
- discovered.add(nnext);
- }
+ for(int i=0; i<fn.numNext(); i++) {
+ FlatNode nnext=fn.getNext(i);
+ if (!discovered.contains(nnext)) {
+ toprocess.add(nnext);
+ discovered.add(nnext);
+ }
}
HashSet<TempDescriptor> nb=computeIntersection(fn);
TempDescriptor[] writes=fn.writesTemps();
- for(int i=0;i<writes.length;i++) {
+ for(int i=0; i<writes.length; i++) {
nb.remove(writes[i]);
}
switch(fn.kind()) {
case FKind.FlatSetElementNode:
- {
- FlatSetElementNode fsen=(FlatSetElementNode)fn;
- if (!state.STMARRAY)
- nb.add(fsen.getDst());
- break;
- }
+ {
+ FlatSetElementNode fsen=(FlatSetElementNode)fn;
+ if (!state.STMARRAY)
+ nb.add(fsen.getDst());
+ break;
+ }
+
case FKind.FlatSetFieldNode:
- {
- FlatSetFieldNode fsfn=(FlatSetFieldNode)fn;
- nb.add(fsfn.getDst());
- break;
- }
- case FKind.FlatOpNode:
- {
- FlatOpNode fon=(FlatOpNode)fn;
- if (fon.getOp().getOp()==Operation.ASSIGN) {
- if (nb.contains(fon.getLeft())) {
- nb.add(fon.getDest());
- }
+ {
+ FlatSetFieldNode fsfn=(FlatSetFieldNode)fn;
+ nb.add(fsfn.getDst());
+ break;
+ }
+
+ case FKind.FlatOpNode:
+ {
+ FlatOpNode fon=(FlatOpNode)fn;
+ if (fon.getOp().getOp()==Operation.ASSIGN) {
+ if (nb.contains(fon.getLeft())) {
+ nb.add(fon.getDest());
}
- break;
}
+ break;
+ }
+
case FKind.FlatNew:
- {
- FlatNew fnew=(FlatNew)fn;
- nb.add(fnew.getDst());
- break;
- }
+ {
+ FlatNew fnew=(FlatNew)fn;
+ nb.add(fnew.getDst());
+ break;
+ }
+
default:
//If we enter a transaction toss everything
if (atomic.get(fn).intValue()>0&&
}
}
if (!needbarrier.containsKey(fn)||
- !needbarrier.get(fn).equals(nb)) {
- for(int i=0;i<fn.numNext();i++) {
+ !needbarrier.get(fn).equals(nb)) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode nnext=fn.getNext(i);
toprocess.add(nnext);
}
HashSet<TempDescriptor> computeIntersection(FlatNode fn) {
HashSet<TempDescriptor> tab=new HashSet<TempDescriptor>();
boolean first=true;
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
FlatNode fprev=fn.getPrev(i);
HashSet<TempDescriptor> hs=needbarrier.get(fprev);
if (hs!=null) {
first=false;
} else {
//Intersect sets
- for(Iterator<TempDescriptor> it=tab.iterator();it.hasNext();) {
+ for(Iterator<TempDescriptor> it=tab.iterator(); it.hasNext(); ) {
TempDescriptor t=it.next();
if (!hs.contains(t))
it.remove();
}
}
public TempDescriptor getTemp(Group g) {
- for(Iterator it=g.set.iterator();it.hasNext();) {
+ for(Iterator it=g.set.iterator(); it.hasNext(); ) {
LocalExpression e=(LocalExpression)it.next();
if (e.t!=null)
return e.t;
public void doAnalysis(FlatMethod fm) {
Set nodes=fm.getNodeSet();
HashSet<FlatNode> toanalyze=new HashSet<FlatNode>();
- for(Iterator it=nodes.iterator();it.hasNext();) {
+ for(Iterator it=nodes.iterator(); it.hasNext(); ) {
FlatNode fn=(FlatNode)it.next();
if (fn.numPrev()>1)
toanalyze.add(fn);
}
- for(Iterator<FlatNode> it=toanalyze.iterator();it.hasNext();) {
+ for(Iterator<FlatNode> it=toanalyze.iterator(); it.hasNext(); ) {
FlatNode fn=it.next();
Hashtable<LocalExpression, Group> table=new Hashtable<LocalExpression,Group>();
do {
}
break;
}
+
case FKind.FlatLiteralNode: {
FlatLiteralNode fln=(FlatLiteralNode)fn;
LocalExpression e=new LocalExpression(fln.getValue());
table.put(dst, src);
break;
}
+
case FKind.FlatFieldNode: {
FlatFieldNode ffn=(FlatFieldNode) fn;
Group src=getGroup(table, ffn.getSrc());
table.put(dst, srcf);
break;
}
+
case FKind.FlatElementNode: {
FlatElementNode fen=(FlatElementNode) fn;
Group src=getGroup(table, fen.getSrc());
table.put(dst, srcf);
break;
}
+
case FKind.FlatSetFieldNode: {
FlatSetFieldNode fsfn=(FlatSetFieldNode)fn;
Group dst=getGroup(table, fsfn.getDst());
table.put(src, dstf);
break;
}
+
case FKind.FlatSetElementNode: {
FlatSetElementNode fsen=(FlatSetElementNode)fn;
Group dst=getGroup(table, fsen.getDst());
table.put(src, dstf);
break;
}
- case FKind.FlatCall:{
+
+ case FKind.FlatCall: {
//do side effects
FlatCall fc=(FlatCall)fn;
MethodDescriptor md=fc.getMethod();
Set<TypeDescriptor> arrays=gft.getArraysAll(md);
kill(table, fields, arrays, gft.containsAtomicAll(md), gft.containsBarrierAll(md));
}
+
default: {
TempDescriptor[] writes=fn.writesTemps();
- for(int i=0;i<writes.length;i++) {
+ for(int i=0; i<writes.length; i++) {
kill(table,writes[i]);
}
}
}
public void kill(Hashtable<LocalExpression, Group> tab, Set<FieldDescriptor> fields, Set<TypeDescriptor> arrays, boolean isAtomic, boolean isBarrier) {
Set<LocalExpression> eset=tab.keySet();
- for(Iterator<LocalExpression> it=eset.iterator();it.hasNext();) {
+ for(Iterator<LocalExpression> it=eset.iterator(); it.hasNext(); ) {
LocalExpression e=it.next();
if (isBarrier) {
//make Barriers kill everything
} else if (e.td!=null) {
//have array
TypeDescriptor artd=e.td;
- for(Iterator<TypeDescriptor> arit=arrays.iterator();arit.hasNext();) {
+ for(Iterator<TypeDescriptor> arit=arrays.iterator(); arit.hasNext(); ) {
TypeDescriptor td=arit.next();
if (typeutil.isSuperorType(artd,td)||
typeutil.isSuperorType(td,artd)) {
public boolean equals(Object o) {
LocalExpression e=(LocalExpression)o;
if (!(equiv(a,e.a)&&equiv(f,e.f)&&equiv(b,e.b)&&
- equiv(td,e.td)&&equiv(this.obj,e.obj)))
+ equiv(td,e.td)&&equiv(this.obj,e.obj)))
return false;
if (op!=null)
return op.getOp()==e.op.getOp();
}
public boolean isAccessible(FlatNode fn, TempDescriptor tmp) {
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
FlatNode fprev=fn.getPrev(i);
if (inAccessible.containsKey(fprev)&&inAccessible.get(fprev).contains(tmp))
return false;
}
public void computeFixPoint() {
- nextNode:
+nextNode:
while(!toprocess.isEmpty()) {
Pair<FlatNode, MethodDescriptor> fnpair=toprocess.pop();
FlatNode fn=fnpair.getFirst();
MethodDescriptor pairmd=fnpair.getSecond();
HashSet<TempDescriptor> inAccessibleSet=new HashSet<TempDescriptor>();
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
Set<TempDescriptor> inAccess=inAccessible.get(fn.getPrev(i));
if (inAccess!=null)
inAccessibleSet.addAll(inAccess);
case FKind.FlatElementNode:
case FKind.FlatSetFieldNode:
case FKind.FlatSetElementNode:
- {
- TempDescriptor[] rdtmps=fn.readsTemps();
- for(int i=0;i<rdtmps.length;i++) {
- inAccessibleSet.remove(rdtmps[i]);
- }
- TempDescriptor[] wrtmps=fn.writesTemps();
- for(int i=0;i<wrtmps.length;i++) {
- inAccessibleSet.remove(wrtmps[i]);
- }
+ {
+ TempDescriptor[] rdtmps=fn.readsTemps();
+ for(int i=0; i<rdtmps.length; i++) {
+ inAccessibleSet.remove(rdtmps[i]);
}
- break;
+ TempDescriptor[] wrtmps=fn.writesTemps();
+ for(int i=0; i<wrtmps.length; i++) {
+ inAccessibleSet.remove(wrtmps[i]);
+ }
+ }
+ break;
+
case FKind.FlatCastNode:
case FKind.FlatOpNode:
- {
- TempDescriptor[] rdtmps=fn.readsTemps();
- TempDescriptor[] wrtmps=fn.writesTemps();
- if (inAccessibleSet.contains(rdtmps[0]))
- inAccessibleSet.add(wrtmps[0]);
- }
- break;
+ {
+ TempDescriptor[] rdtmps=fn.readsTemps();
+ TempDescriptor[] wrtmps=fn.writesTemps();
+ if (inAccessibleSet.contains(rdtmps[0]))
+ inAccessibleSet.add(wrtmps[0]);
+ }
+ break;
+
case FKind.FlatReturnNode:
- {
- FlatReturnNode fr=(FlatReturnNode)fn;
- if (fr.getReturnTemp()!=null&&inAccessibleSet.contains(fr.getReturnTemp())) {
- //Need to inform callers
- Set<Pair<FlatCall, MethodDescriptor>> callset=methodmap.get(pairmd);
- for(Pair<FlatCall, MethodDescriptor> fcallpair:callset) {
- FlatCall fcall=fcallpair.getFirst();
- Set<TempDescriptor> inAccess=inAccessible.get(fcall);
- if (fcall.getReturnTemp()!=null&&!inAccess.contains(fcall.getReturnTemp())) {
- inAccess.add(fcall.getReturnTemp());
- for(int i=0;i<fcall.numNext();i++) {
- toprocess.add(new Pair<FlatNode, MethodDescriptor>(fcall.getNext(i), fcallpair.getSecond()));
- }
+ {
+ FlatReturnNode fr=(FlatReturnNode)fn;
+ if (fr.getReturnTemp()!=null&&inAccessibleSet.contains(fr.getReturnTemp())) {
+ //Need to inform callers
+ Set<Pair<FlatCall, MethodDescriptor>> callset=methodmap.get(pairmd);
+ for(Pair<FlatCall, MethodDescriptor> fcallpair : callset) {
+ FlatCall fcall=fcallpair.getFirst();
+ Set<TempDescriptor> inAccess=inAccessible.get(fcall);
+ if (fcall.getReturnTemp()!=null&&!inAccess.contains(fcall.getReturnTemp())) {
+ inAccess.add(fcall.getReturnTemp());
+ for(int i=0; i<fcall.numNext(); i++) {
+ toprocess.add(new Pair<FlatNode, MethodDescriptor>(fcall.getNext(i), fcallpair.getSecond()));
}
}
}
}
+ }
continue nextNode;
+
case FKind.FlatSESEEnterNode:
case FKind.FlatSESEExitNode:
continue nextNode;
+
case FKind.FlatCall: {
FlatCall fcall=(FlatCall)fn;
MethodDescriptor calledmethod=fcall.getMethod();
- Set methodsthatcouldbecalled=fcall.getThis()==null ? callGraph.getMethods(calledmethod) :
- callGraph.getMethods(calledmethod, fcall.getThis().getType());
- for(Object o:methodsthatcouldbecalled) {
+ Set methodsthatcouldbecalled=fcall.getThis()==null?callGraph.getMethods(calledmethod):
+ callGraph.getMethods(calledmethod, fcall.getThis().getType());
+ for(Object o : methodsthatcouldbecalled) {
MethodDescriptor md=(MethodDescriptor)o;
FlatMethod fm=state.getMethodFlat(md);
methodmap.put(md, new HashSet<Pair<FlatCall, MethodDescriptor>>());
methodmap.get(md).add(new Pair<FlatCall, MethodDescriptor>(fcall, pairmd));
-
+
HashSet<TempDescriptor> tmpinaccess=new HashSet<TempDescriptor>();
- for(int i=0;i<fm.numParameters();i++) {
+ for(int i=0; i<fm.numParameters(); i++) {
TempDescriptor fmtmp=fm.getParameter(i);
TempDescriptor tmpcall=fcall.getArgMatchingParamIndex(fm, i);
if (inAccessibleSet.contains(tmpcall)) {
}
}
if (!tmpinaccess.isEmpty()&&(!inAccessible.containsKey(fm)||!inAccessible.get(fm).containsAll(tmpinaccess))) {
- for(int i=0;i<fm.numNext();i++)
+ for(int i=0; i<fm.numNext(); i++)
toprocess.add(new Pair<FlatNode, MethodDescriptor>(fm.getNext(i),md));
if (!inAccessible.containsKey(fm))
inAccessible.put(fm, new HashSet<TempDescriptor>());
if (oldtemps!=null)
inAccessibleSet.addAll(oldtemps);
}
- break;
+ break;
+
default:
}
if (!inAccessibleSet.isEmpty()&&(!inAccessible.containsKey(fn)||!inAccessible.get(fn).equals(inAccessibleSet))) {
inAccessible.put(fn, inAccessibleSet);
- for(int i=0;i<fn.numNext();i++)
+ for(int i=0; i<fn.numNext(); i++)
toprocess.add(new Pair<FlatNode, MethodDescriptor>(fn.getNext(i),pairmd));
}
}
}
public void doAnalysis() {
- for(FlatSESEEnterNode sese: taskAnalysis.getAllSESEs()) {
+ for(FlatSESEEnterNode sese : taskAnalysis.getAllSESEs()) {
FlatSESEExitNode seseexit=sese.getFlatExit();
HashSet<TempDescriptor> liveout=new HashSet<TempDescriptor>(liveness.getLiveOutTemps(sese.getfmEnclosing(), seseexit));
- for(Iterator<TempDescriptor> tmpit=liveout.iterator();tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=liveout.iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
if (!tmp.getType().isPtr())
tmpit.remove();
}
inAccessible.put(seseexit, liveout);
- for(int i=0;i<seseexit.numNext();i++)
+ for(int i=0; i<seseexit.numNext(); i++)
toprocess.add(new Pair<FlatNode, MethodDescriptor>(seseexit.getNext(i),sese.getmdEnclosing()));
}
-
+
Set<MethodDescriptor> methodSet=taskAnalysis.getMethodsWithSESEs();
Set<MethodDescriptor> canCallSESE=new HashSet<MethodDescriptor>(methodSet);
Stack<MethodDescriptor> methodStack=new Stack<MethodDescriptor>();
while(!methodStack.isEmpty()) {
MethodDescriptor md=methodStack.pop();
Set callers=callGraph.getCallerSet(md);
- for(Object o:callers) {
+ for(Object o : callers) {
MethodDescriptor callermd=(MethodDescriptor)o;
if (!canCallSESE.contains(callermd)) {
//new method descriptor
}
//Set up exits of methods
- for(MethodDescriptor md:canCallSESE) {
+ for(MethodDescriptor md : canCallSESE) {
FlatMethod fm=state.getMethodFlat(md);
- for(FlatNode fn:fm.getNodeSet()) {
+ for(FlatNode fn : fm.getNodeSet()) {
if (fn.kind()==FKind.FlatCall) {
FlatCall fcall=(FlatCall)fn;
MethodDescriptor calledmethod=fcall.getMethod();
- Set methodsthatcouldbecalled=fcall.getThis()==null ? callGraph.getMethods(calledmethod) :
- callGraph.getMethods(calledmethod, fcall.getThis().getType());
+ Set methodsthatcouldbecalled=fcall.getThis()==null?callGraph.getMethods(calledmethod):
+ callGraph.getMethods(calledmethod, fcall.getThis().getType());
boolean specialcall=false;
- for(Object o:methodsthatcouldbecalled) {
+ for(Object o : methodsthatcouldbecalled) {
MethodDescriptor callermd=(MethodDescriptor)o;
if (canCallSESE.contains(callermd)) {
//TODO: NEED TO BUILD MAP FROM MD -> CALLS
Set<TempDescriptor> liveout=new HashSet<TempDescriptor>(liveness.getLiveOutTemps(fm, fcall));
TempDescriptor returntmp=fcall.getReturnTemp();
liveout.remove(returntmp);
- for(Iterator<TempDescriptor> tmpit=liveout.iterator();tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=liveout.iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
if (!tmp.getType().isPtr())
tmpit.remove();
}
inAccessible.put(fcall, liveout);
- for(int i=0;i<fcall.numNext();i++)
+ for(int i=0; i<fcall.numNext(); i++)
toprocess.add(new Pair<FlatNode, MethodDescriptor>(fcall.getNext(i),md));
}
}
// a code plan contains information based on analysis results
// for injecting code before and/or after a flat node
public class CodePlan {
-
+
private Hashtable< VariableSourceToken, Set<TempDescriptor> > stall2copySet;
private Set<TempDescriptor> dynamicStallSet;
private Hashtable<TempDescriptor, TempDescriptor> dynAssign_lhs2rhs;
private Set<TempDescriptor> dynAssign_lhs2curr;
- private FlatSESEEnterNode currentSESE;
-
- public CodePlan( FlatSESEEnterNode fsen ) {
+ private FlatSESEEnterNode currentSESE;
+
+ public CodePlan(FlatSESEEnterNode fsen) {
stall2copySet = new Hashtable< VariableSourceToken, Set<TempDescriptor> >();
dynamicStallSet = new HashSet<TempDescriptor>();
dynAssign_lhs2rhs = new Hashtable<TempDescriptor, TempDescriptor>();
public FlatSESEEnterNode getCurrentSESE() {
return currentSESE;
}
-
- public void addStall2CopySet( VariableSourceToken stallToken,
- Set<TempDescriptor> copySet ) {
- if( stall2copySet.containsKey( stallToken ) ) {
- Set<TempDescriptor> priorCopySet = stall2copySet.get( stallToken );
- priorCopySet.addAll( copySet );
+ public void addStall2CopySet(VariableSourceToken stallToken,
+ Set<TempDescriptor> copySet) {
+
+ if( stall2copySet.containsKey(stallToken) ) {
+ Set<TempDescriptor> priorCopySet = stall2copySet.get(stallToken);
+ priorCopySet.addAll(copySet);
} else {
- stall2copySet.put( stallToken, copySet );
+ stall2copySet.put(stallToken, copySet);
}
}
return stall2copySet.keySet();
}
- public Set<TempDescriptor> getCopySet( VariableSourceToken stallToken ) {
- return stall2copySet.get( stallToken );
+ public Set<TempDescriptor> getCopySet(VariableSourceToken stallToken) {
+ return stall2copySet.get(stallToken);
}
- public void addDynamicStall( TempDescriptor var ) {
- dynamicStallSet.add( var );
+ public void addDynamicStall(TempDescriptor var) {
+ dynamicStallSet.add(var);
}
public Set<TempDescriptor> getDynamicStallSet() {
return dynamicStallSet;
}
- public void addDynAssign( TempDescriptor lhs,
- TempDescriptor rhs ) {
- dynAssign_lhs2rhs.put( lhs, rhs );
+ public void addDynAssign(TempDescriptor lhs,
+ TempDescriptor rhs) {
+ dynAssign_lhs2rhs.put(lhs, rhs);
}
public Hashtable<TempDescriptor, TempDescriptor> getDynAssigns() {
return dynAssign_lhs2rhs;
}
- public void addDynAssign( TempDescriptor lhs ) {
- dynAssign_lhs2curr.add( lhs );
+ public void addDynAssign(TempDescriptor lhs) {
+ dynAssign_lhs2curr.add(lhs);
}
public Set<TempDescriptor> getDynAssignCurr() {
}
Iterator cpsItr = stall2copySet.entrySet().iterator();
while( cpsItr.hasNext() ) {
- Map.Entry me = (Map.Entry) cpsItr.next();
+ Map.Entry me = (Map.Entry)cpsItr.next();
VariableSourceToken stallToken = (VariableSourceToken) me.getKey();
- Set<TempDescriptor> copySet = (Set<TempDescriptor>) me.getValue();
+ Set<TempDescriptor> copySet = (Set<TempDescriptor>)me.getValue();
s += "("+stallToken+"->"+copySet+")";
}
public int getType() {
return type;
}
-
- public boolean isCoarseEdge(){
- if(type==ConflictGraph.COARSE_GRAIN_EDGE){
+
+ public boolean isCoarseEdge() {
+ if(type==ConflictGraph.COARSE_GRAIN_EDGE) {
return true;
}
return false;
while (entryIter.hasNext()) {
Entry entry = (Entry) entryIter.next();
Taint taint = (Taint) entry.getKey();
- Set<Effect> effectSet = (Set<Effect>) entry.getValue();
+ Set<Effect> effectSet = (Set<Effect>)entry.getValue();
if (!effectSet.isEmpty()) {
- Iterator<Effect> effectIter = effectSet.iterator();
- while (effectIter.hasNext()) {
- Effect effect = (Effect) effectIter.next();
- addLiveInNodeEffect(taint, effect);
- }
+ Iterator<Effect> effectIter = effectSet.iterator();
+ while (effectIter.hasNext()) {
+ Effect effect = (Effect) effectIter.next();
+ addLiveInNodeEffect(taint, effect);
+ }
}
}
}
public void addStallSite(Hashtable<Taint, Set<Effect>> taint2Effects, TempDescriptor var,
- ClassDescriptor cd) {
+ ClassDescriptor cd) {
if (taint2Effects == null) {
return;
}
while (entryIter.hasNext()) {
Entry entry = (Entry) entryIter.next();
Taint taint = (Taint) entry.getKey();
- Set<Effect> effectSet = (Set<Effect>) entry.getValue();
+ Set<Effect> effectSet = (Set<Effect>)entry.getValue();
if (!effectSet.isEmpty()) {
- Iterator<Effect> effectIter = effectSet.iterator();
- while (effectIter.hasNext()) {
- Effect effect = (Effect) effectIter.next();
- if (taint.getVar().equals(var)) {
- addStallSiteEffect(taint, effect, cd);
- }
- }
+ Iterator<Effect> effectIter = effectSet.iterator();
+ while (effectIter.hasNext()) {
+ Effect effect = (Effect) effectIter.next();
+ if (taint.getVar().equals(var)) {
+ addStallSiteEffect(taint, effect, cd);
+ }
+ }
}
}
}
// priority
Set<ConflictEdge> set = nodeU.getEdgeSet();
ConflictEdge toBeRemoved = null;
- for (Iterator iterator = set.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = set.iterator(); iterator.hasNext(); ) {
ConflictEdge conflictEdge = (ConflictEdge) iterator.next();
if ((conflictEdge.getVertexU().equals(nodeU) && conflictEdge.getVertexV().equals(nodeV))
|| (conflictEdge.getVertexU().equals(nodeV) && conflictEdge.getVertexV().equals(nodeU))) {
- if (conflictEdge.getType() == ConflictGraph.FINE_GRAIN_EDGE
- && type == ConflictGraph.COARSE_GRAIN_EDGE) {
- toBeRemoved = conflictEdge;
- break;
- } else if (conflictEdge.getType() == ConflictGraph.COARSE_GRAIN_EDGE
- && type == ConflictGraph.FINE_GRAIN_EDGE) {
- // ignore
- return;
- }
+ if (conflictEdge.getType() == ConflictGraph.FINE_GRAIN_EDGE
+ && type == ConflictGraph.COARSE_GRAIN_EDGE) {
+ toBeRemoved = conflictEdge;
+ break;
+ } else if (conflictEdge.getType() == ConflictGraph.COARSE_GRAIN_EDGE
+ && type == ConflictGraph.FINE_GRAIN_EDGE) {
+ // ignore
+ return;
+ }
}
}
Set<String> keySet = id2cn.keySet();
Set<String> analyzedIDSet = new HashSet<String>();
- for (Iterator iterator = keySet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = keySet.iterator(); iterator.hasNext(); ) {
String nodeID = (String) iterator.next();
ConflictNode node = id2cn.get(nodeID);
analyzePossibleConflicts(analyzedIDSet, node, sitesToFlag, useReachInfo);
}
private void analyzePossibleConflicts(Set<String> analyzedIDSet, ConflictNode currentNode,
- Set<FlatNew> sitesToFlag, boolean useReachInfo) {
+ Set<FlatNew> sitesToFlag, boolean useReachInfo) {
// compare with all nodes
// examine the case where self-edge exists
if (currentNode.isInVarNode()) {
conflictType = calculateConflictType(currentNode, useReachInfo);
if (conflictType > ConflictGraph.NON_WRITE_CONFLICT) {
- addConflictEdge(conflictType, currentNode, currentNode);
- if (sitesToFlag != null) {
- sitesToFlag.addAll(currentNode.getFlatNewSet());
- }
+ addConflictEdge(conflictType, currentNode, currentNode);
+ if (sitesToFlag != null) {
+ sitesToFlag.addAll(currentNode.getFlatNewSet());
+ }
}
}
Set<Entry<String, ConflictNode>> set = id2cn.entrySet();
- for (Iterator iterator = set.iterator(); iterator.hasNext();) {
- Entry<String, ConflictNode> entry = (Entry<String, ConflictNode>) iterator.next();
+ for (Iterator iterator = set.iterator(); iterator.hasNext(); ) {
+ Entry<String, ConflictNode> entry = (Entry<String, ConflictNode>)iterator.next();
String entryNodeID = entry.getKey();
ConflictNode entryNode = entry.getValue();
if (currentNode.isStallSiteNode() && entryNode.isStallSiteNode()) {
- continue;
+ continue;
}
if ((currentNode.isInVarNode() && entryNode.isInVarNode())
&& (currentNode.getSESEIdentifier() == entryNode.getSESEIdentifier())
&& (currentNode.getVar().equals(entryNode.getVar()))) {
- continue;
+ continue;
}
if ((!currentNode.getID().equals(entryNodeID))
&& !(analyzedIDSet.contains(currentNode.getID() + entryNodeID) || analyzedIDSet
- .contains(entryNodeID + currentNode.getID()))) {
-
- conflictType = calculateConflictType(currentNode, entryNode, useReachInfo);
- if (conflictType > ConflictGraph.NON_WRITE_CONFLICT) {
- addConflictEdge(conflictType, currentNode, entryNode);
- if (sitesToFlag != null) {
- sitesToFlag.addAll(currentNode.getFlatNewSet());
- sitesToFlag.addAll(entryNode.getFlatNewSet());
- }
- }
- analyzedIDSet.add(currentNode.getID() + entryNodeID);
+ .contains(entryNodeID + currentNode.getID()))) {
+
+ conflictType = calculateConflictType(currentNode, entryNode, useReachInfo);
+ if (conflictType > ConflictGraph.NON_WRITE_CONFLICT) {
+ addConflictEdge(conflictType, currentNode, entryNode);
+ if (sitesToFlag != null) {
+ sitesToFlag.addAll(currentNode.getFlatNewSet());
+ sitesToFlag.addAll(entryNode.getFlatNewSet());
+ }
+ }
+ analyzedIDSet.add(currentNode.getID() + entryNodeID);
}
}
Hashtable<Alloc, Set<Effect>> alloc2SUEffects = node.getStrongUpdateEffectSet();
conflictType =
- updateConflictType(conflictType,
- determineConflictType(node, alloc2writeEffects, node, alloc2writeEffects, useReachInfo));
+ updateConflictType(conflictType,
+ determineConflictType(node, alloc2writeEffects, node, alloc2writeEffects, useReachInfo));
conflictType =
- updateConflictType(
- conflictType,
- hasStrongUpdateConflicts(node, alloc2SUEffects, node, alloc2readEffects,
- alloc2writeEffects, useReachInfo));
+ updateConflictType(
+ conflictType,
+ hasStrongUpdateConflicts(node, alloc2SUEffects, node, alloc2readEffects,
+ alloc2writeEffects, useReachInfo));
return conflictType;
}
// if node A has write effects on reading/writing regions of node B
conflictType =
- updateConflictType(
- conflictType,
- determineConflictType(nodeA, alloc2writeEffectsA, nodeB, alloc2readEffectsB,
- useReachInfo));
+ updateConflictType(
+ conflictType,
+ determineConflictType(nodeA, alloc2writeEffectsA, nodeB, alloc2readEffectsB,
+ useReachInfo));
conflictType =
- updateConflictType(
- conflictType,
- determineConflictType(nodeA, alloc2writeEffectsA, nodeB, alloc2writeEffectsB,
- useReachInfo));
+ updateConflictType(
+ conflictType,
+ determineConflictType(nodeA, alloc2writeEffectsA, nodeB, alloc2writeEffectsB,
+ useReachInfo));
// if node B has write effects on reading regions of node A
conflictType =
- updateConflictType(
- conflictType,
- determineConflictType(nodeB, alloc2writeEffectsB, nodeA, alloc2readEffectsA,
- useReachInfo));
+ updateConflictType(
+ conflictType,
+ determineConflictType(nodeB, alloc2writeEffectsB, nodeA, alloc2readEffectsA,
+ useReachInfo));
// strong udpate effects conflict with all effects
// on objects that are reachable from the same heap roots
// if node A has SU on regions of node B
if (!alloc2SUEffectsA.isEmpty()) {
conflictType =
- updateConflictType(
- conflictType,
- hasStrongUpdateConflicts(nodeA, alloc2SUEffectsA, nodeB, alloc2readEffectsB,
- alloc2writeEffectsB, useReachInfo));
+ updateConflictType(
+ conflictType,
+ hasStrongUpdateConflicts(nodeA, alloc2SUEffectsA, nodeB, alloc2readEffectsB,
+ alloc2writeEffectsB, useReachInfo));
}
// if node B has SU on regions of node A
if (!alloc2SUEffectsB.isEmpty()) {
conflictType =
- updateConflictType(
- conflictType,
- hasStrongUpdateConflicts(nodeB, alloc2SUEffectsB, nodeA, alloc2readEffectsA,
- alloc2writeEffectsA, useReachInfo));
+ updateConflictType(
+ conflictType,
+ hasStrongUpdateConflicts(nodeB, alloc2SUEffectsB, nodeA, alloc2readEffectsA,
+ alloc2writeEffectsA, useReachInfo));
}
return conflictType;
}
private int hasStrongUpdateConflicts(ConflictNode nodeA,
- Hashtable<Alloc, Set<Effect>> SUEffectsTableA, ConflictNode nodeB,
- Hashtable<Alloc, Set<Effect>> readTableB, Hashtable<Alloc, Set<Effect>> writeTableB,
- boolean useReachInfo) {
+ Hashtable<Alloc, Set<Effect>> SUEffectsTableA, ConflictNode nodeB,
+ Hashtable<Alloc, Set<Effect>> readTableB, Hashtable<Alloc, Set<Effect>> writeTableB,
+ boolean useReachInfo) {
int conflictType = ConflictGraph.NON_WRITE_CONFLICT;
Iterator effectItrA = SUEffectsTableA.entrySet().iterator();
while (effectItrA.hasNext()) {
- Map.Entry meA = (Map.Entry) effectItrA.next();
+ Map.Entry meA = (Map.Entry)effectItrA.next();
Alloc asA = (Alloc) meA.getKey();
- Set<Effect> strongUpdateSetA = (Set<Effect>) meA.getValue();
+ Set<Effect> strongUpdateSetA = (Set<Effect>)meA.getValue();
Iterator effectItrB = readTableB.entrySet().iterator();
while (effectItrB.hasNext()) {
- Map.Entry meB = (Map.Entry) effectItrB.next();
- Alloc asB = (Alloc) meB.getKey();
- Set<Effect> esB = (Set<Effect>) meB.getValue();
-
- for (Iterator iterator = strongUpdateSetA.iterator(); iterator.hasNext();) {
- Effect strongUpdateA = (Effect) iterator.next();
- for (Iterator iterator2 = esB.iterator(); iterator2.hasNext();) {
- Effect effectB = (Effect) iterator2.next();
-
- if (strongUpdateA.getAffectedAllocSite().equals(effectB.getAffectedAllocSite())
- && strongUpdateA.getField().equals(effectB.getField())) {
- if (useReachInfo) {
- FlatNew fnRoot1 = asA.getFlatNew();
- FlatNew fnRoot2 = asB.getFlatNew();
- FlatNew fnTarget = strongUpdateA.getAffectedAllocSite().getFlatNew();
- if (da.mayBothReachTarget(fmEnclosing, fnRoot1, fnRoot2, fnTarget)) {
- addCoarseEffect(nodeA, asA, strongUpdateA);
- if (!nodeA.equals(nodeB)) {
- addCoarseEffect(nodeB, asB, effectB);
- }
- conflictType = updateConflictType(conflictType, ConflictGraph.COARSE_GRAIN_EDGE);
- }
- } else {
- if (state.RCR) {
- // need coarse effects for RCR from just one pass
- addCoarseEffect(nodeA, asA, strongUpdateA);
- if (!nodeA.equals(nodeB)) {
- addCoarseEffect(nodeB, asB, effectB);
- }
- conflictType = ConflictGraph.COARSE_GRAIN_EDGE;
- } else {
- return ConflictGraph.COARSE_GRAIN_EDGE;
- }
- }
-
- }
-
- }
- }
+ Map.Entry meB = (Map.Entry)effectItrB.next();
+ Alloc asB = (Alloc) meB.getKey();
+ Set<Effect> esB = (Set<Effect>)meB.getValue();
+
+ for (Iterator iterator = strongUpdateSetA.iterator(); iterator.hasNext(); ) {
+ Effect strongUpdateA = (Effect) iterator.next();
+ for (Iterator iterator2 = esB.iterator(); iterator2.hasNext(); ) {
+ Effect effectB = (Effect) iterator2.next();
+
+ if (strongUpdateA.getAffectedAllocSite().equals(effectB.getAffectedAllocSite())
+ && strongUpdateA.getField().equals(effectB.getField())) {
+ if (useReachInfo) {
+ FlatNew fnRoot1 = asA.getFlatNew();
+ FlatNew fnRoot2 = asB.getFlatNew();
+ FlatNew fnTarget = strongUpdateA.getAffectedAllocSite().getFlatNew();
+ if (da.mayBothReachTarget(fmEnclosing, fnRoot1, fnRoot2, fnTarget)) {
+ addCoarseEffect(nodeA, asA, strongUpdateA);
+ if (!nodeA.equals(nodeB)) {
+ addCoarseEffect(nodeB, asB, effectB);
+ }
+ conflictType = updateConflictType(conflictType, ConflictGraph.COARSE_GRAIN_EDGE);
+ }
+ } else {
+ if (state.RCR) {
+ // need coarse effects for RCR from just one pass
+ addCoarseEffect(nodeA, asA, strongUpdateA);
+ if (!nodeA.equals(nodeB)) {
+ addCoarseEffect(nodeB, asB, effectB);
+ }
+ conflictType = ConflictGraph.COARSE_GRAIN_EDGE;
+ } else {
+ return ConflictGraph.COARSE_GRAIN_EDGE;
+ }
+ }
+
+ }
+
+ }
+ }
}
effectItrB = writeTableB.entrySet().iterator();
while (effectItrB.hasNext()) {
- Map.Entry meB = (Map.Entry) effectItrB.next();
- Alloc asB = (Alloc) meB.getKey();
- Set<Effect> esB = (Set<Effect>) meB.getValue();
-
- for (Iterator iterator = strongUpdateSetA.iterator(); iterator.hasNext();) {
- Effect strongUpdateA = (Effect) iterator.next();
- for (Iterator iterator2 = esB.iterator(); iterator2.hasNext();) {
- Effect effectB = (Effect) iterator2.next();
-
- if (strongUpdateA.getAffectedAllocSite().equals(effectB.getAffectedAllocSite())
- && strongUpdateA.getField().equals(effectB.getField())) {
-
- if (useReachInfo) {
- FlatNew fnRoot1 = asA.getFlatNew();
- FlatNew fnRoot2 = asB.getFlatNew();
- FlatNew fnTarget = strongUpdateA.getAffectedAllocSite().getFlatNew();
- if (da.mayBothReachTarget(fmEnclosing, fnRoot1, fnRoot2, fnTarget)) {
- addCoarseEffect(nodeA, asA, strongUpdateA);
- if (!nodeA.equals(nodeB)) {
- addCoarseEffect(nodeB, asB, effectB);
- }
- conflictType = updateConflictType(conflictType, ConflictGraph.COARSE_GRAIN_EDGE);
- }
- } else {
- return ConflictGraph.COARSE_GRAIN_EDGE;
- }
- }
-
- }
- }
+ Map.Entry meB = (Map.Entry)effectItrB.next();
+ Alloc asB = (Alloc) meB.getKey();
+ Set<Effect> esB = (Set<Effect>)meB.getValue();
+
+ for (Iterator iterator = strongUpdateSetA.iterator(); iterator.hasNext(); ) {
+ Effect strongUpdateA = (Effect) iterator.next();
+ for (Iterator iterator2 = esB.iterator(); iterator2.hasNext(); ) {
+ Effect effectB = (Effect) iterator2.next();
+
+ if (strongUpdateA.getAffectedAllocSite().equals(effectB.getAffectedAllocSite())
+ && strongUpdateA.getField().equals(effectB.getField())) {
+
+ if (useReachInfo) {
+ FlatNew fnRoot1 = asA.getFlatNew();
+ FlatNew fnRoot2 = asB.getFlatNew();
+ FlatNew fnTarget = strongUpdateA.getAffectedAllocSite().getFlatNew();
+ if (da.mayBothReachTarget(fmEnclosing, fnRoot1, fnRoot2, fnTarget)) {
+ addCoarseEffect(nodeA, asA, strongUpdateA);
+ if (!nodeA.equals(nodeB)) {
+ addCoarseEffect(nodeB, asB, effectB);
+ }
+ conflictType = updateConflictType(conflictType, ConflictGraph.COARSE_GRAIN_EDGE);
+ }
+ } else {
+ return ConflictGraph.COARSE_GRAIN_EDGE;
+ }
+ }
+
+ }
+ }
}
}
}
private int determineConflictType(ConflictNode nodeA, Hashtable<Alloc, Set<Effect>> nodeAtable,
- ConflictNode nodeB, Hashtable<Alloc, Set<Effect>> nodeBtable, boolean useReachInfo) {
+ ConflictNode nodeB, Hashtable<Alloc, Set<Effect>> nodeBtable, boolean useReachInfo) {
int conflictType = ConflictGraph.NON_WRITE_CONFLICT;
Iterator effectItrA = nodeAtable.entrySet().iterator();
while (effectItrA.hasNext()) {
- Map.Entry meA = (Map.Entry) effectItrA.next();
+ Map.Entry meA = (Map.Entry)effectItrA.next();
Alloc asA = (Alloc) meA.getKey();
- Set<Effect> esA = (Set<Effect>) meA.getValue();
+ Set<Effect> esA = (Set<Effect>)meA.getValue();
Iterator effectItrB = nodeBtable.entrySet().iterator();
while (effectItrB.hasNext()) {
- Map.Entry meB = (Map.Entry) effectItrB.next();
- Alloc asB = (Alloc) meB.getKey();
- Set<Effect> esB = (Set<Effect>) meB.getValue();
-
- for (Iterator iterator = esA.iterator(); iterator.hasNext();) {
- Effect effectA = (Effect) iterator.next();
- for (Iterator iterator2 = esB.iterator(); iterator2.hasNext();) {
- Effect effectB = (Effect) iterator2.next();
-
- if (effectA.getAffectedAllocSite().equals(effectB.getAffectedAllocSite())
- && ((effectA.getField() != null && effectB.getField() != null && effectA.getField()
- .equals(effectB.getField())) || (effectA.getField() == null && effectB
- .getField() == null))) {
-
- if (useReachInfo) {
- FlatNew fnRoot1 = asA.getFlatNew();
- FlatNew fnRoot2 = asB.getFlatNew();
- FlatNew fnTarget = effectA.getAffectedAllocSite().getFlatNew();
- if (fnRoot1.equals(fnRoot2)) {
- if (!da.mayManyReachTarget(fmEnclosing, fnRoot1, fnTarget)) {
- // fine-grained conflict case
- conflictType = updateConflictType(conflictType, ConflictGraph.FINE_GRAIN_EDGE);
- } else {
- // coarse-grained conflict case
- addCoarseEffect(nodeA, asA, effectA);
- if (!nodeA.equals(nodeB)) {
- addCoarseEffect(nodeB, asB, effectB);
- }
- conflictType =
- updateConflictType(conflictType, ConflictGraph.COARSE_GRAIN_EDGE);
- }
- } else {
- if (da.mayBothReachTarget(fmEnclosing, fnRoot1, fnRoot2, fnTarget)) {
- addCoarseEffect(nodeA, asA, effectA);
- if (!nodeA.equals(nodeB)) {
- addCoarseEffect(nodeB, asB, effectB);
- }
- conflictType =
- updateConflictType(conflictType, ConflictGraph.COARSE_GRAIN_EDGE);
- } else {
- }
- }
- } else {
- if (state.RCR) {
- // need coarse effects for RCR from just one pass
- addCoarseEffect(nodeA, asA, effectA);
- if (!nodeA.equals(nodeB)) {
- addCoarseEffect(nodeB, asB, effectB);
- }
- conflictType = ConflictGraph.COARSE_GRAIN_EDGE;
- } else {
- return ConflictGraph.COARSE_GRAIN_EDGE;
- }
- }
- }
- }
- }
+ Map.Entry meB = (Map.Entry)effectItrB.next();
+ Alloc asB = (Alloc) meB.getKey();
+ Set<Effect> esB = (Set<Effect>)meB.getValue();
+
+ for (Iterator iterator = esA.iterator(); iterator.hasNext(); ) {
+ Effect effectA = (Effect) iterator.next();
+ for (Iterator iterator2 = esB.iterator(); iterator2.hasNext(); ) {
+ Effect effectB = (Effect) iterator2.next();
+
+ if (effectA.getAffectedAllocSite().equals(effectB.getAffectedAllocSite())
+ && ((effectA.getField() != null && effectB.getField() != null && effectA.getField()
+ .equals(effectB.getField())) || (effectA.getField() == null && effectB
+ .getField() == null))) {
+
+ if (useReachInfo) {
+ FlatNew fnRoot1 = asA.getFlatNew();
+ FlatNew fnRoot2 = asB.getFlatNew();
+ FlatNew fnTarget = effectA.getAffectedAllocSite().getFlatNew();
+ if (fnRoot1.equals(fnRoot2)) {
+ if (!da.mayManyReachTarget(fmEnclosing, fnRoot1, fnTarget)) {
+ // fine-grained conflict case
+ conflictType = updateConflictType(conflictType, ConflictGraph.FINE_GRAIN_EDGE);
+ } else {
+ // coarse-grained conflict case
+ addCoarseEffect(nodeA, asA, effectA);
+ if (!nodeA.equals(nodeB)) {
+ addCoarseEffect(nodeB, asB, effectB);
+ }
+ conflictType =
+ updateConflictType(conflictType, ConflictGraph.COARSE_GRAIN_EDGE);
+ }
+ } else {
+ if (da.mayBothReachTarget(fmEnclosing, fnRoot1, fnRoot2, fnTarget)) {
+ addCoarseEffect(nodeA, asA, effectA);
+ if (!nodeA.equals(nodeB)) {
+ addCoarseEffect(nodeB, asB, effectB);
+ }
+ conflictType =
+ updateConflictType(conflictType, ConflictGraph.COARSE_GRAIN_EDGE);
+ } else {
+ }
+ }
+ } else {
+ if (state.RCR) {
+ // need coarse effects for RCR from just one pass
+ addCoarseEffect(nodeA, asA, effectA);
+ if (!nodeA.equals(nodeB)) {
+ addCoarseEffect(nodeB, asB, effectB);
+ }
+ conflictType = ConflictGraph.COARSE_GRAIN_EDGE;
+ } else {
+ return ConflictGraph.COARSE_GRAIN_EDGE;
+ }
+ }
+ }
+ }
+ }
}
}
public void clearAllConflictEdge() {
Collection<ConflictNode> nodes = id2cn.values();
- for (Iterator iterator = nodes.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = nodes.iterator(); iterator.hasNext(); ) {
ConflictNode conflictNode = (ConflictNode) iterator.next();
conflictNode.getEdgeSet().clear();
}
HashSet<ConflictEdge> returnSet = new HashSet<ConflictEdge>();
Collection<ConflictNode> nodes = id2cn.values();
- for (Iterator iterator = nodes.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = nodes.iterator(); iterator.hasNext(); ) {
ConflictNode conflictNode = (ConflictNode) iterator.next();
returnSet.addAll(conflictNode.getEdgeSet());
}
public boolean hasConflictEdge() {
Set<String> keySet = id2cn.keySet();
- for (Iterator iterator = keySet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = keySet.iterator(); iterator.hasNext(); ) {
String key = (String) iterator.next();
ConflictNode node = id2cn.get(key);
if (node.getEdgeSet().size() > 0) {
- return true;
+ return true;
}
}
return false;
ConflictNode node = (ConflictNode) entry.getValue();
if (node.isInVarNode()) {
- if (node.getSESEIdentifier() == seseID) {
-
- Set<ConflictEdge> edgeSet = node.getEdgeSet();
- for (Iterator iterator = edgeSet.iterator(); iterator.hasNext();) {
- ConflictEdge conflictEdge = (ConflictEdge) iterator.next();
-
- for (Iterator<SESELock> seseLockIter = seseLockSet.iterator(); seseLockIter.hasNext();) {
- SESELock seseLock = seseLockIter.next();
- if (seseLock.containsConflictNode(node)
- && seseLock.containsConflictEdge(conflictEdge)) {
- WaitingElement newElement = new WaitingElement();
- newElement.setQueueID(seseLock.getID());
- newElement.setStatus(seseLock.getNodeType(node));
- newElement.setTempDesc(node.getVar());
- if (isFineElement(newElement.getStatus())) {
- newElement.setDynID(node.getVar().toString());
- }
- if (!waitingElementSet.contains(newElement)) {
- waitingElementSet.add(newElement);
- }
-
- }
- }
- }
-
- }
+ if (node.getSESEIdentifier() == seseID) {
+
+ Set<ConflictEdge> edgeSet = node.getEdgeSet();
+ for (Iterator iterator = edgeSet.iterator(); iterator.hasNext(); ) {
+ ConflictEdge conflictEdge = (ConflictEdge) iterator.next();
+
+ for (Iterator<SESELock> seseLockIter = seseLockSet.iterator(); seseLockIter.hasNext(); ) {
+ SESELock seseLock = seseLockIter.next();
+ if (seseLock.containsConflictNode(node)
+ && seseLock.containsConflictEdge(conflictEdge)) {
+ WaitingElement newElement = new WaitingElement();
+ newElement.setQueueID(seseLock.getID());
+ newElement.setStatus(seseLock.getNodeType(node));
+ newElement.setTempDesc(node.getVar());
+ if (isFineElement(newElement.getStatus())) {
+ newElement.setDynID(node.getVar().toString());
+ }
+ if (!waitingElementSet.contains(newElement)) {
+ waitingElementSet.add(newElement);
+ }
+
+ }
+ }
+ }
+
+ }
}
}
HashMap<Integer, Set<WaitingElement>> map = new HashMap<Integer, Set<WaitingElement>>();
SESEWaitingQueue seseDS = new SESEWaitingQueue();
- for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext(); ) {
WaitingElement waitingElement = (WaitingElement) iterator.next();
Set<WaitingElement> set = map.get(new Integer(waitingElement.getQueueID()));
if (set == null) {
- set = new HashSet<WaitingElement>();
+ set = new HashSet<WaitingElement>();
}
set.add(waitingElement);
map.put(new Integer(waitingElement.getQueueID()), set);
}
Set<Integer> keySet = map.keySet();
- for (Iterator iterator = keySet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = keySet.iterator(); iterator.hasNext(); ) {
Integer queueID = (Integer) iterator.next();
Set<WaitingElement> queueWEset = map.get(queueID);
refineQueue(queueID.intValue(), queueWEset, seseDS);
}
private void refineQueue(int queueID, Set<WaitingElement> waitingElementSet,
- SESEWaitingQueue seseDS) {
+ SESEWaitingQueue seseDS) {
if (waitingElementSet.size() > 1) {
// only consider there is more than one element submitted by same SESE
WaitingElement SCCelement = null;
WaitingElement coarseElement = null;
- for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext();) {
- WaitingElement waitingElement = (WaitingElement) iterator.next();
- if (waitingElement.getStatus() == ConflictNode.FINE_READ) {
- numRead++;
- } else if (waitingElement.getStatus() == ConflictNode.FINE_WRITE) {
- numWrite++;
- } else if (waitingElement.getStatus() == ConflictNode.COARSE) {
- numCoarse++;
- coarseElement = waitingElement;
- } else if (waitingElement.getStatus() == ConflictNode.SCC) {
- SCCelement = waitingElement;
- }
+ for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext(); ) {
+ WaitingElement waitingElement = (WaitingElement) iterator.next();
+ if (waitingElement.getStatus() == ConflictNode.FINE_READ) {
+ numRead++;
+ } else if (waitingElement.getStatus() == ConflictNode.FINE_WRITE) {
+ numWrite++;
+ } else if (waitingElement.getStatus() == ConflictNode.COARSE) {
+ numCoarse++;
+ coarseElement = waitingElement;
+ } else if (waitingElement.getStatus() == ConflictNode.SCC) {
+ SCCelement = waitingElement;
+ }
}
if (SCCelement != null) {
- // if there is at lease one SCC element, just enqueue SCC and
- // ignore others.
- if (state.RCR) {
- // for rcr, we need to label all of coarse tempdescriptors
- // here assume that all waiting elements are coarse
- for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext();) {
- WaitingElement waitingElement = (WaitingElement) iterator.next();
- SCCelement.addTempDesc(waitingElement.getTempDesc());
- if (waitingElement != SCCelement) {
- waitingElement.setBogus(true);
- refinedSet.add(waitingElement);
- }
- }
- }
- refinedSet.add(SCCelement);
+ // if there is at least one SCC element, just enqueue SCC and
+ // ignore others.
+ if (state.RCR) {
+ // for rcr, we need to label all of coarse tempdescriptors
+ // here assume that all waiting elements are coarse
+ for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext(); ) {
+ WaitingElement waitingElement = (WaitingElement) iterator.next();
+ SCCelement.addTempDesc(waitingElement.getTempDesc());
+ if (waitingElement != SCCelement) {
+ waitingElement.setBogus(true);
+ refinedSet.add(waitingElement);
+ }
+ }
+ }
+ refinedSet.add(SCCelement);
} else if (numCoarse == 1 && (numRead + numWrite == total)) {
- // if one is a coarse, the othere are reads/write, enqueue SCC.
- WaitingElement we = new WaitingElement();
- we.setQueueID(queueID);
- we.setStatus(ConflictNode.SCC);
- refinedSet.add(we);
+ // if one is a coarse, the others are reads/writes, enqueue SCC.
+ WaitingElement we = new WaitingElement();
+ we.setQueueID(queueID);
+ we.setStatus(ConflictNode.SCC);
+ refinedSet.add(we);
} else if (numCoarse == total) {
- // if there are multiple coarses, enqueue just one coarse.
- if (state.RCR) {
- // for rcr, we need to label all of coarse tempdescriptors
- for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext();) {
- WaitingElement waitingElement = (WaitingElement) iterator.next();
- if (waitingElement != coarseElement) {
- coarseElement.addTempDesc(waitingElement.getTempDesc());
- waitingElement.setBogus(true);
- refinedSet.add(waitingElement);
- }
- }
- }
- refinedSet.add(coarseElement);
+ // if there are multiple coarses, enqueue just one coarse.
+ if (state.RCR) {
+ // for rcr, we need to label all of coarse tempdescriptors
+ for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext(); ) {
+ WaitingElement waitingElement = (WaitingElement) iterator.next();
+ if (waitingElement != coarseElement) {
+ coarseElement.addTempDesc(waitingElement.getTempDesc());
+ waitingElement.setBogus(true);
+ refinedSet.add(waitingElement);
+ }
+ }
+ }
+ refinedSet.add(coarseElement);
} else if (numWrite == total || (numRead + numWrite) == total) {
- // code generator is going to handle the case for multiple writes &
- // read/writes.
- seseDS.setType(queueID, SESEWaitingQueue.EXCEPTION);
- refinedSet.addAll(waitingElementSet);
+ // code generator is going to handle the case for multiple writes &
+ // read/writes.
+ seseDS.setType(queueID, SESEWaitingQueue.EXCEPTION);
+ refinedSet.addAll(waitingElementSet);
} else {
- // otherwise, enqueue everything.
- refinedSet.addAll(waitingElementSet);
+ // otherwise, enqueue everything.
+ refinedSet.addAll(waitingElementSet);
}
seseDS.setWaitingElementSet(queueID, refinedSet);
} else {
}
public Set<WaitingElement> getStallSiteWaitingElementSet(FlatNode stallSite,
- Set<SESELock> seseLockSet) {
+ Set<SESELock> seseLockSet) {
HashSet<WaitingElement> waitingElementSet = new HashSet<WaitingElement>();
Iterator iter = id2cn.entrySet().iterator();
ConflictNode node = (ConflictNode) entry.getValue();
if (node.isStallSiteNode() && node.getStallSiteFlatNode().equals(stallSite)) {
- Set<ConflictEdge> edgeSet = node.getEdgeSet();
- for (Iterator iter2 = edgeSet.iterator(); iter2.hasNext();) {
- ConflictEdge conflictEdge = (ConflictEdge) iter2.next();
-
- for (Iterator<SESELock> seseLockIter = seseLockSet.iterator(); seseLockIter.hasNext();) {
- SESELock seseLock = seseLockIter.next();
- if (seseLock.containsConflictNode(node) && seseLock.containsConflictEdge(conflictEdge)) {
- WaitingElement newElement = new WaitingElement();
- newElement.setQueueID(seseLock.getID());
- newElement.setStatus(seseLock.getNodeType(node));
- if (isFineElement(newElement.getStatus())) {
- newElement.setDynID(node.getVar().toString());
- }
- newElement.setTempDesc(node.getVar());
- waitingElementSet.add(newElement);
- }
- }
-
- }
+ Set<ConflictEdge> edgeSet = node.getEdgeSet();
+ for (Iterator iter2 = edgeSet.iterator(); iter2.hasNext(); ) {
+ ConflictEdge conflictEdge = (ConflictEdge) iter2.next();
+
+ for (Iterator<SESELock> seseLockIter = seseLockSet.iterator(); seseLockIter.hasNext(); ) {
+ SESELock seseLock = seseLockIter.next();
+ if (seseLock.containsConflictNode(node) && seseLock.containsConflictEdge(conflictEdge)) {
+ WaitingElement newElement = new WaitingElement();
+ newElement.setQueueID(seseLock.getID());
+ newElement.setStatus(seseLock.getNodeType(node));
+ if (isFineElement(newElement.getStatus())) {
+ newElement.setDynID(node.getVar().toString());
+ }
+ newElement.setTempDesc(node.getVar());
+ waitingElementSet.add(newElement);
+ }
+ }
+
+ }
}
ConflictNode node = entry.getValue();
if (filter) {
- if (node.getID().startsWith("___dst") || node.getID().startsWith("___srctmp")
- || node.getID().startsWith("___neverused") || node.getID().startsWith("___temp")) {
+ if (node.getID().startsWith("___dst") || node.getID().startsWith("___srctmp")
+ || node.getID().startsWith("___neverused") || node.getID().startsWith("___temp")) {
- continue;
- }
+ continue;
+ }
- if (node.getEdgeSet().isEmpty()) {
- continue;
- }
+ if (node.getEdgeSet().isEmpty()) {
+ continue;
+ }
}
attributes += "label=\"" + node.getID() + "\\n";
if (node.isStallSiteNode()) {
- String srcFileName = node.getSourceFileName();
- int separatorIdx = srcFileName.lastIndexOf(File.separator);
- if (separatorIdx > 0) {
- srcFileName = srcFileName.substring(separatorIdx + 1);
- }
- node.stallSite.getNumLine();
- attributes +=
- "STALL SITE" + "\\n" + srcFileName + ":" + node.getStallSiteFlatNode().getNumLine()
- + "\\n" + "\"]";
+ String srcFileName = node.getSourceFileName();
+ int separatorIdx = srcFileName.lastIndexOf(File.separator);
+ if (separatorIdx > 0) {
+ srcFileName = srcFileName.substring(separatorIdx + 1);
+ }
+ node.stallSite.getNumLine();
+ attributes +=
+ "STALL SITE" + "\\n" + srcFileName + ":" + node.getStallSiteFlatNode().getNumLine()
+ + "\\n" + "\"]";
} else {
- attributes += "LIVE-IN" + "\\n" + "\"]";
+ attributes += "LIVE-IN" + "\\n" + "\"]";
}
bw.write(entry.getKey() + attributes + ";\n");
Set<ConflictEdge> edgeSet = node.getEdgeSet();
- for (Iterator iterator = edgeSet.iterator(); iterator.hasNext();) {
- ConflictEdge conflictEdge = (ConflictEdge) iterator.next();
-
- ConflictNode u = conflictEdge.getVertexU();
- ConflictNode v = conflictEdge.getVertexV();
-
- if (filter) {
- String uID = u.getID();
- String vID = v.getID();
- if (uID.startsWith("___dst") || uID.startsWith("___srctmp")
- || uID.startsWith("___neverused") || uID.startsWith("___temp")
- || vID.startsWith("___dst") || vID.startsWith("___srctmp")
- || vID.startsWith("___neverused") || vID.startsWith("___temp")) {
- continue;
- }
- }
-
- if (!addedSet.contains(conflictEdge)) {
- bw.write("" + u.getID() + "--" + v.getID() + "[label=" + conflictEdge.toGraphEdgeString()
- + ",decorate];\n");
- addedSet.add(conflictEdge);
- }
+ for (Iterator iterator = edgeSet.iterator(); iterator.hasNext(); ) {
+ ConflictEdge conflictEdge = (ConflictEdge) iterator.next();
+
+ ConflictNode u = conflictEdge.getVertexU();
+ ConflictNode v = conflictEdge.getVertexV();
+
+ if (filter) {
+ String uID = u.getID();
+ String vID = v.getID();
+ if (uID.startsWith("___dst") || uID.startsWith("___srctmp")
+ || uID.startsWith("___neverused") || uID.startsWith("___temp")
+ || vID.startsWith("___dst") || vID.startsWith("___srctmp")
+ || vID.startsWith("___neverused") || vID.startsWith("___temp")) {
+ continue;
+ }
+ }
+
+ if (!addedSet.contains(conflictEdge)) {
+ bw.write("" + u.getID() + "--" + v.getID() + "[label=" + conflictEdge.toGraphEdgeString()
+ + ",decorate];\n");
+ addedSet.add(conflictEdge);
+ }
}
}
public static final int STALLSITE = 1;
public ConflictNode(String id, int nodeType, TempDescriptor var, FlatNode stallSite,
- ClassDescriptor cd) {
+ ClassDescriptor cd) {
this(id, var, nodeType);
this.stallSite = stallSite;
this.cd = cd;
}
public Taint getTaint(Alloc as) {
- for (Iterator iterator = taintSet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = taintSet.iterator(); iterator.hasNext(); ) {
Taint t = (Taint) iterator.next();
if (t.getAllocSite().equals(as)) {
- return t;
+ return t;
}
}
return null;
public Set<FlatNew> getFlatNewSet() {
Set<FlatNew> fnSet = new HashSet<FlatNew>();
- for (Iterator iterator = allocSet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = allocSet.iterator(); iterator.hasNext(); ) {
Alloc as = (Alloc) iterator.next();
FlatNew fn = as.getFlatNew();
fnSet.add(fn);
public boolean IsValidToPrune() {
- for (Iterator iterator = edgeSet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = edgeSet.iterator(); iterator.hasNext(); ) {
ConflictEdge edge = (ConflictEdge) iterator.next();
if (edge.getVertexU() == edge.getVertexV()) {
- // self-conflict, need to generate traverser
- return false;
+ // self-conflict, need to generate traverser
+ return false;
} else {
- if (edge.getVertexU() == this) {
- if (edge.getVertexV().isInVarNode()) {
- // has a conflict with invar, need to generate traverser
- return false;
- }
- } else {
- if (edge.getVertexU().isInVarNode()) {
- // has a conflict with invar, need to generate traverser
- return false;
- }
- }
+ if (edge.getVertexU() == this) {
+ if (edge.getVertexV().isInVarNode()) {
+ // has a conflict with invar, need to generate traverser
+ return false;
+ }
+ } else {
+ if (edge.getVertexU().isInVarNode()) {
+ // has a conflict with invar, need to generate traverser
+ return false;
+ }
+ }
}
}
return true;
}
- public void addNeededStaticName( SESEandAgePair p ) {
- needStaticNameInCode.add( p );
+ public void addNeededStaticName(SESEandAgePair p) {
+ needStaticNameInCode.add(p);
}
public Set<SESEandAgePair> getNeededStaticNames() {
return needStaticNameInCode;
}
- public void addDynamicVar( TempDescriptor td ) {
- dynamicVars.add( td );
+ public void addDynamicVar(TempDescriptor td) {
+ dynamicVars.add(td);
}
public Set<TempDescriptor> getDynamicVarSet() {
}
public OoOJavaAnalysis(State state, TypeUtil typeUtil, CallGraph callGraph, Liveness liveness,
- ArrayReferencees arrayReferencees) {
+ ArrayReferencees arrayReferencees) {
State.logEvent("Starting OoOJavaAnalysis");
this.state = state;
// to compute taints and effects
if (state.POINTER) {
disjointAnalysisTaints =
- new Pointer(state, typeUtil, callGraph, rblockRel, liveness, buildStateMachines);
+ new Pointer(state, typeUtil, callGraph, rblockRel, liveness, buildStateMachines);
((Pointer) disjointAnalysisTaints).doAnalysis();
} else
disjointAnalysisTaints =
- new DisjointAnalysis(state, typeUtil, callGraph, liveness, arrayReferencees, null,
- rblockRel, buildStateMachines, true); // suppress output--this is
- // an intermediate pass
+ new DisjointAnalysis(state, typeUtil, callGraph, liveness, arrayReferencees, null,
+ rblockRel, buildStateMachines, true); // suppress output--this is
+ // an intermediate pass
State.logEvent("OoOJavaAnalysis 5th pass completed");
// efficient method to deal with conflict can be computed
// later
disjointAnalysisReach =
- new DisjointAnalysis(state, typeUtil, callGraph, liveness, arrayReferencees, sitesToFlag,
- null, // don't do effects analysis again!
- null, // no BuildStateMachines needed
- !state.OOODEBUG // only print out in OoOJava debug mode
- );
+ new DisjointAnalysis(state, typeUtil, callGraph, liveness, arrayReferencees, sitesToFlag,
+ null, // don't do effects analysis again!
+ null, // no BuildStateMachines needed
+ !state.OOODEBUG // only print out in OoOJava debug mode
+ );
State.logEvent("OoOJavaAnalysis 9th pass completed");
// 10th pass, calculate conflicts with reachability info
calculateConflicts(null, true);
// analysis passes are complete
Iterator spliceItr = wdvNodesToSpliceIn.entrySet().iterator();
while (spliceItr.hasNext()) {
- Map.Entry me = (Map.Entry) spliceItr.next();
+ Map.Entry me = (Map.Entry)spliceItr.next();
FlatWriteDynamicVarNode fwdvn = (FlatWriteDynamicVarNode) me.getValue();
fwdvn.spliceIntoIR();
}
if (state.OOODEBUG) {
try {
- writeReports("");
- disjointAnalysisTaints.getEffectsAnalysis().writeEffects("effects.txt");
- writeConflictGraph();
+ writeReports("");
+ disjointAnalysisTaints.getEffectsAnalysis().writeEffects("effects.txt");
+ writeConflictGraph();
} catch (IOException e) {
}
}
fn2fm.put(fn, fm);
for (int i = 0; i < fn.numNext(); i++) {
- FlatNode nn = fn.getNext(i);
- if (!flatNodesVisited.contains(nn)) {
- flatNodesToVisit.add(nn);
- }
+ FlatNode nn = fn.getNext(i);
+ if (!flatNodesVisited.contains(nn)) {
+ flatNodesToVisit.add(nn);
+ }
}
}
}
try {
BufferedWriter bw = new BufferedWriter(new FileWriter("sitesToFlag.txt"));
- for (Iterator iterator = sitesToFlag.iterator(); iterator.hasNext();) {
- FlatNew fn = (FlatNew) iterator.next();
- bw.write(fn + "\n");
+ for (Iterator iterator = sitesToFlag.iterator(); iterator.hasNext(); ) {
+ FlatNew fn = (FlatNew) iterator.next();
+ bw.write(fn + "\n");
}
bw.close();
} catch (IOException e) {
// merge sets from control flow joins
Set<TempDescriptor> livein = new HashSet<TempDescriptor>();
for (int i = 0; i < fn.numNext(); i++) {
- FlatNode nn = fn.getNext(i);
- Set<TempDescriptor> s = livenessGlobalView.get(nn);
- if (s != null) {
- livein.addAll(s);
- }
+ FlatNode nn = fn.getNext(i);
+ Set<TempDescriptor> s = livenessGlobalView.get(nn);
+ if (s != null) {
+ livein.addAll(s);
+ }
}
Set<TempDescriptor> curr = liveness_nodeActions(fn, livein);
// if a new result, schedule backward nodes for analysis
if (!curr.equals(prev)) {
- if (fn != fsen) {
- livenessGlobalView.put(fn, curr);
- for (int i = 0; i < fn.numPrev(); i++) {
- FlatNode nn = fn.getPrev(i);
- flatNodesToVisit.add(nn);
- }
- }
+ if (fn != fsen) {
+ livenessGlobalView.put(fn, curr);
+ for (int i = 0; i < fn.numPrev(); i++) {
+ FlatNode nn = fn.getPrev(i);
+ flatNodesToVisit.add(nn);
+ }
+ }
}
}
}
// task's in-var set
FlatSESEEnterNode fsen = (FlatSESEEnterNode) fn;
if (liveIn != null) {
- fsen.addInVarSet(liveIn);
+ fsen.addInVarSet(liveIn);
}
// no break, should also execute default actions
}
// handle effects of statement in reverse, writes then reads
TempDescriptor[] writeTemps = fn.writesTemps();
for (int i = 0; i < writeTemps.length; ++i) {
- liveIn.remove(writeTemps[i]);
-
- // if we are analyzing code declared directly in a task,
- FlatSESEEnterNode fsen = rblockRel.getLocalInnerRBlock(fn);
- if (fsen != null) {
- // check to see if we are writing to variables that will
- // be live-out at the task's exit (and therefore should
- // go in the task's out-var set)
- FlatSESEExitNode fsexn = fsen.getFlatExit();
- // note: liveness analysis can have corresponding decisions
- Set<TempDescriptor> livetemps = liveness.getLiveInTemps(fsen.getfmEnclosing(), fsexn);
- if (livetemps != null && livetemps.contains(writeTemps[i])) {
- fsen.addOutVar(writeTemps[i]);
- }
- }
+ liveIn.remove(writeTemps[i]);
+
+ // if we are analyzing code declared directly in a task,
+ FlatSESEEnterNode fsen = rblockRel.getLocalInnerRBlock(fn);
+ if (fsen != null) {
+ // check to see if we are writing to variables that will
+ // be live-out at the task's exit (and therefore should
+ // go in the task's out-var set)
+ FlatSESEExitNode fsexn = fsen.getFlatExit();
+ // note: liveness analysis can have corresponding decisions
+ Set<TempDescriptor> livetemps = liveness.getLiveInTemps(fsen.getfmEnclosing(), fsexn);
+ if (livetemps != null && livetemps.contains(writeTemps[i])) {
+ fsen.addOutVar(writeTemps[i]);
+ }
+ }
}
TempDescriptor[] readTemps = fn.readsTemps();
for (int i = 0; i < readTemps.length; ++i) {
- liveIn.add(readTemps[i]);
+ liveIn.add(readTemps[i]);
}
Set<TempDescriptor> virtualReadTemps = livenessVirtualReads.get(fn);
if (virtualReadTemps != null) {
- liveIn.addAll(virtualReadTemps);
+ liveIn.addAll(virtualReadTemps);
}
}
- break;
+ break;
} // end switch
// merge sets from control flow joins
VarSrcTokTable curr = new VarSrcTokTable();
for (int i = 0; i < fn.numPrev(); i++) {
- FlatNode nn = fn.getPrev(i);
- VarSrcTokTable incoming = variableResults.get(nn);
- curr.merge(incoming);
+ FlatNode nn = fn.getPrev(i);
+ VarSrcTokTable incoming = variableResults.get(nn);
+ curr.merge(incoming);
}
FlatSESEEnterNode currentSESE = rblockRel.getLocalInnerRBlock(fn);
if (currentSESE == null) {
- currentSESE = rblockRel.getCallerProxySESE();
+ currentSESE = rblockRel.getCallerProxySESE();
}
variable_nodeActions(fn, curr, currentSESE);
// if a new result, schedule forward nodes for analysis
if (!curr.equals(prev)) {
- variableResults.put(fn, curr);
+ variableResults.put(fn, curr);
- for (int i = 0; i < fn.numNext(); i++) {
- FlatNode nn = fn.getNext(i);
- flatNodesToVisit.add(nn);
- }
+ for (int i = 0; i < fn.numNext(); i++) {
+ FlatNode nn = fn.getNext(i);
+ flatNodesToVisit.add(nn);
+ }
}
}
}
private void variable_nodeActions(FlatNode fn, VarSrcTokTable vstTable,
- FlatSESEEnterNode currentSESE) {
+ FlatSESEEnterNode currentSESE) {
switch (fn.kind()) {
case FKind.FlatSESEEnterNode: {
vstTable.age(fsen);
vstTable.assertConsistency();
}
- break;
+ break;
case FKind.FlatSESEExitNode: {
FlatSESEExitNode fsexn = (FlatSESEExitNode) fn;
Set<TempDescriptor> liveVars = liveness.getLiveInTemps(fsen.getfmEnclosing(), fn);
Set<TempDescriptor> fsenVirtReads =
- vstTable.calcVirtReadsAndPruneParentAndSiblingTokens(fsen, liveVars);
+ vstTable.calcVirtReadsAndPruneParentAndSiblingTokens(fsen, liveVars);
Set<TempDescriptor> fsenVirtReadsOld = livenessVirtualReads.get(fn);
if (fsenVirtReadsOld != null) {
- fsenVirtReads.addAll(fsenVirtReadsOld);
+ fsenVirtReads.addAll(fsenVirtReadsOld);
}
livenessVirtualReads.put(fn, fsenVirtReads);
// the latest, clean sources
Iterator<TempDescriptor> outVarItr = fsen.getOutVarSet().iterator();
while (outVarItr.hasNext()) {
- TempDescriptor outVar = outVarItr.next();
- HashSet<TempDescriptor> ts = new HashSet<TempDescriptor>();
- ts.add(outVar);
- VariableSourceToken vst = new VariableSourceToken(ts, fsen, new Integer(0), outVar);
- vstTable.remove(outVar);
- vstTable.add(vst);
+ TempDescriptor outVar = outVarItr.next();
+ HashSet<TempDescriptor> ts = new HashSet<TempDescriptor>();
+ ts.add(outVar);
+ VariableSourceToken vst = new VariableSourceToken(ts, fsen, new Integer(0), outVar);
+ vstTable.remove(outVar);
+ vstTable.add(vst);
}
vstTable.assertConsistency();
}
- break;
+ break;
case FKind.FlatOpNode: {
FlatOpNode fon = (FlatOpNode) fn;
if (fon.getOp().getOp() == Operation.ASSIGN) {
- TempDescriptor lhs = fon.getDest();
- TempDescriptor rhs = fon.getLeft();
-
- vstTable.remove(lhs);
-
- Set<VariableSourceToken> forAddition = new HashSet<VariableSourceToken>();
-
- Iterator<VariableSourceToken> itr = vstTable.get(rhs).iterator();
- while (itr.hasNext()) {
- VariableSourceToken vst = itr.next();
-
- HashSet<TempDescriptor> ts = new HashSet<TempDescriptor>();
- ts.add(lhs);
-
- // when we do x = y for variables, just copy over from a child,
- // there are two cases:
- // 1. if the current task is the caller proxy, any local root is a
- // child
- boolean case1 =
- currentSESE.getIsCallerProxySESE()
- && rblockRel.getLocalRootSESEs().contains(vst.getSESE());
-
- // 2. if the child task is a locally-defined child of the current task
- boolean case2 = currentSESE.getLocalChildren().contains(vst.getSESE());
-
- if (case1 || case2) {
- // if the source comes from a child, copy it over
- forAddition.add(new VariableSourceToken(ts, vst.getSESE(), vst.getAge(), vst
- .getAddrVar()));
- } else {
- // otherwise, stamp it as us as the source
- forAddition.add(new VariableSourceToken(ts, currentSESE, new Integer(0), lhs));
- }
- }
-
- vstTable.addAll(forAddition);
-
- // only break if this is an ASSIGN op node,
- // otherwise fall through to default case
- vstTable.assertConsistency();
- break;
+ TempDescriptor lhs = fon.getDest();
+ TempDescriptor rhs = fon.getLeft();
+
+ vstTable.remove(lhs);
+
+ Set<VariableSourceToken> forAddition = new HashSet<VariableSourceToken>();
+
+ Iterator<VariableSourceToken> itr = vstTable.get(rhs).iterator();
+ while (itr.hasNext()) {
+ VariableSourceToken vst = itr.next();
+
+ HashSet<TempDescriptor> ts = new HashSet<TempDescriptor>();
+ ts.add(lhs);
+
+ // when we do x = y for variables, just copy over from a child,
+ // there are two cases:
+ // 1. if the current task is the caller proxy, any local root is a
+ // child
+ boolean case1 =
+ currentSESE.getIsCallerProxySESE()
+ && rblockRel.getLocalRootSESEs().contains(vst.getSESE());
+
+ // 2. if the child task is a locally-defined child of the current task
+ boolean case2 = currentSESE.getLocalChildren().contains(vst.getSESE());
+
+ if (case1 || case2) {
+ // if the source comes from a child, copy it over
+ forAddition.add(new VariableSourceToken(ts, vst.getSESE(), vst.getAge(), vst
+ .getAddrVar()));
+ } else {
+ // otherwise, stamp it as us as the source
+ forAddition.add(new VariableSourceToken(ts, currentSESE, new Integer(0), lhs));
+ }
+ }
+
+ vstTable.addAll(forAddition);
+
+ // only break if this is an ASSIGN op node,
+ // otherwise fall through to default case
+ vstTable.assertConsistency();
+ break;
}
}
- // note that FlatOpNode's that aren't ASSIGN
- // fall through to this default case
+ // note that FlatOpNode's that aren't ASSIGN
+ // fall through to this default case
default: {
TempDescriptor[] writeTemps = fn.writesTemps();
if (writeTemps.length > 0) {
- // for now, when writeTemps > 1, make sure
- // its a call node, programmer enforce only
- // doing stuff like calling a print routine
- if (writeTemps.length > 1) {
- assert fn.kind() == FKind.FlatCall || fn.kind() == FKind.FlatMethod;
- break;
- }
+ // for now, when writeTemps > 1, make sure
+ // it's a call node; the programmer is enforced to only be
+ // doing stuff like calling a print routine
+ if (writeTemps.length > 1) {
+ assert fn.kind() == FKind.FlatCall || fn.kind() == FKind.FlatMethod;
+ break;
+ }
- vstTable.remove(writeTemps[0]);
+ vstTable.remove(writeTemps[0]);
- HashSet<TempDescriptor> ts = new HashSet<TempDescriptor>();
- ts.add(writeTemps[0]);
+ HashSet<TempDescriptor> ts = new HashSet<TempDescriptor>();
+ ts.add(writeTemps[0]);
- vstTable.add(new VariableSourceToken(ts, currentSESE, new Integer(0), writeTemps[0]));
+ vstTable.add(new VariableSourceToken(ts, currentSESE, new Integer(0), writeTemps[0]));
}
vstTable.assertConsistency();
}
- break;
+ break;
} // end switch
}
Set<TempDescriptor> curr = new HashSet<TempDescriptor>();
for (int i = 0; i < fn.numPrev(); i++) {
- FlatNode nn = fn.getPrev(i);
- Set<TempDescriptor> notAvailIn = notAvailableResults.get(nn);
- if (notAvailIn != null) {
- curr.addAll(notAvailIn);
- }
+ FlatNode nn = fn.getPrev(i);
+ Set<TempDescriptor> notAvailIn = notAvailableResults.get(nn);
+ if (notAvailIn != null) {
+ curr.addAll(notAvailIn);
+ }
}
FlatSESEEnterNode currentSESE = rblockRel.getLocalInnerRBlock(fn);
if (currentSESE == null) {
- currentSESE = rblockRel.getCallerProxySESE();
+ currentSESE = rblockRel.getCallerProxySESE();
}
notAvailable_nodeActions(fn, curr, currentSESE);
// if a new result, schedule forward nodes for analysis
if (!curr.equals(prev)) {
- notAvailableResults.put(fn, curr);
+ notAvailableResults.put(fn, curr);
- for (int i = 0; i < fn.numNext(); i++) {
- FlatNode nn = fn.getNext(i);
- flatNodesToVisit.add(nn);
- }
+ for (int i = 0; i < fn.numNext(); i++) {
+ FlatNode nn = fn.getNext(i);
+ flatNodesToVisit.add(nn);
+ }
}
}
}
private void notAvailable_nodeActions(FlatNode fn, Set<TempDescriptor> notAvailSet,
- FlatSESEEnterNode currentSESE) {
+ FlatSESEEnterNode currentSESE) {
// any temps that are removed from the not available set
// at this node should be marked in this node's code plan
Set<TempDescriptor> notAvailCopy = new HashSet<TempDescriptor>();
Iterator<TempDescriptor> tdItr = notAvailSet.iterator();
while (tdItr.hasNext()) {
- notAvailCopy.add(tdItr.next());
+ notAvailCopy.add(tdItr.next());
}
notAvailableIntoSESE.put(fsen, notAvailCopy);
notAvailSet.clear();
}
- break;
+ break;
case FKind.FlatSESEExitNode: {
FlatSESEExitNode fsexn = (FlatSESEExitNode) fn;
assert notAvailIn != null;
notAvailSet.addAll(notAvailIn);
}
- break;
+ break;
case FKind.FlatMethod: {
notAvailSet.clear();
}
- break;
+ break;
case FKind.FlatOpNode: {
FlatOpNode fon = (FlatOpNode) fn;
if (fon.getOp().getOp() == Operation.ASSIGN) {
- TempDescriptor lhs = fon.getDest();
- TempDescriptor rhs = fon.getLeft();
-
- // copy makes lhs same availability as rhs
- if (notAvailSet.contains(rhs)) {
- notAvailSet.add(lhs);
- } else {
- notAvailSet.remove(lhs);
- }
-
- // only break if this is an ASSIGN op node,
- // otherwise fall through to default case
- break;
+ TempDescriptor lhs = fon.getDest();
+ TempDescriptor rhs = fon.getLeft();
+
+ // copy makes lhs same availability as rhs
+ if (notAvailSet.contains(rhs)) {
+ notAvailSet.add(lhs);
+ } else {
+ notAvailSet.remove(lhs);
+ }
+
+ // only break if this is an ASSIGN op node,
+ // otherwise fall through to default case
+ break;
}
}
- // note that FlatOpNode's that aren't ASSIGN
- // fall through to this default case
+ // note that FlatOpNode's that aren't ASSIGN
+ // fall through to this default case
default: {
TempDescriptor[] writeTemps = fn.writesTemps();
for (int i = 0; i < writeTemps.length; i++) {
- TempDescriptor wTemp = writeTemps[i];
- notAvailSet.remove(wTemp);
+ TempDescriptor wTemp = writeTemps[i];
+ notAvailSet.remove(wTemp);
}
TempDescriptor[] readTemps = fn.readsTemps();
for (int i = 0; i < readTemps.length; i++) {
- TempDescriptor rTemp = readTemps[i];
- notAvailSet.remove(rTemp);
-
- // if this variable has exactly one source, potentially
- // get other things from this source as well
- VarSrcTokTable vstTable = variableResults.get(fn);
-
- VSTWrapper vstIfStatic = new VSTWrapper();
- Integer srcType = vstTable.getRefVarSrcType(rTemp, currentSESE, vstIfStatic);
-
- if (srcType.equals(VarSrcTokTable.SrcType_STATIC)) {
-
- VariableSourceToken vst = vstIfStatic.vst;
-
- Iterator<VariableSourceToken> availItr =
- vstTable.get(vst.getSESE(), vst.getAge()).iterator();
-
- // look through things that are also available from same source
- while (availItr.hasNext()) {
- VariableSourceToken vstAlsoAvail = availItr.next();
-
- Iterator<TempDescriptor> refVarItr = vstAlsoAvail.getRefVars().iterator();
- while (refVarItr.hasNext()) {
- TempDescriptor refVarAlso = refVarItr.next();
-
- // if a variable is available from the same source, AND it ALSO
- // only comes from one statically known source, mark it available
- VSTWrapper vstIfStaticNotUsed = new VSTWrapper();
- Integer srcTypeAlso =
- vstTable.getRefVarSrcType(refVarAlso, currentSESE, vstIfStaticNotUsed);
- if (srcTypeAlso.equals(VarSrcTokTable.SrcType_STATIC)) {
- notAvailSet.remove(refVarAlso);
- }
- }
- }
- }
+ TempDescriptor rTemp = readTemps[i];
+ notAvailSet.remove(rTemp);
+
+ // if this variable has exactly one source, potentially
+ // get other things from this source as well
+ VarSrcTokTable vstTable = variableResults.get(fn);
+
+ VSTWrapper vstIfStatic = new VSTWrapper();
+ Integer srcType = vstTable.getRefVarSrcType(rTemp, currentSESE, vstIfStatic);
+
+ if (srcType.equals(VarSrcTokTable.SrcType_STATIC)) {
+
+ VariableSourceToken vst = vstIfStatic.vst;
+
+ Iterator<VariableSourceToken> availItr =
+ vstTable.get(vst.getSESE(), vst.getAge()).iterator();
+
+ // look through things that are also available from same source
+ while (availItr.hasNext()) {
+ VariableSourceToken vstAlsoAvail = availItr.next();
+
+ Iterator<TempDescriptor> refVarItr = vstAlsoAvail.getRefVars().iterator();
+ while (refVarItr.hasNext()) {
+ TempDescriptor refVarAlso = refVarItr.next();
+
+ // if a variable is available from the same source, AND it ALSO
+ // only comes from one statically known source, mark it available
+ VSTWrapper vstIfStaticNotUsed = new VSTWrapper();
+ Integer srcTypeAlso =
+ vstTable.getRefVarSrcType(refVarAlso, currentSESE, vstIfStaticNotUsed);
+ if (srcTypeAlso.equals(VarSrcTokTable.SrcType_STATIC)) {
+ notAvailSet.remove(refVarAlso);
+ }
+ }
+ }
+ }
}
}
- break;
+ break;
} // end switch
}
// before the current statement
VarSrcTokTable dotSTtable = new VarSrcTokTable();
for (int i = 0; i < fn.numPrev(); i++) {
- FlatNode nn = fn.getPrev(i);
- dotSTtable.merge(variableResults.get(nn));
+ FlatNode nn = fn.getPrev(i);
+ dotSTtable.merge(variableResults.get(nn));
}
// find dt-st notAvailableSet also
Set<TempDescriptor> dotSTnotAvailSet = new HashSet<TempDescriptor>();
for (int i = 0; i < fn.numPrev(); i++) {
- FlatNode nn = fn.getPrev(i);
- Set<TempDescriptor> notAvailIn = notAvailableResults.get(nn);
- if (notAvailIn != null) {
- dotSTnotAvailSet.addAll(notAvailIn);
- }
+ FlatNode nn = fn.getPrev(i);
+ Set<TempDescriptor> notAvailIn = notAvailableResults.get(nn);
+ if (notAvailIn != null) {
+ dotSTnotAvailSet.addAll(notAvailIn);
+ }
}
Set<TempDescriptor> dotSTlive = livenessGlobalView.get(fn);
FlatSESEEnterNode currentSESE = rblockRel.getLocalInnerRBlock(fn);
if (currentSESE == null) {
- currentSESE = rblockRel.getCallerProxySESE();
+ currentSESE = rblockRel.getCallerProxySESE();
}
codePlans_nodeActions(fm, fn, dotSTtable, dotSTnotAvailSet, currentSESE);
for (int i = 0; i < fn.numNext(); i++) {
- FlatNode nn = fn.getNext(i);
+ FlatNode nn = fn.getNext(i);
- if (!visited.contains(nn)) {
- flatNodesToVisit.add(nn);
- }
+ if (!visited.contains(nn)) {
+ flatNodesToVisit.add(nn);
+ }
}
}
}
private void codePlans_nodeActions(FlatMethod fm, FlatNode fn, VarSrcTokTable vstTableIn,
- Set<TempDescriptor> notAvailSetIn, FlatSESEEnterNode currentSESE) {
+ Set<TempDescriptor> notAvailSetIn, FlatSESEEnterNode currentSESE) {
CodePlan plan = new CodePlan(currentSESE);
// dependencies properly
Iterator<TempDescriptor> inVarItr = fsen.getInVarSet().iterator();
while (inVarItr.hasNext()) {
- TempDescriptor inVar = inVarItr.next();
-
- // when we get to an SESE enter node we change the
- // currentSESE variable of this analysis to the
- // child that is declared by the enter node, so
- // in order to classify in-vars correctly, pass
- // the parent SESE in--at other FlatNode types just
- // use the currentSESE
- FlatSESEEnterNode parent = rblockRel.getLocalInnerRBlock(fn);
- if (parent == null) {
- parent = rblockRel.getCallerProxySESE();
- }
-
- VSTWrapper vstIfStatic = new VSTWrapper();
- Integer srcType = vstTableIn.getRefVarSrcType(inVar, parent, vstIfStatic);
-
- // the current SESE needs a local space to track the dynamic
- // variable and the child needs space in its SESE record
- if (srcType.equals(VarSrcTokTable.SrcType_DYNAMIC)) {
- fsen.addDynamicInVar(inVar);
- addDynamicVar(parent, fm, inVar);
-
- } else if (srcType.equals(VarSrcTokTable.SrcType_STATIC)) {
- fsen.addStaticInVar(inVar);
- VariableSourceToken vst = vstIfStatic.vst;
- fsen.putStaticInVar2src(inVar, vst);
- fsen.addStaticInVarSrc(new SESEandAgePair(vst.getSESE(), vst.getAge()));
-
- } else {
- assert srcType.equals(VarSrcTokTable.SrcType_READY);
- fsen.addReadyInVar(inVar);
- }
+ TempDescriptor inVar = inVarItr.next();
+
+ // when we get to an SESE enter node we change the
+ // currentSESE variable of this analysis to the
+ // child that is declared by the enter node, so
+ // in order to classify in-vars correctly, pass
+ // the parent SESE in--at other FlatNode types just
+ // use the currentSESE
+ FlatSESEEnterNode parent = rblockRel.getLocalInnerRBlock(fn);
+ if (parent == null) {
+ parent = rblockRel.getCallerProxySESE();
+ }
+
+ VSTWrapper vstIfStatic = new VSTWrapper();
+ Integer srcType = vstTableIn.getRefVarSrcType(inVar, parent, vstIfStatic);
+
+ // the current SESE needs a local space to track the dynamic
+ // variable and the child needs space in its SESE record
+ if (srcType.equals(VarSrcTokTable.SrcType_DYNAMIC)) {
+ fsen.addDynamicInVar(inVar);
+ addDynamicVar(parent, fm, inVar);
+
+ } else if (srcType.equals(VarSrcTokTable.SrcType_STATIC)) {
+ fsen.addStaticInVar(inVar);
+ VariableSourceToken vst = vstIfStatic.vst;
+ fsen.putStaticInVar2src(inVar, vst);
+ fsen.addStaticInVarSrc(new SESEandAgePair(vst.getSESE(), vst.getAge()));
+
+ } else {
+ assert srcType.equals(VarSrcTokTable.SrcType_READY);
+ fsen.addReadyInVar(inVar);
+ }
}
}
- break;
+ break;
case FKind.FlatSESEExitNode: {
FlatSESEExitNode fsexn = (FlatSESEExitNode) fn;
Iterator<TempDescriptor> outVarItr = exiter.getOutVarSet().iterator();
while (outVarItr.hasNext()) {
- TempDescriptor outVar = outVarItr.next();
-
- VSTWrapper vstIfStatic = new VSTWrapper();
- Integer srcType = vstTableIn.getRefVarSrcType(outVar, exiter, vstIfStatic);
-
- if (srcType.equals(VarSrcTokTable.SrcType_DYNAMIC)) {
- // if the out-var is dynamic, put it in the set of dyn out vars
- // so exiting code gen knows to look for the value, but also put
- // it in the set of dynamic vars the exiter must track!
- exiter.addDynamicOutVar(outVar);
- addDynamicVar(exiter, fm, outVar);
-
- } else if (srcType.equals(VarSrcTokTable.SrcType_STATIC)) {
- exiter.addStaticOutVar(outVar);
- VariableSourceToken vst = vstIfStatic.vst;
- exiter.putStaticOutVar2src(outVar, vst);
- exiter.addStaticOutVarSrc(new SESEandAgePair(vst.getSESE(), vst.getAge()));
-
- } else {
- assert srcType.equals(VarSrcTokTable.SrcType_READY);
- exiter.addReadyOutVar(outVar);
- }
+ TempDescriptor outVar = outVarItr.next();
+
+ VSTWrapper vstIfStatic = new VSTWrapper();
+ Integer srcType = vstTableIn.getRefVarSrcType(outVar, exiter, vstIfStatic);
+
+ if (srcType.equals(VarSrcTokTable.SrcType_DYNAMIC)) {
+ // if the out-var is dynamic, put it in the set of dyn out vars
+ // so exiting code gen knows to look for the value, but also put
+ // it in the set of dynamic vars the exiter must track!
+ exiter.addDynamicOutVar(outVar);
+ addDynamicVar(exiter, fm, outVar);
+
+ } else if (srcType.equals(VarSrcTokTable.SrcType_STATIC)) {
+ exiter.addStaticOutVar(outVar);
+ VariableSourceToken vst = vstIfStatic.vst;
+ exiter.putStaticOutVar2src(outVar, vst);
+ exiter.addStaticOutVarSrc(new SESEandAgePair(vst.getSESE(), vst.getAge()));
+
+ } else {
+ assert srcType.equals(VarSrcTokTable.SrcType_READY);
+ exiter.addReadyOutVar(outVar);
+ }
}
}
- break;
+ break;
case FKind.FlatOpNode: {
FlatOpNode fon = (FlatOpNode) fn;
if (fon.getOp().getOp() == Operation.ASSIGN) {
- TempDescriptor lhs = fon.getDest();
- TempDescriptor rhs = fon.getLeft();
-
- // if this is an op node, don't stall, copy
- // source and delay until we need to use value
-
- // ask whether lhs and rhs sources are dynamic, static, etc.
- VSTWrapper vstIfStatic = new VSTWrapper();
- Integer lhsSrcType = vstTableIn.getRefVarSrcType(lhs, currentSESE, vstIfStatic);
- Integer rhsSrcType = vstTableIn.getRefVarSrcType(rhs, currentSESE, vstIfStatic);
-
- if (rhsSrcType.equals(VarSrcTokTable.SrcType_DYNAMIC)) {
- // if rhs is dynamic going in, lhs will definitely be dynamic
- // going out of this node, so track that here
- plan.addDynAssign(lhs, rhs);
- addDynamicVar(currentSESE, fm, lhs);
- addDynamicVar(currentSESE, fm, rhs);
-
- } else if (lhsSrcType.equals(VarSrcTokTable.SrcType_DYNAMIC)) {
- // otherwise, if the lhs is dynamic, but the rhs is not, we
- // need to update the variable's dynamic source as "current SESE"
- plan.addDynAssign(lhs);
- }
-
- // only break if this is an ASSIGN op node,
- // otherwise fall through to default case
- break;
+ TempDescriptor lhs = fon.getDest();
+ TempDescriptor rhs = fon.getLeft();
+
+ // if this is an op node, don't stall, copy
+ // source and delay until we need to use value
+
+ // ask whether lhs and rhs sources are dynamic, static, etc.
+ VSTWrapper vstIfStatic = new VSTWrapper();
+ Integer lhsSrcType = vstTableIn.getRefVarSrcType(lhs, currentSESE, vstIfStatic);
+ Integer rhsSrcType = vstTableIn.getRefVarSrcType(rhs, currentSESE, vstIfStatic);
+
+ if (rhsSrcType.equals(VarSrcTokTable.SrcType_DYNAMIC)) {
+ // if rhs is dynamic going in, lhs will definitely be dynamic
+ // going out of this node, so track that here
+ plan.addDynAssign(lhs, rhs);
+ addDynamicVar(currentSESE, fm, lhs);
+ addDynamicVar(currentSESE, fm, rhs);
+
+ } else if (lhsSrcType.equals(VarSrcTokTable.SrcType_DYNAMIC)) {
+ // otherwise, if the lhs is dynamic, but the rhs is not, we
+ // need to update the variable's dynamic source as "current SESE"
+ plan.addDynAssign(lhs);
+ }
+
+ // only break if this is an ASSIGN op node,
+ // otherwise fall through to default case
+ break;
}
}
- // note that FlatOpNode's that aren't ASSIGN
- // fall through to this default case
+ // note that FlatOpNode's that aren't ASSIGN
+ // fall through to this default case
default: {
// a node with no live set has nothing to stall for
TempDescriptor[] readarray = fn.readsTemps();
for (int i = 0; i < readarray.length; i++) {
- TempDescriptor readtmp = readarray[i];
-
- // ignore temps that are definitely available
- // when considering to stall on it
- if (!notAvailSetIn.contains(readtmp)) {
- continue;
- }
-
- // check the source type of this variable
- VSTWrapper vstIfStatic = new VSTWrapper();
- Integer srcType = vstTableIn.getRefVarSrcType(readtmp, currentSESE, vstIfStatic);
-
- if (srcType.equals(VarSrcTokTable.SrcType_DYNAMIC)) {
- // 1) It is not clear statically where this variable will
- // come from, so dynamically we must keep track
- // along various control paths, and therefore when we stall,
- // just stall for the exact thing we need and move on
- plan.addDynamicStall(readtmp);
- addDynamicVar(currentSESE, fm, readtmp);
-
- } else if (srcType.equals(VarSrcTokTable.SrcType_STATIC)) {
- // 2) Single token/age pair: Stall for token/age pair, and copy
- // all live variables with same token/age pair at the same
- // time. This is the same stuff that the notavaialable analysis
- // marks as now available.
- VariableSourceToken vst = vstIfStatic.vst;
-
- Iterator<VariableSourceToken> availItr =
- vstTableIn.get(vst.getSESE(), vst.getAge()).iterator();
-
- while (availItr.hasNext()) {
- VariableSourceToken vstAlsoAvail = availItr.next();
-
- // only grab additional stuff that is live
- Set<TempDescriptor> copySet = new HashSet<TempDescriptor>();
-
- Iterator<TempDescriptor> refVarItr = vstAlsoAvail.getRefVars().iterator();
-
- while (refVarItr.hasNext()) {
- TempDescriptor refVar = refVarItr.next();
- // note: this should just use normal liveness in...only want to
- // copy live variables...
- if (liveness.getLiveInTemps(fm, fn).contains(refVar)) {
- copySet.add(refVar);
- }
- }
-
- if (!copySet.isEmpty()) {
- plan.addStall2CopySet(vstAlsoAvail, copySet);
- }
- }
-
- } else {
- // the other case for srcs is READY, so do nothing
- }
-
- // assert that everything being stalled for is in the
- // "not available" set coming into this flat node and
- // that every VST identified is in the possible "stall set"
- // that represents VST's from children SESE's
+ TempDescriptor readtmp = readarray[i];
+
+ // ignore temps that are definitely available
+ // when considering to stall on it
+ if (!notAvailSetIn.contains(readtmp)) {
+ continue;
+ }
+
+ // check the source type of this variable
+ VSTWrapper vstIfStatic = new VSTWrapper();
+ Integer srcType = vstTableIn.getRefVarSrcType(readtmp, currentSESE, vstIfStatic);
+
+ if (srcType.equals(VarSrcTokTable.SrcType_DYNAMIC)) {
+ // 1) It is not clear statically where this variable will
+ // come from, so dynamically we must keep track
+ // along various control paths, and therefore when we stall,
+ // just stall for the exact thing we need and move on
+ plan.addDynamicStall(readtmp);
+ addDynamicVar(currentSESE, fm, readtmp);
+
+ } else if (srcType.equals(VarSrcTokTable.SrcType_STATIC)) {
+ // 2) Single token/age pair: Stall for token/age pair, and copy
+ // all live variables with same token/age pair at the same
+ // time. This is the same stuff that the not-available analysis
+ // marks as now available.
+ VariableSourceToken vst = vstIfStatic.vst;
+
+ Iterator<VariableSourceToken> availItr =
+ vstTableIn.get(vst.getSESE(), vst.getAge()).iterator();
+
+ while (availItr.hasNext()) {
+ VariableSourceToken vstAlsoAvail = availItr.next();
+
+ // only grab additional stuff that is live
+ Set<TempDescriptor> copySet = new HashSet<TempDescriptor>();
+
+ Iterator<TempDescriptor> refVarItr = vstAlsoAvail.getRefVars().iterator();
+
+ while (refVarItr.hasNext()) {
+ TempDescriptor refVar = refVarItr.next();
+ // note: this should just use normal liveness in...only want to
+ // copy live variables...
+ if (liveness.getLiveInTemps(fm, fn).contains(refVar)) {
+ copySet.add(refVar);
+ }
+ }
+
+ if (!copySet.isEmpty()) {
+ plan.addStall2CopySet(vstAlsoAvail, copySet);
+ }
+ }
+
+ } else {
+ // the other case for srcs is READY, so do nothing
+ }
+
+ // assert that everything being stalled for is in the
+ // "not available" set coming into this flat node and
+ // that every VST identified is in the possible "stall set"
+ // that represents VST's from children SESE's
}
}
- break;
+ break;
} // end switch
// never need to generate another name for it in code (it is
// ALWAYS the task executing the local method context)
if (vst.getSESE().getIsCallerProxySESE()) {
- continue;
+ continue;
}
SESEandAgePair sap = new SESEandAgePair(vst.getSESE(), vst.getAge());
FlatSESEEnterNode sese = currentSESE;
while (sese != null) {
- addNeededStaticName(sese, fm, sap);
- sese = sese.getLocalParent();
+ addNeededStaticName(sese, fm, sap);
+ sese = sese.getLocalParent();
}
}
fsenDoingTracking = currentSESE.getLocalParent();
if (fsenDoingTracking == null) {
- // if there is no local parent, there are one of two cases
- // 1) the current task is main, in which case this FlatNode
- // is the main's exit, and doesn't need to do any of the
- // following dynamic tracking
- // 2) the current task is defined in a method, so use the
- // caller proxy in the variable source calcs below
- if (currentSESE.equals(rblockRel.getMainSESE())) {
- return;
- } else {
- fsenDoingTracking = rblockRel.getCallerProxySESE();
- }
+ // if there is no local parent, there is one of two cases
+ // 1) the current task is main, in which case this FlatNode
+ // is the main's exit, and doesn't need to do any of the
+ // following dynamic tracking
+ // 2) the current task is defined in a method, so use the
+ // caller proxy in the variable source calcs below
+ if (currentSESE.equals(rblockRel.getMainSESE())) {
+ return;
+ } else {
+ fsenDoingTracking = rblockRel.getCallerProxySESE();
+ }
}
} else {
fsenDoingTracking = currentSESE;
// completely outside of the root SESE scope
if (nextVstTable != null && nextLiveIn != null) {
- Hashtable<TempDescriptor, VSTWrapper> readyOrStatic2dynamicSet =
- thisVstTable.getReadyOrStatic2DynamicSet(nextVstTable, nextLiveIn, fsenDoingTracking);
+ Hashtable<TempDescriptor, VSTWrapper> readyOrStatic2dynamicSet =
+ thisVstTable.getReadyOrStatic2DynamicSet(nextVstTable, nextLiveIn, fsenDoingTracking);
- if (!readyOrStatic2dynamicSet.isEmpty()) {
+ if (!readyOrStatic2dynamicSet.isEmpty()) {
- // either add these results to partial fixed-point result
- // or make a new one if we haven't made any here yet
- FlatEdge fe = new FlatEdge(fn, nn);
- FlatWriteDynamicVarNode fwdvn = wdvNodesToSpliceIn.get(fe);
+ // either add these results to partial fixed-point result
+ // or make a new one if we haven't made any here yet
+ FlatEdge fe = new FlatEdge(fn, nn);
+ FlatWriteDynamicVarNode fwdvn = wdvNodesToSpliceIn.get(fe);
- if (fwdvn == null) {
- fwdvn =
- new FlatWriteDynamicVarNode(fn, nn, readyOrStatic2dynamicSet, fsenDoingTracking);
- wdvNodesToSpliceIn.put(fe, fwdvn);
- } else {
- fwdvn.addMoreVar2Src(readyOrStatic2dynamicSet);
- }
- }
+ if (fwdvn == null) {
+ fwdvn =
+ new FlatWriteDynamicVarNode(fn, nn, readyOrStatic2dynamicSet, fsenDoingTracking);
+ wdvNodesToSpliceIn.put(fe, fwdvn);
+ } else {
+ fwdvn.addMoreVar2Src(readyOrStatic2dynamicSet);
+ }
+ }
}
}
}
// effects analysis says they should be a conflict node in the that
// parent's conflict graph
Set<FlatSESEEnterNode> allSESEs = rblockRel.getAllSESEs();
- for (Iterator iterator = allSESEs.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = allSESEs.iterator(); iterator.hasNext(); ) {
FlatSESEEnterNode parent = (FlatSESEEnterNode) iterator.next();
if (parent.getIsLeafSESE()) {
- continue;
+ continue;
}
EffectsAnalysis effectsAnalysis = disjointAnalysisTaints.getEffectsAnalysis();
conflictGraph = new ConflictGraph(state);
Set<FlatSESEEnterNode> children = parent.getChildren();
- for (Iterator iterator2 = children.iterator(); iterator2.hasNext();) {
- FlatSESEEnterNode child = (FlatSESEEnterNode) iterator2.next();
- Hashtable<Taint, Set<Effect>> taint2Effects = effectsAnalysis.get(child);
- conflictGraph.addLiveIn(taint2Effects);
+ for (Iterator iterator2 = children.iterator(); iterator2.hasNext(); ) {
+ FlatSESEEnterNode child = (FlatSESEEnterNode) iterator2.next();
+ Hashtable<Taint, Set<Effect>> taint2Effects = effectsAnalysis.get(child);
+ conflictGraph.addLiveIn(taint2Effects);
}
sese2conflictGraph.put(parent, conflictGraph);
MethodDescriptor md = descItr.next();
FlatMethod fm = state.getMethodFlat(md);
if (fm != null) {
- addStallSitesToConflictGraphs(fm);
+ addStallSitesToConflictGraphs(fm);
}
}
}
// schedule forward nodes for analysis
for (int i = 0; i < fn.numNext(); i++) {
- FlatNode nn = fn.getNext(i);
- if (!visited.contains(nn)) {
- flatNodesToVisit.add(nn);
- }
+ FlatNode nn = fn.getNext(i);
+ if (!visited.contains(nn)) {
+ flatNodesToVisit.add(nn);
+ }
}
}
}
private void conflictGraph_nodeAction(FlatNode fn, Set<FlatSESEEnterNode> currentSESEs,
- ClassDescriptor cd) {
+ ClassDescriptor cd) {
EffectsAnalysis effectsAnalysis = disjointAnalysisTaints.getEffectsAnalysis();
ConflictGraph conflictGraph = sese2conflictGraph.get(currentSESE);
if (conflictGraph == null) {
- assert currentSESE.getIsLeafSESE();
- continue;
+ assert currentSESE.getIsLeafSESE();
+ continue;
}
TempDescriptor lhs;
case FKind.FlatFieldNode:
case FKind.FlatElementNode: {
- if (fn instanceof FlatFieldNode) {
- FlatFieldNode ffn = (FlatFieldNode) fn;
- rhs = ffn.getSrc();
- } else {
- FlatElementNode fen = (FlatElementNode) fn;
- rhs = fen.getSrc();
- }
+ if (fn instanceof FlatFieldNode) {
+ FlatFieldNode ffn = (FlatFieldNode) fn;
+ rhs = ffn.getSrc();
+ } else {
+ FlatElementNode fen = (FlatElementNode) fn;
+ rhs = fen.getSrc();
+ }
- conflictGraph.addStallSite(taint2Effects, rhs, cd);
+ conflictGraph.addStallSite(taint2Effects, rhs, cd);
}
- break;
+ break;
case FKind.FlatSetFieldNode:
case FKind.FlatSetElementNode: {
- if (fn instanceof FlatSetFieldNode) {
- FlatSetFieldNode fsfn = (FlatSetFieldNode) fn;
- lhs = fsfn.getDst();
- rhs = fsfn.getSrc();
- } else {
- FlatSetElementNode fsen = (FlatSetElementNode) fn;
- lhs = fsen.getDst();
- rhs = fsen.getSrc();
- }
-
- conflictGraph.addStallSite(taint2Effects, rhs, cd);
- conflictGraph.addStallSite(taint2Effects, lhs, cd);
+ if (fn instanceof FlatSetFieldNode) {
+ FlatSetFieldNode fsfn = (FlatSetFieldNode) fn;
+ lhs = fsfn.getDst();
+ rhs = fsfn.getSrc();
+ } else {
+ FlatSetElementNode fsen = (FlatSetElementNode) fn;
+ lhs = fsen.getDst();
+ rhs = fsen.getSrc();
+ }
+
+ conflictGraph.addStallSite(taint2Effects, rhs, cd);
+ conflictGraph.addStallSite(taint2Effects, lhs, cd);
}
- break;
+ break;
case FKind.FlatCall: {
- FlatCall fc = (FlatCall) fn;
- lhs = fc.getThis();
+ FlatCall fc = (FlatCall) fn;
+ lhs = fc.getThis();
- conflictGraph.addStallSite(taint2Effects, lhs, cd);
+ conflictGraph.addStallSite(taint2Effects, lhs, cd);
}
- break;
+ break;
}
if (conflictGraph.id2cn.size() > 0) {
- sese2conflictGraph.put(currentSESE, conflictGraph);
+ sese2conflictGraph.put(currentSESE, conflictGraph);
}
}
}
ConflictGraph conflictGraph = sese2conflictGraph.get(sese);
if (useReachInfo) {
- // clear current conflict before recalculating with reachability info
- conflictGraph.clearAllConflictEdge();
- conflictGraph.setDisJointAnalysis(disjointAnalysisReach);
- conflictGraph.setFMEnclosing(sese.getfmEnclosing());
+ // clear current conflict before recalculating with reachability info
+ conflictGraph.clearAllConflictEdge();
+ conflictGraph.setDisJointAnalysis(disjointAnalysisReach);
+ conflictGraph.setFMEnclosing(sese.getfmEnclosing());
}
conflictGraph.analyzeConflicts(sitesToFlag, useReachInfo);
sese2conflictGraph.put(sese, conflictGraph);
FlatNode key = (FlatNode) keyEnum.nextElement();
ConflictGraph cg = sese2conflictGraph.get(key);
try {
- if (cg.hasConflictEdge()) {
- cg.writeGraph("ConflictGraphFor" + key, false);
- }
+ if (cg.hasConflictEdge()) {
+ cg.writeGraph("ConflictGraphFor" + key, false);
+ }
} catch (IOException e) {
- System.out.println("Error writing");
- System.exit(0);
+ System.out.println("Error writing");
+ System.exit(0);
}
}
}
// for every conflict graph, generate a set of memory queues
// (called SESELock in this code!) to cover the graph
Set<Map.Entry<FlatNode, ConflictGraph>> graphEntrySet = sese2conflictGraph.entrySet();
- for (Iterator iterator = graphEntrySet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = graphEntrySet.iterator(); iterator.hasNext(); ) {
Map.Entry<FlatNode, ConflictGraph> graphEntry =
- (Map.Entry<FlatNode, ConflictGraph>) iterator.next();
+ (Map.Entry<FlatNode, ConflictGraph>)iterator.next();
FlatNode sese = graphEntry.getKey();
ConflictGraph conflictGraph = graphEntry.getValue();
calculateCovering(conflictGraph);
HashSet<SESELock> lockSet = new HashSet<SESELock>();
Set<ConflictEdge> tempCover = conflictGraph.getEdgeSet();
- for (Iterator iterator = tempCover.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = tempCover.iterator(); iterator.hasNext(); ) {
ConflictEdge conflictEdge = (ConflictEdge) iterator.next();
if (conflictEdge.isCoarseEdge()) {
- coarseToCover.add(conflictEdge);
+ coarseToCover.add(conflictEdge);
} else {
- fineToCover.add(conflictEdge);
+ fineToCover.add(conflictEdge);
}
}
do { // fine-grained edge
- changed = false;
-
- for (Iterator iterator = fineToCover.iterator(); iterator.hasNext();) {
-
- int type;
- ConflictEdge edge = (ConflictEdge) iterator.next();
- if (seseLock.getConflictNodeSet().size() == 0) {
- // initial setup
- if (seseLock.isWriteNode(edge.getVertexU())) {
- // mark as fine_write
- if (edge.getVertexU().isStallSiteNode()) {
- type = ConflictNode.PARENT_WRITE;
- } else {
- type = ConflictNode.FINE_WRITE;
- }
- seseLock.addConflictNode(edge.getVertexU(), type);
- } else {
- // mark as fine_read
- if (edge.getVertexU().isStallSiteNode()) {
- type = ConflictNode.PARENT_READ;
- } else {
- type = ConflictNode.FINE_READ;
- }
- seseLock.addConflictNode(edge.getVertexU(), type);
- }
- if (edge.getVertexV() != edge.getVertexU()) {
- if (seseLock.isWriteNode(edge.getVertexV())) {
- // mark as fine_write
- if (edge.getVertexV().isStallSiteNode()) {
- type = ConflictNode.PARENT_WRITE;
- } else {
- type = ConflictNode.FINE_WRITE;
- }
- seseLock.addConflictNode(edge.getVertexV(), type);
- } else {
- // mark as fine_read
- if (edge.getVertexV().isStallSiteNode()) {
- type = ConflictNode.PARENT_READ;
- } else {
- type = ConflictNode.FINE_READ;
- }
- seseLock.addConflictNode(edge.getVertexV(), type);
- }
- }
- changed = true;
- seseLock.addConflictEdge(edge);
- fineToCover.remove(edge);
- break;// exit iterator loop
- }// end of initial setup
-
- ConflictNode newNode;
- if ((newNode = seseLock.getNewNodeConnectedWithGroup(edge)) != null) {
- // new node has a fine-grained edge to all current node
- // If there is a coarse grained edge where need a fine edge, it's
- // okay to add the node
- // but the edge must remain uncovered.
-
- changed = true;
-
- if (seseLock.containsConflictNode(newNode)) {
- seseLock.addEdge(edge);
- fineToCover.remove(edge);
- break;
- }
-
- if (seseLock.isWriteNode(newNode)) {
- if (newNode.isStallSiteNode()) {
- type = ConflictNode.PARENT_WRITE;
- } else {
- type = ConflictNode.FINE_WRITE;
- }
- seseLock.setNodeType(newNode, type);
- } else {
- if (newNode.isStallSiteNode()) {
- type = ConflictNode.PARENT_READ;
- } else {
- type = ConflictNode.FINE_READ;
- }
- seseLock.setNodeType(newNode, type);
- }
-
- seseLock.addEdge(edge);
- Set<ConflictEdge> edgeSet = newNode.getEdgeSet();
- for (Iterator iterator2 = edgeSet.iterator(); iterator2.hasNext();) {
- ConflictEdge conflictEdge = (ConflictEdge) iterator2.next();
-
- // mark all fine edges between new node and nodes in the group as
- // covered
- if (!conflictEdge.getVertexU().equals(newNode)) {
- if (seseLock.containsConflictNode(conflictEdge.getVertexU())) {
- changed = true;
- seseLock.addConflictEdge(conflictEdge);
- fineToCover.remove(conflictEdge);
- }
- } else if (!conflictEdge.getVertexV().equals(newNode)) {
- if (seseLock.containsConflictNode(conflictEdge.getVertexV())) {
- changed = true;
- seseLock.addConflictEdge(conflictEdge);
- fineToCover.remove(conflictEdge);
- }
- }
-
- }
-
- break;// exit iterator loop
- }
- }
+ changed = false;
+
+ for (Iterator iterator = fineToCover.iterator(); iterator.hasNext(); ) {
+
+ int type;
+ ConflictEdge edge = (ConflictEdge) iterator.next();
+ if (seseLock.getConflictNodeSet().size() == 0) {
+ // initial setup
+ if (seseLock.isWriteNode(edge.getVertexU())) {
+ // mark as fine_write
+ if (edge.getVertexU().isStallSiteNode()) {
+ type = ConflictNode.PARENT_WRITE;
+ } else {
+ type = ConflictNode.FINE_WRITE;
+ }
+ seseLock.addConflictNode(edge.getVertexU(), type);
+ } else {
+ // mark as fine_read
+ if (edge.getVertexU().isStallSiteNode()) {
+ type = ConflictNode.PARENT_READ;
+ } else {
+ type = ConflictNode.FINE_READ;
+ }
+ seseLock.addConflictNode(edge.getVertexU(), type);
+ }
+ if (edge.getVertexV() != edge.getVertexU()) {
+ if (seseLock.isWriteNode(edge.getVertexV())) {
+ // mark as fine_write
+ if (edge.getVertexV().isStallSiteNode()) {
+ type = ConflictNode.PARENT_WRITE;
+ } else {
+ type = ConflictNode.FINE_WRITE;
+ }
+ seseLock.addConflictNode(edge.getVertexV(), type);
+ } else {
+ // mark as fine_read
+ if (edge.getVertexV().isStallSiteNode()) {
+ type = ConflictNode.PARENT_READ;
+ } else {
+ type = ConflictNode.FINE_READ;
+ }
+ seseLock.addConflictNode(edge.getVertexV(), type);
+ }
+ }
+ changed = true;
+ seseLock.addConflictEdge(edge);
+ fineToCover.remove(edge);
+ break; // exit iterator loop
+ } // end of initial setup
+
+ ConflictNode newNode;
+ if ((newNode = seseLock.getNewNodeConnectedWithGroup(edge)) != null) {
+ // new node has a fine-grained edge to all current node
+ // If there is a coarse grained edge where need a fine edge, it's
+ // okay to add the node
+ // but the edge must remain uncovered.
+
+ changed = true;
+
+ if (seseLock.containsConflictNode(newNode)) {
+ seseLock.addEdge(edge);
+ fineToCover.remove(edge);
+ break;
+ }
+
+ if (seseLock.isWriteNode(newNode)) {
+ if (newNode.isStallSiteNode()) {
+ type = ConflictNode.PARENT_WRITE;
+ } else {
+ type = ConflictNode.FINE_WRITE;
+ }
+ seseLock.setNodeType(newNode, type);
+ } else {
+ if (newNode.isStallSiteNode()) {
+ type = ConflictNode.PARENT_READ;
+ } else {
+ type = ConflictNode.FINE_READ;
+ }
+ seseLock.setNodeType(newNode, type);
+ }
+
+ seseLock.addEdge(edge);
+ Set<ConflictEdge> edgeSet = newNode.getEdgeSet();
+ for (Iterator iterator2 = edgeSet.iterator(); iterator2.hasNext(); ) {
+ ConflictEdge conflictEdge = (ConflictEdge) iterator2.next();
+
+ // mark all fine edges between new node and nodes in the group as
+ // covered
+ if (!conflictEdge.getVertexU().equals(newNode)) {
+ if (seseLock.containsConflictNode(conflictEdge.getVertexU())) {
+ changed = true;
+ seseLock.addConflictEdge(conflictEdge);
+ fineToCover.remove(conflictEdge);
+ }
+ } else if (!conflictEdge.getVertexV().equals(newNode)) {
+ if (seseLock.containsConflictNode(conflictEdge.getVertexV())) {
+ changed = true;
+ seseLock.addConflictEdge(conflictEdge);
+ fineToCover.remove(conflictEdge);
+ }
+ }
+
+ }
+
+ break; // exit iterator loop
+ }
+ }
} while (changed);
HashSet<ConflictEdge> notCovered = new HashSet<ConflictEdge>();
do { // coarse
- changed = false;
- int type;
- for (Iterator iterator = coarseToCover.iterator(); iterator.hasNext();) {
-
- ConflictEdge edge = (ConflictEdge) iterator.next();
- if (seseLock.getConflictNodeSet().size() == 0) {
- // initial setup
- if (seseLock.hasSelfCoarseEdge(edge.getVertexU())) {
- // node has a coarse-grained edge with itself
- if (!(edge.getVertexU().isStallSiteNode())) {
- // and it is not parent
- type = ConflictNode.SCC;
- } else {
- if (state.RCR) {
- type = ConflictNode.PARENT_COARSE;
- } else {
- type = ConflictNode.PARENT_WRITE;
- }
- }
- seseLock.addConflictNode(edge.getVertexU(), type);
- } else {
- if (edge.getVertexU().isStallSiteNode()) {
- if (state.RCR) {
- type = ConflictNode.PARENT_COARSE;
- } else {
- if (edge.getVertexU().getWriteEffectSet().isEmpty()) {
- type = ConflictNode.PARENT_READ;
- } else {
- type = ConflictNode.PARENT_WRITE;
- }
- }
- } else {
- type = ConflictNode.COARSE;
- }
- seseLock.addConflictNode(edge.getVertexU(), type);
- }
- if (seseLock.hasSelfCoarseEdge(edge.getVertexV())) {
- // node has a coarse-grained edge with itself
- if (!(edge.getVertexV().isStallSiteNode())) {
- // and it is not parent
- type = ConflictNode.SCC;
- } else {
- if (state.RCR) {
- type = ConflictNode.PARENT_COARSE;
- } else {
- type = ConflictNode.PARENT_WRITE;
- }
- }
- seseLock.addConflictNode(edge.getVertexV(), type);
- } else {
- if (edge.getVertexV().isStallSiteNode()) {
- if (state.RCR) {
- type = ConflictNode.PARENT_COARSE;
- } else {
- if (edge.getVertexV().getWriteEffectSet().isEmpty()) {
- type = ConflictNode.PARENT_READ;
- } else {
- type = ConflictNode.PARENT_WRITE;
- }
- }
- } else {
- type = ConflictNode.COARSE;
- }
- seseLock.addConflictNode(edge.getVertexV(), type);
- }
- changed = true;
- coarseToCover.remove(edge);
- seseLock.addConflictEdge(edge);
- break;// exit iterator loop
- }// end of initial setup
-
- ConflictNode newNode;
- if ((newNode = seseLock.getNewNodeConnectedWithGroup(edge)) != null) {
- // new node has a coarse-grained edge to all fine-read, fine-write,
- // parent
- changed = true;
-
- if (newNode.isInVarNode() && (!seseLock.hasSelfCoarseEdge(newNode))
- && seseLock.hasCoarseEdgeWithParentCoarse(newNode)) {
- // this case can't be covered by this queue
- coarseToCover.remove(edge);
- notCovered.add(edge);
- break;
- }
-
- if (seseLock.containsConflictNode(newNode)) {
- seseLock.addEdge(edge);
- coarseToCover.remove(edge);
- break;
- }
-
- if (seseLock.hasSelfCoarseEdge(newNode)) {
- // SCC
- if (newNode.isStallSiteNode()) {
- type = ConflictNode.PARENT_COARSE;
- } else {
- type = ConflictNode.SCC;
- }
- seseLock.setNodeType(newNode, type);
- } else {
- if (newNode.isStallSiteNode()) {
- type = ConflictNode.PARENT_COARSE;
- } else {
- type = ConflictNode.COARSE;
- }
- seseLock.setNodeType(newNode, type);
- }
-
- seseLock.addEdge(edge);
- Set<ConflictEdge> edgeSet = newNode.getEdgeSet();
- for (Iterator iterator2 = edgeSet.iterator(); iterator2.hasNext();) {
- ConflictEdge conflictEdge = (ConflictEdge) iterator2.next();
- // mark all coarse edges between new node and nodes in the group
- // as covered
- if (!conflictEdge.getVertexU().equals(newNode)) {
- if (seseLock.containsConflictNode(conflictEdge.getVertexU())) {
- changed = true;
- seseLock.addConflictEdge(conflictEdge);
- coarseToCover.remove(conflictEdge);
- }
- } else if (!conflictEdge.getVertexV().equals(newNode)) {
- if (seseLock.containsConflictNode(conflictEdge.getVertexV())) {
- changed = true;
- seseLock.addConflictEdge(conflictEdge);
- coarseToCover.remove(conflictEdge);
- }
- }
-
- }
- break;// exit iterator loop
- }
-
- }
+ changed = false;
+ int type;
+ for (Iterator iterator = coarseToCover.iterator(); iterator.hasNext(); ) {
+
+ ConflictEdge edge = (ConflictEdge) iterator.next();
+ if (seseLock.getConflictNodeSet().size() == 0) {
+ // initial setup
+ if (seseLock.hasSelfCoarseEdge(edge.getVertexU())) {
+ // node has a coarse-grained edge with itself
+ if (!(edge.getVertexU().isStallSiteNode())) {
+ // and it is not parent
+ type = ConflictNode.SCC;
+ } else {
+ if (state.RCR) {
+ type = ConflictNode.PARENT_COARSE;
+ } else {
+ type = ConflictNode.PARENT_WRITE;
+ }
+ }
+ seseLock.addConflictNode(edge.getVertexU(), type);
+ } else {
+ if (edge.getVertexU().isStallSiteNode()) {
+ if (state.RCR) {
+ type = ConflictNode.PARENT_COARSE;
+ } else {
+ if (edge.getVertexU().getWriteEffectSet().isEmpty()) {
+ type = ConflictNode.PARENT_READ;
+ } else {
+ type = ConflictNode.PARENT_WRITE;
+ }
+ }
+ } else {
+ type = ConflictNode.COARSE;
+ }
+ seseLock.addConflictNode(edge.getVertexU(), type);
+ }
+ if (seseLock.hasSelfCoarseEdge(edge.getVertexV())) {
+ // node has a coarse-grained edge with itself
+ if (!(edge.getVertexV().isStallSiteNode())) {
+ // and it is not parent
+ type = ConflictNode.SCC;
+ } else {
+ if (state.RCR) {
+ type = ConflictNode.PARENT_COARSE;
+ } else {
+ type = ConflictNode.PARENT_WRITE;
+ }
+ }
+ seseLock.addConflictNode(edge.getVertexV(), type);
+ } else {
+ if (edge.getVertexV().isStallSiteNode()) {
+ if (state.RCR) {
+ type = ConflictNode.PARENT_COARSE;
+ } else {
+ if (edge.getVertexV().getWriteEffectSet().isEmpty()) {
+ type = ConflictNode.PARENT_READ;
+ } else {
+ type = ConflictNode.PARENT_WRITE;
+ }
+ }
+ } else {
+ type = ConflictNode.COARSE;
+ }
+ seseLock.addConflictNode(edge.getVertexV(), type);
+ }
+ changed = true;
+ coarseToCover.remove(edge);
+ seseLock.addConflictEdge(edge);
+ break; // exit iterator loop
+ } // end of initial setup
+
+ ConflictNode newNode;
+ if ((newNode = seseLock.getNewNodeConnectedWithGroup(edge)) != null) {
+ // new node has a coarse-grained edge to all fine-read, fine-write,
+ // parent
+ changed = true;
+
+ if (newNode.isInVarNode() && (!seseLock.hasSelfCoarseEdge(newNode))
+ && seseLock.hasCoarseEdgeWithParentCoarse(newNode)) {
+ // this case can't be covered by this queue
+ coarseToCover.remove(edge);
+ notCovered.add(edge);
+ break;
+ }
+
+ if (seseLock.containsConflictNode(newNode)) {
+ seseLock.addEdge(edge);
+ coarseToCover.remove(edge);
+ break;
+ }
+
+ if (seseLock.hasSelfCoarseEdge(newNode)) {
+ // SCC
+ if (newNode.isStallSiteNode()) {
+ type = ConflictNode.PARENT_COARSE;
+ } else {
+ type = ConflictNode.SCC;
+ }
+ seseLock.setNodeType(newNode, type);
+ } else {
+ if (newNode.isStallSiteNode()) {
+ type = ConflictNode.PARENT_COARSE;
+ } else {
+ type = ConflictNode.COARSE;
+ }
+ seseLock.setNodeType(newNode, type);
+ }
+
+ seseLock.addEdge(edge);
+ Set<ConflictEdge> edgeSet = newNode.getEdgeSet();
+ for (Iterator iterator2 = edgeSet.iterator(); iterator2.hasNext(); ) {
+ ConflictEdge conflictEdge = (ConflictEdge) iterator2.next();
+ // mark all coarse edges between new node and nodes in the group
+ // as covered
+ if (!conflictEdge.getVertexU().equals(newNode)) {
+ if (seseLock.containsConflictNode(conflictEdge.getVertexU())) {
+ changed = true;
+ seseLock.addConflictEdge(conflictEdge);
+ coarseToCover.remove(conflictEdge);
+ }
+ } else if (!conflictEdge.getVertexV().equals(newNode)) {
+ if (seseLock.containsConflictNode(conflictEdge.getVertexV())) {
+ changed = true;
+ seseLock.addConflictEdge(conflictEdge);
+ coarseToCover.remove(conflictEdge);
+ }
+ }
+
+ }
+ break; // exit iterator loop
+ }
+
+ }
} while (changed);
lockSet.add(seseLock);
MethodDescriptor md = methItr.next();
FlatMethod fm = state.getMethodFlat(md);
if (fm != null) {
- bw =
- new BufferedWriter(new FileWriter("ooojReport_" + md.getClassMethodName()
- + md.getSafeMethodDescriptor() + ".txt"));
- bw.write("OoOJava Results for " + md + "\n-------------------\n");
-
- bw.write("Dynamic vars to manage:\n " + getContextTaskNames(fm).getDynamicVarSet());
-
- bw.write("\n\nLive-In, Root View\n------------------\n"
- + fm.printMethod(livenessGlobalView));
- bw.write("\n\nVariable Results-Out\n----------------\n" + fm.printMethod(variableResults));
- bw.write("\n\nNot Available Results-Out\n---------------------\n"
- + fm.printMethod(notAvailableResults));
- bw.write("\n\nCode Plans\n----------\n" + fm.printMethod(codePlans));
- bw.close();
+ bw =
+ new BufferedWriter(new FileWriter("ooojReport_" + md.getClassMethodName()
+ + md.getSafeMethodDescriptor() + ".txt"));
+ bw.write("OoOJava Results for " + md + "\n-------------------\n");
+
+ bw.write("Dynamic vars to manage:\n " + getContextTaskNames(fm).getDynamicVarSet());
+
+ bw.write("\n\nLive-In, Root View\n------------------\n"
+ + fm.printMethod(livenessGlobalView));
+ bw.write("\n\nVariable Results-Out\n----------------\n" + fm.printMethod(variableResults));
+ bw.write("\n\nNot Available Results-Out\n---------------------\n"
+ + fm.printMethod(notAvailableResults));
+ bw.write("\n\nCode Plans\n----------\n" + fm.printMethod(codePlans));
+ bw.close();
}
}
}
}
private void printSESEHierarchyTree(BufferedWriter bw, FlatSESEEnterNode fsen, int depth)
- throws java.io.IOException {
+ throws java.io.IOException {
for (int i = 0; i < depth; ++i) {
bw.write(" ");
}
bw.write("SESE " + fsen.getPrettyIdentifier());
if (fsen.getIsLeafSESE()) {
- bw.write(" (leaf)");
+ bw.write(" (leaf)");
}
bw.write(" {\n");
bw.write(" in-set: " + fsen.getInVarSet() + "\n");
Iterator<TempDescriptor> tItr = fsen.getInVarSet().iterator();
while (tItr.hasNext()) {
- TempDescriptor inVar = tItr.next();
- if (fsen.getReadyInVarSet().contains(inVar)) {
- bw.write(" (ready) " + inVar + "\n");
- }
- if (fsen.getStaticInVarSet().contains(inVar)) {
- bw.write(" (static) " + inVar + " from " + fsen.getStaticInVarSrc(inVar) + "\n");
- }
- if (fsen.getDynamicInVarSet().contains(inVar)) {
- bw.write(" (dynamic)" + inVar + "\n");
- }
+ TempDescriptor inVar = tItr.next();
+ if (fsen.getReadyInVarSet().contains(inVar)) {
+ bw.write(" (ready) " + inVar + "\n");
+ }
+ if (fsen.getStaticInVarSet().contains(inVar)) {
+ bw.write(" (static) " + inVar + " from " + fsen.getStaticInVarSrc(inVar) + "\n");
+ }
+ if (fsen.getDynamicInVarSet().contains(inVar)) {
+ bw.write(" (dynamic)" + inVar + "\n");
+ }
}
bw.write(" Dynamic vars to manage: " + getContextTaskNames(fsen).getDynamicVarSet() + "\n");
bw.write(" out-set: " + fsen.getOutVarSet() + "\n");
tItr = fsen.getOutVarSet().iterator();
while (tItr.hasNext()) {
- TempDescriptor outVar = tItr.next();
- if (fsen.getReadyOutVarSet().contains(outVar)) {
- bw.write(" (ready) " + outVar + "\n");
- }
- if (fsen.getStaticOutVarSet().contains(outVar)) {
- bw.write(" (static) " + outVar + " from " + fsen.getStaticOutVarSrc(outVar) + "\n");
- }
- if (fsen.getDynamicOutVarSet().contains(outVar)) {
- bw.write(" (dynamic)" + outVar + "\n");
- }
+ TempDescriptor outVar = tItr.next();
+ if (fsen.getReadyOutVarSet().contains(outVar)) {
+ bw.write(" (ready) " + outVar + "\n");
+ }
+ if (fsen.getStaticOutVarSet().contains(outVar)) {
+ bw.write(" (static) " + outVar + " from " + fsen.getStaticOutVarSrc(outVar) + "\n");
+ }
+ if (fsen.getDynamicOutVarSet().contains(outVar)) {
+ bw.write(" (dynamic)" + outVar + "\n");
+ }
}
bw.write(" local parent: " + fsen.getLocalParent() + "\n");
// This analysis finds all reachable rblocks in the
-// program and computes parent/child relations
+// program and computes parent/child relations
// between those rblocks
// SPECIAL NOTE!
public class RBlockRelationAnalysis {
// compiler data
- State state;
- TypeUtil typeUtil;
+ State state;
+ TypeUtil typeUtil;
CallGraph callGraph;
// an implicit SESE is automatically spliced into
// simply the set of every reachable SESE in the program
protected Set<FlatSESEEnterNode> allSESEs;
-
+
// to support calculation of leaf SESEs (no children even
// through method calls) for optimization during code gen
protected Set<MethodDescriptor> methodsContainingSESEs;
-
+
// maps method descriptor to SESE defined inside of it
// only contains local root SESE definitions in corresponding method
// (has no parent in the local method context)
// baz(); <-- here there is no locally-defined SESE, would be null
// }
protected Hashtable<FlatNode, FlatSESEEnterNode> fn2localInnerSESE;
-
+
// indicates whether this statement might occur in a task and
// after some child task definition such that, without looking at
// the flat node itself, the parent might have to stall for child
protected Hashtable<FlatNode, Boolean> fn2isPotentialStallSite;
-
+
HashMap<MethodDescriptor, Set<Pair<FlatCall, MethodDescriptor>>> methodmap=
new HashMap<MethodDescriptor, Set<Pair<FlatCall, MethodDescriptor>>>();
return allLocalRootSESEs;
}
- public Set<FlatSESEEnterNode> getLocalRootSESEs( FlatMethod fm ) {
- Set<FlatSESEEnterNode> out = md2localRootSESEs.get( fm );
+ public Set<FlatSESEEnterNode> getLocalRootSESEs(FlatMethod fm) {
+ Set<FlatSESEEnterNode> out = md2localRootSESEs.get(fm);
if( out == null ) {
out = new HashSet<FlatSESEEnterNode>();
}
public Set<MethodDescriptor> getMethodsWithSESEs() {
return methodsContainingSESEs;
}
-
+
/* Returns all SESE's that this fn can be a member of
* transitively. */
FlatNode curr=toprocess.pop();
Set<FlatSESEEnterNode> callers=fn2currentSESEs.get(curr);
if (callers!=null) {
- for(FlatSESEEnterNode sese:callers) {
+ for(FlatSESEEnterNode sese : callers) {
if (!seseSet.contains(sese)) {
seseSet.add(sese);
toprocess.add(fn);
}
return seseSet;
}
-
- public Set<FlatSESEEnterNode> getPossibleExecutingRBlocks( FlatNode fn ) {
- Set<FlatSESEEnterNode> out = fn2currentSESEs.get( fn );
+
+ public Set<FlatSESEEnterNode> getPossibleExecutingRBlocks(FlatNode fn) {
+ Set<FlatSESEEnterNode> out = fn2currentSESEs.get(fn);
if( out == null ) {
out = new HashSet<FlatSESEEnterNode>();
}
return out;
}
- public FlatSESEEnterNode getLocalInnerRBlock( FlatNode fn ) {
- return fn2localInnerSESE.get( fn );
+ public FlatSESEEnterNode getLocalInnerRBlock(FlatNode fn) {
+ return fn2localInnerSESE.get(fn);
}
// the "caller proxy" is a static name for whichever
return callerProxySESE;
}
- public boolean isPotentialStallSite( FlatNode fn ) {
- Boolean ipss = fn2isPotentialStallSite.get( fn );
- if( ipss == null ) {
- return false;
+ public boolean isPotentialStallSite(FlatNode fn) {
+ Boolean ipss = fn2isPotentialStallSite.get(fn);
+ if( ipss == null ) {
+ return false;
}
return ipss;
}
- public RBlockRelationAnalysis( State state,
- TypeUtil typeUtil,
- CallGraph callGraph ) {
+ public RBlockRelationAnalysis(State state,
+ TypeUtil typeUtil,
+ CallGraph callGraph) {
this.state = state;
this.typeUtil = typeUtil;
this.callGraph = callGraph;
- callerProxySESE = new FlatSESEEnterNode( null );
+ callerProxySESE = new FlatSESEEnterNode(null);
callerProxySESE.setIsCallerProxySESE();
allSESEs = new HashSet<FlatSESEEnterNode>();
fn2isPotentialStallSite = new Hashtable<FlatNode, Boolean>();
fn2allSESEs = new Hashtable< FlatNode, Set<FlatSESEEnterNode>>();
-
+
MethodDescriptor mdSourceEntry = typeUtil.getMain();
- FlatMethod fmMain = state.getMethodFlat( mdSourceEntry );
+ FlatMethod fmMain = state.getMethodFlat(mdSourceEntry);
+
+ mainSESE = (FlatSESEEnterNode) fmMain.getNext(0);
+ mainSESE.setfmEnclosing(fmMain);
+ mainSESE.setmdEnclosing(fmMain.getMethod() );
+ mainSESE.setcdEnclosing(fmMain.getMethod().getClassDesc() );
- mainSESE = (FlatSESEEnterNode) fmMain.getNext( 0 );
- mainSESE.setfmEnclosing( fmMain );
- mainSESE.setmdEnclosing( fmMain.getMethod() );
- mainSESE.setcdEnclosing( fmMain.getMethod().getClassDesc() );
-
// add all methods transitively reachable from the
// source's main to set to find rblocks
- Set<MethodDescriptor> descriptorsToAnalyze =
- callGraph.getAllMethods( mdSourceEntry );
-
- descriptorsToAnalyze.add( mdSourceEntry );
+ Set<MethodDescriptor> descriptorsToAnalyze =
+ callGraph.getAllMethods(mdSourceEntry);
+
+ descriptorsToAnalyze.add(mdSourceEntry);
- findRblocksAndLocalParentChildRelations( descriptorsToAnalyze );
+ findRblocksAndLocalParentChildRelations(descriptorsToAnalyze);
findTransitiveParentChildRelations();
-
- protected void findRblocksAndLocalParentChildRelations( Set<MethodDescriptor> descriptorsToAnalyze ) {
+
+ protected void findRblocksAndLocalParentChildRelations(Set<MethodDescriptor> descriptorsToAnalyze) {
Iterator<MethodDescriptor> mdItr = descriptorsToAnalyze.iterator();
while( mdItr.hasNext() ) {
- FlatMethod fm = state.getMethodFlat( mdItr.next() );
-
+ FlatMethod fm = state.getMethodFlat(mdItr.next() );
+
// start from flat method top, visit every node in
// method exactly once, find SESE stack on every
// control path: this will discover every reachable
// SESE in the program, and define the local parent
// and local children relations
Hashtable< FlatNode, Stack<FlatSESEEnterNode> > seseStacks =
- new Hashtable< FlatNode, Stack<FlatSESEEnterNode> >();
+ new Hashtable< FlatNode, Stack<FlatSESEEnterNode> >();
Set<FlatNode> flatNodesToVisit = new HashSet<FlatNode>();
- flatNodesToVisit.add( fm );
-
- Set<FlatNode> visited = new HashSet<FlatNode>();
+ flatNodesToVisit.add(fm);
+
+ Set<FlatNode> visited = new HashSet<FlatNode>();
Stack<FlatSESEEnterNode> seseStackFirst = new Stack<FlatSESEEnterNode>();
- seseStacks.put( fm, seseStackFirst );
+ seseStacks.put(fm, seseStackFirst);
while( !flatNodesToVisit.isEmpty() ) {
- Iterator<FlatNode> fnItr = flatNodesToVisit.iterator();
- FlatNode fn = fnItr.next();
-
- Stack<FlatSESEEnterNode> seseStack = seseStacks.get( fn );
- assert seseStack != null;
-
- flatNodesToVisit.remove( fn );
- visited.add( fn );
-
- if( !seseStack.isEmpty() ) {
- fn2localInnerSESE.put( fn, seseStack.peek() );
- }
-
- nodeActions( fn, seseStack, fm );
-
- for( int i = 0; i < fn.numNext(); i++ ) {
- FlatNode nn = fn.getNext( i );
-
- if( !visited.contains( nn ) ) {
- flatNodesToVisit.add( nn );
-
- // clone stack and send along each control path
- seseStacks.put( nn, (Stack<FlatSESEEnterNode>)seseStack.clone() );
- }
- }
- }
+ Iterator<FlatNode> fnItr = flatNodesToVisit.iterator();
+ FlatNode fn = fnItr.next();
+
+ Stack<FlatSESEEnterNode> seseStack = seseStacks.get(fn);
+ assert seseStack != null;
+
+ flatNodesToVisit.remove(fn);
+ visited.add(fn);
+
+ if( !seseStack.isEmpty() ) {
+ fn2localInnerSESE.put(fn, seseStack.peek() );
+ }
+
+ nodeActions(fn, seseStack, fm);
+
+ for( int i = 0; i < fn.numNext(); i++ ) {
+ FlatNode nn = fn.getNext(i);
+
+ if( !visited.contains(nn) ) {
+ flatNodesToVisit.add(nn);
+
+ // clone stack and send along each control path
+ seseStacks.put(nn, (Stack<FlatSESEEnterNode>)seseStack.clone() );
+ }
+ }
+ }
}
}
- protected void nodeActions( FlatNode fn,
- Stack<FlatSESEEnterNode> seseStack,
- FlatMethod fm ) {
+ protected void nodeActions(FlatNode fn,
+ Stack<FlatSESEEnterNode> seseStack,
+ FlatMethod fm) {
switch( fn.kind() ) {
-
+
case FKind.FlatSESEEnterNode: {
FlatSESEEnterNode fsen = (FlatSESEEnterNode) fn;
- allSESEs.add( fsen );
- methodsContainingSESEs.add( fm.getMethod() );
+ allSESEs.add(fsen);
+ methodsContainingSESEs.add(fm.getMethod() );
+
+ fsen.setfmEnclosing(fm);
+ fsen.setmdEnclosing(fm.getMethod() );
+ fsen.setcdEnclosing(fm.getMethod().getClassDesc() );
- fsen.setfmEnclosing( fm );
- fsen.setmdEnclosing( fm.getMethod() );
- fsen.setcdEnclosing( fm.getMethod().getClassDesc() );
-
if( seseStack.empty() ) {
- // no local parent
- fsen.setLocalParent( null );
+ // no local parent
+ fsen.setLocalParent(null);
- allLocalRootSESEs.add( fsen );
+ allLocalRootSESEs.add(fsen);
- Set<FlatSESEEnterNode> seseSet = md2localRootSESEs.get( fm.getMethod() );
- if( seseSet == null ) {
- seseSet = new HashSet<FlatSESEEnterNode>();
- }
- seseSet.add( fsen );
- md2localRootSESEs.put( fm.getMethod(), seseSet );
+ Set<FlatSESEEnterNode> seseSet = md2localRootSESEs.get(fm.getMethod() );
+ if( seseSet == null ) {
+ seseSet = new HashSet<FlatSESEEnterNode>();
+ }
+ seseSet.add(fsen);
+ md2localRootSESEs.put(fm.getMethod(), seseSet);
} else {
- // otherwise a local parent/child relation
- // which is also the broader parent/child
- // relation as well
- seseStack.peek().addLocalChild( fsen );
- fsen.setLocalParent( seseStack.peek() );
-
- seseStack.peek().addChild( fsen );
- fsen.addParent( seseStack.peek() );
+ // otherwise a local parent/child relation
+ // which is also the broader parent/child
+ // relation as well
+ seseStack.peek().addLocalChild(fsen);
+ fsen.setLocalParent(seseStack.peek() );
+
+ seseStack.peek().addChild(fsen);
+ fsen.addParent(seseStack.peek() );
}
- seseStack.push( fsen );
+ seseStack.push(fsen);
} break;
case FKind.FlatSESEExitNode: {
case FKind.FlatReturnNode: {
FlatReturnNode frn = (FlatReturnNode) fn;
if( !seseStack.empty() ) {
- throw new Error( "Error: return statement enclosed within SESE "+
- seseStack.peek().getPrettyIdentifier() );
+ throw new Error("Error: return statement enclosed within SESE "+
+ seseStack.peek().getPrettyIdentifier() );
}
} break;
-
+
}
}
-
+
protected void findTransitiveParentChildRelations() {
-
- for (Iterator<FlatSESEEnterNode> itr = allSESEs.iterator(); itr.hasNext();) {
+
+ for (Iterator<FlatSESEEnterNode> itr = allSESEs.iterator(); itr.hasNext(); ) {
FlatSESEEnterNode fsen = itr.next();
boolean hasNoNestedChildren = fsen.getLocalChildren().isEmpty();
- boolean hasNoChildrenByCall = !hasChildrenByCall( fsen );
+ boolean hasNoChildrenByCall = !hasChildrenByCall(fsen);
- fsen.setIsLeafSESE( hasNoNestedChildren && hasNoChildrenByCall );
+ fsen.setIsLeafSESE(hasNoNestedChildren && hasNoChildrenByCall);
}
}
- protected boolean hasChildrenByCall( FlatSESEEnterNode fsen ) {
+ protected boolean hasChildrenByCall(FlatSESEEnterNode fsen) {
boolean hasChildrenByCall = false;
// visit every flat node in SESE body, find method calls that
// may transitively call methods with SESEs enclosed
Set<FlatNode> flatNodesToVisit = new HashSet<FlatNode>();
- flatNodesToVisit.add( fsen );
+ flatNodesToVisit.add(fsen);
Set<FlatNode> visited = new HashSet<FlatNode>();
-
+
while( !flatNodesToVisit.isEmpty() ) {
Iterator<FlatNode> fnItr = flatNodesToVisit.iterator();
FlatNode fn = fnItr.next();
- flatNodesToVisit.remove( fn );
- visited.add( fn );
-
+ flatNodesToVisit.remove(fn);
+ visited.add(fn);
+
if( fn.kind() == FKind.FlatCall ) {
- FlatCall fc = (FlatCall) fn;
- MethodDescriptor mdCallee = fc.getMethod();
- Set reachable = new HashSet();
-
- reachable.add( mdCallee );
- reachable.addAll( callGraph.getAllMethods( mdCallee ) );
- reachable.retainAll( methodsContainingSESEs );
-
- if( !reachable.isEmpty() ) {
- hasChildrenByCall = true;
-
- Set reachableSESEMethodSet =
- callGraph.getFirstReachableMethodContainingSESE( mdCallee, methodsContainingSESEs );
-
- reachableSESEMethodSet.add( mdCallee );
- reachableSESEMethodSet.retainAll( methodsContainingSESEs );
-
- for( Iterator iterator = reachableSESEMethodSet.iterator(); iterator.hasNext(); ) {
- MethodDescriptor md = (MethodDescriptor) iterator.next();
- Set<FlatSESEEnterNode> seseSet = md2localRootSESEs.get( md );
- if( seseSet != null ) {
- fsen.addChildren( seseSet );
- for( Iterator iterator2 = seseSet.iterator(); iterator2.hasNext(); ) {
- FlatSESEEnterNode child = (FlatSESEEnterNode) iterator2.next();
- child.addParent( fsen );
- }
- }
- }
- }
+ FlatCall fc = (FlatCall) fn;
+ MethodDescriptor mdCallee = fc.getMethod();
+ Set reachable = new HashSet();
+
+ reachable.add(mdCallee);
+ reachable.addAll(callGraph.getAllMethods(mdCallee) );
+ reachable.retainAll(methodsContainingSESEs);
+
+ if( !reachable.isEmpty() ) {
+ hasChildrenByCall = true;
+
+ Set reachableSESEMethodSet =
+ callGraph.getFirstReachableMethodContainingSESE(mdCallee, methodsContainingSESEs);
+
+ reachableSESEMethodSet.add(mdCallee);
+ reachableSESEMethodSet.retainAll(methodsContainingSESEs);
+
+ for( Iterator iterator = reachableSESEMethodSet.iterator(); iterator.hasNext(); ) {
+ MethodDescriptor md = (MethodDescriptor) iterator.next();
+ Set<FlatSESEEnterNode> seseSet = md2localRootSESEs.get(md);
+ if( seseSet != null ) {
+ fsen.addChildren(seseSet);
+ for( Iterator iterator2 = seseSet.iterator(); iterator2.hasNext(); ) {
+ FlatSESEEnterNode child = (FlatSESEEnterNode) iterator2.next();
+ child.addParent(fsen);
+ }
+ }
+ }
+ }
}
if( fn == fsen.getFlatExit() ) {
- // don't enqueue any futher nodes
- continue;
+ // don't enqueue any further nodes
+ continue;
}
for( int i = 0; i < fn.numNext(); i++ ) {
- FlatNode nn = fn.getNext( i );
+ FlatNode nn = fn.getNext(i);
- if( !visited.contains( nn ) ) {
- flatNodesToVisit.add( nn );
- }
+ if( !visited.contains(nn) ) {
+ flatNodesToVisit.add(nn);
+ }
}
}
// this sese/rblock/task and mark that this rblock might be executing.
// Important: skip the body of child rblocks, BUT DO mark the child ENTER
// and EXIT flat nodes as the parent being the current executing rblock!
- Hashtable<FlatNode, FlatMethod> flatNodesToVisit =
+ Hashtable<FlatNode, FlatMethod> flatNodesToVisit =
new Hashtable<FlatNode, FlatMethod>();
for( int i = 0; i < fsen.numNext(); i++ ) {
- FlatNode nn = fsen.getNext( i );
- flatNodesToVisit.put( nn, fsen.getfmEnclosing() );
+ FlatNode nn = fsen.getNext(i);
+ flatNodesToVisit.put(nn, fsen.getfmEnclosing() );
}
-
+
Set<FlatNode> visited = new HashSet<FlatNode>();
-
+
while (!flatNodesToVisit.isEmpty()) {
- Map.Entry me = (Map.Entry) flatNodesToVisit.entrySet().iterator().next();
- FlatNode fn = (FlatNode) me.getKey();
- FlatMethod fm = (FlatMethod) me.getValue();
-
- flatNodesToVisit.remove(fn);
- visited.add(fn);
-
- // the "is potential stall site" strategy is to propagate
- // "false" from the beginning of a task until you hit a
- // child, then from the child's exit propagate "true" for
- // the parent statements after children. When you pull a node
- // out of the bag for traversal and it happens to be an
- // enter or an exit node, fix the dumb propagation that
- // your IR predecessor pushed on you
- Boolean isPotentialStallSite = isPotentialStallSite(fn);
-
- if (fn == fsen.getFlatExit()) {
- // don't enqueue any further nodes when you find your exit,
- // NOR mark your own flat as a statement you are currently
- // executing, your parent(s) will mark it
- continue;
- }
-
- if (fn instanceof FlatSESEExitNode) {
- setIsPotentialStallSite(fn, false);
- isPotentialStallSite = true;
- }
-
- // the purpose of this traversal is to find program
- // points where rblock 'fsen' might be executing
- addPossibleExecutingRBlock(fn, fsen);
-
- if (fn instanceof FlatSESEEnterNode) {
- // don't visit internal nodes of child,
- // just enqueue the exit node
- FlatSESEEnterNode child = (FlatSESEEnterNode) fn;
- assert fsen.getChildren().contains(child);
- assert child.getParents().contains(fsen);
- flatNodesToVisit.put(child.getFlatExit(), fm);
- setIsPotentialStallSite(fn, false);
-
- // explicitly do this to handle the case that you
- // should mark yourself as possibly executing at
- // your own exit, because one instance can
- // recursively invoke another
- addPossibleExecutingRBlock(child.getFlatExit(), fsen);
-
- continue;
- }
-
- // if previous flat nodes have any changes,,
- // propagate predecessor's status of stall site potential
-
- if (fn instanceof FlatCall) {
-
- // start visiting nodes in other contexts
- FlatCall fc = (FlatCall) fn;
- MethodDescriptor mdCallee = fc.getMethod();
-
- Set<MethodDescriptor> implementations = new HashSet<MethodDescriptor>();
-
- if (mdCallee.isStatic()) {
- implementations.add(mdCallee);
- } else {
- TypeDescriptor typeDesc = fc.getThis().getType();
- implementations.addAll(callGraph.getMethods(mdCallee, typeDesc));
- }
-
- for (Iterator imps = implementations.iterator(); imps.hasNext();) {
- MethodDescriptor mdImp = (MethodDescriptor) imps.next();
- FlatMethod fmImp = state.getMethodFlat(mdImp);
-
- // keep mapping from fc's md to <fc,caller's md>
- // later, when return node of callee becomes a potential stall site,
- // following flat nodes of fc should be re-analyzied
- if(!methodmap.containsKey(fmImp)){
- methodmap.put(mdImp, new HashSet<Pair<FlatCall,MethodDescriptor>>());
- }
- methodmap.get(mdImp).add(new Pair<FlatCall,MethodDescriptor>(fc,fm.getMethod()));
-
- if ((isPotentialStallSite && !isPotentialStallSite(fmImp)) || !visited.contains(fmImp)) {
- flatNodesToVisit.put(fmImp, fmImp);
-
- // propagate your IR graph predecessor's stall site potential
- mergeIsPotentialStallSite(fmImp, isPotentialStallSite);
- }
-
- }
- // don't 'continue' out of this loop, also enqueue
- // flat nodes that flow in the current method context
- }
-
- if (fn instanceof FlatReturnNode) {
- // if return node is potential stall site, need to inform its caller
- if (isPotentialStallSite) {
- Set<Pair<FlatCall, MethodDescriptor>> callset = methodmap.get(fm.getMethod());
- if (callset != null) {
- for (Pair<FlatCall, MethodDescriptor> fcallpair : callset) {
- FlatCall fcall = fcallpair.getFirst();
- MethodDescriptor mdcaller = fcallpair.getSecond();
- for (int i = 0; i < fcall.numNext(); i++) {
- FlatNode nn = fcall.getNext(i);
- if ( visited.contains(nn) && (!isPotentialStallSite(nn)) ) {
- mergeIsPotentialStallSite(nn, isPotentialStallSite);
- FlatMethod fmcaller = state.getMethodFlat(mdcaller);
- flatNodesToVisit.put(nn, fmcaller);
- }
- }
- }
- }
- }
- }
-
- // note: only when current flat node has a change on the status of potential
- // stall site, need to visit following flat nodes
- for (int i = 0; i < fn.numNext(); i++) {
- FlatNode nn = fn.getNext(i);
- if ((isPotentialStallSite && !isPotentialStallSite(nn)) || !visited.contains(nn)) {
- flatNodesToVisit.put(nn, fm);
- mergeIsPotentialStallSite(nn, isPotentialStallSite);
- }
- }
+ Map.Entry me = (Map.Entry)flatNodesToVisit.entrySet().iterator().next();
+ FlatNode fn = (FlatNode) me.getKey();
+ FlatMethod fm = (FlatMethod) me.getValue();
+
+ flatNodesToVisit.remove(fn);
+ visited.add(fn);
+
+ // the "is potential stall site" strategy is to propagate
+ // "false" from the beginning of a task until you hit a
+ // child, then from the child's exit propagate "true" for
+ // the parent statements after children. When you pull a node
+ // out of the bag for traversal and it happens to be an
+ // enter or an exit node, fix the dumb propagation that
+ // your IR predecessor pushed on you
+ Boolean isPotentialStallSite = isPotentialStallSite(fn);
+
+ if (fn == fsen.getFlatExit()) {
+ // don't enqueue any further nodes when you find your exit,
+ // NOR mark your own flat as a statement you are currently
+ // executing, your parent(s) will mark it
+ continue;
+ }
+
+ if (fn instanceof FlatSESEExitNode) {
+ setIsPotentialStallSite(fn, false);
+ isPotentialStallSite = true;
+ }
+
+ // the purpose of this traversal is to find program
+ // points where rblock 'fsen' might be executing
+ addPossibleExecutingRBlock(fn, fsen);
+
+ if (fn instanceof FlatSESEEnterNode) {
+ // don't visit internal nodes of child,
+ // just enqueue the exit node
+ FlatSESEEnterNode child = (FlatSESEEnterNode) fn;
+ assert fsen.getChildren().contains(child);
+ assert child.getParents().contains(fsen);
+ flatNodesToVisit.put(child.getFlatExit(), fm);
+ setIsPotentialStallSite(fn, false);
+
+ // explicitly do this to handle the case that you
+ // should mark yourself as possibly executing at
+ // your own exit, because one instance can
+ // recursively invoke another
+ addPossibleExecutingRBlock(child.getFlatExit(), fsen);
+
+ continue;
+ }
+
+      // if previous flat nodes have any changes,
+ // propagate predecessor's status of stall site potential
+
+ if (fn instanceof FlatCall) {
+
+ // start visiting nodes in other contexts
+ FlatCall fc = (FlatCall) fn;
+ MethodDescriptor mdCallee = fc.getMethod();
+
+ Set<MethodDescriptor> implementations = new HashSet<MethodDescriptor>();
+
+ if (mdCallee.isStatic()) {
+ implementations.add(mdCallee);
+ } else {
+ TypeDescriptor typeDesc = fc.getThis().getType();
+ implementations.addAll(callGraph.getMethods(mdCallee, typeDesc));
+ }
+
+ for (Iterator imps = implementations.iterator(); imps.hasNext(); ) {
+ MethodDescriptor mdImp = (MethodDescriptor) imps.next();
+ FlatMethod fmImp = state.getMethodFlat(mdImp);
+
+ // keep mapping from fc's md to <fc,caller's md>
+ // later, when return node of callee becomes a potential stall site,
+          // following flat nodes of fc should be re-analyzed
+ if(!methodmap.containsKey(fmImp)) {
+ methodmap.put(mdImp, new HashSet<Pair<FlatCall,MethodDescriptor>>());
+ }
+ methodmap.get(mdImp).add(new Pair<FlatCall,MethodDescriptor>(fc,fm.getMethod()));
+
+ if ((isPotentialStallSite && !isPotentialStallSite(fmImp)) || !visited.contains(fmImp)) {
+ flatNodesToVisit.put(fmImp, fmImp);
+
+ // propagate your IR graph predecessor's stall site potential
+ mergeIsPotentialStallSite(fmImp, isPotentialStallSite);
+ }
+
+ }
+ // don't 'continue' out of this loop, also enqueue
+ // flat nodes that flow in the current method context
+ }
+
+ if (fn instanceof FlatReturnNode) {
+ // if return node is potential stall site, need to inform its caller
+ if (isPotentialStallSite) {
+ Set<Pair<FlatCall, MethodDescriptor>> callset = methodmap.get(fm.getMethod());
+ if (callset != null) {
+ for (Pair<FlatCall, MethodDescriptor> fcallpair : callset) {
+ FlatCall fcall = fcallpair.getFirst();
+ MethodDescriptor mdcaller = fcallpair.getSecond();
+ for (int i = 0; i < fcall.numNext(); i++) {
+ FlatNode nn = fcall.getNext(i);
+ if ( visited.contains(nn) && (!isPotentialStallSite(nn)) ) {
+ mergeIsPotentialStallSite(nn, isPotentialStallSite);
+ FlatMethod fmcaller = state.getMethodFlat(mdcaller);
+ flatNodesToVisit.put(nn, fmcaller);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // note: only when current flat node has a change on the status of potential
+ // stall site, need to visit following flat nodes
+ for (int i = 0; i < fn.numNext(); i++) {
+ FlatNode nn = fn.getNext(i);
+ if ((isPotentialStallSite && !isPotentialStallSite(nn)) || !visited.contains(nn)) {
+ flatNodesToVisit.put(nn, fm);
+ mergeIsPotentialStallSite(nn, isPotentialStallSite);
+ }
+ }
}
}
}
-
- protected void addPossibleExecutingRBlock( FlatNode fn,
- FlatSESEEnterNode fsen ) {
- Set<FlatSESEEnterNode> currentSESEs = fn2currentSESEs.get( fn );
+ protected void addPossibleExecutingRBlock(FlatNode fn,
+ FlatSESEEnterNode fsen) {
+
+ Set<FlatSESEEnterNode> currentSESEs = fn2currentSESEs.get(fn);
if( currentSESEs == null ) {
currentSESEs = new HashSet<FlatSESEEnterNode>();
}
- currentSESEs.add( fsen );
- fn2currentSESEs.put( fn, currentSESEs );
+ currentSESEs.add(fsen);
+ fn2currentSESEs.put(fn, currentSESEs);
}
-
+
// definitively set whether a statement is a potential stall site
// such as a task exit is FALSE and the statement following an exit
// is TRUE
- protected void setIsPotentialStallSite( FlatNode fn,
- Boolean ipss ) {
- fn2isPotentialStallSite.put( fn, ipss );
+ protected void setIsPotentialStallSite(FlatNode fn,
+ Boolean ipss) {
+ fn2isPotentialStallSite.put(fn, ipss);
}
// Use this to OR the previous result with a new result
- protected void mergeIsPotentialStallSite( FlatNode fn,
- Boolean ipss ) {
- Boolean ipssPrev = isPotentialStallSite( fn );
- setIsPotentialStallSite( fn, ipssPrev || ipss );
+ protected void mergeIsPotentialStallSite(FlatNode fn,
+ Boolean ipss) {
+ Boolean ipssPrev = isPotentialStallSite(fn);
+ setIsPotentialStallSite(fn, ipssPrev || ipss);
}
FlatMethod fm = state.getMethodFlat(mdItr.next());
printStatusMap(fm);
}
- System.exit( 0 );
+ System.exit(0);
}
protected void printStatusMap(FlatMethod fm) {
System.out.println(fn+"[["+isPotentialStallSite(fn)+"]]");
for (int i = 0; i < fn.numNext(); i++) {
- FlatNode nn = fn.getNext(i);
+ FlatNode nn = fn.getNext(i);
- if (!visited.contains(nn)) {
- flatNodesToVisit.add(nn);
- }
+ if (!visited.contains(nn)) {
+ flatNodesToVisit.add(nn);
+ }
}
}
}
}
public void addConflictEdge(ConflictEdge e) {
- conflictEdgeSet.add(e);
+ conflictEdgeSet.add(e);
}
public boolean containsConflictEdge(ConflictEdge e) {
public boolean hasSelfCoarseEdge(ConflictNode node) {
Set<ConflictEdge> set = node.getEdgeSet();
- for (Iterator iterator = set.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = set.iterator(); iterator.hasNext(); ) {
ConflictEdge conflictEdge = (ConflictEdge) iterator.next();
if (conflictEdge.isCoarseEdge() && conflictEdge.getVertexU() == conflictEdge.getVertexV()) {
- return true;
+ return true;
}
}
return false;
}
-
+
public boolean hasSelfEdge(ConflictNode node) {
Set<ConflictEdge> set = node.getEdgeSet();
- for (Iterator iterator = set.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = set.iterator(); iterator.hasNext(); ) {
ConflictEdge conflictEdge = (ConflictEdge) iterator.next();
if ((!conflictEdge.isCoarseEdge()) && conflictEdge.getVertexU() == conflictEdge.getVertexV()) {
- return true;
+ return true;
}
}
return false;
}
-
+
public boolean hasCoarseEdgeWithParentCoarse(ConflictNode node) {
Set<ConflictEdge> set = node.getEdgeSet();
- for (Iterator iterator = set.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = set.iterator(); iterator.hasNext(); ) {
ConflictEdge conflictEdge = (ConflictEdge) iterator.next();
ConflictNode cNode;
if (conflictEdge.getVertexU() == node) {
- cNode = conflictEdge.getVertexV();
+ cNode = conflictEdge.getVertexV();
} else {
- cNode = conflictEdge.getVertexU();
+ cNode = conflictEdge.getVertexU();
}
Integer cNodeTypeIn = nodeTypeMap.get(cNode);
if (cNodeTypeIn != null && cNodeTypeIn.intValue() == ConflictNode.PARENT_COARSE) {
- return true;
+ return true;
}
}
return false;
} else {
return null;
}
-
-
+
+
int count = 0;
Set<ConflictEdge> edgeSet = newNode.getEdgeSet();
- for (Iterator iterator = edgeSet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = edgeSet.iterator(); iterator.hasNext(); ) {
ConflictEdge conflictEdge = (ConflictEdge) iterator.next();
if (!conflictEdge.getVertexU().equals(newNode)
&& conflictNodeSet.contains(conflictEdge.getVertexU())) {
- count++;
+ count++;
} else if (!conflictEdge.getVertexV().equals(newNode)
- && conflictNodeSet.contains(conflictEdge.getVertexV())) {
- count++;
+ && conflictNodeSet.contains(conflictEdge.getVertexV())) {
+ count++;
}
}
-
- if(conflictNodeSet.contains(newNode)){
+
+ if(conflictNodeSet.contains(newNode)) {
count++;
}
- if(isWriteNode(newNode)){
+ if(isWriteNode(newNode)) {
if (count == conflictNodeSet.size()) {
- // connected to all current nodes in group
- return newNode;
+ // connected to all current nodes in group
+ return newNode;
}
- }else{
+ } else {
// it is read node
int writeNodeCount=0;
- for (Iterator iterator = conflictNodeSet.iterator(); iterator.hasNext();) {
- ConflictNode node = (ConflictNode) iterator.next();
- if(isWriteNode(node)){
- writeNodeCount++;
- }
+ for (Iterator iterator = conflictNodeSet.iterator(); iterator.hasNext(); ) {
+ ConflictNode node = (ConflictNode) iterator.next();
+ if(isWriteNode(node)) {
+ writeNodeCount++;
+ }
}
if (count == writeNodeCount) {
- // connected to all current write nodes in group
- return newNode;
+ // connected to all current write nodes in group
+ return newNode;
}
}
return false;
}
- ConflictNode nodeToAdd = conflictNodeSet.contains(newEdge.getVertexU()) ? newEdge.getVertexV()
- : newEdge.getVertexU();
+ ConflictNode nodeToAdd = conflictNodeSet.contains(newEdge.getVertexU())?newEdge.getVertexV()
+ :newEdge.getVertexU();
HashSet<ConflictNode> nodeSet = new HashSet<ConflictNode>(conflictNodeSet);
- for (Iterator edgeIter = nodeToAdd.getEdgeSet().iterator(); edgeIter.hasNext();) {
+ for (Iterator edgeIter = nodeToAdd.getEdgeSet().iterator(); edgeIter.hasNext(); ) {
ConflictEdge edge = (ConflictEdge) edgeIter.next();
if (nodeSet.contains(edge.getVertexU())) {
- nodeSet.remove(edge.getVertexU());
+ nodeSet.remove(edge.getVertexU());
} else if (nodeSet.contains(edge.getVertexV())) {
- nodeSet.remove(edge.getVertexV());
+ nodeSet.remove(edge.getVertexV());
}
}
public String toString() {
String rtr = "";
- for (Iterator<ConflictNode> iterator = conflictNodeSet.iterator(); iterator.hasNext();) {
+ for (Iterator<ConflictNode> iterator = conflictNodeSet.iterator(); iterator.hasNext(); ) {
ConflictNode node = (ConflictNode) iterator.next();
rtr += " " + node + "::" + getNodeType(node);
}
public class SESEWaitingQueue {
public static final int NORMAL= 0; // enqueue all stuff.
public static final int EXCEPTION= 1; // dynamically decide whether a waiting element is enqueued or not.
-
+
private HashMap<TempDescriptor, Set<WaitingElement>>tmp2WaitingElement;
private HashMap<Integer, Set<WaitingElement>>mapWaitingElement;
private HashMap<Integer, Integer>mapType;
-
- public SESEWaitingQueue(){
+
+ public SESEWaitingQueue() {
mapWaitingElement=new HashMap<Integer, Set<WaitingElement>>();
tmp2WaitingElement=new HashMap<TempDescriptor, Set<WaitingElement>>();
mapType=new HashMap<Integer, Integer>();
}
-
- public void setType(int queueID, int type){
+
+ public void setType(int queueID, int type) {
mapType.put(new Integer(queueID), new Integer(type));
}
-
- public int getType(int queueID){
+
+ public int getType(int queueID) {
Integer type=mapType.get(new Integer(queueID));
- if(type==null){
+ if(type==null) {
return SESEWaitingQueue.NORMAL;
} else {
return type.intValue();
}
}
-
+
public void setWaitingElementSet(int queueID, Set<WaitingElement> set) {
mapWaitingElement.put(new Integer(queueID), set);
- for(Iterator<WaitingElement> wit=set.iterator();wit.hasNext();) {
+ for(Iterator<WaitingElement> wit=set.iterator(); wit.hasNext(); ) {
WaitingElement we=wit.next();
TempDescriptor tmp=we.getTempDesc();
if (!tmp2WaitingElement.containsKey(tmp))
public Set<WaitingElement> getWaitingElementSet(TempDescriptor tmp) {
return tmp2WaitingElement.get(tmp);
}
-
- public Set<WaitingElement> getWaitingElementSet(int queueID){
+
+ public Set<WaitingElement> getWaitingElementSet(int queueID) {
return mapWaitingElement.get(new Integer(queueID));
}
-
- public Set<Integer> getQueueIDSet(){
+
+ public Set<Integer> getQueueIDSet() {
return mapWaitingElement.keySet();
}
-
- public int getWaitingElementSize(){
+
+ public int getWaitingElementSize() {
int size=0;
Set<Integer> keySet=mapWaitingElement.keySet();
- for (Iterator iterator = keySet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = keySet.iterator(); iterator.hasNext(); ) {
Integer key = (Integer) iterator.next();
size+=mapWaitingElement.get(key).size();
}
public class SESEandAgePair {
private FlatSESEEnterNode sese;
- private Integer age;
+ private Integer age;
- public SESEandAgePair( FlatSESEEnterNode sese,
- Integer age ) {
+ public SESEandAgePair(FlatSESEEnterNode sese,
+ Integer age) {
this.sese = sese;
this.age = age;
}
return age;
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
SESEandAgePair p = (SESEandAgePair) o;
- return age.equals( p.age ) &&
- sese.equals( p.sese );
+ return age.equals(p.age) &&
+ sese.equals(p.sese);
}
public int hashCode() {
public String toString() {
return "SESE_"+
- sese.getPrettyIdentifier()+
- sese.getIdentifier()+
- "_"+
- age;
+ sese.getPrettyIdentifier()+
+ sese.getIdentifier()+
+ "_"+
+ age;
}
}
public class SVKey {
private FlatSESEEnterNode sese;
- private TempDescriptor var;
+ private TempDescriptor var;
- public SVKey( FlatSESEEnterNode sese,
- TempDescriptor var ) {
+ public SVKey(FlatSESEEnterNode sese,
+ TempDescriptor var) {
this.sese = sese;
this.var = var;
}
return var;
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
SVKey k = (SVKey) o;
- return var.equals( k.var ) &&
- sese.equals( k.sese );
+ return var.equals(k.var) &&
+ sese.equals(k.sese);
}
public int hashCode() {
private Hashtable< SVKey, Set<VariableSourceToken> > sv2vst;
// maximum age from aging operation
- private static final Integer MAX_AGE = new Integer( 2 );
-
- public static final Integer SrcType_READY = new Integer( 34 );
- public static final Integer SrcType_STATIC = new Integer( 35 );
- public static final Integer SrcType_DYNAMIC = new Integer( 36 );
+ private static final Integer MAX_AGE = new Integer(2);
+
+ public static final Integer SrcType_READY = new Integer(34);
+ public static final Integer SrcType_STATIC = new Integer(35);
+ public static final Integer SrcType_DYNAMIC = new Integer(36);
public static RBlockRelationAnalysis rblockRel;
// make a deep copy of the in table
- public VarSrcTokTable( VarSrcTokTable in ) {
+ public VarSrcTokTable(VarSrcTokTable in) {
this();
- merge( in );
+ merge(in);
assertConsistency();
}
- public void add( VariableSourceToken vst ) {
- addPrivate( vst );
+ public void add(VariableSourceToken vst) {
+ addPrivate(vst);
assertConsistency();
}
- private void addPrivate( VariableSourceToken vst ) {
+ private void addPrivate(VariableSourceToken vst) {
// make sure we aren't clobbering anything!
- if( trueSet.contains( vst ) ) {
+ if( trueSet.contains(vst) ) {
// if something with the same hashcode is in the true set, they might
// have different reference variable sets because that set is not considered
// in a token's equality, so make sure we smooth that out right here
VariableSourceToken vstAlready = trueSet.get(vst);
if (vstAlready!=null) {
- removePrivate( vstAlready );
+ removePrivate(vstAlready);
HashSet<TempDescriptor> toAddSet=new HashSet<TempDescriptor>();
toAddSet.addAll(vstAlready.getRefVars());
toAddSet.addAll(vst.getRefVars());
}
}
- trueSet.add( vst );
+ trueSet.add(vst);
- Set<VariableSourceToken> s = sese2vst.get( vst.getSESE() );
+ Set<VariableSourceToken> s = sese2vst.get(vst.getSESE() );
if( s == null ) {
s = new HashSet<VariableSourceToken>();
- sese2vst.put( vst.getSESE(), s );
+ sese2vst.put(vst.getSESE(), s);
}
- s.add( vst );
+ s.add(vst);
Iterator<TempDescriptor> refVarItr = vst.getRefVars().iterator();
while( refVarItr.hasNext() ) {
TempDescriptor refVar = refVarItr.next();
- s = var2vst.get( refVar );
+ s = var2vst.get(refVar);
if( s == null ) {
- s = new HashSet<VariableSourceToken>();
- var2vst.put( refVar, s );
+ s = new HashSet<VariableSourceToken>();
+ var2vst.put(refVar, s);
}
- s.add( vst );
+ s.add(vst);
- SVKey key = new SVKey( vst.getSESE(), refVar );
- s = sv2vst.get( key );
+ SVKey key = new SVKey(vst.getSESE(), refVar);
+ s = sv2vst.get(key);
if( s == null ) {
- s = new HashSet<VariableSourceToken>();
- sv2vst.put( key, s );
+ s = new HashSet<VariableSourceToken>();
+ sv2vst.put(key, s);
}
- s.add( vst );
+ s.add(vst);
}
}
- public void addAll( Set<VariableSourceToken> s ) {
+ public void addAll(Set<VariableSourceToken> s) {
Iterator<VariableSourceToken> itr = s.iterator();
while( itr.hasNext() ) {
- addPrivate( itr.next() );
+ addPrivate(itr.next() );
}
assertConsistency();
}
return trueSet;
}
- public Set<VariableSourceToken> get( FlatSESEEnterNode sese ) {
- Set<VariableSourceToken> s = sese2vst.get( sese );
+ public Set<VariableSourceToken> get(FlatSESEEnterNode sese) {
+ Set<VariableSourceToken> s = sese2vst.get(sese);
if( s == null ) {
- s = new HashSet<VariableSourceToken>();
- sese2vst.put( sese, s );
+ s = new HashSet<VariableSourceToken>();
+ sese2vst.put(sese, s);
}
return s;
}
- public Set<VariableSourceToken> get( TempDescriptor refVar ) {
- Set<VariableSourceToken> s = var2vst.get( refVar );
+ public Set<VariableSourceToken> get(TempDescriptor refVar) {
+ Set<VariableSourceToken> s = var2vst.get(refVar);
if( s == null ) {
s = new HashSet<VariableSourceToken>();
- var2vst.put( refVar, s );
+ var2vst.put(refVar, s);
}
return s;
}
- public Set<VariableSourceToken> get( FlatSESEEnterNode sese,
- TempDescriptor refVar ) {
- SVKey key = new SVKey( sese, refVar );
- Set<VariableSourceToken> s = sv2vst.get( key );
+ public Set<VariableSourceToken> get(FlatSESEEnterNode sese,
+ TempDescriptor refVar) {
+ SVKey key = new SVKey(sese, refVar);
+ Set<VariableSourceToken> s = sv2vst.get(key);
if( s == null ) {
s = new HashSet<VariableSourceToken>();
- sv2vst.put( key, s );
+ sv2vst.put(key, s);
}
return s;
}
- public Set<VariableSourceToken> get( FlatSESEEnterNode sese,
- Integer age ) {
+ public Set<VariableSourceToken> get(FlatSESEEnterNode sese,
+ Integer age) {
- HashSet<VariableSourceToken> s0 = (HashSet<VariableSourceToken>) sese2vst.get( sese );
+ HashSet<VariableSourceToken> s0 = (HashSet<VariableSourceToken>)sese2vst.get(sese);
if( s0 == null ) {
- s0 = new HashSet<VariableSourceToken>();
- sese2vst.put( sese, s0 );
+ s0 = new HashSet<VariableSourceToken>();
+ sese2vst.put(sese, s0);
}
- Set<VariableSourceToken> s = (Set<VariableSourceToken>) s0.clone();
+ Set<VariableSourceToken> s = (Set<VariableSourceToken>)s0.clone();
Iterator<VariableSourceToken> sItr = s.iterator();
while( sItr.hasNext() ) {
VariableSourceToken vst = sItr.next();
- if( !vst.getAge().equals( age ) ) {
- s.remove( vst );
+ if( !vst.getAge().equals(age) ) {
+ s.remove(vst);
}
}
// merge now makes a deep copy of incoming stuff because tokens may
// be modified (reference var sets) by later ops that change more
// than one table, causing inconsistency
- public void merge( VarSrcTokTable in ) {
+ public void merge(VarSrcTokTable in) {
if( in == null ) {
return;
Iterator<VariableSourceToken> vstItr = in.trueSet.iterator();
while( vstItr.hasNext() ) {
VariableSourceToken vst = vstItr.next();
- this.addPrivate( vst.copy() );
+ this.addPrivate(vst.copy() );
}
assertConsistency();
}
- // remove operations must leave the trueSet
+ // remove operations must leave the trueSet
// and the hash maps consistent
- public void remove( VariableSourceToken vst ) {
- removePrivate( vst );
+ public void remove(VariableSourceToken vst) {
+ removePrivate(vst);
assertConsistency();
}
- private void removePrivate( VariableSourceToken vst ) {
- trueSet.remove( vst );
-
+ private void removePrivate(VariableSourceToken vst) {
+ trueSet.remove(vst);
+
Set<VariableSourceToken> s;
- s = get( vst.getSESE() );
- if( s != null ) { s.remove( vst ); }
+ s = get(vst.getSESE() );
+ if( s != null ) {
+ s.remove(vst);
+ }
Iterator<TempDescriptor> refVarItr = vst.getRefVars().iterator();
while( refVarItr.hasNext() ) {
TempDescriptor refVar = refVarItr.next();
- s = get( refVar );
- if( s != null ) {
- s.remove( vst );
+ s = get(refVar);
+ if( s != null ) {
+ s.remove(vst);
if( s.isEmpty() ) {
- var2vst.remove( refVar );
+ var2vst.remove(refVar);
}
}
-
- s = get( vst.getSESE(), refVar );
- if( s != null ) {
- s.remove( vst );
+
+ s = get(vst.getSESE(), refVar);
+ if( s != null ) {
+ s.remove(vst);
if( s.isEmpty() ) {
- sv2vst.remove( new SVKey( vst.getSESE(), refVar ) );
+ sv2vst.remove(new SVKey(vst.getSESE(), refVar) );
}
}
}
}
- public void remove( FlatSESEEnterNode sese ) {
- removePrivate( sese );
+ public void remove(FlatSESEEnterNode sese) {
+ removePrivate(sese);
assertConsistency();
}
- public void removePrivate( FlatSESEEnterNode sese ) {
- Set<VariableSourceToken> s = sese2vst.get( sese );
+ public void removePrivate(FlatSESEEnterNode sese) {
+ Set<VariableSourceToken> s = sese2vst.get(sese);
if( s == null ) {
return;
}
Iterator<VariableSourceToken> itr = s.iterator();
while( itr.hasNext() ) {
VariableSourceToken vst = itr.next();
- removePrivate( vst );
+ removePrivate(vst);
}
- sese2vst.remove( sese );
+ sese2vst.remove(sese);
}
- public void remove( TempDescriptor refVar ) {
- removePrivate( refVar );
+ public void remove(TempDescriptor refVar) {
+ removePrivate(refVar);
assertConsistency();
}
- private void removePrivate( TempDescriptor refVar ) {
- Set<VariableSourceToken> s = var2vst.get( refVar );
+ private void removePrivate(TempDescriptor refVar) {
+ Set<VariableSourceToken> s = var2vst.get(refVar);
if( s == null ) {
return;
}
-
+
Set<VariableSourceToken> forRemoval = new HashSet<VariableSourceToken>();
// iterate over tokens that this temp can reference, make a set
while( itr.hasNext() ) {
VariableSourceToken vst = itr.next();
Set<TempDescriptor> refVars = vst.getRefVars();
- assert refVars.contains( refVar );
- forRemoval.add( vst );
+ assert refVars.contains(refVar);
+ forRemoval.add(vst);
}
itr = forRemoval.iterator();
// referencing this token, just take it
// out of the table all together
if( refVars.size() == 1 ) {
- removePrivate( vst );
+ removePrivate(vst);
}
- sv2vst.remove( new SVKey( vst.getSESE(), refVar ) );
+ sv2vst.remove(new SVKey(vst.getSESE(), refVar) );
HashSet<TempDescriptor> newset=new HashSet<TempDescriptor>();
newset.addAll(vst.getRefVars());
}
- var2vst.remove( refVar );
+ var2vst.remove(refVar);
}
- public void remove( FlatSESEEnterNode sese,
- TempDescriptor var ) {
+ public void remove(FlatSESEEnterNode sese,
+ TempDescriptor var) {
// don't seem to need this, don't bother maintaining
// until its clear we need it
// age tokens with respect to SESE curr, where
// any curr tokens increase age by 1
- public void age( FlatSESEEnterNode curr ) {
+ public void age(FlatSESEEnterNode curr) {
Set<VariableSourceToken> forRemoval =
new HashSet<VariableSourceToken>();
while( itr.hasNext() ) {
VariableSourceToken vst = itr.next();
- if( vst.getSESE().equals( curr ) ) {
+ if( vst.getSESE().equals(curr) ) {
// only age if the token isn't already the maximum age
if( vst.getAge() < MAX_AGE ) {
-
- forRemoval.add( vst );
-
- forAddition.add( new VariableSourceToken( vst.getRefVars(),
- curr,
- vst.getAge() + 1,
- vst.getAddrVar()
- )
- );
+
+ forRemoval.add(vst);
+
+ forAddition.add(new VariableSourceToken(vst.getRefVars(),
+ curr,
+ vst.getAge() + 1,
+ vst.getAddrVar()
+ )
+ );
}
- }
+ }
}
-
+
itr = forRemoval.iterator();
while( itr.hasNext() ) {
VariableSourceToken vst = itr.next();
- remove( vst );
+ remove(vst);
}
-
+
itr = forRemoval.iterator();
while( itr.hasNext() ) {
VariableSourceToken vst = itr.next();
- add( vst );
+ add(vst);
}
assertConsistency();
// at an SESE enter node, all ref vars in the SESE's in-set will
// be copied into the SESE's local scope, change source to itself
- public void ownInSet( FlatSESEEnterNode curr ) {
+ public void ownInSet(FlatSESEEnterNode curr) {
Iterator<TempDescriptor> inVarItr = curr.getInVarSet().iterator();
while( inVarItr.hasNext() ) {
TempDescriptor inVar = inVarItr.next();
- remove( inVar );
+ remove(inVar);
assertConsistency();
Set<TempDescriptor> refVars = new HashSet<TempDescriptor>();
- refVars.add( inVar );
- add( new VariableSourceToken( refVars,
- curr,
- new Integer( 0 ),
- inVar
- )
- );
+ refVars.add(inVar);
+ add(new VariableSourceToken(refVars,
+ curr,
+ new Integer(0),
+ inVar
+ )
+ );
assertConsistency();
}
}
-
+
// for the given SESE, change child tokens into this parent
- public void remapChildTokens( FlatSESEEnterNode curr ) {
+ public void remapChildTokens(FlatSESEEnterNode curr) {
Iterator<FlatSESEEnterNode> childItr = curr.getLocalChildren().iterator();
while( childItr.hasNext() ) {
FlatSESEEnterNode child = childItr.next();
-
+
// set of VSTs for removal
HashSet<VariableSourceToken> removalSet=new HashSet<VariableSourceToken>();
// set of VSTs for additon
HashSet<VariableSourceToken> additionSet=new HashSet<VariableSourceToken>();
-
- Iterator<VariableSourceToken> vstItr = get( child ).iterator();
+
+ Iterator<VariableSourceToken> vstItr = get(child).iterator();
while( vstItr.hasNext() ) {
- VariableSourceToken vst = vstItr.next();
- removalSet.add(vst);
-
- additionSet.add( new VariableSourceToken( vst.getRefVars(),
- curr,
- new Integer( 0 ),
- vst.getAddrVar()
- )
- );
+ VariableSourceToken vst = vstItr.next();
+ removalSet.add(vst);
+
+ additionSet.add(new VariableSourceToken(vst.getRefVars(),
+ curr,
+ new Integer(0),
+ vst.getAddrVar()
+ )
+ );
}
-
+
// remove( eah item in forremoval )
vstItr = removalSet.iterator();
while( vstItr.hasNext() ) {
- VariableSourceToken vst = vstItr.next();
- remove( vst );
+ VariableSourceToken vst = vstItr.next();
+ remove(vst);
}
// add( each ite inm for additon _
vstItr = additionSet.iterator();
while( vstItr.hasNext() ) {
- VariableSourceToken vst = vstItr.next();
- add( vst );
+ VariableSourceToken vst = vstItr.next();
+ add(vst);
}
}
assertConsistency();
- }
-
+ }
+
// this method is called at the SESE exit of SESE 'curr'
// if the sources for a variable written by curr can also
// whether it ends up writing to it or not. It will always, then,
// appear in curr's out-set.
public Set<TempDescriptor>
- calcVirtReadsAndPruneParentAndSiblingTokens( FlatSESEEnterNode exiter,
- Set<TempDescriptor> liveVars ) {
+ calcVirtReadsAndPruneParentAndSiblingTokens(FlatSESEEnterNode exiter,
+ Set<TempDescriptor> liveVars) {
Set<TempDescriptor> virtReadSet = new HashSet<TempDescriptor>();
Set<FlatSESEEnterNode> alternateSESEs = new HashSet<FlatSESEEnterNode>();
FlatSESEEnterNode ancestor = exiter;
- boolean findMore = true;
+ boolean findMore = true;
while( findMore ) {
// first move up to the next ancestor
Iterator<FlatSESEEnterNode> childItr;
if( ancestor == null ) {
- // when some caller task is the next parent, the siblings
- // of the current task are other local root tasks
- ancestor = rblockRel.getCallerProxySESE();
- childItr = rblockRel.getLocalRootSESEs( exiter.getfmEnclosing() ).iterator();
- findMore = false;
+ // when some caller task is the next parent, the siblings
+ // of the current task are other local root tasks
+ ancestor = rblockRel.getCallerProxySESE();
+ childItr = rblockRel.getLocalRootSESEs(exiter.getfmEnclosing() ).iterator();
+ findMore = false;
} else {
- // otherwise, the siblings are locally-defined
- childItr = ancestor.getLocalChildren().iterator();
+ // otherwise, the siblings are locally-defined
+ childItr = ancestor.getLocalChildren().iterator();
- // and there is no further ancestry beyond the main task
- if( ancestor.equals( rblockRel.getMainSESE() ) ) {
- findMore = false;
- }
+ // and there is no further ancestry beyond the main task
+ if( ancestor.equals(rblockRel.getMainSESE() ) ) {
+ findMore = false;
+ }
}
-
+
// this ancestor and its children are valid alternate sources
- alternateSESEs.add( ancestor );
+ alternateSESEs.add(ancestor);
while( childItr.hasNext() ) {
- FlatSESEEnterNode sibling = childItr.next();
- alternateSESEs.add( sibling );
+ FlatSESEEnterNode sibling = childItr.next();
+ alternateSESEs.add(sibling);
}
}
-
+
// VSTs to remove if they are alternate sources for exiter VSTs
// whose variables will become virtual reads
Set<VariableSourceToken> forRemoval = new HashSet<VariableSourceToken>();
// look at all of this SESE's VSTs at exit...
- Iterator<VariableSourceToken> vstItr = get( exiter ).iterator();
+ Iterator<VariableSourceToken> vstItr = get(exiter).iterator();
while( vstItr.hasNext() ) {
VariableSourceToken vstExiterSrc = vstItr.next();
// for each variable that might come from those sources...
Iterator<TempDescriptor> refVarItr = vstExiterSrc.getRefVars().iterator();
while( refVarItr.hasNext() ) {
- TempDescriptor refVar = refVarItr.next();
+ TempDescriptor refVar = refVarItr.next();
// only matters for live variables at SESE exit program point
- if( !liveVars.contains( refVar ) ) {
+ if( !liveVars.contains(refVar) ) {
continue;
}
// examine other sources for a variable...
- Iterator<VariableSourceToken> srcItr = get( refVar ).iterator();
+ Iterator<VariableSourceToken> srcItr = get(refVar).iterator();
while( srcItr.hasNext() ) {
VariableSourceToken vstPossibleOtherSrc = srcItr.next();
- if( vstPossibleOtherSrc.getSESE().equals( exiter ) &&
- vstPossibleOtherSrc.getAge() > 0
- ) {
- // this is an alternate source if its
- // an older instance of this SESE
- virtReadSet.add( refVar );
- forRemoval.add( vstPossibleOtherSrc );
-
- } else if( alternateSESEs.contains( vstPossibleOtherSrc.getSESE() ) ) {
+ if( vstPossibleOtherSrc.getSESE().equals(exiter) &&
+ vstPossibleOtherSrc.getAge() > 0
+ ) {
+	  // this is an alternate source if it's
+ // an older instance of this SESE
+ virtReadSet.add(refVar);
+ forRemoval.add(vstPossibleOtherSrc);
+
+ } else if( alternateSESEs.contains(vstPossibleOtherSrc.getSESE() ) ) {
// this is an alternate source from ancestor or ancestor's sibling
- virtReadSet.add( refVar );
- forRemoval.add( vstPossibleOtherSrc );
+ virtReadSet.add(refVar);
+ forRemoval.add(vstPossibleOtherSrc);
} else {
- if( !(vstPossibleOtherSrc.getSESE().equals( exiter ) &&
- vstPossibleOtherSrc.getAge().equals( 0 )
- )
- ) {
- System.out.println( "For refVar="+refVar+" at exit of "+exiter+
- ", unexpected possible variable source "+vstPossibleOtherSrc );
- assert false;
- }
+ if( !(vstPossibleOtherSrc.getSESE().equals(exiter) &&
+ vstPossibleOtherSrc.getAge().equals(0)
+ )
+ ) {
+ System.out.println("For refVar="+refVar+" at exit of "+exiter+
+ ", unexpected possible variable source "+vstPossibleOtherSrc);
+ assert false;
+ }
}
}
}
vstItr = forRemoval.iterator();
while( vstItr.hasNext() ) {
VariableSourceToken vst = vstItr.next();
- remove( vst );
+ remove(vst);
}
assertConsistency();
-
+
return virtReadSet;
}
-
+
// given a table from a subsequent program point, decide
// which variables are going from a non-dynamic to a
// dynamic source and return them
- public Hashtable<TempDescriptor, VSTWrapper>
- getReadyOrStatic2DynamicSet( VarSrcTokTable nextTable,
- Set<TempDescriptor> nextLiveIn,
- FlatSESEEnterNode current
- ) {
-
- Hashtable<TempDescriptor, VSTWrapper> out =
+ public Hashtable<TempDescriptor, VSTWrapper>
+ getReadyOrStatic2DynamicSet(VarSrcTokTable nextTable,
+ Set<TempDescriptor> nextLiveIn,
+ FlatSESEEnterNode current
+ ) {
+
+ Hashtable<TempDescriptor, VSTWrapper> out =
new Hashtable<TempDescriptor, VSTWrapper>();
-
+
Iterator itr = var2vst.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- TempDescriptor var = (TempDescriptor) me.getKey();
- HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>) me.getValue();
+ Map.Entry me = (Map.Entry)itr.next();
+ TempDescriptor var = (TempDescriptor) me.getKey();
+ HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>)me.getValue();
// only worth tracking if live
- if( nextLiveIn.contains( var ) ) {
-
- VSTWrapper vstIfStaticBefore = new VSTWrapper();
- VSTWrapper vstIfStaticAfter = new VSTWrapper();
+ if( nextLiveIn.contains(var) ) {
+
+ VSTWrapper vstIfStaticBefore = new VSTWrapper();
+ VSTWrapper vstIfStaticAfter = new VSTWrapper();
- Integer srcTypeBefore = this.getRefVarSrcType( var, current, vstIfStaticBefore );
- Integer srcTypeAfter = nextTable.getRefVarSrcType( var, current, vstIfStaticAfter );
+ Integer srcTypeBefore = this.getRefVarSrcType(var, current, vstIfStaticBefore);
+ Integer srcTypeAfter = nextTable.getRefVarSrcType(var, current, vstIfStaticAfter);
- if( !srcTypeBefore.equals( SrcType_DYNAMIC ) &&
- srcTypeAfter.equals( SrcType_DYNAMIC )
- ) {
+ if( !srcTypeBefore.equals(SrcType_DYNAMIC) &&
+ srcTypeAfter.equals(SrcType_DYNAMIC)
+ ) {
// remember the variable and a source
// it had before crossing the transition
- // 1) if it was ready, vstIfStatic.vst is null
- // 2) if is was static, use vstIfStatic.vst
- out.put( var, vstIfStaticBefore );
+ // 1) if it was ready, vstIfStatic.vst is null
+	// 2) if it was static, use vstIfStatic.vst
+ out.put(var, vstIfStaticBefore);
}
}
}
// a known age that will produce the value
// 3. Dynamic -- we don't know where the value will come
// from statically, so we'll track it dynamically
- public Integer getRefVarSrcType( TempDescriptor refVar,
- FlatSESEEnterNode currentSESE,
- VSTWrapper vstIfStatic ) {
+ public Integer getRefVarSrcType(TempDescriptor refVar,
+ FlatSESEEnterNode currentSESE,
+ VSTWrapper vstIfStatic) {
assert refVar != null;
assert vstIfStatic != null;
vstIfStatic.vst = null;
-
+
// when the current SESE is null, that simply means it is
// an unknown placeholder, in which case the system will
// ensure that any variables are READY
// comes from outside of any statically-known SESE scope,
// which means the system guarantees its READY, so jump over
// while loop
- Set<VariableSourceToken> srcs = get( refVar );
+ Set<VariableSourceToken> srcs = get(refVar);
Iterator<VariableSourceToken> itrSrcs = srcs.iterator();
while( itrSrcs.hasNext() ) {
VariableSourceToken vst = itrSrcs.next();
// one child token, there are two cases
// 1. if the current task invoked the local method context,
// its children are the locally-defined root tasks
- boolean case1 =
+ boolean case1 =
currentSESE.getIsCallerProxySESE() &&
- rblockRel.getLocalRootSESEs().contains( vst.getSESE() );
+ rblockRel.getLocalRootSESEs().contains(vst.getSESE() );
// 2. if the child task is a locally-defined child of the current task
- boolean case2 = currentSESE.getLocalChildren().contains( vst.getSESE() );
-
+ boolean case2 = currentSESE.getLocalChildren().contains(vst.getSESE() );
+
if( case1 || case2 ) {
-
- // if we ever have at least one child source with an
- // unknown age, have to treat var as dynamic
- if( vst.getAge().equals( OoOJavaAnalysis.maxSESEage ) ) {
- return SrcType_DYNAMIC;
- }
- // if we have a known-age child source, this var is
- // either static or dynamic now: it's static if this
- // source is the only source, otherwise dynamic
- if( srcs.size() > 1 ) {
- return SrcType_DYNAMIC;
- }
-
- vstIfStatic.vst = vst;
- return SrcType_STATIC;
+ // if we ever have at least one child source with an
+ // unknown age, have to treat var as dynamic
+ if( vst.getAge().equals(OoOJavaAnalysis.maxSESEage) ) {
+ return SrcType_DYNAMIC;
+ }
+
+ // if we have a known-age child source, this var is
+ // either static or dynamic now: it's static if this
+ // source is the only source, otherwise dynamic
+ if( srcs.size() > 1 ) {
+ return SrcType_DYNAMIC;
+ }
+
+ vstIfStatic.vst = vst;
+ return SrcType_STATIC;
}
}
// any reference variables that are not live can be pruned
- // from the table, and if any VSTs are then no longer
+ // from the table, and if any VSTs are then no longer
// referenced, they can be dropped as well
// THIS CAUSES INCONSISTENCY, FIX LATER, NOT REQUIRED
- public void pruneByLiveness( Set<TempDescriptor> rootLiveSet ) {
-
+ public void pruneByLiveness(Set<TempDescriptor> rootLiveSet) {
+
// the set of reference variables in the table minus the
// live set gives the set of reference variables to remove
Set<TempDescriptor> deadRefVars = new HashSet<TempDescriptor>();
- deadRefVars.addAll( var2vst.keySet() );
+ deadRefVars.addAll(var2vst.keySet() );
if( rootLiveSet != null ) {
- deadRefVars.removeAll( rootLiveSet );
+ deadRefVars.removeAll(rootLiveSet);
}
// just use the remove operation to prune the table now
Iterator<TempDescriptor> deadItr = deadRefVars.iterator();
while( deadItr.hasNext() ) {
TempDescriptor dead = deadItr.next();
- removePrivate( dead );
+ removePrivate(dead);
}
assertConsistency();
}
-
+
// use as an aid for debugging, where true-set is checked
}
/* public void assertConsistency() {
- Iterator itr;
- Set s;
+ Iterator itr;
+ Set s;
- Set<VariableSourceToken> trueSetByAlts = new HashSet<VariableSourceToken>();
- itr = sese2vst.entrySet().iterator();
- while( itr.hasNext() ) {
+ Set<VariableSourceToken> trueSetByAlts = new HashSet<VariableSourceToken>();
+ itr = sese2vst.entrySet().iterator();
+ while( itr.hasNext() ) {
Map.Entry me = (Map.Entry) itr.next();
FlatSESEEnterNode sese = (FlatSESEEnterNode) me.getKey();
- HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>) me.getValue();
+ HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>) me.getValue();
assert s1 != null;
-
+
// the trueSet should have all entries in s1
assert trueSet.containsAll( s1 );
// add s1 to a running union--at the end check if trueSet has extra
trueSetByAlts.addAll( s1 );
- }
- // make sure trueSet isn't too big
- assert trueSetByAlts.containsAll( trueSet );
+ }
+ // make sure trueSet isn't too big
+ assert trueSetByAlts.containsAll( trueSet );
- trueSetByAlts = new HashSet<VariableSourceToken>();
- itr = var2vst.entrySet().iterator();
- while( itr.hasNext() ) {
+ trueSetByAlts = new HashSet<VariableSourceToken>();
+ itr = var2vst.entrySet().iterator();
+ while( itr.hasNext() ) {
Map.Entry me = (Map.Entry) itr.next();
TempDescriptor var = (TempDescriptor) me.getKey();
- HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>) me.getValue();
+ HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>) me.getValue();
assert s1 != null;
-
+
// the trueSet should have all entries in s1
assert trueSet.containsAll( s1 );
// add s1 to a running union--at the end check if trueSet has extra
trueSetByAlts.addAll( s1 );
- }
- // make sure trueSet isn't too big
- assert trueSetByAlts.containsAll( trueSet );
+ }
+ // make sure trueSet isn't too big
+ assert trueSetByAlts.containsAll( trueSet );
- trueSetByAlts = new HashSet<VariableSourceToken>();
- itr = sv2vst.entrySet().iterator();
- while( itr.hasNext() ) {
+ trueSetByAlts = new HashSet<VariableSourceToken>();
+ itr = sv2vst.entrySet().iterator();
+ while( itr.hasNext() ) {
Map.Entry me = (Map.Entry) itr.next();
SVKey key = (SVKey) me.getKey();
- HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>) me.getValue();
+ HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>) me.getValue();
assert s1 != null;
-
+
// the trueSet should have all entries in s1
assert trueSet.containsAll( s1 );
// add s1 to a running union--at the end check if trueSet has extra
trueSetByAlts.addAll( s1 );
- }
- // make sure trueSet isn't too big
- assert trueSetByAlts.containsAll( trueSet );
+ }
+ // make sure trueSet isn't too big
+ assert trueSetByAlts.containsAll( trueSet );
- // also check that the reference var sets are consistent
- Hashtable<VariableSourceToken, Set<TempDescriptor> > vst2refVars =
+ // also check that the reference var sets are consistent
+ Hashtable<VariableSourceToken, Set<TempDescriptor> > vst2refVars =
new Hashtable<VariableSourceToken, Set<TempDescriptor> >();
- itr = var2vst.entrySet().iterator();
- while( itr.hasNext() ) {
+ itr = var2vst.entrySet().iterator();
+ while( itr.hasNext() ) {
Map.Entry me = (Map.Entry) itr.next();
TempDescriptor refVar = (TempDescriptor) me.getKey();
- HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>) me.getValue();
+ HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>) me.getValue();
Iterator<VariableSourceToken> vstItr = s1.iterator();
while( vstItr.hasNext() ) {
- VariableSourceToken vst = vstItr.next();
- assert vst.getRefVars().contains( refVar );
+ VariableSourceToken vst = vstItr.next();
+ assert vst.getRefVars().contains( refVar );
- Set<TempDescriptor> refVarsPart = vst2refVars.get( vst );
- if( refVarsPart == null ) {
- refVarsPart = new HashSet<TempDescriptor>();
- }
- refVarsPart.add( refVar );
- vst2refVars.put( vst, refVarsPart );
+ Set<TempDescriptor> refVarsPart = vst2refVars.get( vst );
+ if( refVarsPart == null ) {
+ refVarsPart = new HashSet<TempDescriptor>();
+ }
+ refVarsPart.add( refVar );
+ vst2refVars.put( vst, refVarsPart );
}
- }
- itr = vst2refVars.entrySet().iterator();
- while( itr.hasNext() ) {
+ }
+ itr = vst2refVars.entrySet().iterator();
+ while( itr.hasNext() ) {
Map.Entry me = (Map.Entry) itr.next();
VariableSourceToken vst = (VariableSourceToken) me.getKey();
Set<TempDescriptor> s1 = (Set<TempDescriptor>) me.getValue();
assert vst.getRefVars().equals( s1 );
- }
- }*/
+ }
+ }*/
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
}
VarSrcTokTable table = (VarSrcTokTable) o;
- return trueSet.equals( table.trueSet );
+ return trueSet.equals(table.trueSet);
}
public int hashCode() {
String tokHighlighter = "o";
String str = "VarSrcTokTable\n";
- Iterator<VariableSourceToken> vstItr = trueSet.iterator();
+ Iterator<VariableSourceToken> vstItr = trueSet.iterator();
while( vstItr.hasNext() ) {
str += " "+tokHighlighter+" "+vstItr.next()+"\n";
}
String str = "VarSrcTokTable\n";
Set s;
- Iterator itr;
+ Iterator itr;
Iterator<VariableSourceToken> vstItr;
str += " trueSet\n";
- vstItr = trueSet.iterator();
+ vstItr = trueSet.iterator();
while( vstItr.hasNext() ) {
str += " "+tokHighlighter+" "+vstItr.next()+"\n";
}
str += " sese2vst\n";
itr = sese2vst.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- FlatSESEEnterNode sese = (FlatSESEEnterNode) me.getKey();
- HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>) me.getValue();
+ Map.Entry me = (Map.Entry)itr.next();
+ FlatSESEEnterNode sese = (FlatSESEEnterNode) me.getKey();
+ HashSet<VariableSourceToken> s1 = (HashSet<VariableSourceToken>)me.getValue();
assert s1 != null;
str += " "+sese.getPrettyIdentifier()+" -> \n";
str += " var2vst\n";
itr = var2vst.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- TempDescriptor var = (TempDescriptor) me.getKey();
- Set<VariableSourceToken> s1 = (Set<VariableSourceToken>) me.getValue();
+ Map.Entry me = (Map.Entry)itr.next();
+ TempDescriptor var = (TempDescriptor) me.getKey();
+ Set<VariableSourceToken> s1 = (Set<VariableSourceToken>)me.getValue();
assert s1 != null;
str += " "+var+" -> \n";
str += " sv2vst\n";
itr = sv2vst.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- SVKey key = (SVKey) me.getKey();
- Set<VariableSourceToken> s1 = (Set<VariableSourceToken>) me.getValue();
+ Map.Entry me = (Map.Entry)itr.next();
+ SVKey key = (SVKey) me.getKey();
+ Set<VariableSourceToken> s1 = (Set<VariableSourceToken>)me.getValue();
assert s1 != null;
str += " "+key+" -> \n";
public class VariableSourceToken {
private Set<TempDescriptor> refVars;
- private FlatSESEEnterNode sese;
- private Integer seseAge;
- private TempDescriptor addrVar;
-
- public VariableSourceToken( Set<TempDescriptor> refVars,
- FlatSESEEnterNode sese,
- Integer seseAge,
- TempDescriptor addrVar
- ) {
+ private FlatSESEEnterNode sese;
+ private Integer seseAge;
+ private TempDescriptor addrVar;
+
+ public VariableSourceToken(Set<TempDescriptor> refVars,
+ FlatSESEEnterNode sese,
+ Integer seseAge,
+ TempDescriptor addrVar
+ ) {
this.refVars = refVars;
this.sese = sese;
this.seseAge = seseAge;
- this.addrVar = addrVar;
+ this.addrVar = addrVar;
}
public void setRefVars(Set<TempDescriptor> refVars) {
}
public VariableSourceToken copy() {
- return new VariableSourceToken( refVars,
- sese,
- new Integer( seseAge ),
- addrVar );
+ return new VariableSourceToken(refVars,
+ sese,
+ new Integer(seseAge),
+ addrVar);
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
VariableSourceToken vst = (VariableSourceToken) o;
// the reference vars have no bearing on equality
- return sese.equals( vst.sese ) &&
- addrVar.equals( vst.addrVar ) &&
- seseAge.equals( vst.seseAge );
+ return sese.equals(vst.sese) &&
+ addrVar.equals(vst.addrVar) &&
+ seseAge.equals(vst.seseAge);
}
public int hashCode() {
private int status;
private String dynID = "";
private TempDescriptor tempDesc;
-
+
// if scc/coarse represents more than one waiting elements
// ignored waiting element is inserted into a set of waiting element as a bogus element
private boolean bogus = false;
- // a set of tempDescriptors:
+ // a set of tempDescriptors:
// all associated with coarse conflicts for the same queue and the same sese
private Set<TempDescriptor> tempSet;
return hash;
}
-
- public boolean isBogus(){
+
+ public boolean isBogus() {
return bogus;
}
-
- public void setBogus(boolean b){
+
+ public void setBogus(boolean b) {
bogus=b;
}
public AccessPath() {
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
return true;
/*
- VariableSourceToken vst = (VariableSourceToken) o;
+ VariableSourceToken vst = (VariableSourceToken) o;
- // the reference vars have no bearing on equality
- return sese.equals( vst.sese ) &&
+ // the reference vars have no bearing on equality
+ return sese.equals( vst.sese ) &&
addrVar.equals( vst.addrVar ) &&
seseAge.equals( vst.seseAge );
- */
+ */
}
public int hashCode() {
public String toStringForDOT() {
/*
- if( disjointId != null ) {
- return "disjoint "+disjointId+"\\n"+toString()+"\\n"+getType().toPrettyString();
- } else {
- return toString()+"\\n"+getType().toPrettyString();
- }
- */
+ if( disjointId != null ) {
+ return "disjoint "+disjointId+"\\n"+toString()+"\\n"+getType().toPrettyString();
+ } else {
+ return toString()+"\\n"+getType().toPrettyString();
+ }
+ */
return "do";
- }
+ }
}
public static final int SHADOWAGE_in_I = -101;
public static final int SHADOWAGE_oldest = -102;
public static final int SHADOWAGE_summary = -103;
-
+
private boolean flag=false;
if( disjointId != null ) {
return "disjoint "+disjointId+"\\n"+toString()+"\\n"+getType().toPrettyString();
} else {
- return toString()+"\\n"+getType().toPrettyString();
+ return toString()+"\\n"+getType().toPrettyString();
}
}
-
- public void setFlag(boolean flag){
- this.flag=flag;
+
+ public void setFlag(boolean flag) {
+ this.flag=flag;
}
-
- public boolean getFlag(){
- return flag;
+
+ public boolean getFlag() {
+ return flag;
}
-
- public int getID(){
- return id;
+
+ public int getID() {
+ return id;
}
}
private static Hashtable<Canonical, Canonical> canon = new Hashtable<Canonical, Canonical>();
int canonicalvalue;
- private static int canonicalcount=1;
+ private static int canonicalcount=1;
public static Canonical makeCanonical(Canonical c) {
return c;
}
- static Hashtable<ReachOperation, ReachOperation> unionhash=new Hashtable<ReachOperation, ReachOperation>();
- static Hashtable<ReachOperation, ReachOperation> interhash=new Hashtable<ReachOperation, ReachOperation>();
- static Hashtable<CanonicalWrapper, CanonicalWrapper> lookuphash=new Hashtable<CanonicalWrapper, CanonicalWrapper>();
+ static Hashtable<ReachOperation, ReachOperation> unionhash=new Hashtable<ReachOperation, ReachOperation>();
+ static Hashtable<ReachOperation, ReachOperation> interhash=new Hashtable<ReachOperation, ReachOperation>();
+ static Hashtable<CanonicalWrapper, CanonicalWrapper> lookuphash=new Hashtable<CanonicalWrapper, CanonicalWrapper>();
}
\ No newline at end of file
package Analysis.OwnershipAnalysis;
public class CanonicalWrapper {
- Canonical a;
- public Canonical b;
-
- public CanonicalWrapper(Canonical a) {
- assert a.canonicalvalue!=0;
- this.a=a;
- }
- public int hashCode() {
- return a.canonicalvalue;
- }
- public boolean equals(Object o) {
- CanonicalWrapper ro=(CanonicalWrapper)o;
- return ro.a.canonicalvalue==a.canonicalvalue;
- }
+ Canonical a;
+ public Canonical b;
+
+ public CanonicalWrapper(Canonical a) {
+ assert a.canonicalvalue!=0;
+ this.a=a;
+ }
+ public int hashCode() {
+ return a.canonicalvalue;
+ }
+ public boolean equals(Object o) {
+ CanonicalWrapper ro=(CanonicalWrapper)o;
+ return ro.a.canonicalvalue==a.canonicalvalue;
+ }
}
\ No newline at end of file
public class EffectsKey {
- private String fd;
- private TypeDescriptor td;
- private Integer hrnId;
- private String hrnUniqueId;
- private int paramIden;
-
- public EffectsKey(String fd, TypeDescriptor td, Integer hrnId, String hrnUniqueId, int paramIden) {
- this.fd = fd;
- this.td = td;
- this.hrnId = hrnId;
- this.hrnUniqueId=hrnUniqueId;
- this.paramIden=paramIden;
- }
-
- public int getParamIden(){
- return paramIden;
- }
-
- public String getFieldDescriptor() {
- return fd;
- }
-
- public TypeDescriptor getTypeDescriptor() {
- return td;
- }
-
- public Integer getHRNId() {
- return hrnId;
- }
-
- public String getHRNUniqueId(){
- return hrnUniqueId;
- }
-
- public String toString() {
- return "(" + td + ")" + fd + "#" + hrnId;
- }
-
- public int hashCode() {
-
- int hash = 1;
-
- if (fd != null) {
- hash = hash * 31 + fd.hashCode();
- }
-
- if (td != null) {
- hash += td.getSymbol().hashCode();
- }
-
- if (hrnId != null) {
- hash += hrnId.hashCode();
- }
-
- return hash;
-
- }
-
- public boolean equals(Object o) {
-
- if (o == null) {
- return false;
- }
-
- if (!(o instanceof EffectsKey)) {
- return false;
- }
-
- EffectsKey in = (EffectsKey) o;
-
- if (fd.equals(in.getFieldDescriptor())
- && td.getSymbol().equals(in.getTypeDescriptor().getSymbol())
- && hrnId.equals(in.getHRNId())) {
- return true;
- } else {
- return false;
- }
-
- }
+ private String fd;
+ private TypeDescriptor td;
+ private Integer hrnId;
+ private String hrnUniqueId;
+ private int paramIden;
+
+ public EffectsKey(String fd, TypeDescriptor td, Integer hrnId, String hrnUniqueId, int paramIden) {
+ this.fd = fd;
+ this.td = td;
+ this.hrnId = hrnId;
+ this.hrnUniqueId=hrnUniqueId;
+ this.paramIden=paramIden;
+ }
+
+ public int getParamIden() {
+ return paramIden;
+ }
+
+ public String getFieldDescriptor() {
+ return fd;
+ }
+
+ public TypeDescriptor getTypeDescriptor() {
+ return td;
+ }
+
+ public Integer getHRNId() {
+ return hrnId;
+ }
+
+ public String getHRNUniqueId() {
+ return hrnUniqueId;
+ }
+
+ public String toString() {
+ return "(" + td + ")" + fd + "#" + hrnId;
+ }
+
+ public int hashCode() {
+
+ int hash = 1;
+
+ if (fd != null) {
+ hash = hash * 31 + fd.hashCode();
+ }
+
+ if (td != null) {
+ hash += td.getSymbol().hashCode();
+ }
+
+ if (hrnId != null) {
+ hash += hrnId.hashCode();
+ }
+
+ return hash;
+
+ }
+
+ public boolean equals(Object o) {
+
+ if (o == null) {
+ return false;
+ }
+
+ if (!(o instanceof EffectsKey)) {
+ return false;
+ }
+
+ EffectsKey in = (EffectsKey) o;
+
+ if (fd.equals(in.getFieldDescriptor())
+ && td.getSymbol().equals(in.getTypeDescriptor().getSymbol())
+ && hrnId.equals(in.getHRNId())) {
+ return true;
+ } else {
+ return false;
+ }
+
+ }
}
public class EffectsSet {
- private Hashtable<Integer, HashSet<EffectsKey>> readTable;
- private Hashtable<Integer, HashSet<EffectsKey>> writeTable;
- private Hashtable<Integer, HashSet<EffectsKey>> strongUpdateTable;
-
- public EffectsSet() {
- readTable = new Hashtable<Integer, HashSet<EffectsKey>>();
- writeTable = new Hashtable<Integer, HashSet<EffectsKey>>();
- strongUpdateTable = new Hashtable<Integer, HashSet<EffectsKey>>();
- }
-
- public void addReadingVar(Integer idx, EffectsKey access) {
- HashSet<EffectsKey> aSet = readTable.get(idx);
- if (aSet == null) {
- aSet = new HashSet<EffectsKey>();
- }
-
- aSet.add(access);
- readTable.put(idx, aSet);
- }
-
- public void addReadingEffectsSet(Integer idx, HashSet<EffectsKey> newSet) {
-
- if (newSet != null) {
- HashSet<EffectsKey> aSet = readTable.get(idx);
- if (aSet == null) {
- aSet = new HashSet<EffectsKey>();
- }
- aSet.addAll(newSet);
- readTable.put(idx, aSet);
- }
-
- }
-
- public void addWritingEffectsSet(Integer idx, HashSet<EffectsKey> newSet) {
-
- if (newSet != null) {
- HashSet<EffectsKey> aSet = writeTable.get(idx);
- if (aSet == null) {
- aSet = new HashSet<EffectsKey>();
- }
- aSet.addAll(newSet);
- writeTable.put(idx, aSet);
- }
-
- }
-
- public void addStrongUpdateEffectsSet(Integer idx, HashSet<EffectsKey> newSet) {
-
- if (newSet != null) {
- HashSet<EffectsKey> aSet = strongUpdateTable.get(idx);
- if (aSet == null) {
- aSet = new HashSet<EffectsKey>();
- }
- aSet.addAll(newSet);
- strongUpdateTable.put(idx, aSet);
- }
-
- }
-
-
- public Hashtable<Integer, HashSet<EffectsKey>> getReadTable() {
- return readTable;
- }
-
- public Hashtable<Integer, HashSet<EffectsKey>> getWriteTable() {
- return writeTable;
- }
-
- public Hashtable<Integer, HashSet<EffectsKey>> getStrongUpdateTable() {
- return strongUpdateTable;
- }
-
- public void addWritingVar(Integer idx, EffectsKey access) {
- HashSet<EffectsKey> aSet = writeTable.get(idx);
- if (aSet == null) {
- aSet = new HashSet<EffectsKey>();
- }
- aSet.add(access);
- writeTable.put(idx, aSet);
- }
-
- public void addStrongUpdateVar(Integer idx, EffectsKey access) {
- HashSet<EffectsKey> aSet = strongUpdateTable.get(idx);
- if (aSet == null) {
- aSet = new HashSet<EffectsKey>();
- }
- aSet.add(access);
- strongUpdateTable.put(idx, aSet);
- }
-
- public Set<EffectsKey> getReadingSet(Integer idx) {
- return readTable.get(idx);
- }
-
- public Set<EffectsKey> getWritingSet(Integer idx) {
- return writeTable.get(idx);
- }
-
- public Set<EffectsKey> getStrongUpdateSet(Integer idx) {
- return strongUpdateTable.get(idx);
- }
-
- public void printSet() {
- System.out.println("writeTable=>" + writeTable.hashCode());
-
- Set<Integer> keySet = readTable.keySet();
- Iterator<Integer> iter = keySet.iterator();
- while (iter.hasNext()) {
- Integer idx = iter.next();
- Set<EffectsKey> effectSet = readTable.get(idx);
- String keyStr = "{";
- if (effectSet != null) {
- Iterator<EffectsKey> effectIter = effectSet.iterator();
- while (effectIter.hasNext()) {
- EffectsKey key = effectIter.next();
- keyStr += " " + key;
- }
- } else {
- keyStr = "null";
- }
- System.out.println("param" + idx + " R=" + keyStr);
- }
-
- keySet = writeTable.keySet();
- System.out.println("# R keyset=" + keySet.size());
- iter = keySet.iterator();
- while (iter.hasNext()) {
- Integer idx = iter.next();
- Set<EffectsKey> effectSet = writeTable.get(idx);
- String keyStr = "{";
- if (effectSet != null) {
- Iterator<EffectsKey> effectIter = effectSet.iterator();
- while (effectIter.hasNext()) {
- EffectsKey key = effectIter.next();
- keyStr += " " + key;
- }
- } else {
- keyStr = "null";
- }
- System.out.println("param" + idx + " W=" + keyStr);
- }
-
- }
-
- public boolean equals(Object o) {
- if (o == null) {
- return false;
- }
-
- if (!(o instanceof EffectsSet)) {
- return false;
- }
-
- EffectsSet in = (EffectsSet) o;
-
- if (getReadTable().equals(in.getReadTable())
- && getWriteTable().equals(in.getWriteTable())
- && getStrongUpdateTable().equals(in.getStrongUpdateTable())) {
- return true;
- } else {
- return false;
- }
-
- }
-
- public int hashCode() {
- int hash = 1;
-
- hash += getReadTable().hashCode() + getWriteTable().hashCode() * 31 + getStrongUpdateTable().hashCode();
-
- return hash;
- }
+ private Hashtable<Integer, HashSet<EffectsKey>> readTable;
+ private Hashtable<Integer, HashSet<EffectsKey>> writeTable;
+ private Hashtable<Integer, HashSet<EffectsKey>> strongUpdateTable;
+
+ public EffectsSet() {
+ readTable = new Hashtable<Integer, HashSet<EffectsKey>>();
+ writeTable = new Hashtable<Integer, HashSet<EffectsKey>>();
+ strongUpdateTable = new Hashtable<Integer, HashSet<EffectsKey>>();
+ }
+
+ public void addReadingVar(Integer idx, EffectsKey access) {
+ HashSet<EffectsKey> aSet = readTable.get(idx);
+ if (aSet == null) {
+ aSet = new HashSet<EffectsKey>();
+ }
+
+ aSet.add(access);
+ readTable.put(idx, aSet);
+ }
+
+ public void addReadingEffectsSet(Integer idx, HashSet<EffectsKey> newSet) {
+
+ if (newSet != null) {
+ HashSet<EffectsKey> aSet = readTable.get(idx);
+ if (aSet == null) {
+ aSet = new HashSet<EffectsKey>();
+ }
+ aSet.addAll(newSet);
+ readTable.put(idx, aSet);
+ }
+
+ }
+
+ public void addWritingEffectsSet(Integer idx, HashSet<EffectsKey> newSet) {
+
+ if (newSet != null) {
+ HashSet<EffectsKey> aSet = writeTable.get(idx);
+ if (aSet == null) {
+ aSet = new HashSet<EffectsKey>();
+ }
+ aSet.addAll(newSet);
+ writeTable.put(idx, aSet);
+ }
+
+ }
+
+ public void addStrongUpdateEffectsSet(Integer idx, HashSet<EffectsKey> newSet) {
+
+ if (newSet != null) {
+ HashSet<EffectsKey> aSet = strongUpdateTable.get(idx);
+ if (aSet == null) {
+ aSet = new HashSet<EffectsKey>();
+ }
+ aSet.addAll(newSet);
+ strongUpdateTable.put(idx, aSet);
+ }
+
+ }
+
+
+ public Hashtable<Integer, HashSet<EffectsKey>> getReadTable() {
+ return readTable;
+ }
+
+ public Hashtable<Integer, HashSet<EffectsKey>> getWriteTable() {
+ return writeTable;
+ }
+
+ public Hashtable<Integer, HashSet<EffectsKey>> getStrongUpdateTable() {
+ return strongUpdateTable;
+ }
+
+ public void addWritingVar(Integer idx, EffectsKey access) {
+ HashSet<EffectsKey> aSet = writeTable.get(idx);
+ if (aSet == null) {
+ aSet = new HashSet<EffectsKey>();
+ }
+ aSet.add(access);
+ writeTable.put(idx, aSet);
+ }
+
+ public void addStrongUpdateVar(Integer idx, EffectsKey access) {
+ HashSet<EffectsKey> aSet = strongUpdateTable.get(idx);
+ if (aSet == null) {
+ aSet = new HashSet<EffectsKey>();
+ }
+ aSet.add(access);
+ strongUpdateTable.put(idx, aSet);
+ }
+
+ public Set<EffectsKey> getReadingSet(Integer idx) {
+ return readTable.get(idx);
+ }
+
+ public Set<EffectsKey> getWritingSet(Integer idx) {
+ return writeTable.get(idx);
+ }
+
+ public Set<EffectsKey> getStrongUpdateSet(Integer idx) {
+ return strongUpdateTable.get(idx);
+ }
+
+ public void printSet() {
+ System.out.println("writeTable=>" + writeTable.hashCode());
+
+ Set<Integer> keySet = readTable.keySet();
+ Iterator<Integer> iter = keySet.iterator();
+ while (iter.hasNext()) {
+ Integer idx = iter.next();
+ Set<EffectsKey> effectSet = readTable.get(idx);
+ String keyStr = "{";
+ if (effectSet != null) {
+ Iterator<EffectsKey> effectIter = effectSet.iterator();
+ while (effectIter.hasNext()) {
+ EffectsKey key = effectIter.next();
+ keyStr += " " + key;
+ }
+ } else {
+ keyStr = "null";
+ }
+ System.out.println("param" + idx + " R=" + keyStr);
+ }
+
+ keySet = writeTable.keySet();
+ System.out.println("# R keyset=" + keySet.size());
+ iter = keySet.iterator();
+ while (iter.hasNext()) {
+ Integer idx = iter.next();
+ Set<EffectsKey> effectSet = writeTable.get(idx);
+ String keyStr = "{";
+ if (effectSet != null) {
+ Iterator<EffectsKey> effectIter = effectSet.iterator();
+ while (effectIter.hasNext()) {
+ EffectsKey key = effectIter.next();
+ keyStr += " " + key;
+ }
+ } else {
+ keyStr = "null";
+ }
+ System.out.println("param" + idx + " W=" + keyStr);
+ }
+
+ }
+
+ public boolean equals(Object o) {
+ if (o == null) {
+ return false;
+ }
+
+ if (!(o instanceof EffectsSet)) {
+ return false;
+ }
+
+ EffectsSet in = (EffectsSet) o;
+
+ if (getReadTable().equals(in.getReadTable())
+ && getWriteTable().equals(in.getWriteTable())
+ && getStrongUpdateTable().equals(in.getStrongUpdateTable())) {
+ return true;
+ } else {
+ return false;
+ }
+
+ }
+
+ public int hashCode() {
+ int hash = 1;
+
+ hash += getReadTable().hashCode() + getWriteTable().hashCode() * 31 + getStrongUpdateTable().hashCode();
+
+ return hash;
+ }
}
protected ReachabilitySet alphaNew;
protected String description;
-
+
protected String globalIdentifier;
public HeapRegionNode(Integer id,
boolean isSingleObject,
boolean isFlagged,
- boolean isParameter,
+ boolean isParameter,
boolean isNewSummary,
- TypeDescriptor type,
+ TypeDescriptor type,
AllocationSite allocSite,
ReachabilitySet alpha,
String description,
return new HeapRegionNode(id,
isSingleObject,
isFlagged,
- isParameter,
+ isParameter,
isNewSummary,
- type,
+ type,
allocSite,
alpha,
description,
public ReferenceEdge getReferenceFrom(OwnershipNode on,
TypeDescriptor type,
- String field) {
+ String field) {
assert on != null;
Iterator<ReferenceEdge> itrEdge = referencers.iterator();
while( itrEdge.hasNext() ) {
ReferenceEdge edge = itrEdge.next();
if( edge.getSrc().equals(on) &&
- edge.typeEquals(type) &&
+ edge.typeEquals(type) &&
edge.fieldEquals(field) ) {
return edge;
}
public TypeDescriptor getType() {
return type;
- }
+ }
public AllocationSite getAllocationSite() {
return allocSite;
return s;
}
- public String getAlphaString( boolean hideSubsetReachability ) {
+ public String getAlphaString(boolean hideSubsetReachability) {
return alpha.toStringEscapeNewline(hideSubsetReachability);
}
return new String(description);
//return new String( description+" ID "+getIDString() );
}
-
- public String getGloballyUniqueIdentifier(){
- return globalIdentifier;
- }
-
- public long getGloballyUniqueIntegerIdentifier() {
- String fristpart = globalIdentifier;
- fristpart = fristpart.replaceAll("FN", "1");
- fristpart = fristpart.replaceAll("FM", "2");
- int idx = fristpart.indexOf(".");
- String endpart = fristpart.substring(idx + 1);
- endpart = endpart.replaceAll("S", "1");
- endpart = endpart.replaceAll("P", "2");
- endpart = endpart.replaceAll("A", "3");
- endpart = endpart.replace(".", "");
- String modified = fristpart.substring(0, idx) + endpart;
- return Long.parseLong(modified);
- }
+
+ public String getGloballyUniqueIdentifier() {
+ return globalIdentifier;
+ }
+
+ public long getGloballyUniqueIntegerIdentifier() {
+ String fristpart = globalIdentifier;
+ fristpart = fristpart.replaceAll("FN", "1");
+ fristpart = fristpart.replaceAll("FM", "2");
+ int idx = fristpart.indexOf(".");
+ String endpart = fristpart.substring(idx + 1);
+ endpart = endpart.replaceAll("S", "1");
+ endpart = endpart.replaceAll("P", "2");
+ endpart = endpart.replaceAll("A", "3");
+ endpart = endpart.replace(".", "");
+ String modified = fristpart.substring(0, idx) + endpart;
+ return Long.parseLong(modified);
+ }
}
private Set aliasedParameterIndices;
- public MethodContext( Descriptor d ) {
+ public MethodContext(Descriptor d) {
descMethodOrTask = d;
aliasedParameterIndices = new HashSet();
}
- public MethodContext( Descriptor d, Set a ) {
+ public MethodContext(Descriptor d, Set a) {
descMethodOrTask = d;
aliasedParameterIndices = a;
}
public Set getAliasedParamIndices() {
return aliasedParameterIndices;
- }
+ }
public boolean equals(Object o) {
MethodContext mc = (MethodContext) o;
- return mc.descMethodOrTask.equals( descMethodOrTask ) &&
- mc.aliasedParameterIndices.equals( aliasedParameterIndices );
+ return mc.descMethodOrTask.equals(descMethodOrTask) &&
+ mc.aliasedParameterIndices.equals(aliasedParameterIndices);
}
-
+
public int hashCode() {
- return descMethodOrTask.hashCode() ^
- aliasedParameterIndices.hashCode();
+ return descMethodOrTask.hashCode() ^
+ aliasedParameterIndices.hashCode();
}
public String toString() {
if( descMethodOrTask instanceof TaskDescriptor ) {
return descMethodOrTask.getSymbol()+
- descMethodOrTask.getNum()+
- getAliasString();
+ descMethodOrTask.getNum()+
+ getAliasString();
} else {
MethodDescriptor md = (MethodDescriptor) descMethodOrTask;
return md.getClassMethodName()+
- md.getNum()+
- getAliasString();
+ md.getNum()+
+ getAliasString();
}
}
}
private int priority;
private MethodContext mc;
- public MethodContextQWrapper( Integer p, MethodContext m ) {
+ public MethodContextQWrapper(Integer p, MethodContext m) {
priority = p.intValue();
mc = m;
}
- public MethodContextQWrapper( int p, MethodContext m ) {
+ public MethodContextQWrapper(int p, MethodContext m) {
priority = p;
mc = m;
}
public MethodContext getMethodContext() {
return mc;
}
-
- public int compareTo( Object o ) throws ClassCastException {
+
+ public int compareTo(Object o) throws ClassCastException {
if( !(o instanceof MethodContextQWrapper) ) {
throw new ClassCastException();
if( !( o instanceof MethodContextQWrapper) ) {
return false;
}
-
+
MethodContextQWrapper mcqw = (MethodContextQWrapper) o;
- return mc.equals( mcqw.mc );
- }
+ return mc.equals(mcqw.mc);
+ }
}
public class MethodEffects {
- private EffectsSet effectsSet;
+ private EffectsSet effectsSet;
- public MethodEffects() {
- effectsSet = new EffectsSet();
- }
+ public MethodEffects() {
+ effectsSet = new EffectsSet();
+ }
+
+ public EffectsSet getEffects() {
+ return effectsSet;
+ }
+
+ public void analyzeFlatFieldNode(OwnershipGraph og, TempDescriptor srcDesc,
+ FieldDescriptor fieldDesc) {
+
+ LabelNode ln = getLabelNodeFromTemp(og, srcDesc);
+ if (ln != null) {
+ Iterator<ReferenceEdge> heapRegionsItr = ln.iteratorToReferencees();
+
+ while (heapRegionsItr.hasNext()) {
+ ReferenceEdge edge = heapRegionsItr.next();
+ HeapRegionNode hrn = edge.getDst();
+
+ if (hrn.isParameter()) {
+ Set<Integer> paramSet = og.idPrimary2paramIndexSet.get(hrn
+ .getID());
+
+ if (paramSet != null) {
+ Iterator<Integer> paramIter = paramSet.iterator();
+ while (paramIter.hasNext()) {
+ Integer paramID = paramIter.next();
+ effectsSet.addReadingVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), srcDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
+
+ }
+ }
+
+ // check weather this heap region is parameter
+ // reachable...
+
+ paramSet = og.idSecondary2paramIndexSet.get(hrn.getID());
+ if (paramSet != null) {
+ Iterator<Integer> paramIter = paramSet.iterator();
+
+ while (paramIter.hasNext()) {
+ Integer paramID = paramIter.next();
+ effectsSet.addReadingVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), srcDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),1));
+
+ }
+ }
- public EffectsSet getEffects() {
- return effectsSet;
}
+ }
+ }
+
+ }
+
+ public void analyzeFlatElementNode(OwnershipGraph og,
+ TempDescriptor srcDesc, FieldDescriptor fieldDesc) {
+
+ LabelNode ln = getLabelNodeFromTemp(og, srcDesc);
+ if (ln != null) {
+ Iterator<ReferenceEdge> heapRegionsItr = ln.iteratorToReferencees();
- public void analyzeFlatFieldNode(OwnershipGraph og, TempDescriptor srcDesc,
- FieldDescriptor fieldDesc) {
+ while (heapRegionsItr.hasNext()) {
+ ReferenceEdge edge = heapRegionsItr.next();
+ HeapRegionNode hrn = edge.getDst();
- LabelNode ln = getLabelNodeFromTemp(og, srcDesc);
- if (ln != null) {
- Iterator<ReferenceEdge> heapRegionsItr = ln.iteratorToReferencees();
+ if (hrn.isParameter()) {
+ Set<Integer> paramSet = og.idPrimary2paramIndexSet.get(hrn
+ .getID());
- while (heapRegionsItr.hasNext()) {
- ReferenceEdge edge = heapRegionsItr.next();
- HeapRegionNode hrn = edge.getDst();
+ if (paramSet != null) {
+ Iterator<Integer> paramIter = paramSet.iterator();
+ while (paramIter.hasNext()) {
+ Integer paramID = paramIter.next();
+ effectsSet.addReadingVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), srcDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
+ }
+ }
- if (hrn.isParameter()) {
- Set<Integer> paramSet = og.idPrimary2paramIndexSet.get(hrn
- .getID());
+ // check weather this heap region is parameter
+ // reachable...
+
+ paramSet = og.idSecondary2paramIndexSet.get(hrn.getID());
+ if (paramSet != null) {
+ Iterator<Integer> paramIter = paramSet.iterator();
+
+ while (paramIter.hasNext()) {
+ Integer paramID = paramIter.next();
+ effectsSet.addReadingVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), srcDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),1));
+ }
+ }
+
+ }
+ }
+ }
- if (paramSet != null) {
- Iterator<Integer> paramIter = paramSet.iterator();
- while (paramIter.hasNext()) {
- Integer paramID = paramIter.next();
- effectsSet.addReadingVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), srcDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
- }
- }
- // check weather this heap region is parameter
- // reachable...
- paramSet = og.idSecondary2paramIndexSet.get(hrn.getID());
- if (paramSet != null) {
- Iterator<Integer> paramIter = paramSet.iterator();
+ }
- while (paramIter.hasNext()) {
- Integer paramID = paramIter.next();
- effectsSet.addReadingVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), srcDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),1));
+ public void analyzeFlatSetElementNode(OwnershipGraph og,
+ TempDescriptor dstDesc, FieldDescriptor fieldDesc) {
- }
- }
+ LabelNode ln = getLabelNodeFromTemp(og, dstDesc);
+ if (ln != null) {
- }
- }
- }
+ // / check possible strong updates
+ boolean strongUpdate = false;
+ if (!fieldDesc.getType().isImmutable()
+ || fieldDesc.getType().isArray()) {
+ Iterator<ReferenceEdge> itrXhrn = ln.iteratorToReferencees();
+ while (itrXhrn.hasNext()) {
+ ReferenceEdge edgeX = itrXhrn.next();
+ HeapRegionNode hrnX = edgeX.getDst();
+ if (fieldDesc != null
+ && fieldDesc != OwnershipAnalysis
+ .getArrayField(fieldDesc.getType())
+ && ((hrnX.getNumReferencers() == 1) || // case 1
+ (hrnX.isSingleObject() && ln.getNumReferencees() == 1) // case
+ // 2
+ )) {
+ strongUpdate = true;
+ }
}
-
- public void analyzeFlatElementNode(OwnershipGraph og,
- TempDescriptor srcDesc, FieldDescriptor fieldDesc){
-
- LabelNode ln = getLabelNodeFromTemp(og, srcDesc);
- if (ln != null) {
- Iterator<ReferenceEdge> heapRegionsItr = ln.iteratorToReferencees();
-
- while (heapRegionsItr.hasNext()) {
- ReferenceEdge edge = heapRegionsItr.next();
- HeapRegionNode hrn = edge.getDst();
-
- if (hrn.isParameter()) {
- Set<Integer> paramSet = og.idPrimary2paramIndexSet.get(hrn
- .getID());
-
- if (paramSet != null) {
- Iterator<Integer> paramIter = paramSet.iterator();
- while (paramIter.hasNext()) {
- Integer paramID = paramIter.next();
- effectsSet.addReadingVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), srcDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
- }
- }
-
- // check weather this heap region is parameter
- // reachable...
-
- paramSet = og.idSecondary2paramIndexSet.get(hrn.getID());
- if (paramSet != null) {
- Iterator<Integer> paramIter = paramSet.iterator();
-
- while (paramIter.hasNext()) {
- Integer paramID = paramIter.next();
- effectsSet.addReadingVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), srcDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),1));
- }
- }
-
- }
- }
- }
-
-
-
-
+ }
+ // //
+
+ Iterator<ReferenceEdge> heapRegionsItr = ln.iteratorToReferencees();
+ while (heapRegionsItr.hasNext()) {
+ ReferenceEdge edge = heapRegionsItr.next();
+ HeapRegionNode hrn = edge.getDst();
+
+ if (hrn.isParameter()) {
+ Set<Integer> paramSet = og.idPrimary2paramIndexSet.get(hrn
+ .getID());
+
+ if (paramSet != null) {
+ Iterator<Integer> paramIter = paramSet.iterator();
+ while (paramIter.hasNext()) {
+ Integer paramID = paramIter.next();
+ effectsSet.addWritingVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), dstDesc.getType(),
+ hrn.getID(), hrn
+ .getGloballyUniqueIdentifier(), 0));
+ if(strongUpdate) {
+ effectsSet.addStrongUpdateVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
+ }
+ }
+ }
+
+ // check weather this heap region is parameter
+ // reachable...
+
+ paramSet = og.idSecondary2paramIndexSet.get(hrn.getID());
+ if (paramSet != null) {
+ Iterator<Integer> paramIter = paramSet.iterator();
+
+ while (paramIter.hasNext()) {
+ Integer paramID = paramIter.next();
+ effectsSet.addWritingVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), dstDesc.getType(),
+ hrn.getID(), hrn
+ .getGloballyUniqueIdentifier(), 1));
+ if(strongUpdate) {
+ effectsSet.addStrongUpdateVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
+ }
+ }
+ }
+
}
-
- public void analyzeFlatSetElementNode(OwnershipGraph og,
- TempDescriptor dstDesc, FieldDescriptor fieldDesc) {
-
- LabelNode ln = getLabelNodeFromTemp(og, dstDesc);
- if (ln != null) {
-
- // / check possible strong updates
- boolean strongUpdate = false;
- if (!fieldDesc.getType().isImmutable()
- || fieldDesc.getType().isArray()) {
- Iterator<ReferenceEdge> itrXhrn = ln.iteratorToReferencees();
- while (itrXhrn.hasNext()) {
- ReferenceEdge edgeX = itrXhrn.next();
- HeapRegionNode hrnX = edgeX.getDst();
-
- if (fieldDesc != null
- && fieldDesc != OwnershipAnalysis
- .getArrayField(fieldDesc.getType())
- && ((hrnX.getNumReferencers() == 1) || // case 1
- (hrnX.isSingleObject() && ln.getNumReferencees() == 1) // case
- // 2
- )) {
- strongUpdate = true;
- }
- }
- }
- // //
-
- Iterator<ReferenceEdge> heapRegionsItr = ln.iteratorToReferencees();
- while (heapRegionsItr.hasNext()) {
- ReferenceEdge edge = heapRegionsItr.next();
- HeapRegionNode hrn = edge.getDst();
-
- if (hrn.isParameter()) {
- Set<Integer> paramSet = og.idPrimary2paramIndexSet.get(hrn
- .getID());
-
- if (paramSet != null) {
- Iterator<Integer> paramIter = paramSet.iterator();
- while (paramIter.hasNext()) {
- Integer paramID = paramIter.next();
- effectsSet.addWritingVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), dstDesc.getType(),
- hrn.getID(), hrn
- .getGloballyUniqueIdentifier(), 0));
- if(strongUpdate){
- effectsSet.addStrongUpdateVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
- }
- }
- }
-
- // check weather this heap region is parameter
- // reachable...
-
- paramSet = og.idSecondary2paramIndexSet.get(hrn.getID());
- if (paramSet != null) {
- Iterator<Integer> paramIter = paramSet.iterator();
-
- while (paramIter.hasNext()) {
- Integer paramID = paramIter.next();
- effectsSet.addWritingVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), dstDesc.getType(),
- hrn.getID(), hrn
- .getGloballyUniqueIdentifier(), 1));
- if(strongUpdate){
- effectsSet.addStrongUpdateVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
- }
- }
- }
-
- }
-
- }
- }
+ }
+ }
+
+ }
+
+ public void analyzeFlatSetFieldNode(OwnershipGraph og,
+ TempDescriptor dstDesc, FieldDescriptor fieldDesc) {
+
+ LabelNode ln = getLabelNodeFromTemp(og, dstDesc);
+ if (ln != null) {
+ /// check possible strong updates
+ boolean strongUpdate = false;
+ if( !fieldDesc.getType().isImmutable() || fieldDesc.getType().isArray() ) {
+ Iterator<ReferenceEdge> itrXhrn = ln.iteratorToReferencees();
+ while( itrXhrn.hasNext() ) {
+ ReferenceEdge edgeX = itrXhrn.next();
+ HeapRegionNode hrnX = edgeX.getDst();
+
+ if( fieldDesc != null &&
+ fieldDesc != OwnershipAnalysis.getArrayField(fieldDesc.getType() ) &&
+ ( (hrnX.getNumReferencers() == 1) || // case 1
+ (hrnX.isSingleObject() && ln.getNumReferencees() == 1) // case 2
+ )
+ ) {
+ strongUpdate = true;
+ }
}
+ }
+ ////
- public void analyzeFlatSetFieldNode(OwnershipGraph og,
- TempDescriptor dstDesc, FieldDescriptor fieldDesc) {
-
- LabelNode ln = getLabelNodeFromTemp(og, dstDesc);
- if (ln != null) {
- /// check possible strong updates
- boolean strongUpdate = false;
- if( !fieldDesc.getType().isImmutable() || fieldDesc.getType().isArray() ) {
- Iterator<ReferenceEdge> itrXhrn = ln.iteratorToReferencees();
- while( itrXhrn.hasNext() ) {
- ReferenceEdge edgeX = itrXhrn.next();
- HeapRegionNode hrnX = edgeX.getDst();
-
- if( fieldDesc != null &&
- fieldDesc != OwnershipAnalysis.getArrayField( fieldDesc.getType() ) &&
- ( (hrnX.getNumReferencers() == 1) || // case 1
- (hrnX.isSingleObject() && ln.getNumReferencees() == 1) // case 2
- )
- ) {
- strongUpdate = true;
- }
- }
- }
- ////
-
- Iterator<ReferenceEdge> heapRegionsItr = ln.iteratorToReferencees();
-
- while (heapRegionsItr.hasNext()) {
- ReferenceEdge edge = heapRegionsItr.next();
- HeapRegionNode hrn = edge.getDst();
-
- if (hrn.isParameter()) {
-
- Set<Integer> paramSet = og.idPrimary2paramIndexSet.get(hrn
- .getID());
-
- if (paramSet != null) {
- Iterator<Integer> paramIter = paramSet.iterator();
- while (paramIter.hasNext()) {
- Integer paramID = paramIter.next();
- effectsSet.addWritingVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
- if(strongUpdate){
- effectsSet.addStrongUpdateVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
- }
-
- }
- }
-
- // check weather this heap region is parameter
- // reachable...
-
- paramSet = og.idSecondary2paramIndexSet.get(hrn.getID());
- if (paramSet != null) {
- Iterator<Integer> paramIter = paramSet.iterator();
-
- while (paramIter.hasNext()) {
- Integer paramID = paramIter.next();
- effectsSet.addWritingVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),1));
- if(strongUpdate){
- effectsSet.addStrongUpdateVar(paramID, new EffectsKey(
- fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),1));
- }
-
- }
- }
-
- }
- }
- }
+ Iterator<ReferenceEdge> heapRegionsItr = ln.iteratorToReferencees();
- }
+ while (heapRegionsItr.hasNext()) {
+ ReferenceEdge edge = heapRegionsItr.next();
+ HeapRegionNode hrn = edge.getDst();
+
+ if (hrn.isParameter()) {
+
+ Set<Integer> paramSet = og.idPrimary2paramIndexSet.get(hrn
+ .getID());
+
+ if (paramSet != null) {
+ Iterator<Integer> paramIter = paramSet.iterator();
+ while (paramIter.hasNext()) {
+ Integer paramID = paramIter.next();
+ effectsSet.addWritingVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
+ if(strongUpdate) {
+ effectsSet.addStrongUpdateVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),0));
+ }
+
+ }
+ }
+
+ // check weather this heap region is parameter
+ // reachable...
+
+ paramSet = og.idSecondary2paramIndexSet.get(hrn.getID());
+ if (paramSet != null) {
+ Iterator<Integer> paramIter = paramSet.iterator();
+
+ while (paramIter.hasNext()) {
+ Integer paramID = paramIter.next();
+ effectsSet.addWritingVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),1));
+ if(strongUpdate) {
+ effectsSet.addStrongUpdateVar(paramID, new EffectsKey(
+ fieldDesc.getSymbol(), dstDesc.getType(),hrn.getID(),hrn.getGloballyUniqueIdentifier(),1));
+ }
- private Set<Integer> getReachableParamIndexSet(OwnershipGraph og,
- TempDescriptor paramDesc) {
+ }
+ }
- HashSet<Integer> resultSet = new HashSet<Integer>();
+ }
+ }
+ }
- LabelNode ln = getLabelNodeFromTemp(og, paramDesc);
- if (ln != null) {
+ }
- Iterator<ReferenceEdge> heapRegionsItr = ln.iteratorToReferencees();
+ private Set<Integer> getReachableParamIndexSet(OwnershipGraph og,
+ TempDescriptor paramDesc) {
- while (heapRegionsItr.hasNext()) {
- ReferenceEdge edge = heapRegionsItr.next();
- HeapRegionNode hrn = edge.getDst();
+ HashSet<Integer> resultSet = new HashSet<Integer>();
- if (hrn.isParameter()) {
+ LabelNode ln = getLabelNodeFromTemp(og, paramDesc);
+ if (ln != null) {
- Set<Integer> paramSet = og.idPrimary2paramIndexSet.get(hrn
- .getID());
+ Iterator<ReferenceEdge> heapRegionsItr = ln.iteratorToReferencees();
- if (paramSet != null) {
- Iterator<Integer> paramIter = paramSet.iterator();
- while (paramIter.hasNext()) {
- Integer paramID = paramIter.next();
+ while (heapRegionsItr.hasNext()) {
+ ReferenceEdge edge = heapRegionsItr.next();
+ HeapRegionNode hrn = edge.getDst();
- resultSet.add(paramID);
+ if (hrn.isParameter()) {
- }
- }
+ Set<Integer> paramSet = og.idPrimary2paramIndexSet.get(hrn
+ .getID());
- // check weather this heap region is parameter
- // reachable...
+ if (paramSet != null) {
+ Iterator<Integer> paramIter = paramSet.iterator();
+ while (paramIter.hasNext()) {
+ Integer paramID = paramIter.next();
- paramSet = og.idSecondary2paramIndexSet.get(hrn.getID());
- if (paramSet != null) {
- Iterator<Integer> paramIter = paramSet.iterator();
+ resultSet.add(paramID);
- while (paramIter.hasNext()) {
- Integer paramID = paramIter.next();
+ }
+ }
- resultSet.add(paramID);
+ // check weather this heap region is parameter
+ // reachable...
- }
- }
+ paramSet = og.idSecondary2paramIndexSet.get(hrn.getID());
+ if (paramSet != null) {
+ Iterator<Integer> paramIter = paramSet.iterator();
- }
- }
+ while (paramIter.hasNext()) {
+ Integer paramID = paramIter.next();
- }
+ resultSet.add(paramID);
- return resultSet;
+ }
+ }
}
+ }
+
+ }
+
+ return resultSet;
+
+ }
+
+ public void analyzeFlatCall(OwnershipGraph og, FlatCall fc,
+ MethodContext mc, MethodEffects callee) {
+
+ TempDescriptor[] tdArray = fc.readsTemps();
+
+ for (int calleeParamIdx = 0; calleeParamIdx < tdArray.length; calleeParamIdx++) {
+ TempDescriptor paramDesc = tdArray[calleeParamIdx];
+
+ Set<Integer> paramIDs = getReachableParamIndexSet(og, paramDesc);
+
+ // handle read effects
+ Iterator<Integer> paramIter = paramIDs.iterator();
+ while (paramIter.hasNext()) {
+ Integer paramIdx = paramIter.next();
+ HashSet<EffectsKey> newSet = callee.getEffects().getReadTable()
+ .get(calleeParamIdx);
+
+
+ if(newSet!=null) {
+ HashSet<EffectsKey> thisSet=new HashSet<EffectsKey>();
+ HeapRegionNode priHRN=og.id2hrn.get(og.paramIndex2idPrimary.get(paramIdx));
+ Integer secIdx=og.paramIndex2idSecondary.get(paramIdx);
+ HeapRegionNode secHRN=null;
+ if(secIdx!=null) {
+ secHRN=og.id2hrn.get(secIdx);
+ } else {
+ secHRN=priHRN;
+ }
+
+ for (Iterator iterator = newSet.iterator(); iterator.hasNext(); ) {
+ EffectsKey effectsKey = (EffectsKey) iterator.next();
+ HeapRegionNode hrnTemp;
+ if(effectsKey.getParamIden()==0) { //primary
+ hrnTemp=priHRN;
+ } else { //secondary
+ hrnTemp=secHRN;
+ }
+ EffectsKey newEffectsKey;
+ if(secIdx==null) {
+ newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),0);
+ } else {
+ newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),effectsKey.getParamIden());
+ }
+ thisSet.add(newEffectsKey);
+ }
+
+ effectsSet.addReadingEffectsSet(paramIdx, thisSet);
+ }
- public void analyzeFlatCall(OwnershipGraph og, FlatCall fc,
- MethodContext mc, MethodEffects callee) {
-
- TempDescriptor[] tdArray = fc.readsTemps();
-
- for (int calleeParamIdx = 0; calleeParamIdx < tdArray.length; calleeParamIdx++) {
- TempDescriptor paramDesc = tdArray[calleeParamIdx];
-
- Set<Integer> paramIDs = getReachableParamIndexSet(og, paramDesc);
-
- // handle read effects
- Iterator<Integer> paramIter = paramIDs.iterator();
- while (paramIter.hasNext()) {
- Integer paramIdx = paramIter.next();
- HashSet<EffectsKey> newSet = callee.getEffects().getReadTable()
- .get(calleeParamIdx);
-
-
- if(newSet!=null){
- HashSet<EffectsKey> thisSet=new HashSet<EffectsKey>();
- HeapRegionNode priHRN=og.id2hrn.get(og.paramIndex2idPrimary.get(paramIdx));
- Integer secIdx=og.paramIndex2idSecondary.get(paramIdx);
- HeapRegionNode secHRN=null;
- if(secIdx!=null){
- secHRN=og.id2hrn.get(secIdx);
- }else{
- secHRN=priHRN;
- }
-
- for (Iterator iterator = newSet.iterator(); iterator.hasNext();) {
- EffectsKey effectsKey = (EffectsKey) iterator.next();
- HeapRegionNode hrnTemp;
- if(effectsKey.getParamIden()==0){//primary
- hrnTemp=priHRN;
- }else{//secondary
- hrnTemp=secHRN;
- }
- EffectsKey newEffectsKey;
- if(secIdx==null){
- newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),0);
- }else{
- newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),effectsKey.getParamIden());
- }
- thisSet.add(newEffectsKey);
- }
-
- effectsSet.addReadingEffectsSet(paramIdx, thisSet);
- }
-
- }
-
- // handle write effects
- paramIter = paramIDs.iterator();
- while (paramIter.hasNext()) {
- Integer paramIdx = paramIter.next();
- HashSet<EffectsKey> newSet = callee.getEffects()
- .getWriteTable().get(calleeParamIdx);
-
- if(newSet!=null){
-
- HashSet<EffectsKey> thisSet=new HashSet<EffectsKey>();
- HeapRegionNode priHRN=og.id2hrn.get(og.paramIndex2idPrimary.get(paramIdx));
- Integer secIdx=og.paramIndex2idSecondary.get(paramIdx);
- HeapRegionNode secHRN=null;
- if(secIdx!=null){
- secHRN=og.id2hrn.get(secIdx);
- }else{
- secHRN=priHRN;
- }
-
- for (Iterator iterator = newSet.iterator(); iterator.hasNext();) {
- EffectsKey effectsKey = (EffectsKey) iterator.next();
- HeapRegionNode hrnTemp;
- if(effectsKey.getParamIden()==0){//primary
- hrnTemp=priHRN;
- }else{//secondary
- hrnTemp=secHRN;
- }
- EffectsKey newEffectsKey;
- if(secIdx==null){
- newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),0);
- }else{
- newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),effectsKey.getParamIden());
- }
- thisSet.add(newEffectsKey);
- }
-
- effectsSet.addWritingEffectsSet(paramIdx, thisSet);
- }
-
- }
-
- // handle strong update effects
- paramIter = paramIDs.iterator();
- while (paramIter.hasNext()) {
- Integer paramIdx = paramIter.next();
- HashSet<EffectsKey> newSet = callee.getEffects()
- .getStrongUpdateTable().get(calleeParamIdx);
- if(newSet!=null){
-
- HashSet<EffectsKey> thisSet=new HashSet<EffectsKey>();
- HeapRegionNode priHRN=og.id2hrn.get(og.paramIndex2idPrimary.get(paramIdx));
- Integer secIdx=og.paramIndex2idSecondary.get(paramIdx);
- HeapRegionNode secHRN=null;
- if(secIdx!=null){
- secHRN=og.id2hrn.get(secIdx);
- }else{
- secHRN=priHRN;
- }
-
- for (Iterator iterator = newSet.iterator(); iterator.hasNext();) {
- EffectsKey effectsKey = (EffectsKey) iterator.next();
- HeapRegionNode hrnTemp;
- if(effectsKey.getParamIden()==0){//primary
- hrnTemp=priHRN;
- }else{//secondary
- hrnTemp=secHRN;
- }
- EffectsKey newEffectsKey;
- if(secIdx==null){
- newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),0);
- }else{
- newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),effectsKey.getParamIden());
- }
- thisSet.add(newEffectsKey);
- }
-
- effectsSet.addStrongUpdateEffectsSet(paramIdx, thisSet);
- }
-
- }
-
- }
+ }
+
+ // handle write effects
+ paramIter = paramIDs.iterator();
+ while (paramIter.hasNext()) {
+ Integer paramIdx = paramIter.next();
+ HashSet<EffectsKey> newSet = callee.getEffects()
+ .getWriteTable().get(calleeParamIdx);
+
+ if(newSet!=null) {
+
+ HashSet<EffectsKey> thisSet=new HashSet<EffectsKey>();
+ HeapRegionNode priHRN=og.id2hrn.get(og.paramIndex2idPrimary.get(paramIdx));
+ Integer secIdx=og.paramIndex2idSecondary.get(paramIdx);
+ HeapRegionNode secHRN=null;
+ if(secIdx!=null) {
+ secHRN=og.id2hrn.get(secIdx);
+ } else {
+ secHRN=priHRN;
+ }
+
+ for (Iterator iterator = newSet.iterator(); iterator.hasNext(); ) {
+ EffectsKey effectsKey = (EffectsKey) iterator.next();
+ HeapRegionNode hrnTemp;
+ if(effectsKey.getParamIden()==0) { //primary
+ hrnTemp=priHRN;
+ } else { //secondary
+ hrnTemp=secHRN;
+ }
+ EffectsKey newEffectsKey;
+ if(secIdx==null) {
+ newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),0);
+ } else {
+ newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),effectsKey.getParamIden());
+ }
+ thisSet.add(newEffectsKey);
+ }
+
+ effectsSet.addWritingEffectsSet(paramIdx, thisSet);
+ }
+ }
+
+ // handle strong update effects
+ paramIter = paramIDs.iterator();
+ while (paramIter.hasNext()) {
+ Integer paramIdx = paramIter.next();
+ HashSet<EffectsKey> newSet = callee.getEffects()
+ .getStrongUpdateTable().get(calleeParamIdx);
+ if(newSet!=null) {
+
+ HashSet<EffectsKey> thisSet=new HashSet<EffectsKey>();
+ HeapRegionNode priHRN=og.id2hrn.get(og.paramIndex2idPrimary.get(paramIdx));
+ Integer secIdx=og.paramIndex2idSecondary.get(paramIdx);
+ HeapRegionNode secHRN=null;
+ if(secIdx!=null) {
+ secHRN=og.id2hrn.get(secIdx);
+ } else {
+ secHRN=priHRN;
+ }
+
+ for (Iterator iterator = newSet.iterator(); iterator.hasNext(); ) {
+ EffectsKey effectsKey = (EffectsKey) iterator.next();
+ HeapRegionNode hrnTemp;
+ if(effectsKey.getParamIden()==0) { //primary
+ hrnTemp=priHRN;
+ } else { //secondary
+ hrnTemp=secHRN;
+ }
+ EffectsKey newEffectsKey;
+ if(secIdx==null) {
+ newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),0);
+ } else {
+ newEffectsKey=new EffectsKey(effectsKey.getFieldDescriptor(), effectsKey.getTypeDescriptor(), hrnTemp.getID(),hrnTemp.getGloballyUniqueIdentifier(),effectsKey.getParamIden());
+ }
+ thisSet.add(newEffectsKey);
+ }
+
+ effectsSet.addStrongUpdateEffectsSet(paramIdx, thisSet);
}
- protected LabelNode getLabelNodeFromTemp(OwnershipGraph og,
- TempDescriptor td) {
- assert td != null;
+ }
- if (!og.td2ln.containsKey(td)) {
- og.td2ln.put(td, new LabelNode(td));
- }
+ }
- return og.td2ln.get(td);
- }
+ }
- public boolean equals(Object o) {
- if (o == null) {
- return false;
- }
+ protected LabelNode getLabelNodeFromTemp(OwnershipGraph og,
+ TempDescriptor td) {
+ assert td != null;
- if (!(o instanceof MethodEffects)) {
- return false;
- }
+ if (!og.td2ln.containsKey(td)) {
+ og.td2ln.put(td, new LabelNode(td));
+ }
- MethodEffects in = (MethodEffects) o;
+ return og.td2ln.get(td);
+ }
- if (getEffects().equals(in.getEffects())) {
- return true;
- } else {
- return false;
- }
+ public boolean equals(Object o) {
+ if (o == null) {
+ return false;
+ }
- }
+ if (!(o instanceof MethodEffects)) {
+ return false;
+ }
- public int hashCode() {
- int hash = 1;
+ MethodEffects in = (MethodEffects) o;
- hash += getEffects().hashCode() * 37;
+ if (getEffects().equals(in.getEffects())) {
+ return true;
+ } else {
+ return false;
+ }
- return hash;
- }
+ }
+
+ public int hashCode() {
+ int hash = 1;
+
+ hash += getEffects().hashCode() * 37;
+
+ return hash;
+ }
}
public class MethodEffectsAnalysis {
- private Hashtable<MethodContext, MethodEffects> mapMethodContextToMethodEffects;
- boolean methodeffects = false;
-
- public MethodEffectsAnalysis(boolean methodeffects) {
- this.methodeffects = methodeffects;
- mapMethodContextToMethodEffects = new Hashtable<MethodContext, MethodEffects>();
- }
-
- public MethodEffects getMethodEffectsByMethodContext(MethodContext mc){
- return mapMethodContextToMethodEffects.get(mc);
+ private Hashtable<MethodContext, MethodEffects> mapMethodContextToMethodEffects;
+ boolean methodeffects = false;
+
+ public MethodEffectsAnalysis(boolean methodeffects) {
+ this.methodeffects = methodeffects;
+ mapMethodContextToMethodEffects = new Hashtable<MethodContext, MethodEffects>();
+ }
+
+ public MethodEffects getMethodEffectsByMethodContext(MethodContext mc) {
+ return mapMethodContextToMethodEffects.get(mc);
+ }
+
+ public void createNewMapping(MethodContext mcNew) {
+ if(!methodeffects) return;
+ if (!mapMethodContextToMethodEffects.containsKey(mcNew)) {
+ MethodEffects meNew = new MethodEffects();
+ mapMethodContextToMethodEffects.put(mcNew, meNew);
+ }
+ }
+
+ public void analyzeFlatCall(OwnershipGraph calleeOG,
+ MethodContext calleeMC, MethodContext callerMC, FlatCall fc) {
+ if(!methodeffects) return;
+ MethodEffects me = mapMethodContextToMethodEffects.get(callerMC);
+ MethodEffects meFlatCall = mapMethodContextToMethodEffects
+ .get(calleeMC);
+ me.analyzeFlatCall(calleeOG, fc, callerMC, meFlatCall);
+ mapMethodContextToMethodEffects.put(callerMC, me);
+ }
+
+ public void analyzeFlatFieldNode(MethodContext mc, OwnershipGraph og,
+ TempDescriptor srcDesc, FieldDescriptor fieldDesc) {
+ if(!methodeffects) return;
+ MethodEffects me = mapMethodContextToMethodEffects.get(mc);
+ me.analyzeFlatFieldNode(og, srcDesc, fieldDesc);
+ mapMethodContextToMethodEffects.put(mc, me);
+ }
+
+ public void analyzeFlatSetFieldNode(MethodContext mc, OwnershipGraph og,
+ TempDescriptor dstDesc, FieldDescriptor fieldDesc) {
+ if(!methodeffects) return;
+ MethodEffects me = mapMethodContextToMethodEffects.get(mc);
+ me.analyzeFlatSetFieldNode(og, dstDesc, fieldDesc);
+ mapMethodContextToMethodEffects.put(mc, me);
+ }
+
+ public void analyzeFlatSetElementNode(MethodContext mc, OwnershipGraph og,
+ TempDescriptor dstDesc, FieldDescriptor fieldDesc) {
+ if(!methodeffects) return;
+ MethodEffects me = mapMethodContextToMethodEffects.get(mc);
+ me.analyzeFlatSetElementNode(og, dstDesc, fieldDesc);
+ mapMethodContextToMethodEffects.put(mc, me);
+ }
+
+ public void analyzeFlatElementNode(MethodContext mc, OwnershipGraph og,
+ TempDescriptor dstDesc, FieldDescriptor fieldDesc) {
+ if(!methodeffects) return;
+ MethodEffects me = mapMethodContextToMethodEffects.get(mc);
+ me.analyzeFlatElementNode(og, dstDesc, fieldDesc);
+ mapMethodContextToMethodEffects.put(mc, me);
+ }
+
+
+ public void writeMethodEffectsResult() throws IOException {
+
+ try {
+ BufferedWriter bw = new BufferedWriter(new FileWriter(
+ "MethodEffects_report.txt"));
+
+ Set<MethodContext> mcSet = mapMethodContextToMethodEffects.keySet();
+ Iterator<MethodContext> mcIter = mcSet.iterator();
+ while (mcIter.hasNext()) {
+ MethodContext mc = mcIter.next();
+ MethodDescriptor md = (MethodDescriptor) mc.getDescriptor();
+
+ int startIdx = 0;
+ if (!md.isStatic()) {
+ startIdx = 1;
}
- public void createNewMapping(MethodContext mcNew) {
- if(!methodeffects) return;
- if (!mapMethodContextToMethodEffects.containsKey(mcNew)) {
- MethodEffects meNew = new MethodEffects();
- mapMethodContextToMethodEffects.put(mcNew, meNew);
- }
- }
+ MethodEffects me = mapMethodContextToMethodEffects.get(mc);
+ EffectsSet effectsSet = me.getEffects();
+
+ bw.write("Method " + mc + " :\n");
+ for (int i = startIdx; i < md.numParameters() + startIdx; i++) {
+
+ String paramName = md.getParamName(i - startIdx);
+
+ Set<EffectsKey> effectSet = effectsSet.getReadingSet(i);
+ String keyStr = "{";
+ if (effectSet != null) {
+ Iterator<EffectsKey> effectIter = effectSet.iterator();
+ while (effectIter.hasNext()) {
+ EffectsKey key = effectIter.next();
+ keyStr += " " + key;
+ }
+ }
+ keyStr += " }";
+ bw.write(" Parameter " + paramName + " ReadingSet="
+ + keyStr + "\n");
+
+ effectSet = effectsSet.getWritingSet(new Integer(i));
+ keyStr = "{";
+ if (effectSet != null) {
+ Iterator<EffectsKey> effectIter = effectSet.iterator();
+ while (effectIter.hasNext()) {
+ EffectsKey key = effectIter.next();
+ keyStr += " " + key;
+ }
+ }
+
+ keyStr += " }";
+ bw.write(" Parameter " + paramName + " WritingSet="
+ + keyStr + "\n");
- public void analyzeFlatCall(OwnershipGraph calleeOG,
- MethodContext calleeMC, MethodContext callerMC, FlatCall fc) {
- if(!methodeffects) return;
- MethodEffects me = mapMethodContextToMethodEffects.get(callerMC);
- MethodEffects meFlatCall = mapMethodContextToMethodEffects
- .get(calleeMC);
- me.analyzeFlatCall(calleeOG, fc, callerMC, meFlatCall);
- mapMethodContextToMethodEffects.put(callerMC, me);
}
+ bw.write("\n");
- public void analyzeFlatFieldNode(MethodContext mc, OwnershipGraph og,
- TempDescriptor srcDesc, FieldDescriptor fieldDesc) {
- if(!methodeffects) return;
- MethodEffects me = mapMethodContextToMethodEffects.get(mc);
- me.analyzeFlatFieldNode(og, srcDesc, fieldDesc);
- mapMethodContextToMethodEffects.put(mc, me);
- }
+ }
- public void analyzeFlatSetFieldNode(MethodContext mc, OwnershipGraph og,
- TempDescriptor dstDesc, FieldDescriptor fieldDesc) {
- if(!methodeffects) return;
- MethodEffects me = mapMethodContextToMethodEffects.get(mc);
- me.analyzeFlatSetFieldNode(og, dstDesc, fieldDesc);
- mapMethodContextToMethodEffects.put(mc, me);
- }
-
- public void analyzeFlatSetElementNode(MethodContext mc, OwnershipGraph og,
- TempDescriptor dstDesc, FieldDescriptor fieldDesc) {
- if(!methodeffects) return;
- MethodEffects me = mapMethodContextToMethodEffects.get(mc);
- me.analyzeFlatSetElementNode(og, dstDesc, fieldDesc);
- mapMethodContextToMethodEffects.put(mc, me);
- }
-
- public void analyzeFlatElementNode(MethodContext mc, OwnershipGraph og,
- TempDescriptor dstDesc, FieldDescriptor fieldDesc) {
- if(!methodeffects) return;
- MethodEffects me = mapMethodContextToMethodEffects.get(mc);
- me.analyzeFlatElementNode(og, dstDesc, fieldDesc);
- mapMethodContextToMethodEffects.put(mc, me);
- }
-
-
- public void writeMethodEffectsResult() throws IOException {
-
- try {
- BufferedWriter bw = new BufferedWriter(new FileWriter(
- "MethodEffects_report.txt"));
-
- Set<MethodContext> mcSet = mapMethodContextToMethodEffects.keySet();
- Iterator<MethodContext> mcIter = mcSet.iterator();
- while (mcIter.hasNext()) {
- MethodContext mc = mcIter.next();
- MethodDescriptor md = (MethodDescriptor) mc.getDescriptor();
-
- int startIdx = 0;
- if (!md.isStatic()) {
- startIdx = 1;
- }
-
- MethodEffects me = mapMethodContextToMethodEffects.get(mc);
- EffectsSet effectsSet = me.getEffects();
-
- bw.write("Method " + mc + " :\n");
- for (int i = startIdx; i < md.numParameters() + startIdx; i++) {
-
- String paramName = md.getParamName(i - startIdx);
-
- Set<EffectsKey> effectSet = effectsSet.getReadingSet(i);
- String keyStr = "{";
- if (effectSet != null) {
- Iterator<EffectsKey> effectIter = effectSet.iterator();
- while (effectIter.hasNext()) {
- EffectsKey key = effectIter.next();
- keyStr += " " + key;
- }
- }
- keyStr += " }";
- bw.write(" Paramter " + paramName + " ReadingSet="
- + keyStr + "\n");
-
- effectSet = effectsSet.getWritingSet(new Integer(i));
- keyStr = "{";
- if (effectSet != null) {
- Iterator<EffectsKey> effectIter = effectSet.iterator();
- while (effectIter.hasNext()) {
- EffectsKey key = effectIter.next();
- keyStr += " " + key;
- }
- }
-
- keyStr += " }";
- bw.write(" Paramter " + paramName + " WritingngSet="
- + keyStr + "\n");
-
- }
- bw.write("\n");
-
- }
-
- bw.close();
- } catch (IOException e) {
- System.err.println(e);
- }
+ bw.close();
+ } catch (IOException e) {
+ System.err.println(e);
+ }
- }
+ }
}
}
public Set<HeapRegionNode> createsPotentialAliases(Descriptor taskOrMethod,
- int paramIndex1,
- int paramIndex2) {
+ int paramIndex1,
+ int paramIndex2) {
checkAnalysisComplete();
OwnershipGraph og = getGraphOfAllContextsFromDescriptor(taskOrMethod);
assert(og != null);
}
public Set<HeapRegionNode> createsPotentialAliases(Descriptor taskOrMethod,
- int paramIndex,
- AllocationSite alloc) {
+ int paramIndex,
+ AllocationSite alloc) {
checkAnalysisComplete();
OwnershipGraph og = getGraphOfAllContextsFromDescriptor(taskOrMethod);
assert(og != null);
}
public Set<HeapRegionNode> createsPotentialAliases(Descriptor taskOrMethod,
- AllocationSite alloc,
- int paramIndex) {
+ AllocationSite alloc,
+ int paramIndex) {
checkAnalysisComplete();
OwnershipGraph og = getGraphOfAllContextsFromDescriptor(taskOrMethod);
assert(og != null);
}
public Set<HeapRegionNode> createsPotentialAliases(Descriptor taskOrMethod,
- AllocationSite alloc1,
- AllocationSite alloc2) {
+ AllocationSite alloc1,
+ AllocationSite alloc2) {
checkAnalysisComplete();
OwnershipGraph og = getGraphOfAllContextsFromDescriptor(taskOrMethod);
assert(og != null);
OwnershipGraph og = new OwnershipGraph();
- assert mapDescriptorToAllMethodContexts.containsKey( d );
- HashSet<MethodContext> contexts = mapDescriptorToAllMethodContexts.get( d );
+ assert mapDescriptorToAllMethodContexts.containsKey(d);
+ HashSet<MethodContext> contexts = mapDescriptorToAllMethodContexts.get(d);
Iterator<MethodContext> mcItr = contexts.iterator();
while( mcItr.hasNext() ) {
MethodContext mc = mcItr.next();
OwnershipGraph ogContext = mapMethodContextToCompleteOwnershipGraph.get(mc);
assert ogContext != null;
- og.merge( ogContext );
+ og.merge(ogContext);
}
return og;
}
- public String prettyPrintNodeSet( Set<HeapRegionNode> s ) {
+ public String prettyPrintNodeSet(Set<HeapRegionNode> s) {
checkAnalysisComplete();
String out = "{\n";
// use the methods given above to check every possible alias
// between task parameters and flagged allocation sites reachable
// from the task
- public void writeAllAliases(String outputFile,
+ public void writeAllAliases(String outputFile,
String timeReport,
String justTime,
boolean tabularOutput,
bw.write("Conducting ownership analysis with allocation depth = "+allocationDepth+"\n");
bw.write(timeReport+"\n");
}
-
+
int numAlias = 0;
// look through every task for potential aliases
TaskDescriptor td = (TaskDescriptor) taskItr.next();
if( !tabularOutput ) {
- bw.write("\n---------"+td+"--------\n");
+ bw.write("\n---------"+td+"--------\n");
}
HashSet<AllocationSite> allocSites = getFlaggedAllocationSitesReachableFromTask(td);
Set<HeapRegionNode> common;
-
+
// for each task parameter, check for aliases with
// other task parameters and every allocation site
// reachable from this task
common = createsPotentialAliases(td, i, j);
if( !common.isEmpty() ) {
foundSomeAlias = true;
- if( !tabularOutput ) {
- bw.write("Potential alias between parameters "+i+" and "+j+".\n");
- bw.write(prettyPrintNodeSet( common )+"\n" );
- } else {
- ++numAlias;
- }
+ if( !tabularOutput ) {
+ bw.write("Potential alias between parameters "+i+" and "+j+".\n");
+ bw.write(prettyPrintNodeSet(common)+"\n");
+ } else {
+ ++numAlias;
+ }
}
}
-
+
// for the ith parameter, check for aliases against
// the set of allocation sites reachable from this
// task context
common = createsPotentialAliases(td, i, as);
if( !common.isEmpty() ) {
foundSomeAlias = true;
- if( !tabularOutput ) {
- bw.write("Potential alias between parameter "+i+" and "+as.getFlatNew()+".\n");
- bw.write(prettyPrintNodeSet( common )+"\n" );
- } else {
- ++numAlias;
- }
+ if( !tabularOutput ) {
+ bw.write("Potential alias between parameter "+i+" and "+as.getFlatNew()+".\n");
+ bw.write(prettyPrintNodeSet(common)+"\n");
+ } else {
+ ++numAlias;
+ }
}
}
}
-
+
// for each allocation site check for aliases with
// other allocation sites in the context of execution
// of this task
Iterator allocItr2 = allocSites.iterator();
while( allocItr2.hasNext() ) {
AllocationSite as2 = (AllocationSite) allocItr2.next();
-
+
if( !outerChecked.contains(as2) ) {
common = createsPotentialAliases(td, as1, as2);
-
+
if( !common.isEmpty() ) {
foundSomeAlias = true;
- if( !tabularOutput ) {
- bw.write("Potential alias between "+as1.getFlatNew()+" and "+as2.getFlatNew()+".\n");
- bw.write(prettyPrintNodeSet( common )+"\n" );
- } else {
- ++numAlias;
- }
+ if( !tabularOutput ) {
+ bw.write("Potential alias between "+as1.getFlatNew()+" and "+as2.getFlatNew()+".\n");
+ bw.write(prettyPrintNodeSet(common)+"\n");
+ } else {
+ ++numAlias;
+ }
}
}
}
}
if( !foundSomeAlias ) {
- if( !tabularOutput ) {
- bw.write("No aliases between flagged objects in Task "+td+".\n");
- }
+ if( !tabularOutput ) {
+ bw.write("No aliases between flagged objects in Task "+td+".\n");
+ }
}
}
if( !tabularOutput ) {
- bw.write( "\n"+computeAliasContextHistogram() );
+ bw.write("\n"+computeAliasContextHistogram() );
} else {
- bw.write( " & "+numAlias+
- " & "+justTime+
- " & "+numLines+
- " & "+numMethodsAnalyzed()+
- " \\\\\n" );
+ bw.write(" & "+numAlias+
+ " & "+justTime+
+ " & "+numLines+
+ " & "+numMethodsAnalyzed()+
+ " \\\\\n");
}
-
+
bw.close();
}
// this version of writeAllAliases is for Java programs that have no tasks
- public void writeAllAliasesJava(String outputFile,
+ public void writeAllAliasesJava(String outputFile,
String timeReport,
String justTime,
boolean tabularOutput,
) throws java.io.IOException {
checkAnalysisComplete();
- assert !state.TASK;
+ assert !state.TASK;
BufferedWriter bw = new BufferedWriter(new FileWriter(outputFile) );
Iterator allocItr1 = allocSites.iterator();
while( allocItr1.hasNext() ) {
AllocationSite as1 = (AllocationSite) allocItr1.next();
-
+
Iterator allocItr2 = allocSites.iterator();
while( allocItr2.hasNext() ) {
AllocationSite as2 = (AllocationSite) allocItr2.next();
-
- if( !outerChecked.contains(as2) ) {
+
+ if( !outerChecked.contains(as2) ) {
Set<HeapRegionNode> common = createsPotentialAliases(d, as1, as2);
if( !common.isEmpty() ) {
foundSomeAlias = true;
bw.write("Potential alias between "+as1.getDisjointId()+" and "+as2.getDisjointId()+".\n");
- bw.write( prettyPrintNodeSet( common )+"\n" );
+ bw.write(prettyPrintNodeSet(common)+"\n");
}
}
}
-
+
outerChecked.add(as1);
}
-
+
if( !foundSomeAlias ) {
bw.write("No aliases between flagged objects found.\n");
}
- bw.write( "\n"+computeAliasContextHistogram() );
+ bw.write("\n"+computeAliasContextHistogram() );
bw.close();
}
///////////////////////////////////////////
// data from the compiler
- public State state;
- public CallGraph callGraph;
- public Liveness liveness;
+ public State state;
+ public CallGraph callGraph;
+ public Liveness liveness;
public ArrayReferencees arrayReferencees;
- public TypeUtil typeUtil;
- public int allocationDepth;
+ public TypeUtil typeUtil;
+ public int allocationDepth;
// for public interface methods to warn that they
// are grabbing results during analysis
// TaskDescriptor and MethodDescriptor are combined
// together, with a common parent class Descriptor
private Hashtable<MethodContext, OwnershipGraph> mapMethodContextToInitialParamAllocGraph;
- private Hashtable<MethodContext, OwnershipGraph> mapMethodContextToCompleteOwnershipGraph;
+ private Hashtable<MethodContext, OwnershipGraph> mapMethodContextToCompleteOwnershipGraph;
private Hashtable<FlatNew, AllocationSite> mapFlatNewToAllocationSite;
private Hashtable<Descriptor, HashSet<AllocationSite> > mapDescriptorToAllocationSiteSet;
private Hashtable<MethodContext, Integer> mapMethodContextToNumUpdates;
// for controlling DOT file output
private boolean writeDOTs;
private boolean writeAllDOTs;
-
+
// for controlling method effects
private boolean methodEffects;
-
+
//map each FlatNode to its own internal ownership graph
private MethodEffectsAnalysis meAnalysis;
-
+
//keep internal ownership graph by method context and flat node
private Hashtable<MethodContext, Hashtable<FlatNode, OwnershipGraph>> mapMethodContextToFlatNodeOwnershipGraph;
-
+
//map method context to a set of allocation sites of live-in vars
private Hashtable<MethodContext, HashSet<AllocationSite>> mapMethodContextToLiveInAllocationSiteSet;
public OwnershipAnalysis(State state,
TypeUtil tu,
CallGraph callGraph,
- Liveness liveness,
+ Liveness liveness,
ArrayReferencees ar,
int allocationDepth,
boolean writeDOTs,
boolean writeAllDOTs,
String aliasFile) throws java.io.IOException {
-
- this.methodEffects = false;
- init(state,tu,callGraph,liveness,ar,allocationDepth,writeDOTs,writeAllDOTs,aliasFile);
-
+
+ this.methodEffects = false;
+ init(state,tu,callGraph,liveness,ar,allocationDepth,writeDOTs,writeAllDOTs,aliasFile);
+
}
-
+
public OwnershipAnalysis(State state,
- TypeUtil tu,
- CallGraph callGraph,
- Liveness liveness,
+ TypeUtil tu,
+ CallGraph callGraph,
+ Liveness liveness,
ArrayReferencees ar,
- int allocationDepth,
- boolean writeDOTs,
- boolean writeAllDOTs,
- String aliasFile,
- boolean methodEffects) throws java.io.IOException {
-
- this.methodEffects = methodEffects;
- init(state,tu,callGraph,liveness,ar,allocationDepth,writeDOTs,writeAllDOTs,aliasFile);
-
+ int allocationDepth,
+ boolean writeDOTs,
+ boolean writeAllDOTs,
+ String aliasFile,
+ boolean methodEffects) throws java.io.IOException {
+
+ this.methodEffects = methodEffects;
+ init(state,tu,callGraph,liveness,ar,allocationDepth,writeDOTs,writeAllDOTs,aliasFile);
+
}
-
+
// new constructor for on-demand disjoint analysis
- public OwnershipAnalysis(
- State state,
- TypeUtil tu,
- CallGraph callGraph,
- Liveness liveness,
- ArrayReferencees ar,
- int allocationDepth,
- boolean writeDOTs,
- boolean writeAllDOTs,
- String aliasFile,
- boolean methodEffects,
- Hashtable<MethodContext, HashSet<AllocationSite>> mapMethodContextToLiveInAllocationSiteSet)
- throws java.io.IOException {
-
- this.methodEffects = methodEffects;
- this.mapMethodContextToLiveInAllocationSiteSet=mapMethodContextToLiveInAllocationSiteSet;
- init(state, tu, callGraph, liveness, ar, allocationDepth, writeDOTs, writeAllDOTs,
- aliasFile);
+ public OwnershipAnalysis(
+ State state,
+ TypeUtil tu,
+ CallGraph callGraph,
+ Liveness liveness,
+ ArrayReferencees ar,
+ int allocationDepth,
+ boolean writeDOTs,
+ boolean writeAllDOTs,
+ String aliasFile,
+ boolean methodEffects,
+ Hashtable<MethodContext, HashSet<AllocationSite>> mapMethodContextToLiveInAllocationSiteSet)
+ throws java.io.IOException {
+
+ this.methodEffects = methodEffects;
+ this.mapMethodContextToLiveInAllocationSiteSet=mapMethodContextToLiveInAllocationSiteSet;
+ init(state, tu, callGraph, liveness, ar, allocationDepth, writeDOTs, writeAllDOTs,
+ aliasFile);
+
+ }
- }
-
private void init(State state,
- TypeUtil tu,
- CallGraph callGraph,
- Liveness liveness,
+ TypeUtil tu,
+ CallGraph callGraph,
+ Liveness liveness,
ArrayReferencees ar,
- int allocationDepth,
- boolean writeDOTs,
- boolean writeAllDOTs,
- String aliasFile) throws java.io.IOException {
-
- analysisComplete = false;
-
- this.state = state;
- this.typeUtil = tu;
- this.callGraph = callGraph;
- this.liveness = liveness;
- this.arrayReferencees = ar;
- this.allocationDepth = allocationDepth;
- this.writeDOTs = writeDOTs;
- this.writeAllDOTs = writeAllDOTs;
-
- // set some static configuration for OwnershipGraphs
- OwnershipGraph.allocationDepth = allocationDepth;
- OwnershipGraph.typeUtil = typeUtil;
- OwnershipGraph.debugCallMapCount = state.OWNERSHIPDEBUGCALLCOUNT;
- OwnershipGraph.debugCallee = state.OWNERSHIPDEBUGCALLEE;
- OwnershipGraph.debugCaller = state.OWNERSHIPDEBUGCALLER;
- if( OwnershipGraph.debugCallee != null &&
- OwnershipGraph.debugCaller != null ) {
- OwnershipGraph.debugCallMap = true;
- }
+ int allocationDepth,
+ boolean writeDOTs,
+ boolean writeAllDOTs,
+ String aliasFile) throws java.io.IOException {
+
+ analysisComplete = false;
+
+ this.state = state;
+ this.typeUtil = tu;
+ this.callGraph = callGraph;
+ this.liveness = liveness;
+ this.arrayReferencees = ar;
+ this.allocationDepth = allocationDepth;
+ this.writeDOTs = writeDOTs;
+ this.writeAllDOTs = writeAllDOTs;
+
+ // set some static configuration for OwnershipGraphs
+ OwnershipGraph.allocationDepth = allocationDepth;
+ OwnershipGraph.typeUtil = typeUtil;
+ OwnershipGraph.debugCallMapCount = state.OWNERSHIPDEBUGCALLCOUNT;
+ OwnershipGraph.debugCallee = state.OWNERSHIPDEBUGCALLEE;
+ OwnershipGraph.debugCaller = state.OWNERSHIPDEBUGCALLER;
+ if( OwnershipGraph.debugCallee != null &&
+ OwnershipGraph.debugCaller != null ) {
+ OwnershipGraph.debugCallMap = true;
+ }
- descriptorsToAnalyze = new HashSet<Descriptor>();
+ descriptorsToAnalyze = new HashSet<Descriptor>();
- mapMethodContextToInitialParamAllocGraph =
- new Hashtable<MethodContext, OwnershipGraph>();
+ mapMethodContextToInitialParamAllocGraph =
+ new Hashtable<MethodContext, OwnershipGraph>();
- mapMethodContextToCompleteOwnershipGraph =
- new Hashtable<MethodContext, OwnershipGraph>();
+ mapMethodContextToCompleteOwnershipGraph =
+ new Hashtable<MethodContext, OwnershipGraph>();
- mapFlatNewToAllocationSite =
- new Hashtable<FlatNew, AllocationSite>();
+ mapFlatNewToAllocationSite =
+ new Hashtable<FlatNew, AllocationSite>();
- mapDescriptorToAllocationSiteSet =
- new Hashtable<Descriptor, HashSet<AllocationSite> >();
+ mapDescriptorToAllocationSiteSet =
+ new Hashtable<Descriptor, HashSet<AllocationSite> >();
- mapDescriptorToAllMethodContexts =
- new Hashtable<Descriptor, HashSet<MethodContext> >();
+ mapDescriptorToAllMethodContexts =
+ new Hashtable<Descriptor, HashSet<MethodContext> >();
- mapMethodContextToDependentContexts =
- new Hashtable<MethodContext, HashSet<MethodContext> >();
+ mapMethodContextToDependentContexts =
+ new Hashtable<MethodContext, HashSet<MethodContext> >();
- mapDescriptorToPriority =
- new Hashtable<Descriptor, Integer>();
+ mapDescriptorToPriority =
+ new Hashtable<Descriptor, Integer>();
- mapHrnIdToAllocationSite =
- new Hashtable<Integer, AllocationSite>();
-
- if( methodEffects ) {
- mapMethodContextToFlatNodeOwnershipGraph=new Hashtable<MethodContext, Hashtable<FlatNode, OwnershipGraph>>();
- }
-
- meAnalysis=new MethodEffectsAnalysis(methodEffects);
+ mapHrnIdToAllocationSite =
+ new Hashtable<Integer, AllocationSite>();
+ if( methodEffects ) {
+ mapMethodContextToFlatNodeOwnershipGraph=new Hashtable<MethodContext, Hashtable<FlatNode, OwnershipGraph>>();
+ }
- if( writeAllDOTs ) {
- mapMethodContextToNumUpdates = new Hashtable<MethodContext, Integer>();
- }
+ meAnalysis=new MethodEffectsAnalysis(methodEffects);
- double timeStartAnalysis = (double) System.nanoTime();
+ if( writeAllDOTs ) {
+ mapMethodContextToNumUpdates = new Hashtable<MethodContext, Integer>();
+ }
- if( state.TASK ) {
- // initialize methods to visit as the set of all tasks in the
- // program and then any method that could be called starting
- // from those tasks
- Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
- while( taskItr.hasNext() ) {
- Descriptor d = (Descriptor) taskItr.next();
- scheduleAllCallees(d);
- }
+ double timeStartAnalysis = (double) System.nanoTime();
- } else {
- // we are not in task mode, just normal Java, so start with
- // the main method
- Descriptor d = typeUtil.getMain();
- scheduleAllCallees(d);
- }
+ if( state.TASK ) {
+ // initialize methods to visit as the set of all tasks in the
+ // program and then any method that could be called starting
+ // from those tasks
+ Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
+ while( taskItr.hasNext() ) {
+ Descriptor d = (Descriptor) taskItr.next();
+ scheduleAllCallees(d);
+ }
- // before beginning analysis, initialize every scheduled method
- // with an ownership graph that has populated parameter index tables
- // by analyzing the first node which is always a FlatMethod node
- Iterator<Descriptor> dItr = descriptorsToAnalyze.iterator();
- while( dItr.hasNext() ) {
- Descriptor d = dItr.next();
- OwnershipGraph og = new OwnershipGraph();
+ } else {
+ // we are not in task mode, just normal Java, so start with
+ // the main method
+ Descriptor d = typeUtil.getMain();
+ scheduleAllCallees(d);
+ }
- FlatMethod fm;
- if( d instanceof MethodDescriptor ) {
- fm = state.getMethodFlat( (MethodDescriptor) d);
- } else {
- assert d instanceof TaskDescriptor;
- fm = state.getMethodFlat( (TaskDescriptor) d);
- }
- MethodContext mc = new MethodContext( d );
- assert !mapDescriptorToAllMethodContexts.containsKey( d );
- HashSet<MethodContext> s = new HashSet<MethodContext>();
- s.add( mc );
- mapDescriptorToAllMethodContexts.put( d, s );
+ // before beginning analysis, initialize every scheduled method
+ // with an ownership graph that has populated parameter index tables
+ // by analyzing the first node which is always a FlatMethod node
+ Iterator<Descriptor> dItr = descriptorsToAnalyze.iterator();
+ while( dItr.hasNext() ) {
+ Descriptor d = dItr.next();
+ OwnershipGraph og = new OwnershipGraph();
- //System.out.println("Previsiting " + mc);
+ FlatMethod fm;
+ if( d instanceof MethodDescriptor ) {
+ fm = state.getMethodFlat( (MethodDescriptor) d);
+ } else {
+ assert d instanceof TaskDescriptor;
+ fm = state.getMethodFlat( (TaskDescriptor) d);
+ }
- meAnalysis.createNewMapping(mc);
+ MethodContext mc = new MethodContext(d);
+ assert !mapDescriptorToAllMethodContexts.containsKey(d);
+ HashSet<MethodContext> s = new HashSet<MethodContext>();
+ s.add(mc);
+ mapDescriptorToAllMethodContexts.put(d, s);
- og = analyzeFlatNode(mc, fm, fm, null, og);
- setGraphForMethodContext(mc, og);
- }
+ //System.out.println("Previsiting " + mc);
- // as mentioned above, analyze methods one-by-one, possibly revisiting
- // a method if the methods that it calls are updated
- analyzeMethods();
- analysisComplete = true;
+ meAnalysis.createNewMapping(mc);
+ og = analyzeFlatNode(mc, fm, fm, null, og);
+ setGraphForMethodContext(mc, og);
+ }
- double timeEndAnalysis = (double) System.nanoTime();
- double dt = (timeEndAnalysis - timeStartAnalysis)/(Math.pow( 10.0, 9.0 ) );
- String treport = String.format( "The reachability analysis took %.3f sec.", dt );
- String justtime = String.format( "%.2f", dt );
- System.out.println( treport );
+ // as mentioned above, analyze methods one-by-one, possibly revisiting
+ // a method if the methods that it calls are updated
+ analyzeMethods();
+ analysisComplete = true;
- if( writeDOTs && !writeAllDOTs ) {
- writeFinalContextGraphs();
- }
- if(methodEffects){
- meAnalysis.writeMethodEffectsResult();
- }
+ double timeEndAnalysis = (double) System.nanoTime();
+ double dt = (timeEndAnalysis - timeStartAnalysis)/(Math.pow(10.0, 9.0) );
+ String treport = String.format("The reachability analysis took %.3f sec.", dt);
+ String justtime = String.format("%.2f", dt);
+ System.out.println(treport);
+
+ if( writeDOTs && !writeAllDOTs ) {
+ writeFinalContextGraphs();
+ }
+
+ if(methodEffects) {
+ meAnalysis.writeMethodEffectsResult();
+ }
+
+ if( aliasFile != null ) {
+ if( state.TASK ) {
+ writeAllAliases(aliasFile, treport, justtime, state.OWNERSHIPALIASTAB, state.lines);
+ } else {
+ writeAllAliasesJava(aliasFile, treport, justtime, state.OWNERSHIPALIASTAB, state.lines);
+ }
+ }
- if( aliasFile != null ) {
- if( state.TASK ) {
- writeAllAliases(aliasFile, treport, justtime, state.OWNERSHIPALIASTAB, state.lines);
- } else {
- writeAllAliasesJava(aliasFile, treport, justtime, state.OWNERSHIPALIASTAB, state.lines);
- }
- }
-
}
// called from the constructor to help initialize the set
// manage the set of tasks and methods to be analyzed
// and be sure to reschedule tasks/methods when the methods
// they call are updated
- private void analyzeMethods() throws java.io.IOException {
+ private void analyzeMethods() throws java.io.IOException {
// first gather all of the method contexts to analyze
HashSet<MethodContext> allContexts = new HashSet<MethodContext>();
Iterator<Descriptor> itrd2a = descriptorsToAnalyze.iterator();
while( itrd2a.hasNext() ) {
- HashSet<MethodContext> mcs = mapDescriptorToAllMethodContexts.get( itrd2a.next() );
+ HashSet<MethodContext> mcs = mapDescriptorToAllMethodContexts.get(itrd2a.next() );
assert mcs != null;
Iterator<MethodContext> itrmc = mcs.iterator();
while( itrmc.hasNext() ) {
- allContexts.add( itrmc.next() );
+ allContexts.add(itrmc.next() );
}
}
// topologically sort them according to the caller graph so leaf calls are
// ordered first; use that ordering to give method contexts priorities
- LinkedList<MethodContext> sortedMethodContexts = topologicalSort( allContexts );
+ LinkedList<MethodContext> sortedMethodContexts = topologicalSort(allContexts);
methodContextsToVisitQ = new PriorityQueue<MethodContextQWrapper>();
methodContextsToVisitSet = new HashSet<MethodContext>();
Iterator<MethodContext> mcItr = sortedMethodContexts.iterator();
while( mcItr.hasNext() ) {
MethodContext mc = mcItr.next();
- mapDescriptorToPriority.put( mc.getDescriptor(), new Integer( p ) );
- methodContextsToVisitQ.add( new MethodContextQWrapper( p, mc ) );
- methodContextsToVisitSet.add( mc );
+ mapDescriptorToPriority.put(mc.getDescriptor(), new Integer(p) );
+ methodContextsToVisitQ.add(new MethodContextQWrapper(p, mc) );
+ methodContextsToVisitSet.add(mc);
++p;
}
// analyze methods from the priority queue until it is empty
while( !methodContextsToVisitQ.isEmpty() ) {
MethodContext mc = methodContextsToVisitQ.poll().getMethodContext();
- assert methodContextsToVisitSet.contains( mc );
- methodContextsToVisitSet.remove( mc );
+ assert methodContextsToVisitSet.contains(mc);
+ methodContextsToVisitSet.remove(mc);
// because the task or method descriptor just extracted
// was in the "to visit" set it either hasn't been analyzed
if( !og.equals(ogPrev) ) {
setGraphForMethodContext(mc, og);
- Iterator<MethodContext> depsItr = iteratorDependents( mc );
+ Iterator<MethodContext> depsItr = iteratorDependents(mc);
while( depsItr.hasNext() ) {
MethodContext mcNext = depsItr.next();
- if( !methodContextsToVisitSet.contains( mcNext ) ) {
- methodContextsToVisitQ.add( new MethodContextQWrapper( mapDescriptorToPriority.get( mcNext.getDescriptor() ),
- mcNext ) );
- methodContextsToVisitSet.add( mcNext );
+ if( !methodContextsToVisitSet.contains(mcNext) ) {
+ methodContextsToVisitQ.add(new MethodContextQWrapper(mapDescriptorToPriority.get(mcNext.getDescriptor() ),
+ mcNext) );
+ methodContextsToVisitSet.add(mcNext);
}
}
}
// ownership graph made from the merge of the
// parent graphs
og = analyzeFlatNode(mc,
- flatm,
+ flatm,
fn,
returnNodesToCombineForCompleteOwnershipGraph,
og);
-
-
- if( takeDebugSnapshots &&
- mc.getDescriptor().getSymbol().equals( mcDescSymbolDebug ) ) {
+
+
+ if( takeDebugSnapshots &&
+ mc.getDescriptor().getSymbol().equals(mcDescSymbolDebug) ) {
debugSnapshot(og,fn);
}
private OwnershipGraph
analyzeFlatNode(MethodContext mc,
- FlatMethod fmContaining,
+ FlatMethod fmContaining,
FlatNode fn,
HashSet<FlatReturnNode> setRetNodes,
OwnershipGraph og) throws java.io.IOException {
// turn it on if we find we actually need it.
//og.nullifyDeadVars( liveness.getLiveInTemps( fmContaining, fn ) );
-
+
TempDescriptor lhs;
TempDescriptor rhs;
FieldDescriptor fld;
// set up each parameter
for( int i = 0; i < fm.numParameters(); ++i ) {
- TempDescriptor tdParam = fm.getParameter( i );
+ TempDescriptor tdParam = fm.getParameter(i);
TypeDescriptor typeParam = tdParam.getType();
- Integer paramIndex = new Integer( i );
+ Integer paramIndex = new Integer(i);
if( typeParam.isImmutable() && !typeParam.isArray() ) {
// don't bother with this primitive parameter, it
continue;
}
- if( aliasedParamIndices.contains( paramIndex ) ) {
+ if( aliasedParamIndices.contains(paramIndex) ) {
// use the alias blob but give parameters their
// own primary obj region
- og.assignTempEqualToAliasedParam( tdParam,
- paramIndex, fm );
+ og.assignTempEqualToAliasedParam(tdParam,
+ paramIndex, fm);
} else {
// this parameter is not aliased to others, give it
// a fresh primary obj and secondary object
- og.assignTempEqualToParamAlloc( tdParam,
- mc.getDescriptor() instanceof TaskDescriptor,
- paramIndex, fm );
+ og.assignTempEqualToParamAlloc(tdParam,
+ mc.getDescriptor() instanceof TaskDescriptor,
+ paramIndex, fm);
}
}
-
+
// add additional edges for aliased regions if necessary
if( !aliasedParamIndices.isEmpty() ) {
- og.addParam2ParamAliasEdges( fm, aliasedParamIndices );
+ og.addParam2ParamAliasEdges(fm, aliasedParamIndices);
}
-
+
// clean up reachability on initial parameter shapes
og.globalSweep();
// this maps tokens to parameter indices and vice versa
// for when this method is a callee
- og.prepareParamTokenMaps( fm );
+ og.prepareParamTokenMaps(fm);
// cache the graph
OwnershipGraph ogResult = new OwnershipGraph();
og.merge(ogInitParamAlloc);
}
break;
-
+
case FKind.FlatOpNode:
FlatOpNode fon = (FlatOpNode) fn;
if( fon.getOp().getOp() == Operation.ASSIGN ) {
TypeDescriptor td = fcn.getType();
assert td != null;
-
+
og.assignTempXEqualToCastedTempY(lhs, rhs, td);
break;
if( !fld.getType().isImmutable() || fld.getType().isArray() ) {
og.assignTempXEqualToTempYFieldF(lhs, rhs, fld);
}
-
+
meAnalysis.analyzeFlatFieldNode(mc, og, rhs, fld);
-
+
break;
case FKind.FlatSetFieldNode:
if( !fld.getType().isImmutable() || fld.getType().isArray() ) {
og.assignTempXFieldFEqualToTempY(lhs, fld, rhs);
}
-
+
meAnalysis.analyzeFlatSetFieldNode(mc, og, lhs, fld);
-
+
break;
case FKind.FlatElementNode:
assert rhs.getType() != null;
assert rhs.getType().isArray();
-
- TypeDescriptor tdElement = rhs.getType().dereference();
- FieldDescriptor fdElement = getArrayField( tdElement );
+
+ TypeDescriptor tdElement = rhs.getType().dereference();
+ FieldDescriptor fdElement = getArrayField(tdElement);
og.assignTempXEqualToTempYFieldF(lhs, rhs, fdElement);
meAnalysis.analyzeFlatElementNode(mc, og, lhs, fdElement);
-
+
}
break;
case FKind.FlatSetElementNode:
FlatSetElementNode fsen = (FlatSetElementNode) fn;
-
+
lhs = fsen.getDst();
rhs = fsen.getSrc();
if( !lhs.getType().isImmutable() || lhs.getType().isArray() ) {
- TypeDescriptor tdElement = lhs.getType().dereference();
- FieldDescriptor fdElement = getArrayField( tdElement );
- meAnalysis.analyzeFlatSetElementNode(mc, og, lhs, fdElement);
- }
+ TypeDescriptor tdElement = lhs.getType().dereference();
+ FieldDescriptor fdElement = getArrayField(tdElement);
+ meAnalysis.analyzeFlatSetElementNode(mc, og, lhs, fdElement);
+ }
- if( arrayReferencees.doesNotCreateNewReaching( fsen ) ) {
+ if( arrayReferencees.doesNotCreateNewReaching(fsen) ) {
// skip this node if it cannot create new reachability paths
- break;
+ break;
}
lhs = fsen.getDst();
assert lhs.getType() != null;
assert lhs.getType().isArray();
-
- TypeDescriptor tdElement = lhs.getType().dereference();
- FieldDescriptor fdElement = getArrayField( tdElement );
+
+ TypeDescriptor tdElement = lhs.getType().dereference();
+ FieldDescriptor fdElement = getArrayField(tdElement);
og.assignTempXFieldFEqualToTempY(lhs, fdElement, rhs);
meAnalysis.analyzeFlatSetElementNode(mc, og, lhs, fdElement);
-
+
}
break;
lhs = fnn.getDst();
if( !lhs.getType().isImmutable() || lhs.getType().isArray() ) {
AllocationSite as = getAllocationSiteFromFlatNewPRIVATE(fnn);
-
- if (mapMethodContextToLiveInAllocationSiteSet != null){
- HashSet<AllocationSite> alllocSet=mapMethodContextToLiveInAllocationSiteSet.get(mc);
- if(alllocSet!=null){
- for (Iterator iterator = alllocSet.iterator(); iterator
- .hasNext();) {
- AllocationSite allocationSite = (AllocationSite) iterator
- .next();
- if(allocationSite.flatNew.equals(as.flatNew)){
- as.setFlag(true);
- }
- }
- }
+
+ if (mapMethodContextToLiveInAllocationSiteSet != null) {
+ HashSet<AllocationSite> alllocSet=mapMethodContextToLiveInAllocationSiteSet.get(mc);
+ if(alllocSet!=null) {
+ for (Iterator iterator = alllocSet.iterator(); iterator
+ .hasNext(); ) {
+ AllocationSite allocationSite = (AllocationSite) iterator
+ .next();
+ if(allocationSite.flatNew.equals(as.flatNew)) {
+ as.setFlag(true);
+ }
+ }
+ }
}
-
+
og.assignTempEqualToNewAlloc(lhs, as);
}
break;
// a static method is simply always the same, makes life easy
ogMergeOfAllPossibleCalleeResults = og;
- Set<Integer> aliasedParamIndices =
+ Set<Integer> aliasedParamIndices =
ogMergeOfAllPossibleCalleeResults.calculateAliasedParamSet(fc, md.isStatic(), flatm);
- MethodContext mcNew = new MethodContext( md, aliasedParamIndices );
- Set contexts = mapDescriptorToAllMethodContexts.get( md );
+ MethodContext mcNew = new MethodContext(md, aliasedParamIndices);
+ Set contexts = mapDescriptorToAllMethodContexts.get(md);
assert contexts != null;
- contexts.add( mcNew );
+ contexts.add(mcNew);
- addDependent( mc, mcNew );
+ addDependent(mc, mcNew);
- OwnershipGraph onlyPossibleCallee = mapMethodContextToCompleteOwnershipGraph.get( mcNew );
+ OwnershipGraph onlyPossibleCallee = mapMethodContextToCompleteOwnershipGraph.get(mcNew);
if( onlyPossibleCallee == null ) {
// if this method context has never been analyzed just schedule it for analysis
// and skip over this call site for now
- if( !methodContextsToVisitSet.contains( mcNew ) ) {
- methodContextsToVisitQ.add( new MethodContextQWrapper( mapDescriptorToPriority.get( md ),
- mcNew ) );
- methodContextsToVisitSet.add( mcNew );
+ if( !methodContextsToVisitSet.contains(mcNew) ) {
+ methodContextsToVisitQ.add(new MethodContextQWrapper(mapDescriptorToPriority.get(md),
+ mcNew) );
+ methodContextsToVisitSet.add(mcNew);
}
-
+
} else {
ogMergeOfAllPossibleCalleeResults.resolveMethodCall(fc, md.isStatic(), flatm, onlyPossibleCallee, mc, null);
}
-
+
meAnalysis.createNewMapping(mcNew);
meAnalysis.analyzeFlatCall(ogMergeOfAllPossibleCalleeResults, mcNew, mc, fc);
-
+
} else {
// if the method descriptor is virtual, then there could be a
OwnershipGraph ogCopy = new OwnershipGraph();
ogCopy.merge(og);
- Set<Integer> aliasedParamIndices =
+ Set<Integer> aliasedParamIndices =
ogCopy.calculateAliasedParamSet(fc, possibleMd.isStatic(), pflatm);
- MethodContext mcNew = new MethodContext( possibleMd, aliasedParamIndices );
- Set contexts = mapDescriptorToAllMethodContexts.get( md );
+ MethodContext mcNew = new MethodContext(possibleMd, aliasedParamIndices);
+ Set contexts = mapDescriptorToAllMethodContexts.get(md);
assert contexts != null;
- contexts.add( mcNew );
-
-
- meAnalysis.createNewMapping(mcNew);
-
-
- addDependent( mc, mcNew );
+ contexts.add(mcNew);
+
+
+ meAnalysis.createNewMapping(mcNew);
- OwnershipGraph ogPotentialCallee = mapMethodContextToCompleteOwnershipGraph.get( mcNew );
+
+ addDependent(mc, mcNew);
+
+ OwnershipGraph ogPotentialCallee = mapMethodContextToCompleteOwnershipGraph.get(mcNew);
if( ogPotentialCallee == null ) {
// if this method context has never been analyzed just schedule it for analysis
// and skip over this call site for now
- if( !methodContextsToVisitSet.contains( mcNew ) ) {
- methodContextsToVisitQ.add( new MethodContextQWrapper( mapDescriptorToPriority.get( md ),
- mcNew ) );
- methodContextsToVisitSet.add( mcNew );
+ if( !methodContextsToVisitSet.contains(mcNew) ) {
+ methodContextsToVisitQ.add(new MethodContextQWrapper(mapDescriptorToPriority.get(md),
+ mcNew) );
+ methodContextsToVisitSet.add(mcNew);
}
-
+
} else {
ogCopy.resolveMethodCall(fc, possibleMd.isStatic(), pflatm, ogPotentialCallee, mc, null);
}
-
+
ogMergeOfAllPossibleCalleeResults.merge(ogCopy);
-
+
meAnalysis.analyzeFlatCall(ogMergeOfAllPossibleCalleeResults, mcNew, mc, fc);
}
-
+
}
og = ogMergeOfAllPossibleCalleeResults;
if( methodEffects ) {
Hashtable<FlatNode, OwnershipGraph> table=mapMethodContextToFlatNodeOwnershipGraph.get(mc);
- if(table==null){
- table=new Hashtable<FlatNode, OwnershipGraph>();
+ if(table==null) {
+ table=new Hashtable<FlatNode, OwnershipGraph>();
}
table.put(fn, og);
mapMethodContextToFlatNodeOwnershipGraph.put(mc, table);
}
- static public FieldDescriptor getArrayField( TypeDescriptor tdElement ) {
- FieldDescriptor fdElement = mapTypeToArrayField.get( tdElement );
+ static public FieldDescriptor getArrayField(TypeDescriptor tdElement) {
+ FieldDescriptor fdElement = mapTypeToArrayField.get(tdElement);
if( fdElement == null ) {
fdElement = new FieldDescriptor(new Modifiers(Modifiers.PUBLIC),
- tdElement,
- arrayElementFieldName,
- null,
- false);
- mapTypeToArrayField.put( tdElement, fdElement );
+ tdElement,
+ arrayElementFieldName,
+ null,
+ false);
+ mapTypeToArrayField.put(tdElement, fdElement);
}
return fdElement;
}
-
+
private void setGraphForMethodContext(MethodContext mc, OwnershipGraph og) {
mapMethodContextToCompleteOwnershipGraph.put(mc, og);
Integer n = mapMethodContextToNumUpdates.get(mc);
try {
og.writeGraph(mc+"COMPLETE"+String.format("%05d", n),
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
- } catch( IOException e ) {}
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // show back edges to confirm graph validity
+ false, // show parameter indices (unmaintained!)
+ true, // hide subset reachability states
+ true); // hide edge taints
+ } catch( IOException e ) {
+ }
mapMethodContextToNumUpdates.put(mc, n + 1);
}
}
- private void addDependent( MethodContext caller, MethodContext callee ) {
- HashSet<MethodContext> deps = mapMethodContextToDependentContexts.get( callee );
+ private void addDependent(MethodContext caller, MethodContext callee) {
+ HashSet<MethodContext> deps = mapMethodContextToDependentContexts.get(callee);
if( deps == null ) {
deps = new HashSet<MethodContext>();
}
- deps.add( caller );
- mapMethodContextToDependentContexts.put( callee, deps );
+ deps.add(caller);
+ mapMethodContextToDependentContexts.put(callee, deps);
}
- private Iterator<MethodContext> iteratorDependents( MethodContext callee ) {
- HashSet<MethodContext> deps = mapMethodContextToDependentContexts.get( callee );
+ private Iterator<MethodContext> iteratorDependents(MethodContext callee) {
+ HashSet<MethodContext> deps = mapMethodContextToDependentContexts.get(callee);
if( deps == null ) {
deps = new HashSet<MethodContext>();
- mapMethodContextToDependentContexts.put( callee, deps );
+ mapMethodContextToDependentContexts.put(callee, deps);
}
return deps.iterator();
}
Set entrySet = mapMethodContextToCompleteOwnershipGraph.entrySet();
Iterator itr = entrySet.iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- MethodContext mc = (MethodContext) me.getKey();
+ Map.Entry me = (Map.Entry)itr.next();
+ MethodContext mc = (MethodContext) me.getKey();
OwnershipGraph og = (OwnershipGraph) me.getValue();
try {
og.writeGraph(mc+"COMPLETE",
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
- } catch( IOException e ) {}
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // show back edges to confirm graph validity
+ false, // show parameter indices (unmaintained!)
+ true, // hide subset reachability states
+ true); // hide edge taints
+ } catch( IOException e ) {
+ }
}
}
-
-
+
+
// return just the allocation site associated with one FlatNew node
private AllocationSite getAllocationSiteFromFlatNewPRIVATE(FlatNew fn) {
for( int i = 0; i < allocationDepth; ++i ) {
Integer id = generateUniqueHeapRegionNodeID();
as.setIthOldest(i, id);
- mapHrnIdToAllocationSite.put( id, as );
+ mapHrnIdToAllocationSite.put(id, as);
}
// the oldest node is a summary node
private HashSet<AllocationSite> getFlaggedAllocationSites(Descriptor dIn) {
-
+
HashSet<AllocationSite> out = new HashSet<AllocationSite>();
HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
HashSet<Descriptor> visited = new HashSet<Descriptor>();
}
}
}
-
+
return out;
}
}
- private LinkedList<MethodContext> topologicalSort( HashSet<MethodContext> set ) {
+ private LinkedList<MethodContext> topologicalSort(HashSet<MethodContext> set) {
HashSet <MethodContext> discovered = new HashSet <MethodContext>();
LinkedList<MethodContext> sorted = new LinkedList<MethodContext>();
-
+
Iterator<MethodContext> itr = set.iterator();
while( itr.hasNext() ) {
MethodContext mc = itr.next();
-
- if( !discovered.contains( mc ) ) {
- dfsVisit( set, mc, sorted, discovered );
+
+ if( !discovered.contains(mc) ) {
+ dfsVisit(set, mc, sorted, discovered);
}
}
-
+
return sorted;
}
-
- private void dfsVisit( HashSet<MethodContext> set,
- MethodContext mc,
- LinkedList<MethodContext> sorted,
- HashSet <MethodContext> discovered ) {
- discovered.add( mc );
-
+
+ private void dfsVisit(HashSet<MethodContext> set,
+ MethodContext mc,
+ LinkedList<MethodContext> sorted,
+ HashSet <MethodContext> discovered) {
+ discovered.add(mc);
+
Descriptor d = mc.getDescriptor();
if( d instanceof MethodDescriptor ) {
- MethodDescriptor md = (MethodDescriptor) d;
- Iterator itr = callGraph.getCallerSet( md ).iterator();
+ MethodDescriptor md = (MethodDescriptor) d;
+ Iterator itr = callGraph.getCallerSet(md).iterator();
while( itr.hasNext() ) {
Descriptor dCaller = (Descriptor) itr.next();
-
+
// only consider the callers in the original set to analyze
- Set<MethodContext> callerContexts = mapDescriptorToAllMethodContexts.get( dCaller );
+ Set<MethodContext> callerContexts = mapDescriptorToAllMethodContexts.get(dCaller);
if( callerContexts == null )
- continue;
-
+ continue;
+
// since the analysis hasn't started, there should be exactly one
// context if there are any at all
- assert callerContexts.size() == 1;
+ assert callerContexts.size() == 1;
MethodContext mcCaller = callerContexts.iterator().next();
- assert set.contains( mcCaller );
+ assert set.contains(mcCaller);
- if( !discovered.contains( mcCaller ) ) {
- dfsVisit( set, mcCaller, sorted, discovered );
+ if( !discovered.contains(mcCaller) ) {
+ dfsVisit(set, mcCaller, sorted, discovered);
}
}
}
- sorted.addFirst( mc );
+ sorted.addFirst(mc);
}
private String computeAliasContextHistogram() {
-
- Hashtable<Integer, Integer> mapNumContexts2NumDesc =
+
+ Hashtable<Integer, Integer> mapNumContexts2NumDesc =
new Hashtable<Integer, Integer>();
-
+
Iterator itr = mapDescriptorToAllMethodContexts.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- HashSet<MethodContext> s = (HashSet<MethodContext>) me.getValue();
-
- Integer i = mapNumContexts2NumDesc.get( s.size() );
+ Map.Entry me = (Map.Entry)itr.next();
+ HashSet<MethodContext> s = (HashSet<MethodContext>)me.getValue();
+
+ Integer i = mapNumContexts2NumDesc.get(s.size() );
if( i == null ) {
- i = new Integer( 0 );
+ i = new Integer(0);
}
- mapNumContexts2NumDesc.put( s.size(), i + 1 );
- }
+ mapNumContexts2NumDesc.put(s.size(), i + 1);
+ }
String s = "";
int total = 0;
itr = mapNumContexts2NumDesc.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
+ Map.Entry me = (Map.Entry)itr.next();
Integer c0 = (Integer) me.getKey();
Integer d0 = (Integer) me.getValue();
total += d0;
- s += String.format( "%4d methods had %4d unique alias contexts.\n", d0, c0 );
+ s += String.format("%4d methods had %4d unique alias contexts.\n", d0, c0);
}
- s += String.format( "\n%4d total methods analayzed.\n", total );
+ s += String.format("\n%4d total methods analayzed.\n", total);
return s;
}
- private int numMethodsAnalyzed() {
+ private int numMethodsAnalyzed() {
return descriptorsToAnalyze.size();
}
-
- // insert a call to debugSnapshot() somewhere in the analysis
+
+ // insert a call to debugSnapshot() somewhere in the analysis
// to get successive captures of the analysis state
boolean takeDebugSnapshots = false;
String mcDescSymbolDebug = "setRoute";
++debugCounter;
if( debugCounter > numStartCountReport &&
- freqCountReport > 0 &&
+ freqCountReport > 0 &&
debugCounter % freqCountReport == 0 ) {
System.out.println(" @@@ debug counter = "+debugCounter);
}
}
try {
og.writeGraph(graphName,
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // show back edges to confirm graph validity
+ false, // show parameter indices (unmaintained!)
+ true, // hide subset reachability states
+ true); // hide edge taints
} catch( Exception e ) {
System.out.println("Error writing debug capture.");
System.exit(0);
System.exit(0);
}
}
-
- public MethodEffectsAnalysis getMethodEffectsAnalysis(){
- return meAnalysis;
+
+ public MethodEffectsAnalysis getMethodEffectsAnalysis() {
+ return meAnalysis;
}
-
- public OwnershipGraph getOwnvershipGraphByMethodContext(MethodContext mc){
- return mapMethodContextToCompleteOwnershipGraph.get(mc);
+
+ public OwnershipGraph getOwnvershipGraphByMethodContext(MethodContext mc) {
+ return mapMethodContextToCompleteOwnershipGraph.get(mc);
}
-
- public HashSet<MethodContext> getAllMethodContextSetByDescriptor(Descriptor d){
- return mapDescriptorToAllMethodContexts.get(d);
+
+ public HashSet<MethodContext> getAllMethodContextSetByDescriptor(Descriptor d) {
+ return mapDescriptorToAllMethodContexts.get(d);
}
-
- public MethodContext getCalleeMethodContext(MethodContext callerMC, FlatCall fc){
-
- Hashtable<FlatNode, OwnershipGraph> table=mapMethodContextToFlatNodeOwnershipGraph.get(callerMC);
-
- // merge previous ownership graph to calculate corresponding method context
- OwnershipGraph mergeOG = new OwnershipGraph();
-
- for(int i=0;i<fc.numPrev();i++){
- FlatNode prevNode=fc.getPrev(i);
- if(prevNode!=null){
- OwnershipGraph prevOG=table.get(prevNode);
- mergeOG.merge(prevOG);
- }
- }
-
- MethodDescriptor md=fc.getMethod();
- FlatMethod flatm = state.getMethodFlat(md);
- Set<Integer> aliasedParamIndices = mergeOG.calculateAliasedParamSet(fc, md.isStatic(), flatm);
- MethodContext calleeMC = new MethodContext( md, aliasedParamIndices );
-
- return calleeMC;
+
+ public MethodContext getCalleeMethodContext(MethodContext callerMC, FlatCall fc) {
+
+ Hashtable<FlatNode, OwnershipGraph> table=mapMethodContextToFlatNodeOwnershipGraph.get(callerMC);
+
+ // merge previous ownership graph to calculate corresponding method context
+ OwnershipGraph mergeOG = new OwnershipGraph();
+
+ for(int i=0; i<fc.numPrev(); i++) {
+ FlatNode prevNode=fc.getPrev(i);
+ if(prevNode!=null) {
+ OwnershipGraph prevOG=table.get(prevNode);
+ mergeOG.merge(prevOG);
+ }
+ }
+
+ MethodDescriptor md=fc.getMethod();
+ FlatMethod flatm = state.getMethodFlat(md);
+ Set<Integer> aliasedParamIndices = mergeOG.calculateAliasedParamSet(fc, md.isStatic(), flatm);
+ MethodContext calleeMC = new MethodContext(md, aliasedParamIndices);
+
+ return calleeMC;
}
-
-
+
+
}
protected static final boolean DISABLE_STRONG_UPDATES = false;
protected static final boolean DISABLE_GLOBAL_SWEEP = false;
- protected static int allocationDepth = -1;
+ protected static int allocationDepth = -1;
protected static TypeUtil typeUtil = null;
- protected static boolean debugCallMap = false;
- protected static int debugCallMapCount = 0;
- protected static String debugCallee = null;
- protected static String debugCaller = null;
+ protected static boolean debugCallMap = false;
+ protected static int debugCallMapCount = 0;
+ protected static String debugCallee = null;
+ protected static String debugCaller = null;
// there was already one other very similar reason
// for traversing heap nodes that is no longer needed
// actions to take during the traversal
protected static final int VISIT_HRN_WRITE_FULL = 0;
- protected static final String qString = new String( "Q_spec_" );
- protected static final String rString = new String( "R_spec_" );
- protected static final String blobString = new String( "_AliasBlob___" );
-
- protected static final TempDescriptor tdReturn = new TempDescriptor( "_Return___" );
- protected static final TempDescriptor tdAliasBlob = new TempDescriptor( blobString );
-
- protected static final TokenTupleSet ttsEmpty = new TokenTupleSet().makeCanonical();
+ protected static final String qString = new String("Q_spec_");
+ protected static final String rString = new String("R_spec_");
+ protected static final String blobString = new String("_AliasBlob___");
+
+ protected static final TempDescriptor tdReturn = new TempDescriptor("_Return___");
+ protected static final TempDescriptor tdAliasBlob = new TempDescriptor(blobString);
+
+ protected static final TokenTupleSet ttsEmpty = new TokenTupleSet().makeCanonical();
protected static final ReachabilitySet rsEmpty = new ReachabilitySet().makeCanonical();
- protected static final ReachabilitySet rsWttsEmpty = new ReachabilitySet( ttsEmpty ).makeCanonical();
+ protected static final ReachabilitySet rsWttsEmpty = new ReachabilitySet(ttsEmpty).makeCanonical();
// add a bogus entry with the identity rule for easy rewrite
// of new callee nodes and edges, doesn't belong to any parameter
protected static final int bogusParamIndexInt = -2;
- protected static final Integer bogusID = new Integer( bogusParamIndexInt );
- protected static final Integer bogusIndex = new Integer( bogusParamIndexInt );
- protected static final TokenTuple bogusToken = new TokenTuple( bogusID, true, TokenTuple.ARITY_ONE ).makeCanonical();
- protected static final TokenTuple bogusTokenPlus = new TokenTuple( bogusID, true, TokenTuple.ARITY_ONEORMORE ).makeCanonical();
- protected static final TokenTuple bogusTokenStar = new TokenTuple( bogusID, true, TokenTuple.ARITY_ZEROORMORE ).makeCanonical();
+ protected static final Integer bogusID = new Integer(bogusParamIndexInt);
+ protected static final Integer bogusIndex = new Integer(bogusParamIndexInt);
+ protected static final TokenTuple bogusToken = new TokenTuple(bogusID, true, TokenTuple.ARITY_ONE).makeCanonical();
+ protected static final TokenTuple bogusTokenPlus = new TokenTuple(bogusID, true, TokenTuple.ARITY_ONEORMORE).makeCanonical();
+ protected static final TokenTuple bogusTokenStar = new TokenTuple(bogusID, true, TokenTuple.ARITY_ZEROORMORE).makeCanonical();
protected static final ReachabilitySet rsIdentity =
- new ReachabilitySet( new TokenTupleSet( bogusToken ).makeCanonical() ).makeCanonical();
+ new ReachabilitySet(new TokenTupleSet(bogusToken).makeCanonical() ).makeCanonical();
public Hashtable<Integer, HeapRegionNode> id2hrn;
// to know the access paths that allowed it, to prune edges when
// mapping them back into the caller--an access path must appear
public Hashtable< TempDescriptor, Set<AccessPath> > temp2accessPaths;
-
+
public Hashtable< String, HeapRegionNode > gid2hrn;
td2ln = new Hashtable<TempDescriptor, LabelNode >();
idPrimary2paramIndexSet = new Hashtable<Integer, Set<Integer> >();
paramIndex2idPrimary = new Hashtable<Integer, Integer >();
- idSecondary2paramIndexSet = new Hashtable<Integer, Set<Integer> >();
+ idSecondary2paramIndexSet = new Hashtable<Integer, Set<Integer> >();
paramIndex2idSecondary = new Hashtable<Integer, Integer >();
paramIndex2tdQ = new Hashtable<Integer, TempDescriptor>();
paramIndex2tdR = new Hashtable<Integer, TempDescriptor>();
allocationSites = new HashSet <AllocationSite>();
- outOfScopeTemps = new HashSet<TempDescriptor>();
- outOfScopeLabels = new HashSet<LabelNode>();
- parameterTemps = new HashSet<TempDescriptor>();
+ outOfScopeTemps = new HashSet<TempDescriptor>();
+ outOfScopeLabels = new HashSet<LabelNode>();
+ parameterTemps = new HashSet<TempDescriptor>();
parameterLabels = new HashSet<LabelNode>();
- outOfScopeTemps.add( tdReturn );
- outOfScopeLabels.add( getLabelNodeFromTemp( tdReturn ) );
+ outOfScopeTemps.add(tdReturn);
+ outOfScopeLabels.add(getLabelNodeFromTemp(tdReturn) );
temp2accessPaths = new Hashtable< TempDescriptor, Set<AccessPath> >();
-
- gid2hrn =new Hashtable< String, HeapRegionNode >();
+
+ gid2hrn =new Hashtable< String, HeapRegionNode >();
}
createNewHeapRegionNode(Integer id,
boolean isSingleObject,
boolean isNewSummary,
- boolean isFlagged,
+ boolean isFlagged,
boolean isParameter,
- TypeDescriptor type,
+ TypeDescriptor type,
AllocationSite allocSite,
ReachabilitySet alpha,
String description,
).makeCanonical();
}
}
-
+
HeapRegionNode hrn = new HeapRegionNode(id,
isSingleObject,
markForAnalysis,
- isParameter,
+ isParameter,
isNewSummary,
- typeToUse,
+ typeToUse,
allocSite,
alpha,
description,
protected void removeReferenceEdge(ReferenceEdge e) {
removeReferenceEdge(e.getSrc(),
- e.getDst(),
- e.getType(),
- e.getField() );
+ e.getDst(),
+ e.getType(),
+ e.getField() );
}
protected void removeReferenceEdge(OwnershipNode referencer,
HeapRegionNode referencee,
TypeDescriptor type,
- String field) {
+ String field) {
assert referencer != null;
assert referencee != null;
-
+
ReferenceEdge edge = referencer.getReferenceTo(referencee,
type,
- field);
+ field);
assert edge != null;
assert edge == referencee.getReferenceFrom(referencer,
type,
- field);
-
+ field);
+
// int oldTaint=edge.getTaintIdentifier();
// if(referencer instanceof HeapRegionNode){
-// depropagateTaintIdentifier((HeapRegionNode)referencer,oldTaint,new HashSet<HeapRegionNode>());
+// depropagateTaintIdentifier((HeapRegionNode)referencer,oldTaint,new HashSet<HeapRegionNode>());
// }
referencer.removeReferencee(edge);
protected void clearReferenceEdgesFrom(OwnershipNode referencer,
TypeDescriptor type,
- String field,
+ String field,
boolean removeAll) {
assert referencer != null;
while( i.hasNext() ) {
ReferenceEdge edge = i.next();
- if( removeAll ||
- (edge.typeEquals( type ) && edge.fieldEquals( field ))
- ){
+ if( removeAll ||
+ (edge.typeEquals(type) && edge.fieldEquals(field))
+ ) {
HeapRegionNode referencee = edge.getDst();
-
+
removeReferenceEdge(referencer,
referencee,
edge.getType(),
- edge.getField() );
+ edge.getField() );
}
}
}
protected void clearReferenceEdgesTo(HeapRegionNode referencee,
- TypeDescriptor type,
- String field,
+ TypeDescriptor type,
+ String field,
boolean removeAll) {
assert referencee != null;
while( i.hasNext() ) {
ReferenceEdge edge = i.next();
- if( removeAll ||
- (edge.typeEquals( type ) && edge.fieldEquals( field ))
- ){
+ if( removeAll ||
+ (edge.typeEquals(type) && edge.fieldEquals(field))
+ ) {
OwnershipNode referencer = edge.getSrc();
removeReferenceEdge(referencer,
referencee,
edge.getType(),
- edge.getField() );
+ edge.getField() );
}
}
}
//
////////////////////////////////////////////////////
- public void nullifyDeadVars( Set<TempDescriptor> liveIn ) {
+ public void nullifyDeadVars(Set<TempDescriptor> liveIn) {
// make a set of the temps that are out of scope, don't
// consider them when nullifying dead in-scope variables
Set<TempDescriptor> outOfScope = new HashSet<TempDescriptor>();
- outOfScope.add( tdReturn );
- outOfScope.add( tdAliasBlob );
- outOfScope.addAll( paramIndex2tdQ.values() );
- outOfScope.addAll( paramIndex2tdR.values() );
-
+ outOfScope.add(tdReturn);
+ outOfScope.add(tdAliasBlob);
+ outOfScope.addAll(paramIndex2tdQ.values() );
+ outOfScope.addAll(paramIndex2tdR.values() );
+
Iterator varItr = td2ln.entrySet().iterator();
while( varItr.hasNext() ) {
- Map.Entry me = (Map.Entry) varItr.next();
+ Map.Entry me = (Map.Entry)varItr.next();
TempDescriptor td = (TempDescriptor) me.getKey();
- LabelNode ln = (LabelNode) me.getValue();
+ LabelNode ln = (LabelNode) me.getValue();
// if this variable is not out-of-scope or live
// in graph, nullify its references to anything
- if( !outOfScope.contains( td ) &&
- !liveIn.contains( td )
- ) {
- clearReferenceEdgesFrom( ln, null, null, true );
+ if( !outOfScope.contains(td) &&
+ !liveIn.contains(td)
+ ) {
+ clearReferenceEdgesFrom(ln, null, null, true);
}
}
}
- public void assignTempXEqualToTempY( TempDescriptor x,
- TempDescriptor y ) {
- assignTempXEqualToCastedTempY( x, y, null );
+ public void assignTempXEqualToTempY(TempDescriptor x,
+ TempDescriptor y) {
+ assignTempXEqualToCastedTempY(x, y, null);
}
- public void assignTempXEqualToCastedTempY( TempDescriptor x,
- TempDescriptor y,
- TypeDescriptor tdCast ) {
+ public void assignTempXEqualToCastedTempY(TempDescriptor x,
+ TempDescriptor y,
+ TypeDescriptor tdCast) {
+
+ LabelNode lnX = getLabelNodeFromTemp(x);
+ LabelNode lnY = getLabelNodeFromTemp(y);
- LabelNode lnX = getLabelNodeFromTemp( x );
- LabelNode lnY = getLabelNodeFromTemp( y );
-
- clearReferenceEdgesFrom( lnX, null, null, true );
+ clearReferenceEdgesFrom(lnX, null, null, true);
// note it is possible that the types of temps in the
// flat node to analyze will reveal that some typed
Iterator<ReferenceEdge> itrYhrn = lnY.iteratorToReferencees();
while( itrYhrn.hasNext() ) {
- ReferenceEdge edgeY = itrYhrn.next();
+ ReferenceEdge edgeY = itrYhrn.next();
HeapRegionNode referencee = edgeY.getDst();
- ReferenceEdge edgeNew = edgeY.copy();
+ ReferenceEdge edgeNew = edgeY.copy();
- if( !isSuperiorType( x.getType(), edgeY.getType() ) ) {
- impossibleEdges.add( edgeY );
+ if( !isSuperiorType(x.getType(), edgeY.getType() ) ) {
+ impossibleEdges.add(edgeY);
continue;
}
- edgeNew.setSrc( lnX );
-
- edgeNew.setType( mostSpecificType( y.getType(),
- tdCast,
- edgeY.getType(),
- referencee.getType()
- )
- );
+ edgeNew.setSrc(lnX);
- edgeNew.setField( null );
+ edgeNew.setType(mostSpecificType(y.getType(),
+ tdCast,
+ edgeY.getType(),
+ referencee.getType()
+ )
+ );
- addReferenceEdge( lnX, referencee, edgeNew );
+ edgeNew.setField(null);
+
+ addReferenceEdge(lnX, referencee, edgeNew);
}
Iterator<ReferenceEdge> itrImp = impossibleEdges.iterator();
while( itrImp.hasNext() ) {
ReferenceEdge edgeImp = itrImp.next();
- removeReferenceEdge( edgeImp );
+ removeReferenceEdge(edgeImp);
}
}
- public void assignTempXEqualToTempYFieldF( TempDescriptor x,
- TempDescriptor y,
- FieldDescriptor f ) {
- LabelNode lnX = getLabelNodeFromTemp( x );
- LabelNode lnY = getLabelNodeFromTemp( y );
+ public void assignTempXEqualToTempYFieldF(TempDescriptor x,
+ TempDescriptor y,
+ FieldDescriptor f) {
+ LabelNode lnX = getLabelNodeFromTemp(x);
+ LabelNode lnY = getLabelNodeFromTemp(y);
- clearReferenceEdgesFrom( lnX, null, null, true );
+ clearReferenceEdgesFrom(lnX, null, null, true);
// note it is possible that the types of temps in the
// flat node to analyze will reveal that some typed
Iterator<ReferenceEdge> itrYhrn = lnY.iteratorToReferencees();
while( itrYhrn.hasNext() ) {
- ReferenceEdge edgeY = itrYhrn.next();
- HeapRegionNode hrnY = edgeY.getDst();
+ ReferenceEdge edgeY = itrYhrn.next();
+ HeapRegionNode hrnY = edgeY.getDst();
ReachabilitySet betaY = edgeY.getBeta();
Iterator<ReferenceEdge> itrHrnFhrn = hrnY.iteratorToReferencees();
while( itrHrnFhrn.hasNext() ) {
- ReferenceEdge edgeHrn = itrHrnFhrn.next();
- HeapRegionNode hrnHrn = edgeHrn.getDst();
+ ReferenceEdge edgeHrn = itrHrnFhrn.next();
+ HeapRegionNode hrnHrn = edgeHrn.getDst();
ReachabilitySet betaHrn = edgeHrn.getBeta();
// prune edges that are not a matching field
- if( edgeHrn.getType() != null &&
- !edgeHrn.getField().equals( f.getSymbol() )
+ if( edgeHrn.getType() != null &&
+ !edgeHrn.getField().equals(f.getSymbol() )
) {
continue;
}
// check for impossible edges
- if( !isSuperiorType( x.getType(), edgeHrn.getType() ) ) {
- impossibleEdges.add( edgeHrn );
+ if( !isSuperiorType(x.getType(), edgeHrn.getType() ) ) {
+ impossibleEdges.add(edgeHrn);
continue;
}
TypeDescriptor tdNewEdge =
- mostSpecificType( edgeHrn.getType(),
- hrnHrn.getType()
- );
-
- ReferenceEdge edgeNew = new ReferenceEdge( lnX,
- hrnHrn,
- tdNewEdge,
- null,
- false,
- betaY.intersection( betaHrn )
- );
-
+ mostSpecificType(edgeHrn.getType(),
+ hrnHrn.getType()
+ );
+
+ ReferenceEdge edgeNew = new ReferenceEdge(lnX,
+ hrnHrn,
+ tdNewEdge,
+ null,
+ false,
+ betaY.intersection(betaHrn)
+ );
+
int newTaintIdentifier=getTaintIdentifierFromHRN(hrnHrn);
edgeNew.setTaintIdentifier(newTaintIdentifier);
-
- addReferenceEdge( lnX, hrnHrn, edgeNew );
+
+ addReferenceEdge(lnX, hrnHrn, edgeNew);
}
}
Iterator<ReferenceEdge> itrImp = impossibleEdges.iterator();
while( itrImp.hasNext() ) {
ReferenceEdge edgeImp = itrImp.next();
- removeReferenceEdge( edgeImp );
+ removeReferenceEdge(edgeImp);
}
// anytime you might remove edges between heap regions
}
- public void assignTempXFieldFEqualToTempY( TempDescriptor x,
- FieldDescriptor f,
- TempDescriptor y ) {
+ public void assignTempXFieldFEqualToTempY(TempDescriptor x,
+ FieldDescriptor f,
+ TempDescriptor y) {
- LabelNode lnX = getLabelNodeFromTemp( x );
- LabelNode lnY = getLabelNodeFromTemp( y );
+ LabelNode lnX = getLabelNodeFromTemp(x);
+ LabelNode lnY = getLabelNodeFromTemp(y);
HashSet<HeapRegionNode> nodesWithNewAlpha = new HashSet<HeapRegionNode>();
HashSet<ReferenceEdge> edgesWithNewBeta = new HashSet<ReferenceEdge>();
ReferenceEdge edgeX = itrXhrn.next();
HeapRegionNode hrnX = edgeX.getDst();
- // we can do a strong update here if one of two cases holds
+ // we can do a strong update here if one of two cases holds
if( f != null &&
- f != OwnershipAnalysis.getArrayField( f.getType() ) &&
- ( (hrnX.getNumReferencers() == 1) || // case 1
- (hrnX.isSingleObject() && lnX.getNumReferencees() == 1) // case 2
- )
- ) {
- if( !DISABLE_STRONG_UPDATES ) {
- strongUpdate = true;
- clearReferenceEdgesFrom( hrnX, f.getType(), f.getSymbol(), false );
- }
+ f != OwnershipAnalysis.getArrayField(f.getType() ) &&
+ ( (hrnX.getNumReferencers() == 1) || // case 1
+ (hrnX.isSingleObject() && lnX.getNumReferencees() == 1) // case 2
+ )
+ ) {
+ if( !DISABLE_STRONG_UPDATES ) {
+ strongUpdate = true;
+ clearReferenceEdgesFrom(hrnX, f.getType(), f.getSymbol(), false);
+ }
}
}
-
+
// then do all token propagation
itrXhrn = lnX.iteratorToReferencees();
while( itrXhrn.hasNext() ) {
- ReferenceEdge edgeX = itrXhrn.next();
- HeapRegionNode hrnX = edgeX.getDst();
+ ReferenceEdge edgeX = itrXhrn.next();
+ HeapRegionNode hrnX = edgeX.getDst();
ReachabilitySet betaX = edgeX.getBeta();
- ReachabilitySet R = hrnX.getAlpha().intersection( edgeX.getBeta() );
+ ReachabilitySet R = hrnX.getAlpha().intersection(edgeX.getBeta() );
Iterator<ReferenceEdge> itrYhrn = lnY.iteratorToReferencees();
while( itrYhrn.hasNext() ) {
- ReferenceEdge edgeY = itrYhrn.next();
- HeapRegionNode hrnY = edgeY.getDst();
+ ReferenceEdge edgeY = itrYhrn.next();
+ HeapRegionNode hrnY = edgeY.getDst();
ReachabilitySet O = edgeY.getBeta();
// check for impossible edges
- if( !isSuperiorType( f.getType(), edgeY.getType() ) ) {
- impossibleEdges.add( edgeY );
+ if( !isSuperiorType(f.getType(), edgeY.getType() ) ) {
+ impossibleEdges.add(edgeY);
continue;
}
// propagate tokens over nodes starting from hrnSrc, and it will
// take care of propagating back up edges from any touched nodes
- ChangeTupleSet Cy = O.unionUpArityToChangeSet( R );
- propagateTokensOverNodes( hrnY, Cy, nodesWithNewAlpha, edgesWithNewBeta );
+ ChangeTupleSet Cy = O.unionUpArityToChangeSet(R);
+ propagateTokensOverNodes(hrnY, Cy, nodesWithNewAlpha, edgesWithNewBeta);
// then propagate back just up the edges from hrn
ChangeTupleSet Cx = R.unionUpArityToChangeSet(O);
- HashSet<ReferenceEdge> todoEdges = new HashSet<ReferenceEdge>();
+ HashSet<ReferenceEdge> todoEdges = new HashSet<ReferenceEdge>();
Hashtable<ReferenceEdge, ChangeTupleSet> edgePlannedChanges =
new Hashtable<ReferenceEdge, ChangeTupleSet>();
Iterator<ReferenceEdge> referItr = hrnX.iteratorToReferencers();
while( referItr.hasNext() ) {
ReferenceEdge edgeUpstream = referItr.next();
- todoEdges.add( edgeUpstream );
- edgePlannedChanges.put( edgeUpstream, Cx );
+ todoEdges.add(edgeUpstream);
+ edgePlannedChanges.put(edgeUpstream, Cx);
}
- propagateTokensOverEdges( todoEdges,
- edgePlannedChanges,
- edgesWithNewBeta );
+ propagateTokensOverEdges(todoEdges,
+ edgePlannedChanges,
+ edgesWithNewBeta);
}
}
while( itrXhrn.hasNext() ) {
ReferenceEdge edgeX = itrXhrn.next();
HeapRegionNode hrnX = edgeX.getDst();
-
+
Iterator<ReferenceEdge> itrYhrn = lnY.iteratorToReferencees();
while( itrYhrn.hasNext() ) {
ReferenceEdge edgeY = itrYhrn.next();
// skip impossible edges here, we already marked them
// when computing reachability propagations above
- if( !isSuperiorType( f.getType(), edgeY.getType() ) ) {
+ if( !isSuperiorType(f.getType(), edgeY.getType() ) ) {
continue;
}
-
+
// prepare the new reference edge hrnX.f -> hrnY
- TypeDescriptor tdNewEdge =
- mostSpecificType( y.getType(),
- edgeY.getType(),
- hrnY.getType()
- );
-
- ReferenceEdge edgeNew = new ReferenceEdge( hrnX,
- hrnY,
- tdNewEdge,
- f.getSymbol(),
- false,
- edgeY.getBeta().pruneBy( hrnX.getAlpha() )
- );
+ TypeDescriptor tdNewEdge =
+ mostSpecificType(y.getType(),
+ edgeY.getType(),
+ hrnY.getType()
+ );
+
+ ReferenceEdge edgeNew = new ReferenceEdge(hrnX,
+ hrnY,
+ tdNewEdge,
+ f.getSymbol(),
+ false,
+ edgeY.getBeta().pruneBy(hrnX.getAlpha() )
+ );
// look to see if an edge with same field exists
// and merge with it, otherwise just add the edge
- ReferenceEdge edgeExisting = hrnX.getReferenceTo( hrnY,
- tdNewEdge,
- f.getSymbol() );
-
+ ReferenceEdge edgeExisting = hrnX.getReferenceTo(hrnY,
+ tdNewEdge,
+ f.getSymbol() );
+
if( edgeExisting != null ) {
edgeExisting.setBeta(
- edgeExisting.getBeta().union( edgeNew.getBeta() )
- );
+ edgeExisting.getBeta().union(edgeNew.getBeta() )
+ );
- if((!hrnX.isParameter() && hrnY.isParameter()) || ( hrnX.isParameter() && hrnY.isParameter())){
+ if((!hrnX.isParameter() && hrnY.isParameter()) || ( hrnX.isParameter() && hrnY.isParameter())) {
int newTaintIdentifier=getTaintIdentifierFromHRN(hrnY);
edgeExisting.unionTaintIdentifier(newTaintIdentifier);
}
// a new edge here cannot be reflexive, so existing will
// always be also not reflexive anymore
- edgeExisting.setIsInitialParam( false );
+ edgeExisting.setIsInitialParam(false);
} else {
-
- if((!hrnX.isParameter() && hrnY.isParameter()) || ( hrnX.isParameter() && hrnY.isParameter())){
+
+ if((!hrnX.isParameter() && hrnY.isParameter()) || ( hrnX.isParameter() && hrnY.isParameter())) {
int newTaintIdentifier=getTaintIdentifierFromHRN(hrnY);
edgeNew.setTaintIdentifier(newTaintIdentifier);
}
//currently, taint isn't propagated through the chain of refrences
//propagateTaintIdentifier(hrnX,newTaintIdentifier,new HashSet<HeapRegionNode>());
-
- addReferenceEdge( hrnX, hrnY, edgeNew );
+
+ addReferenceEdge(hrnX, hrnY, edgeNew);
}
}
}
Iterator<ReferenceEdge> itrImp = impossibleEdges.iterator();
while( itrImp.hasNext() ) {
ReferenceEdge edgeImp = itrImp.next();
- removeReferenceEdge( edgeImp );
+ removeReferenceEdge(edgeImp);
}
// if there was a strong update, make sure to improve
// reachability with a global sweep
- if( strongUpdate || !impossibleEdges.isEmpty() ) {
+ if( strongUpdate || !impossibleEdges.isEmpty() ) {
if( !DISABLE_GLOBAL_SWEEP ) {
- globalSweep();
+ globalSweep();
}
}
}
// for the primary parameter, and a multiple-object heap
// region for the secondary objects reachable through the
// primary object, if necessary
- public void assignTempEqualToParamAlloc( TempDescriptor td,
- boolean isTask,
- Integer paramIndex, FlatMethod fm ) {
+ public void assignTempEqualToParamAlloc(TempDescriptor td,
+ boolean isTask,
+ Integer paramIndex, FlatMethod fm) {
assert td != null;
-
+
TypeDescriptor typeParam = td.getType();
assert typeParam != null;
// affect reachability
TypeDescriptor typeDeref = typeParam.dereference();
if( !typeDeref.isImmutable() || typeDeref.isArray() ) {
- primary2secondaryFields.add(
- OwnershipAnalysis.getArrayField( typeDeref )
- );
+ primary2secondaryFields.add(
+ OwnershipAnalysis.getArrayField(typeDeref)
+ );
createSecondaryRegion = true;
// also handle a special case where an array of objects
// can point back to the array, which is an object!
- if( typeParam.toPrettyString().equals( "Object[]" ) &&
- typeDeref.toPrettyString().equals( "Object" ) ) {
+ if( typeParam.toPrettyString().equals("Object[]") &&
+ typeDeref.toPrettyString().equals("Object") ) {
- primary2primaryFields.add(
- OwnershipAnalysis.getArrayField( typeDeref )
- );
+ primary2primaryFields.add(
+ OwnershipAnalysis.getArrayField(typeDeref)
+ );
}
}
}
Iterator fieldItr = cd.getFields();
while( fieldItr.hasNext() ) {
-
+
FieldDescriptor fd = (FieldDescriptor) fieldItr.next();
TypeDescriptor typeField = fd.getType();
- assert typeField != null;
-
+ assert typeField != null;
+
if( !typeField.isImmutable() || typeField.isArray() ) {
- primary2secondaryFields.add( fd );
+ primary2secondaryFields.add(fd);
createSecondaryRegion = true;
}
-
- if( typeUtil.isSuperorType( typeField, typeParam ) ) {
- primary2primaryFields.add( fd );
+
+ if( typeUtil.isSuperorType(typeField, typeParam) ) {
+ primary2primaryFields.add(fd);
}
}
cd = cd.getSuperDesc();
}
}
-
+
// now build everything we need
- LabelNode lnParam = getLabelNodeFromTemp( td );
- HeapRegionNode hrnPrimary = createNewHeapRegionNode( null, // id or null to generate a new one
- true, // single object?
- false, // summary?
- false, // flagged?
- true, // is a parameter?
- typeParam, // type
- null, // allocation site
- null, // reachability set
- "param"+paramIndex+" obj",
- generateUniqueIdentifier(fm,paramIndex,"P"));
-
- parameterTemps.add( td );
- parameterLabels.add( lnParam );
+ LabelNode lnParam = getLabelNodeFromTemp(td);
+ HeapRegionNode hrnPrimary = createNewHeapRegionNode(null, // id or null to generate a new one
+ true, // single object?
+ false, // summary?
+ false, // flagged?
+ true, // is a parameter?
+ typeParam, // type
+ null, // allocation site
+ null, // reachability set
+ "param"+paramIndex+" obj",
+ generateUniqueIdentifier(fm,paramIndex,"P"));
+
+ parameterTemps.add(td);
+ parameterLabels.add(lnParam);
// this is a non-program-accessible label that picks up beta
// info to be used for fixing a caller of this method
- TempDescriptor tdParamQ = new TempDescriptor( td+qString );
- paramIndex2tdQ.put( paramIndex, tdParamQ );
- LabelNode lnParamQ = getLabelNodeFromTemp( tdParamQ );
+ TempDescriptor tdParamQ = new TempDescriptor(td+qString);
+ paramIndex2tdQ.put(paramIndex, tdParamQ);
+ LabelNode lnParamQ = getLabelNodeFromTemp(tdParamQ);
- outOfScopeTemps.add( tdParamQ );
- outOfScopeLabels.add( lnParamQ );
+ outOfScopeTemps.add(tdParamQ);
+ outOfScopeLabels.add(lnParamQ);
// keep track of heap regions that were created for
// parameter labels, the index of the parameter they
// are for is important when resolving method calls
Integer newPrimaryID = hrnPrimary.getID();
- assert !idPrimary2paramIndexSet.containsKey( newPrimaryID );
+ assert !idPrimary2paramIndexSet.containsKey(newPrimaryID);
Set<Integer> s = new HashSet<Integer>();
- s.add( paramIndex );
- idPrimary2paramIndexSet.put( newPrimaryID, s );
- paramIndex2idPrimary.put( paramIndex, newPrimaryID );
-
- TokenTuple ttPrimary = new TokenTuple( newPrimaryID,
- false, // multi-object
- TokenTuple.ARITY_ONE ).makeCanonical();
-
-
+ s.add(paramIndex);
+ idPrimary2paramIndexSet.put(newPrimaryID, s);
+ paramIndex2idPrimary.put(paramIndex, newPrimaryID);
+
+ TokenTuple ttPrimary = new TokenTuple(newPrimaryID,
+ false, // multi-object
+ TokenTuple.ARITY_ONE).makeCanonical();
+
+
HeapRegionNode hrnSecondary = null;
- Integer newSecondaryID = null;
- TokenTuple ttSecondary = null;
+ Integer newSecondaryID = null;
+ TokenTuple ttSecondary = null;
TempDescriptor tdParamR = null;
- LabelNode lnParamR = null;
-
+ LabelNode lnParamR = null;
+
if( createSecondaryRegion ) {
- tdParamR = new TempDescriptor( td+rString );
- paramIndex2tdR.put( paramIndex, tdParamR );
- lnParamR = getLabelNodeFromTemp( tdParamR );
-
- outOfScopeTemps.add( tdParamR );
- outOfScopeLabels.add( lnParamR );
-
- hrnSecondary = createNewHeapRegionNode( null, // id or null to generate a new one
- false, // single object?
- false, // summary?
- false, // flagged?
- true, // is a parameter?
- null, // type
- null, // allocation site
- null, // reachability set
- "param"+paramIndex+" reachable",
- generateUniqueIdentifier(fm,paramIndex,"S"));
+ tdParamR = new TempDescriptor(td+rString);
+ paramIndex2tdR.put(paramIndex, tdParamR);
+ lnParamR = getLabelNodeFromTemp(tdParamR);
+
+ outOfScopeTemps.add(tdParamR);
+ outOfScopeLabels.add(lnParamR);
+
+ hrnSecondary = createNewHeapRegionNode(null, // id or null to generate a new one
+ false, // single object?
+ false, // summary?
+ false, // flagged?
+ true, // is a parameter?
+ null, // type
+ null, // allocation site
+ null, // reachability set
+ "param"+paramIndex+" reachable",
+ generateUniqueIdentifier(fm,paramIndex,"S"));
newSecondaryID = hrnSecondary.getID();
- assert !idSecondary2paramIndexSet.containsKey( newSecondaryID );
+ assert !idSecondary2paramIndexSet.containsKey(newSecondaryID);
Set<Integer> s2 = new HashSet<Integer>();
- s2.add( paramIndex );
- idSecondary2paramIndexSet.put( newSecondaryID, s2 );
- paramIndex2idSecondary.put( paramIndex, newSecondaryID );
-
-
- ttSecondary = new TokenTuple( newSecondaryID,
- true, // multi-object
- TokenTuple.ARITY_ONE ).makeCanonical();
+ s2.add(paramIndex);
+ idSecondary2paramIndexSet.put(newSecondaryID, s2);
+ paramIndex2idSecondary.put(paramIndex, newSecondaryID);
+
+
+ ttSecondary = new TokenTuple(newSecondaryID,
+ true, // multi-object
+ TokenTuple.ARITY_ONE).makeCanonical();
}
// use a beta that has everything and put it all over the
// parameter model, then use a global sweep later to fix
// it up, since parameters can have different shapes
- TokenTupleSet tts0 = new TokenTupleSet( ttPrimary ).makeCanonical();
+ TokenTupleSet tts0 = new TokenTupleSet(ttPrimary).makeCanonical();
ReachabilitySet betaSoup;
if( createSecondaryRegion ) {
- TokenTupleSet tts1 = new TokenTupleSet( ttSecondary ).makeCanonical();
- TokenTupleSet tts2 = new TokenTupleSet( ttPrimary ).makeCanonical().union( ttSecondary );
- betaSoup = ReachabilitySet.factory( tts0 ).union( tts1 ).union( tts2 );
+ TokenTupleSet tts1 = new TokenTupleSet(ttSecondary).makeCanonical();
+ TokenTupleSet tts2 = new TokenTupleSet(ttPrimary).makeCanonical().union(ttSecondary);
+ betaSoup = ReachabilitySet.factory(tts0).union(tts1).union(tts2);
} else {
- betaSoup = ReachabilitySet.factory( tts0 );
+ betaSoup = ReachabilitySet.factory(tts0);
}
ReferenceEdge edgeFromLabel =
- new ReferenceEdge( lnParam, // src
- hrnPrimary, // dst
- typeParam, // type
- null, // field
- false, // special param initial (not needed on label->node)
- betaSoup ); // reachability
+ new ReferenceEdge(lnParam, // src
+ hrnPrimary, // dst
+ typeParam, // type
+ null, // field
+ false, // special param initial (not needed on label->node)
+ betaSoup); // reachability
edgeFromLabel.tainedBy(paramIndex);
- addReferenceEdge( lnParam, hrnPrimary, edgeFromLabel );
+ addReferenceEdge(lnParam, hrnPrimary, edgeFromLabel);
ReferenceEdge edgeFromLabelQ =
- new ReferenceEdge( lnParamQ, // src
- hrnPrimary, // dst
- null, // type
- null, // field
- false, // special param initial (not needed on label->node)
- betaSoup ); // reachability
+ new ReferenceEdge(lnParamQ, // src
+ hrnPrimary, // dst
+ null, // type
+ null, // field
+ false, // special param initial (not needed on label->node)
+ betaSoup); // reachability
edgeFromLabelQ.tainedBy(paramIndex);
- addReferenceEdge( lnParamQ, hrnPrimary, edgeFromLabelQ );
-
+ addReferenceEdge(lnParamQ, hrnPrimary, edgeFromLabelQ);
+
ReferenceEdge edgeSecondaryReflexive;
if( createSecondaryRegion ) {
edgeSecondaryReflexive =
- new ReferenceEdge( hrnSecondary, // src
- hrnSecondary, // dst
- null, // match all types
- null, // match all fields
- true, // special param initial
- betaSoup ); // reachability
- addReferenceEdge( hrnSecondary, hrnSecondary, edgeSecondaryReflexive );
+ new ReferenceEdge(hrnSecondary, // src
+ hrnSecondary, // dst
+ null, // match all types
+ null, // match all fields
+ true, // special param initial
+ betaSoup); // reachability
+ addReferenceEdge(hrnSecondary, hrnSecondary, edgeSecondaryReflexive);
ReferenceEdge edgeSecondary2Primary =
- new ReferenceEdge( hrnSecondary, // src
- hrnPrimary, // dst
- null, // match all types
- null, // match all fields
- true, // special param initial
- betaSoup ); // reachability
- addReferenceEdge( hrnSecondary, hrnPrimary, edgeSecondary2Primary );
+ new ReferenceEdge(hrnSecondary, // src
+ hrnPrimary, // dst
+ null, // match all types
+ null, // match all fields
+ true, // special param initial
+ betaSoup); // reachability
+ addReferenceEdge(hrnSecondary, hrnPrimary, edgeSecondary2Primary);
ReferenceEdge edgeFromLabelR =
- new ReferenceEdge( lnParamR, // src
- hrnSecondary, // dst
- null, // type
- null, // field
- false, // special param initial (not needed on label->node)
- betaSoup ); // reachability
+ new ReferenceEdge(lnParamR, // src
+ hrnSecondary, // dst
+ null, // type
+ null, // field
+ false, // special param initial (not needed on label->node)
+ betaSoup); // reachability
edgeFromLabelR.tainedBy(paramIndex);
- addReferenceEdge( lnParamR, hrnSecondary, edgeFromLabelR );
+ addReferenceEdge(lnParamR, hrnSecondary, edgeFromLabelR);
}
-
+
Iterator<FieldDescriptor> fieldItr = primary2primaryFields.iterator();
while( fieldItr.hasNext() ) {
FieldDescriptor fd = fieldItr.next();
ReferenceEdge edgePrimaryReflexive =
- new ReferenceEdge( hrnPrimary, // src
- hrnPrimary, // dst
- fd.getType(), // type
- fd.getSymbol(), // field
- true, // special param initial
- betaSoup ); // reachability
- addReferenceEdge( hrnPrimary, hrnPrimary, edgePrimaryReflexive );
+ new ReferenceEdge(hrnPrimary, // src
+ hrnPrimary, // dst
+ fd.getType(), // type
+ fd.getSymbol(), // field
+ true, // special param initial
+ betaSoup); // reachability
+ addReferenceEdge(hrnPrimary, hrnPrimary, edgePrimaryReflexive);
}
fieldItr = primary2secondaryFields.iterator();
FieldDescriptor fd = fieldItr.next();
ReferenceEdge edgePrimary2Secondary =
- new ReferenceEdge( hrnPrimary, // src
- hrnSecondary, // dst
- fd.getType(), // type
- fd.getSymbol(), // field
- true, // special param initial
- betaSoup ); // reachability
- addReferenceEdge( hrnPrimary, hrnSecondary, edgePrimary2Secondary );
+ new ReferenceEdge(hrnPrimary, // src
+ hrnSecondary, // dst
+ fd.getType(), // type
+ fd.getSymbol(), // field
+ true, // special param initial
+ betaSoup); // reachability
+ addReferenceEdge(hrnPrimary, hrnSecondary, edgePrimary2Secondary);
}
}
public void makeAliasedParamHeapRegionNode(FlatMethod fm) {
- LabelNode lnBlob = getLabelNodeFromTemp( tdAliasBlob );
-
- outOfScopeTemps.add( tdAliasBlob );
- outOfScopeLabels.add( lnBlob );
-
- HeapRegionNode hrn = createNewHeapRegionNode( null, // id or null to generate a new one
- false, // single object?
- false, // summary?
- false, // flagged?
- true, // is a parameter?
- null, // type
- null, // allocation site
- null, // reachability set
- "aliasedParams",
- generateUniqueIdentifier(fm,0,"A"));
-
-
- ReachabilitySet beta = new ReachabilitySet( new TokenTuple( hrn.getID(),
- true,
- TokenTuple.ARITY_ONE).makeCanonical()
- ).makeCanonical();
-
+ LabelNode lnBlob = getLabelNodeFromTemp(tdAliasBlob);
+
+ outOfScopeTemps.add(tdAliasBlob);
+ outOfScopeLabels.add(lnBlob);
+
+ HeapRegionNode hrn = createNewHeapRegionNode(null, // id or null to generate a new one
+ false, // single object?
+ false, // summary?
+ false, // flagged?
+ true, // is a parameter?
+ null, // type
+ null, // allocation site
+ null, // reachability set
+ "aliasedParams",
+ generateUniqueIdentifier(fm,0,"A"));
+
+
+ ReachabilitySet beta = new ReachabilitySet(new TokenTuple(hrn.getID(),
+ true,
+ TokenTuple.ARITY_ONE).makeCanonical()
+ ).makeCanonical();
+
ReferenceEdge edgeFromLabel =
- new ReferenceEdge( lnBlob, hrn, null, null, false, beta );
+ new ReferenceEdge(lnBlob, hrn, null, null, false, beta);
ReferenceEdge edgeReflexive =
- new ReferenceEdge( hrn, hrn, null, null, true, beta );
-
- addReferenceEdge( lnBlob, hrn, edgeFromLabel );
- addReferenceEdge( hrn, hrn, edgeReflexive );
+ new ReferenceEdge(hrn, hrn, null, null, true, beta);
+
+ addReferenceEdge(lnBlob, hrn, edgeFromLabel);
+ addReferenceEdge(hrn, hrn, edgeReflexive);
}
- public void assignTempEqualToAliasedParam( TempDescriptor tdParam,
- Integer paramIndex, FlatMethod fm ) {
+ public void assignTempEqualToAliasedParam(TempDescriptor tdParam,
+ Integer paramIndex, FlatMethod fm) {
assert tdParam != null;
TypeDescriptor typeParam = tdParam.getType();
assert typeParam != null;
- LabelNode lnParam = getLabelNodeFromTemp( tdParam );
- LabelNode lnAliased = getLabelNodeFromTemp( tdAliasBlob );
+ LabelNode lnParam = getLabelNodeFromTemp(tdParam);
+ LabelNode lnAliased = getLabelNodeFromTemp(tdAliasBlob);
- parameterTemps.add( tdParam );
- parameterLabels.add( lnParam );
+ parameterTemps.add(tdParam);
+ parameterLabels.add(lnParam);
// this is a non-program-accessible label that picks up beta
// info to be used for fixing a caller of this method
- TempDescriptor tdParamQ = new TempDescriptor( tdParam+qString );
- TempDescriptor tdParamR = new TempDescriptor( tdParam+rString );
+ TempDescriptor tdParamQ = new TempDescriptor(tdParam+qString);
+ TempDescriptor tdParamR = new TempDescriptor(tdParam+rString);
- paramIndex2tdQ.put( paramIndex, tdParamQ );
- paramIndex2tdR.put( paramIndex, tdParamR );
+ paramIndex2tdQ.put(paramIndex, tdParamQ);
+ paramIndex2tdR.put(paramIndex, tdParamR);
- LabelNode lnParamQ = getLabelNodeFromTemp( tdParamQ );
- LabelNode lnParamR = getLabelNodeFromTemp( tdParamR );
+ LabelNode lnParamQ = getLabelNodeFromTemp(tdParamQ);
+ LabelNode lnParamR = getLabelNodeFromTemp(tdParamR);
- outOfScopeTemps.add( tdParamR );
- outOfScopeLabels.add( lnParamR );
- outOfScopeTemps.add( tdParamQ );
- outOfScopeLabels.add( lnParamQ );
+ outOfScopeTemps.add(tdParamR);
+ outOfScopeLabels.add(lnParamR);
+ outOfScopeTemps.add(tdParamQ);
+ outOfScopeLabels.add(lnParamQ);
// the lnAliased should always only reference one node, and that
// heap region node is the aliased param blob
HeapRegionNode hrnAliasBlob = lnAliased.iteratorToReferencees().next().getDst();
Integer idAliased = hrnAliasBlob.getID();
-
- TokenTuple ttAliased = new TokenTuple( idAliased,
- true, // multi-object
- TokenTuple.ARITY_ONE ).makeCanonical();
+ TokenTuple ttAliased = new TokenTuple(idAliased,
+ true, // multi-object
+ TokenTuple.ARITY_ONE).makeCanonical();
- HeapRegionNode hrnPrimary = createNewHeapRegionNode( null, // id or null to generate a new one
- true, // single object?
- false, // summary?
- false, // flagged?
- true, // is a parameter?
- typeParam, // type
- null, // allocation site
- null, // reachability set
- "param"+paramIndex+" obj",
- generateUniqueIdentifier(fm, paramIndex.intValue(), "P"));
+
+ HeapRegionNode hrnPrimary = createNewHeapRegionNode(null, // id or null to generate a new one
+ true, // single object?
+ false, // summary?
+ false, // flagged?
+ true, // is a parameter?
+ typeParam, // type
+ null, // allocation site
+ null, // reachability set
+ "param"+paramIndex+" obj",
+ generateUniqueIdentifier(fm, paramIndex.intValue(), "P"));
Integer newPrimaryID = hrnPrimary.getID();
- assert !idPrimary2paramIndexSet.containsKey( newPrimaryID );
+ assert !idPrimary2paramIndexSet.containsKey(newPrimaryID);
Set<Integer> s1 = new HashSet<Integer>();
- s1.add( paramIndex );
- idPrimary2paramIndexSet.put( newPrimaryID, s1 );
- paramIndex2idPrimary.put( paramIndex, newPrimaryID );
+ s1.add(paramIndex);
+ idPrimary2paramIndexSet.put(newPrimaryID, s1);
+ paramIndex2idPrimary.put(paramIndex, newPrimaryID);
- Set<Integer> s2 = idSecondary2paramIndexSet.get( idAliased );
+ Set<Integer> s2 = idSecondary2paramIndexSet.get(idAliased);
if( s2 == null ) {
s2 = new HashSet<Integer>();
}
- s2.add( paramIndex );
- idSecondary2paramIndexSet.put( idAliased, s2 );
- paramIndex2idSecondary.put( paramIndex, idAliased );
-
+ s2.add(paramIndex);
+ idSecondary2paramIndexSet.put(idAliased, s2);
+ paramIndex2idSecondary.put(paramIndex, idAliased);
+
+
-
- TokenTuple ttPrimary = new TokenTuple( newPrimaryID,
- false, // multi-object
- TokenTuple.ARITY_ONE ).makeCanonical();
+ TokenTuple ttPrimary = new TokenTuple(newPrimaryID,
+ false, // multi-object
+ TokenTuple.ARITY_ONE).makeCanonical();
-
- TokenTupleSet tts0 = new TokenTupleSet( ttPrimary ).makeCanonical();
- TokenTupleSet tts1 = new TokenTupleSet( ttAliased ).makeCanonical();
- TokenTupleSet tts2 = new TokenTupleSet( ttPrimary ).makeCanonical().union( ttAliased );
- ReachabilitySet betaSoup = ReachabilitySet.factory( tts0 ).union( tts1 ).union( tts2 );
+
+ TokenTupleSet tts0 = new TokenTupleSet(ttPrimary).makeCanonical();
+ TokenTupleSet tts1 = new TokenTupleSet(ttAliased).makeCanonical();
+ TokenTupleSet tts2 = new TokenTupleSet(ttPrimary).makeCanonical().union(ttAliased);
+ ReachabilitySet betaSoup = ReachabilitySet.factory(tts0).union(tts1).union(tts2);
ReferenceEdge edgeFromLabel =
- new ReferenceEdge( lnParam, // src
- hrnPrimary, // dst
- typeParam, // type
- null, // field
- false, // special param initial (not needed on label->node)
- betaSoup ); // reachability
+ new ReferenceEdge(lnParam, // src
+ hrnPrimary, // dst
+ typeParam, // type
+ null, // field
+ false, // special param initial (not needed on label->node)
+ betaSoup); // reachability
edgeFromLabel.tainedBy(paramIndex);
- addReferenceEdge( lnParam, hrnPrimary, edgeFromLabel );
+ addReferenceEdge(lnParam, hrnPrimary, edgeFromLabel);
ReferenceEdge edgeFromLabelQ =
- new ReferenceEdge( lnParamQ, // src
- hrnPrimary, // dst
- null, // type
- null, // field
- false, // special param initial (not needed on label->node)
- betaSoup ); // reachability
+ new ReferenceEdge(lnParamQ, // src
+ hrnPrimary, // dst
+ null, // type
+ null, // field
+ false, // special param initial (not needed on label->node)
+ betaSoup); // reachability
edgeFromLabelQ.tainedBy(paramIndex);
- addReferenceEdge( lnParamQ, hrnPrimary, edgeFromLabelQ );
-
+ addReferenceEdge(lnParamQ, hrnPrimary, edgeFromLabelQ);
+
ReferenceEdge edgeAliased2Primary =
- new ReferenceEdge( hrnAliasBlob, // src
- hrnPrimary, // dst
- null, // match all types
- null, // match all fields
- true, // special param initial
- betaSoup ); // reachability
- addReferenceEdge( hrnAliasBlob, hrnPrimary, edgeAliased2Primary );
+ new ReferenceEdge(hrnAliasBlob, // src
+ hrnPrimary, // dst
+ null, // match all types
+ null, // match all fields
+ true, // special param initial
+ betaSoup); // reachability
+ addReferenceEdge(hrnAliasBlob, hrnPrimary, edgeAliased2Primary);
ReferenceEdge edgeFromLabelR =
- new ReferenceEdge( lnParamR, // src
- hrnAliasBlob, // dst
- null, // type
- null, // field
- false, // special param initial (not needed on label->node)
- betaSoup ); // reachability
+ new ReferenceEdge(lnParamR, // src
+ hrnAliasBlob, // dst
+ null, // type
+ null, // field
+ false, // special param initial (not needed on label->node)
+ betaSoup); // reachability
edgeFromLabelR.tainedBy(paramIndex);
- addReferenceEdge( lnParamR, hrnAliasBlob, edgeFromLabelR );
+ addReferenceEdge(lnParamR, hrnAliasBlob, edgeFromLabelR);
}
- public void addParam2ParamAliasEdges( FlatMethod fm,
- Set<Integer> aliasedParamIndices ) {
+ public void addParam2ParamAliasEdges(FlatMethod fm,
+ Set<Integer> aliasedParamIndices) {
- LabelNode lnAliased = getLabelNodeFromTemp( tdAliasBlob );
+ LabelNode lnAliased = getLabelNodeFromTemp(tdAliasBlob);
// the lnAliased should always only reference one node, and that
// heap region node is the aliased param blob
HeapRegionNode hrnAliasBlob = lnAliased.iteratorToReferencees().next().getDst();
Integer idAliased = hrnAliasBlob.getID();
-
- TokenTuple ttAliased = new TokenTuple( idAliased,
- true, // multi-object
- TokenTuple.ARITY_ONE ).makeCanonical();
+
+ TokenTuple ttAliased = new TokenTuple(idAliased,
+ true, // multi-object
+ TokenTuple.ARITY_ONE).makeCanonical();
Iterator<Integer> apItrI = aliasedParamIndices.iterator();
while( apItrI.hasNext() ) {
Integer i = apItrI.next();
- TempDescriptor tdParamI = fm.getParameter( i );
+ TempDescriptor tdParamI = fm.getParameter(i);
TypeDescriptor typeI = tdParamI.getType();
- LabelNode lnParamI = getLabelNodeFromTemp( tdParamI );
-
- Integer idPrimaryI = paramIndex2idPrimary.get( i );
- assert idPrimaryI != null;
- HeapRegionNode primaryI = id2hrn.get( idPrimaryI );
- assert primaryI != null;
-
- TokenTuple ttPrimaryI = new TokenTuple( idPrimaryI,
- false, // multi-object
- TokenTuple.ARITY_ONE ).makeCanonical();
-
- TokenTupleSet ttsI = new TokenTupleSet( ttPrimaryI ).makeCanonical();
- TokenTupleSet ttsA = new TokenTupleSet( ttAliased ).makeCanonical();
- TokenTupleSet ttsIA = new TokenTupleSet( ttPrimaryI ).makeCanonical().union( ttAliased );
- ReachabilitySet betaSoup = ReachabilitySet.factory( ttsI ).union( ttsA ).union( ttsIA );
+ LabelNode lnParamI = getLabelNodeFromTemp(tdParamI);
+
+ Integer idPrimaryI = paramIndex2idPrimary.get(i);
+ assert idPrimaryI != null;
+ HeapRegionNode primaryI = id2hrn.get(idPrimaryI);
+ assert primaryI != null;
+
+ TokenTuple ttPrimaryI = new TokenTuple(idPrimaryI,
+ false, // multi-object
+ TokenTuple.ARITY_ONE).makeCanonical();
+
+ TokenTupleSet ttsI = new TokenTupleSet(ttPrimaryI).makeCanonical();
+ TokenTupleSet ttsA = new TokenTupleSet(ttAliased).makeCanonical();
+ TokenTupleSet ttsIA = new TokenTupleSet(ttPrimaryI).makeCanonical().union(ttAliased);
+ ReachabilitySet betaSoup = ReachabilitySet.factory(ttsI).union(ttsA).union(ttsIA);
// calculate whether fields of this aliased parameter are able to
// primary objects!
Set<FieldDescriptor> primary2primaryFields = new HashSet<FieldDescriptor>();
Set<FieldDescriptor> primary2secondaryFields = new HashSet<FieldDescriptor>();
-
+
// there might be an element reference for array types
if( typeI.isArray() ) {
// only bother with this if the dereferenced type can
// affect reachability
TypeDescriptor typeDeref = typeI.dereference();
-
+
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
// for this parameter to be aliased the following must be true
//assert !typeDeref.isImmutable() || typeDeref.isArray();
-
-
- primary2secondaryFields.add(
- OwnershipAnalysis.getArrayField( typeDeref )
- );
+
+
+ primary2secondaryFields.add(
+ OwnershipAnalysis.getArrayField(typeDeref)
+ );
// also handle a special case where an array of objects
// can point back to the array, which is an object!
- if( typeI .toPrettyString().equals( "Object[]" ) &&
- typeDeref.toPrettyString().equals( "Object" ) ) {
- primary2primaryFields.add(
- OwnershipAnalysis.getArrayField( typeDeref )
- );
+ if( typeI.toPrettyString().equals("Object[]") &&
+ typeDeref.toPrettyString().equals("Object") ) {
+ primary2primaryFields.add(
+ OwnershipAnalysis.getArrayField(typeDeref)
+ );
}
}
-
+
// there might be member references for class types
if( typeI.isClass() ) {
ClassDescriptor cd = typeI.getClassDesc();
while( cd != null ) {
-
+
Iterator fieldItr = cd.getFields();
while( fieldItr.hasNext() ) {
-
+
FieldDescriptor fd = (FieldDescriptor) fieldItr.next();
TypeDescriptor typeField = fd.getType();
- assert typeField != null;
-
+ assert typeField != null;
+
if( !typeField.isImmutable() || typeField.isArray() ) {
- primary2secondaryFields.add( fd );
+ primary2secondaryFields.add(fd);
+ }
+
+ if( typeUtil.isSuperorType(typeField, typeI) ) {
+ primary2primaryFields.add(fd);
}
-
- if( typeUtil.isSuperorType( typeField, typeI ) ) {
- primary2primaryFields.add( fd );
- }
}
-
+
cd = cd.getSuperDesc();
}
}
Iterator<FieldDescriptor> fieldItr = primary2primaryFields.iterator();
while( fieldItr.hasNext() ) {
FieldDescriptor fd = fieldItr.next();
-
+
ReferenceEdge edgePrimaryReflexive =
- new ReferenceEdge( primaryI, // src
- primaryI, // dst
- fd.getType(), // type
- fd.getSymbol(), // field
- true, // special param initial
- betaSoup ); // reachability
- addReferenceEdge( primaryI, primaryI, edgePrimaryReflexive );
+ new ReferenceEdge(primaryI, // src
+ primaryI, // dst
+ fd.getType(), // type
+ fd.getSymbol(), // field
+ true, // special param initial
+ betaSoup); // reachability
+ addReferenceEdge(primaryI, primaryI, edgePrimaryReflexive);
}
fieldItr = primary2secondaryFields.iterator();
while( fieldItr.hasNext() ) {
FieldDescriptor fd = fieldItr.next();
TypeDescriptor typeField = fd.getType();
- assert typeField != null;
-
+ assert typeField != null;
+
ReferenceEdge edgePrimary2Secondary =
- new ReferenceEdge( primaryI, // src
- hrnAliasBlob, // dst
- fd.getType(), // type
- fd.getSymbol(), // field
- true, // special param initial
- betaSoup ); // reachability
- addReferenceEdge( primaryI, hrnAliasBlob, edgePrimary2Secondary );
+ new ReferenceEdge(primaryI, // src
+ hrnAliasBlob, // dst
+ fd.getType(), // type
+ fd.getSymbol(), // field
+ true, // special param initial
+ betaSoup); // reachability
+ addReferenceEdge(primaryI, hrnAliasBlob, edgePrimary2Secondary);
// ask whether these fields might match any of the other aliased
// parameters and make those edges too
Iterator<Integer> apItrJ = aliasedParamIndices.iterator();
while( apItrJ.hasNext() ) {
- Integer j = apItrJ.next();
- TempDescriptor tdParamJ = fm.getParameter( j );
+ Integer j = apItrJ.next();
+ TempDescriptor tdParamJ = fm.getParameter(j);
TypeDescriptor typeJ = tdParamJ.getType();
- if( !i.equals( j ) && typeUtil.isSuperorType( typeField, typeJ ) ) {
+ if( !i.equals(j) && typeUtil.isSuperorType(typeField, typeJ) ) {
- Integer idPrimaryJ = paramIndex2idPrimary.get( j );
+ Integer idPrimaryJ = paramIndex2idPrimary.get(j);
assert idPrimaryJ != null;
- HeapRegionNode primaryJ = id2hrn.get( idPrimaryJ );
- assert primaryJ != null;
+ HeapRegionNode primaryJ = id2hrn.get(idPrimaryJ);
+ assert primaryJ != null;
- TokenTuple ttPrimaryJ = new TokenTuple( idPrimaryJ,
- false, // multi-object
- TokenTuple.ARITY_ONE ).makeCanonical();
+ TokenTuple ttPrimaryJ = new TokenTuple(idPrimaryJ,
+ false, // multi-object
+ TokenTuple.ARITY_ONE).makeCanonical();
- TokenTupleSet ttsJ = new TokenTupleSet( ttPrimaryJ ).makeCanonical();
- TokenTupleSet ttsIJ = ttsI.union( ttsJ );
- TokenTupleSet ttsAJ = ttsA.union( ttsJ );
- TokenTupleSet ttsIAJ = ttsIA.union( ttsJ );
- ReachabilitySet betaSoupWJ = ReachabilitySet.factory( ttsJ ).union( ttsIJ ).union( ttsAJ ).union( ttsIAJ );
+ TokenTupleSet ttsJ = new TokenTupleSet(ttPrimaryJ).makeCanonical();
+ TokenTupleSet ttsIJ = ttsI.union(ttsJ);
+ TokenTupleSet ttsAJ = ttsA.union(ttsJ);
+ TokenTupleSet ttsIAJ = ttsIA.union(ttsJ);
+ ReachabilitySet betaSoupWJ = ReachabilitySet.factory(ttsJ).union(ttsIJ).union(ttsAJ).union(ttsIAJ);
ReferenceEdge edgePrimaryI2PrimaryJ =
- new ReferenceEdge( primaryI, // src
- primaryJ, // dst
- fd.getType(), // type
- fd.getSymbol(), // field
- true, // special param initial
- betaSoupWJ ); // reachability
- addReferenceEdge( primaryI, primaryJ, edgePrimaryI2PrimaryJ );
+ new ReferenceEdge(primaryI, // src
+ primaryJ, // dst
+ fd.getType(), // type
+ fd.getSymbol(), // field
+ true, // special param initial
+ betaSoupWJ); // reachability
+ addReferenceEdge(primaryI, primaryJ, edgePrimaryI2PrimaryJ);
}
- }
- }
-
-
+ }
+ }
+
+
// look at whether aliased parameters i and j can
// possibly be the same primary object, add edges
Iterator<Integer> apItrJ = aliasedParamIndices.iterator();
while( apItrJ.hasNext() ) {
- Integer j = apItrJ.next();
- TempDescriptor tdParamJ = fm.getParameter( j );
+ Integer j = apItrJ.next();
+ TempDescriptor tdParamJ = fm.getParameter(j);
TypeDescriptor typeJ = tdParamJ.getType();
- LabelNode lnParamJ = getLabelNodeFromTemp( tdParamJ );
+ LabelNode lnParamJ = getLabelNodeFromTemp(tdParamJ);
- if( !i.equals( j ) && typeUtil.isSuperorType( typeI, typeJ ) ) {
-
- Integer idPrimaryJ = paramIndex2idPrimary.get( j );
+ if( !i.equals(j) && typeUtil.isSuperorType(typeI, typeJ) ) {
+
+ Integer idPrimaryJ = paramIndex2idPrimary.get(j);
assert idPrimaryJ != null;
- HeapRegionNode primaryJ = id2hrn.get( idPrimaryJ );
+ HeapRegionNode primaryJ = id2hrn.get(idPrimaryJ);
assert primaryJ != null;
-
- ReferenceEdge lnJ2PrimaryJ = lnParamJ.getReferenceTo( primaryJ,
- tdParamJ.getType(),
- null );
+
+ ReferenceEdge lnJ2PrimaryJ = lnParamJ.getReferenceTo(primaryJ,
+ tdParamJ.getType(),
+ null);
assert lnJ2PrimaryJ != null;
-
+
ReferenceEdge lnI2PrimaryJ = lnJ2PrimaryJ.copy();
- lnI2PrimaryJ.setSrc( lnParamI );
- lnI2PrimaryJ.setType( tdParamI.getType() );
+ lnI2PrimaryJ.setSrc(lnParamI);
+ lnI2PrimaryJ.setType(tdParamI.getType() );
lnI2PrimaryJ.tainedBy(new Integer(j));
- addReferenceEdge( lnParamI, primaryJ, lnI2PrimaryJ );
+ addReferenceEdge(lnParamI, primaryJ, lnI2PrimaryJ);
}
}
}
}
- public void prepareParamTokenMaps( FlatMethod fm ) {
+ public void prepareParamTokenMaps(FlatMethod fm) {
// always add the bogus mappings that are used to
// rewrite "with respect to no parameter"
- paramTokenPrimary2paramIndex.put( bogusToken, bogusIndex );
- paramIndex2paramTokenPrimary.put( bogusIndex, bogusToken );
+ paramTokenPrimary2paramIndex.put(bogusToken, bogusIndex);
+ paramIndex2paramTokenPrimary.put(bogusIndex, bogusToken);
- paramTokenSecondary2paramIndex.put( bogusToken, bogusIndex );
- paramIndex2paramTokenSecondary.put( bogusIndex, bogusToken );
- paramTokenSecondaryPlus2paramIndex.put( bogusTokenPlus, bogusIndex );
- paramIndex2paramTokenSecondaryPlus.put( bogusIndex, bogusTokenPlus );
- paramTokenSecondaryStar2paramIndex.put( bogusTokenStar, bogusIndex );
- paramIndex2paramTokenSecondaryStar.put( bogusIndex, bogusTokenStar );
+ paramTokenSecondary2paramIndex.put(bogusToken, bogusIndex);
+ paramIndex2paramTokenSecondary.put(bogusIndex, bogusToken);
+ paramTokenSecondaryPlus2paramIndex.put(bogusTokenPlus, bogusIndex);
+ paramIndex2paramTokenSecondaryPlus.put(bogusIndex, bogusTokenPlus);
+ paramTokenSecondaryStar2paramIndex.put(bogusTokenStar, bogusIndex);
+ paramIndex2paramTokenSecondaryStar.put(bogusIndex, bogusTokenStar);
for( int i = 0; i < fm.numParameters(); ++i ) {
- Integer paramIndex = new Integer( i );
+ Integer paramIndex = new Integer(i);
// immutable objects have no primary regions
- if( paramIndex2idPrimary.containsKey( paramIndex ) ) {
- Integer idPrimary = paramIndex2idPrimary.get( paramIndex );
-
- assert id2hrn.containsKey( idPrimary );
- HeapRegionNode hrnPrimary = id2hrn.get( idPrimary );
-
- TokenTuple p_i = new TokenTuple( hrnPrimary.getID(),
- false, // multiple-object?
- TokenTuple.ARITY_ONE ).makeCanonical();
- paramTokenPrimary2paramIndex.put( p_i, paramIndex );
- paramIndex2paramTokenPrimary.put( paramIndex, p_i );
- }
-
+ if( paramIndex2idPrimary.containsKey(paramIndex) ) {
+ Integer idPrimary = paramIndex2idPrimary.get(paramIndex);
+
+ assert id2hrn.containsKey(idPrimary);
+ HeapRegionNode hrnPrimary = id2hrn.get(idPrimary);
+
+ TokenTuple p_i = new TokenTuple(hrnPrimary.getID(),
+ false, // multiple-object?
+ TokenTuple.ARITY_ONE).makeCanonical();
+ paramTokenPrimary2paramIndex.put(p_i, paramIndex);
+ paramIndex2paramTokenPrimary.put(paramIndex, p_i);
+ }
+
// any parameter object, by type, may have no secondary region
- if( paramIndex2idSecondary.containsKey( paramIndex ) ) {
- Integer idSecondary = paramIndex2idSecondary.get( paramIndex );
-
- assert id2hrn.containsKey( idSecondary );
- HeapRegionNode hrnSecondary = id2hrn.get( idSecondary );
-
- TokenTuple s_i = new TokenTuple( hrnSecondary.getID(),
- true, // multiple-object?
- TokenTuple.ARITY_ONE ).makeCanonical();
- paramTokenSecondary2paramIndex.put( s_i, paramIndex );
- paramIndex2paramTokenSecondary.put( paramIndex, s_i );
-
- TokenTuple s_i_plus = new TokenTuple( hrnSecondary.getID(),
- true, // multiple-object?
- TokenTuple.ARITY_ONEORMORE ).makeCanonical();
- paramTokenSecondaryPlus2paramIndex.put( s_i_plus, paramIndex );
- paramIndex2paramTokenSecondaryPlus.put( paramIndex, s_i_plus );
-
- TokenTuple s_i_star = new TokenTuple( hrnSecondary.getID(),
- true, // multiple-object?
- TokenTuple.ARITY_ZEROORMORE ).makeCanonical();
- paramTokenSecondaryStar2paramIndex.put( s_i_star, paramIndex );
- paramIndex2paramTokenSecondaryStar.put( paramIndex, s_i_star );
+ if( paramIndex2idSecondary.containsKey(paramIndex) ) {
+ Integer idSecondary = paramIndex2idSecondary.get(paramIndex);
+
+ assert id2hrn.containsKey(idSecondary);
+ HeapRegionNode hrnSecondary = id2hrn.get(idSecondary);
+
+ TokenTuple s_i = new TokenTuple(hrnSecondary.getID(),
+ true, // multiple-object?
+ TokenTuple.ARITY_ONE).makeCanonical();
+ paramTokenSecondary2paramIndex.put(s_i, paramIndex);
+ paramIndex2paramTokenSecondary.put(paramIndex, s_i);
+
+ TokenTuple s_i_plus = new TokenTuple(hrnSecondary.getID(),
+ true, // multiple-object?
+ TokenTuple.ARITY_ONEORMORE).makeCanonical();
+ paramTokenSecondaryPlus2paramIndex.put(s_i_plus, paramIndex);
+ paramIndex2paramTokenSecondaryPlus.put(paramIndex, s_i_plus);
+
+ TokenTuple s_i_star = new TokenTuple(hrnSecondary.getID(),
+ true, // multiple-object?
+ TokenTuple.ARITY_ZEROORMORE).makeCanonical();
+ paramTokenSecondaryStar2paramIndex.put(s_i_star, paramIndex);
+ paramIndex2paramTokenSecondaryStar.put(paramIndex, s_i_star);
}
}
}
assert x != null;
assert as != null;
- age( as );
+ age(as);
// after the age operation the newest (or zero-ith oldest)
// node associated with the allocation site should have
// no references to it as if it were a newly allocated
// heap region
- Integer idNewest = as.getIthOldest( 0 );
- HeapRegionNode hrnNewest = id2hrn.get( idNewest );
- assert hrnNewest != null;
+ Integer idNewest = as.getIthOldest(0);
+ HeapRegionNode hrnNewest = id2hrn.get(idNewest);
+ assert hrnNewest != null;
- LabelNode lnX = getLabelNodeFromTemp( x );
- clearReferenceEdgesFrom( lnX, null, null, true );
+ LabelNode lnX = getLabelNodeFromTemp(x);
+ clearReferenceEdgesFrom(lnX, null, null, true);
// make a new reference to allocated node
TypeDescriptor type = as.getType();
- ReferenceEdge edgeNew =
- new ReferenceEdge( lnX, // source
- hrnNewest, // dest
- type, // type
- null, // field name
- false, // is initial param
- hrnNewest.getAlpha() // beta
- );
-
- addReferenceEdge( lnX, hrnNewest, edgeNew );
+ ReferenceEdge edgeNew =
+ new ReferenceEdge(lnX, // source
+ hrnNewest, // dest
+ type, // type
+ null, // field name
+ false, // is initial param
+ hrnNewest.getAlpha() // beta
+ );
+
+ addReferenceEdge(lnX, hrnNewest, edgeNew);
}
if( as.getType().isClass() ) {
hasFlags = as.getType().getClassDesc().hasFlags();
}
-
- if(as.getFlag()){
- hasFlags=as.getFlag();
+
+ if(as.getFlag()) {
+ hasFlags=as.getFlag();
}
- hrnSummary = createNewHeapRegionNode(idSummary, // id or null to generate a new one
- false, // single object?
- true, // summary?
- hasFlags, // flagged?
- false, // is a parameter?
- as.getType(), // type
- as, // allocation site
- null, // reachability set
+ hrnSummary = createNewHeapRegionNode(idSummary, // id or null to generate a new one
+ false, // single object?
+ true, // summary?
+ hasFlags, // flagged?
+ false, // is a parameter?
+ as.getType(), // type
+ as, // allocation site
+ null, // reachability set
as.toStringForDOT() + "\\nsummary",
generateUniqueIdentifier(as,0,true));
for( int i = 0; i < as.getAllocationDepth(); ++i ) {
Integer idIth = as.getIthOldest(i);
assert !id2hrn.containsKey(idIth);
- createNewHeapRegionNode(idIth, // id or null to generate a new one
- true, // single object?
- false, // summary?
- hasFlags, // flagged?
- false, // is a parameter?
- as.getType(), // type
- as, // allocation site
- null, // reachability set
+ createNewHeapRegionNode(idIth, // id or null to generate a new one
+ true, // single object?
+ false, // summary?
+ hasFlags, // flagged?
+ false, // is a parameter?
+ as.getType(), // type
+ as, // allocation site
+ null, // reachability set
as.toStringForDOT() + "\\n" + i + " oldest",
generateUniqueIdentifier(as,i,false));
}
hasFlags = as.getType().getClassDesc().hasFlags();
}
- hrnShadowSummary = createNewHeapRegionNode(idShadowSummary, // id or null to generate a new one
- false, // single object?
- true, // summary?
- hasFlags, // flagged?
- false, // is a parameter?
- as.getType(), // type
- as, // allocation site
- null, // reachability set
+ hrnShadowSummary = createNewHeapRegionNode(idShadowSummary, // id or null to generate a new one
+ false, // single object?
+ true, // summary?
+ hasFlags, // flagged?
+ false, // is a parameter?
+ as.getType(), // type
+ as, // allocation site
+ null, // reachability set
as + "\\n" + as.getType() + "\\nshadowSum",
"");
for( int i = 0; i < as.getAllocationDepth(); ++i ) {
Integer idShadowIth = as.getIthOldestShadow(i);
assert !id2hrn.containsKey(idShadowIth);
- createNewHeapRegionNode(idShadowIth, // id or null to generate a new one
- true, // single object?
- false, // summary?
- hasFlags, // flagged?
- false, // is a parameter?
- as.getType(), // type
- as, // allocation site
- null, // reachability set
+ createNewHeapRegionNode(idShadowIth, // id or null to generate a new one
+ true, // single object?
+ false, // summary?
+ hasFlags, // flagged?
+ false, // is a parameter?
+ as.getType(), // type
+ as, // allocation site
+ null, // reachability set
as + "\\n" + as.getType() + "\\n" + i + " shadow",
"");
}
edgeMerged.setSrc(hrnSummary);
HeapRegionNode hrnReferencee = edge.getDst();
- ReferenceEdge edgeSummary = hrnSummary.getReferenceTo(hrnReferencee,
- edge.getType(),
- edge.getField() );
+ ReferenceEdge edgeSummary = hrnSummary.getReferenceTo(hrnReferencee,
+ edge.getType(),
+ edge.getField() );
if( edgeSummary == null ) {
// the merge is trivial, nothing to be done
edgeMerged.setDst(hrnSummary);
OwnershipNode onReferencer = edge.getSrc();
- ReferenceEdge edgeSummary = onReferencer.getReferenceTo(hrnSummary,
- edge.getType(),
- edge.getField() );
+ ReferenceEdge edgeSummary = onReferencer.getReferenceTo(hrnSummary,
+ edge.getType(),
+ edge.getField() );
if( edgeSummary == null ) {
// the merge is trivial, nothing to be done
Iterator<ChangeTuple> itrCprime = C.iterator();
while( itrCprime.hasNext() ) {
ChangeTuple c = itrCprime.next();
- if( edgeF.getBeta().contains( c.getSetToMatch() ) ) {
+ if( edgeF.getBeta().contains(c.getSetToMatch() ) ) {
changesToPass = changesToPass.union(c);
}
}
// then apply all of the changes for each node at once
Iterator itrMap = nodePlannedChanges.entrySet().iterator();
while( itrMap.hasNext() ) {
- Map.Entry me = (Map.Entry) itrMap.next();
+ Map.Entry me = (Map.Entry)itrMap.next();
HeapRegionNode n = (HeapRegionNode) me.getKey();
ChangeTupleSet C = (ChangeTupleSet) me.getValue();
// this propagation step is with respect to one change,
// so we capture the full change from the old alpha:
- ReachabilitySet localDelta = n.getAlpha().applyChangeSet( C, true );
+ ReachabilitySet localDelta = n.getAlpha().applyChangeSet(C, true);
// but this propagation may be only one of many concurrent
// possible changes, so keep a running union with the node's
// partially updated new alpha set
- n.setAlphaNew( n.getAlphaNew().union( localDelta ) );
+ n.setAlphaNew(n.getAlphaNew().union(localDelta) );
- nodesWithNewAlpha.add( n );
+ nodesWithNewAlpha.add(n);
}
propagateTokensOverEdges(todoEdges, edgePlannedChanges, edgesWithNewBeta);
Iterator<ChangeTuple> itrC = C.iterator();
while( itrC.hasNext() ) {
ChangeTuple c = itrC.next();
- if( edgeE.getBeta().contains( c.getSetToMatch() ) ) {
+ if( edgeE.getBeta().contains(c.getSetToMatch() ) ) {
changesToPass = changesToPass.union(c);
}
}
// then apply all of the changes for each edge at once
Iterator itrMap = edgePlannedChanges.entrySet().iterator();
while( itrMap.hasNext() ) {
- Map.Entry me = (Map.Entry) itrMap.next();
- ReferenceEdge e = (ReferenceEdge) me.getKey();
+ Map.Entry me = (Map.Entry)itrMap.next();
+ ReferenceEdge e = (ReferenceEdge) me.getKey();
ChangeTupleSet C = (ChangeTupleSet) me.getValue();
// this propagation step is with respect to one change,
// so we capture the full change from the old beta:
- ReachabilitySet localDelta = e.getBeta().applyChangeSet( C, true );
+ ReachabilitySet localDelta = e.getBeta().applyChangeSet(C, true);
// but this propagation may be only one of many concurrent
// possible changes, so keep a running union with the edge's
// partially updated new beta set
- e.setBetaNew( e.getBetaNew().union( localDelta ) );
-
- edgesWithNewBeta.add( e );
+ e.setBetaNew(e.getBetaNew().union(localDelta) );
+
+ edgesWithNewBeta.add(e);
}
}
- public Set<Integer> calculateAliasedParamSet( FlatCall fc,
- boolean isStatic,
- FlatMethod fm ) {
+ public Set<Integer> calculateAliasedParamSet(FlatCall fc,
+ boolean isStatic,
+ FlatMethod fm) {
Hashtable<Integer, LabelNode> paramIndex2ln =
new Hashtable<Integer, LabelNode>();
new Hashtable<Integer, HashSet<HeapRegionNode> >();
for( int i = 0; i < fm.numParameters(); ++i ) {
- Integer paramIndex = new Integer( i );
- TempDescriptor tdParam = fm.getParameter( i );
+ Integer paramIndex = new Integer(i);
+ TempDescriptor tdParam = fm.getParameter(i);
TypeDescriptor typeParam = tdParam.getType();
if( typeParam.isImmutable() && !typeParam.isArray() ) {
// now depending on whether the callee is static or not
// we need to account for a "this" argument in order to
// find the matching argument in the caller context
- TempDescriptor argTemp_i = fc.getArgMatchingParamIndex( fm, paramIndex );
+ TempDescriptor argTemp_i = fc.getArgMatchingParamIndex(fm, paramIndex);
LabelNode argLabel_i = getLabelNodeFromTemp(argTemp_i);
paramIndex2ln.put(paramIndex, argLabel_i);
Iterator<ReferenceEdge> edgeArgItr = lnArg_i.iteratorToReferencees();
while( edgeArgItr.hasNext() ) {
ReferenceEdge edge = edgeArgItr.next();
- todoNodes.add( edge.getDst() );
+ todoNodes.add(edge.getDst() );
}
// then follow links until all reachable nodes have been found
// check for arguments that are aliased
for( int i = 0; i < fm.numParameters(); ++i ) {
- for( int j = 0; j < i; ++j ) {
- HashSet<HeapRegionNode> s1 = paramIndex2reachableCallerNodes.get( i );
- HashSet<HeapRegionNode> s2 = paramIndex2reachableCallerNodes.get( j );
+ for( int j = 0; j < i; ++j ) {
+ HashSet<HeapRegionNode> s1 = paramIndex2reachableCallerNodes.get(i);
+ HashSet<HeapRegionNode> s2 = paramIndex2reachableCallerNodes.get(j);
// some parameters are immutable or primitive, so skip em
if( s1 == null || s2 == null ) {
intersection.retainAll(s2);
if( !intersection.isEmpty() ) {
- aliasedIndices.add( new Integer( i ) );
- aliasedIndices.add( new Integer( j ) );
+ aliasedIndices.add(new Integer(i) );
+ aliasedIndices.add(new Integer(j) );
}
}
}
}
- private String makeMapKey( Integer i, Integer j, String field ) {
+ private String makeMapKey(Integer i, Integer j, String field) {
return i+","+j+","+field;
}
- private String makeMapKey( Integer i, String field ) {
+ private String makeMapKey(Integer i, String field) {
return i+","+field;
}
// category for mapping with respect to another argument index j
// so the key into the hashtable is i, the value is a two-element vector
// that contains in 0 the edge and in 1 the Integer index j
- private void ensureEmptyEdgeIndexPair( Hashtable< Integer, Set<Vector> > edge_index_pairs,
- Integer indexI ) {
+ private void ensureEmptyEdgeIndexPair(Hashtable< Integer, Set<Vector> > edge_index_pairs,
+ Integer indexI) {
- Set<Vector> ei = edge_index_pairs.get( indexI );
- if( ei == null ) {
- ei = new HashSet<Vector>();
+ Set<Vector> ei = edge_index_pairs.get(indexI);
+ if( ei == null ) {
+ ei = new HashSet<Vector>();
}
- edge_index_pairs.put( indexI, ei );
+ edge_index_pairs.put(indexI, ei);
}
- private void addEdgeIndexPair( Hashtable< Integer, Set<Vector> > edge_index_pairs,
- Integer indexI,
- ReferenceEdge edge,
- Integer indexJ ) {
-
- Vector v = new Vector(); v.setSize( 2 );
- v.set( 0 , edge );
- v.set( 1 , indexJ );
- Set<Vector> ei = edge_index_pairs.get( indexI );
- if( ei == null ) {
- ei = new HashSet<Vector>();
- }
- ei.add( v );
- edge_index_pairs.put( indexI, ei );
+ private void addEdgeIndexPair(Hashtable< Integer, Set<Vector> > edge_index_pairs,
+ Integer indexI,
+ ReferenceEdge edge,
+ Integer indexJ) {
+
+ Vector v = new Vector(); v.setSize(2);
+ v.set(0, edge);
+ v.set(1, indexJ);
+ Set<Vector> ei = edge_index_pairs.get(indexI);
+ if( ei == null ) {
+ ei = new HashSet<Vector>();
+ }
+ ei.add(v);
+ edge_index_pairs.put(indexI, ei);
}
- private ReachabilitySet funcScriptR( ReachabilitySet rsIn,
- OwnershipGraph ogCallee,
- MethodContext mc ) {
+ private ReachabilitySet funcScriptR(ReachabilitySet rsIn,
+ OwnershipGraph ogCallee,
+ MethodContext mc) {
- ReachabilitySet rsOut = new ReachabilitySet( rsIn );
+ ReachabilitySet rsOut = new ReachabilitySet(rsIn);
Iterator itr = ogCallee.paramIndex2paramTokenPrimary.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- Integer i = (Integer) me.getKey();
+ Map.Entry me = (Map.Entry)itr.next();
+ Integer i = (Integer) me.getKey();
TokenTuple p_i = (TokenTuple) me.getValue();
- TokenTuple s_i = ogCallee.paramIndex2paramTokenSecondary.get( i );
+ TokenTuple s_i = ogCallee.paramIndex2paramTokenSecondary.get(i);
// skip this if there is no secondary token or the parameter
// is part of the aliasing context
- if( s_i == null || mc.getAliasedParamIndices().contains( i ) ) {
+ if( s_i == null || mc.getAliasedParamIndices().contains(i) ) {
continue;
}
- rsOut = rsOut.removeTokenAIfTokenB( p_i, s_i );
+ rsOut = rsOut.removeTokenAIfTokenB(p_i, s_i);
}
return rsOut;
// detects strong updates to the primary parameter object and
// effects the removal of old edges in the calling graph
- private void effectCalleeStrongUpdates( Integer paramIndex,
- OwnershipGraph ogCallee,
- HeapRegionNode hrnCaller
- ) {
- Integer idPrimary = ogCallee.paramIndex2idPrimary.get( paramIndex );
+ private void effectCalleeStrongUpdates(Integer paramIndex,
+ OwnershipGraph ogCallee,
+ HeapRegionNode hrnCaller
+ ) {
+ Integer idPrimary = ogCallee.paramIndex2idPrimary.get(paramIndex);
assert idPrimary != null;
- HeapRegionNode hrnPrimary = ogCallee.id2hrn.get( idPrimary );
+ HeapRegionNode hrnPrimary = ogCallee.id2hrn.get(idPrimary);
assert hrnPrimary != null;
TypeDescriptor typeParam = hrnPrimary.getType();
assert typeParam.isClass();
-
- Set<String> fieldNamesToRemove = new HashSet<String>();
+
+ Set<String> fieldNamesToRemove = new HashSet<String>();
ClassDescriptor cd = typeParam.getClassDesc();
while( cd != null ) {
Iterator fieldItr = cd.getFields();
while( fieldItr.hasNext() ) {
-
+
FieldDescriptor fd = (FieldDescriptor) fieldItr.next();
TypeDescriptor typeField = fd.getType();
- assert typeField != null;
-
- if( ogCallee.hasFieldBeenUpdated( hrnPrimary, fd.getSymbol() ) ) {
- clearReferenceEdgesFrom( hrnCaller, fd.getType(), fd.getSymbol(), false );
+ assert typeField != null;
+
+ if( ogCallee.hasFieldBeenUpdated(hrnPrimary, fd.getSymbol() ) ) {
+ clearReferenceEdgesFrom(hrnCaller, fd.getType(), fd.getSymbol(), false);
}
}
-
+
cd = cd.getSuperDesc();
}
}
- private boolean hasFieldBeenUpdated( HeapRegionNode hrnPrimary, String field ) {
+ private boolean hasFieldBeenUpdated(HeapRegionNode hrnPrimary, String field) {
Iterator<ReferenceEdge> itr = hrnPrimary.iteratorToReferencees();
while( itr.hasNext() ) {
ReferenceEdge e = itr.next();
- if( e.fieldEquals( field ) && e.isInitialParam() ) {
+ if( e.fieldEquals(field) && e.isInitialParam() ) {
return false;
}
}
// resolveMethodCall() is used to incorporate a callee graph's effects into
// *this* graph, which is the caller. This method can also be used, after
- // the entire analysis is complete, to perform parameter decomposition for
+ // the entire analysis is complete, to perform parameter decomposition for
// a given call chain.
- public void resolveMethodCall(FlatCall fc, // call site in caller method
- boolean isStatic, // whether it is a static method
- FlatMethod fm, // the callee method (when virtual, can be many)
+ public void resolveMethodCall(FlatCall fc, // call site in caller method
+ boolean isStatic, // whether it is a static method
+ FlatMethod fm, // the callee method (when virtual, can be many)
OwnershipGraph ogCallee, // the callee's current ownership graph
- MethodContext mc, // the aliasing context for this call
- ParameterDecomposition pd // if this is not null, we're calling after analysis
- ) {
+ MethodContext mc, // the aliasing context for this call
+ ParameterDecomposition pd // if this is not null, we're calling after analysis
+ ) {
if( debugCallMap &&
- mc.getDescriptor().getSymbol().equals( debugCaller ) &&
- fm.getMethod().getSymbol().equals( debugCallee )
- ) {
+ mc.getDescriptor().getSymbol().equals(debugCaller) &&
+ fm.getMethod().getSymbol().equals(debugCallee)
+ ) {
try {
writeGraph("debug1BeforeCall",
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // show back edges to confirm graph validity
+ false, // show parameter indices (unmaintained!)
+ true, // hide subset reachability states
+ true); // hide edge taints
ogCallee.writeGraph("debug0Callee",
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
- } catch( IOException e ) {}
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // show back edges to confirm graph validity
+ false, // show parameter indices (unmaintained!)
+ true, // hide subset reachability states
+ true); // hide edge taints
+ } catch( IOException e ) {
+ }
- System.out.println( " "+mc+" is calling "+fm );
+ System.out.println(" "+mc+" is calling "+fm);
}
// define rewrite rules and other structures to organize data by parameter/argument index
Hashtable<Integer, ReachabilitySet> paramIndex2rewriteH_p = new Hashtable<Integer, ReachabilitySet>();
Hashtable<Integer, ReachabilitySet> paramIndex2rewriteH_s = new Hashtable<Integer, ReachabilitySet>();
-
+
Hashtable<String, ReachabilitySet> paramIndex2rewriteJ_p2p = new Hashtable<String, ReachabilitySet>(); // select( i, j, f )
Hashtable<String, ReachabilitySet> paramIndex2rewriteJ_p2s = new Hashtable<String, ReachabilitySet>(); // select( i, f )
Hashtable<Integer, ReachabilitySet> paramIndex2rewriteJ_s2p = new Hashtable<Integer, ReachabilitySet>();
Hashtable<Integer, LabelNode> paramIndex2ln = new Hashtable<Integer, LabelNode>();
- paramIndex2rewriteH_p.put( bogusIndex, rsIdentity );
- paramIndex2rewriteH_s.put( bogusIndex, rsIdentity );
+ paramIndex2rewriteH_p.put(bogusIndex, rsIdentity);
+ paramIndex2rewriteH_s.put(bogusIndex, rsIdentity);
- paramIndex2rewriteJ_p2p.put( bogusIndex.toString(), rsIdentity );
- paramIndex2rewriteJ_p2s.put( bogusIndex.toString(), rsIdentity );
- paramIndex2rewriteJ_s2p.put( bogusIndex, rsIdentity );
- paramIndex2rewriteJ_s2s.put( bogusIndex, rsIdentity );
+ paramIndex2rewriteJ_p2p.put(bogusIndex.toString(), rsIdentity);
+ paramIndex2rewriteJ_p2s.put(bogusIndex.toString(), rsIdentity);
+ paramIndex2rewriteJ_s2p.put(bogusIndex, rsIdentity);
+ paramIndex2rewriteJ_s2s.put(bogusIndex, rsIdentity);
for( int i = 0; i < fm.numParameters(); ++i ) {
Integer paramIndex = new Integer(i);
- if( !ogCallee.paramIndex2idPrimary.containsKey( paramIndex ) ) {
+ if( !ogCallee.paramIndex2idPrimary.containsKey(paramIndex) ) {
// skip this immutable parameter
continue;
}
-
+
// setup H (primary)
- Integer idPrimary = ogCallee.paramIndex2idPrimary.get( paramIndex );
- assert ogCallee.id2hrn.containsKey( idPrimary );
- HeapRegionNode hrnPrimary = ogCallee.id2hrn.get( idPrimary );
+ Integer idPrimary = ogCallee.paramIndex2idPrimary.get(paramIndex);
+ assert ogCallee.id2hrn.containsKey(idPrimary);
+ HeapRegionNode hrnPrimary = ogCallee.id2hrn.get(idPrimary);
assert hrnPrimary != null;
- paramIndex2rewriteH_p.put( paramIndex, toShadowTokens( ogCallee, hrnPrimary.getAlpha() ) );
+ paramIndex2rewriteH_p.put(paramIndex, toShadowTokens(ogCallee, hrnPrimary.getAlpha() ) );
// setup J (primary->X)
Iterator<ReferenceEdge> p2xItr = hrnPrimary.iteratorToReferencees();
ReferenceEdge p2xEdge = p2xItr.next();
// we only care about initial parameter edges here
- if( !p2xEdge.isInitialParam() ) { continue; }
+ if( !p2xEdge.isInitialParam() ) {
+ continue;
+ }
HeapRegionNode hrnDst = p2xEdge.getDst();
- if( ogCallee.idPrimary2paramIndexSet.containsKey( hrnDst.getID() ) ) {
- Iterator<Integer> jItr = ogCallee.idPrimary2paramIndexSet.get( hrnDst.getID() ).iterator();
+ if( ogCallee.idPrimary2paramIndexSet.containsKey(hrnDst.getID() ) ) {
+ Iterator<Integer> jItr = ogCallee.idPrimary2paramIndexSet.get(hrnDst.getID() ).iterator();
while( jItr.hasNext() ) {
Integer j = jItr.next();
- paramIndex2rewriteJ_p2p.put( makeMapKey( i, j, p2xEdge.getField() ),
- toShadowTokens( ogCallee, p2xEdge.getBeta() ) );
+ paramIndex2rewriteJ_p2p.put(makeMapKey(i, j, p2xEdge.getField() ),
+ toShadowTokens(ogCallee, p2xEdge.getBeta() ) );
}
} else {
- assert ogCallee.idSecondary2paramIndexSet.containsKey( hrnDst.getID() );
- paramIndex2rewriteJ_p2s.put( makeMapKey( i, p2xEdge.getField() ),
- toShadowTokens( ogCallee, p2xEdge.getBeta() ) );
+ assert ogCallee.idSecondary2paramIndexSet.containsKey(hrnDst.getID() );
+ paramIndex2rewriteJ_p2s.put(makeMapKey(i, p2xEdge.getField() ),
+ toShadowTokens(ogCallee, p2xEdge.getBeta() ) );
}
}
// setup K (primary)
- TempDescriptor tdParamQ = ogCallee.paramIndex2tdQ.get( paramIndex );
+ TempDescriptor tdParamQ = ogCallee.paramIndex2tdQ.get(paramIndex);
assert tdParamQ != null;
- LabelNode lnParamQ = ogCallee.td2ln.get( tdParamQ );
+ LabelNode lnParamQ = ogCallee.td2ln.get(tdParamQ);
assert lnParamQ != null;
- ReferenceEdge edgeSpecialQ_i = lnParamQ.getReferenceTo( hrnPrimary, null, null );
+ ReferenceEdge edgeSpecialQ_i = lnParamQ.getReferenceTo(hrnPrimary, null, null);
assert edgeSpecialQ_i != null;
- ReachabilitySet qBeta = toShadowTokens( ogCallee, edgeSpecialQ_i.getBeta() );
+ ReachabilitySet qBeta = toShadowTokens(ogCallee, edgeSpecialQ_i.getBeta() );
- TokenTuple p_i = ogCallee.paramIndex2paramTokenPrimary .get( paramIndex );
- TokenTuple s_i = ogCallee.paramIndex2paramTokenSecondary.get( paramIndex );
+ TokenTuple p_i = ogCallee.paramIndex2paramTokenPrimary.get(paramIndex);
+ TokenTuple s_i = ogCallee.paramIndex2paramTokenSecondary.get(paramIndex);
ReachabilitySet K_p = new ReachabilitySet().makeCanonical();
ReachabilitySet K_p2 = new ReachabilitySet().makeCanonical();
if( s_i == null ) {
K_p = qBeta;
} else {
- // sort qBeta into K_p1 and K_p2
+ // sort qBeta into K_p1 and K_p2
Iterator<TokenTupleSet> ttsItr = qBeta.iterator();
while( ttsItr.hasNext() ) {
TokenTupleSet tts = ttsItr.next();
- if( s_i != null && tts.containsBoth( p_i, s_i ) ) {
- K_p2 = K_p2.union( tts );
+ if( s_i != null && tts.containsBoth(p_i, s_i) ) {
+ K_p2 = K_p2.union(tts);
} else {
- K_p = K_p.union( tts );
+ K_p = K_p.union(tts);
}
}
}
- paramIndex2rewriteK_p .put( paramIndex, K_p );
- paramIndex2rewriteK_p2.put( paramIndex, K_p2 );
+ paramIndex2rewriteK_p.put(paramIndex, K_p);
+ paramIndex2rewriteK_p2.put(paramIndex, K_p2);
// if there is a secondary node, compute the rest of the rewrite rules
- if( ogCallee.paramIndex2idSecondary.containsKey( paramIndex ) ) {
+ if( ogCallee.paramIndex2idSecondary.containsKey(paramIndex) ) {
// setup H (secondary)
- Integer idSecondary = ogCallee.paramIndex2idSecondary.get( paramIndex );
- assert ogCallee.id2hrn.containsKey( idSecondary );
- HeapRegionNode hrnSecondary = ogCallee.id2hrn.get( idSecondary );
+ Integer idSecondary = ogCallee.paramIndex2idSecondary.get(paramIndex);
+ assert ogCallee.id2hrn.containsKey(idSecondary);
+ HeapRegionNode hrnSecondary = ogCallee.id2hrn.get(idSecondary);
assert hrnSecondary != null;
- paramIndex2rewriteH_s.put( paramIndex, toShadowTokens( ogCallee, hrnSecondary.getAlpha() ) );
+ paramIndex2rewriteH_s.put(paramIndex, toShadowTokens(ogCallee, hrnSecondary.getAlpha() ) );
// setup J (secondary->X)
Iterator<ReferenceEdge> s2xItr = hrnSecondary.iteratorToReferencees();
while( s2xItr.hasNext() ) {
ReferenceEdge s2xEdge = s2xItr.next();
-
- if( !s2xEdge.isInitialParam() ) { continue; }
-
+
+ if( !s2xEdge.isInitialParam() ) {
+ continue;
+ }
+
HeapRegionNode hrnDst = s2xEdge.getDst();
-
- if( ogCallee.idPrimary2paramIndexSet.containsKey( hrnDst.getID() ) ) {
- Iterator<Integer> jItr = ogCallee.idPrimary2paramIndexSet.get( hrnDst.getID() ).iterator();
+
+ if( ogCallee.idPrimary2paramIndexSet.containsKey(hrnDst.getID() ) ) {
+ Iterator<Integer> jItr = ogCallee.idPrimary2paramIndexSet.get(hrnDst.getID() ).iterator();
while( jItr.hasNext() ) {
Integer j = jItr.next();
- paramIndex2rewriteJ_s2p.put( i, toShadowTokens( ogCallee, s2xEdge.getBeta() ) );
+ paramIndex2rewriteJ_s2p.put(i, toShadowTokens(ogCallee, s2xEdge.getBeta() ) );
}
-
+
} else {
- assert ogCallee.idSecondary2paramIndexSet.containsKey( hrnDst.getID() );
- paramIndex2rewriteJ_s2s.put( i, toShadowTokens( ogCallee, s2xEdge.getBeta() ) );
+ assert ogCallee.idSecondary2paramIndexSet.containsKey(hrnDst.getID() );
+ paramIndex2rewriteJ_s2s.put(i, toShadowTokens(ogCallee, s2xEdge.getBeta() ) );
}
}
// setup K (secondary)
- TempDescriptor tdParamR = ogCallee.paramIndex2tdR.get( paramIndex );
+ TempDescriptor tdParamR = ogCallee.paramIndex2tdR.get(paramIndex);
assert tdParamR != null;
- LabelNode lnParamR = ogCallee.td2ln.get( tdParamR );
+ LabelNode lnParamR = ogCallee.td2ln.get(tdParamR);
assert lnParamR != null;
- ReferenceEdge edgeSpecialR_i = lnParamR.getReferenceTo( hrnSecondary, null, null );
+ ReferenceEdge edgeSpecialR_i = lnParamR.getReferenceTo(hrnSecondary, null, null);
assert edgeSpecialR_i != null;
- paramIndex2rewriteK_s.put( paramIndex,
- toShadowTokens( ogCallee, edgeSpecialR_i.getBeta() ) );
+ paramIndex2rewriteK_s.put(paramIndex,
+ toShadowTokens(ogCallee, edgeSpecialR_i.getBeta() ) );
}
-
+
// now depending on whether the callee is static or not
// we need to account for a "this" argument in order to
// find the matching argument in the caller context
- TempDescriptor argTemp_i = fc.getArgMatchingParamIndex( fm, paramIndex );
+ TempDescriptor argTemp_i = fc.getArgMatchingParamIndex(fm, paramIndex);
// remember which caller arg label maps to param index
- LabelNode argLabel_i = getLabelNodeFromTemp( argTemp_i );
- paramIndex2ln.put( paramIndex, argLabel_i );
+ LabelNode argLabel_i = getLabelNodeFromTemp(argTemp_i);
+ paramIndex2ln.put(paramIndex, argLabel_i);
- // do a callee-effect strong update pre-pass here
+ // do a callee-effect strong update pre-pass here
if( argTemp_i.getType().isClass() ) {
Iterator<ReferenceEdge> edgeItr = argLabel_i.iteratorToReferencees();
HeapRegionNode hrn = edge.getDst();
if( (hrn.getNumReferencers() == 1) || // case 1
- (hrn.isSingleObject() && argLabel_i.getNumReferencees() == 1) // case 2
- ) {
+ (hrn.isSingleObject() && argLabel_i.getNumReferencees() == 1) // case 2
+ ) {
if( !DISABLE_STRONG_UPDATES ) {
- effectCalleeStrongUpdates( paramIndex, ogCallee, hrn );
- }
+ effectCalleeStrongUpdates(paramIndex, ogCallee, hrn);
+ }
}
}
}
while( edgeItr.hasNext() ) {
ReferenceEdge edge = edgeItr.next();
- d_i_p = d_i_p.union( edge.getBeta().intersection( edge.getDst().getAlpha() ) );
- d_i_s = d_i_s.union( edge.getBeta() );
+ d_i_p = d_i_p.union(edge.getBeta().intersection(edge.getDst().getAlpha() ) );
+ d_i_s = d_i_s.union(edge.getBeta() );
}
- paramIndex2rewrite_d_p.put( paramIndex, d_i_p );
- paramIndex2rewrite_d_s.put( paramIndex, d_i_s );
+ paramIndex2rewrite_d_p.put(paramIndex, d_i_p);
+ paramIndex2rewrite_d_s.put(paramIndex, d_i_s);
// TODO: we should only do this when we need it, and then
// memoize it for the rest of the mapping procedure
ReachabilitySet D_i = d_i_s.exhaustiveArityCombinations();
- paramIndex2rewriteD.put( paramIndex, D_i );
+ paramIndex2rewriteD.put(paramIndex, D_i);
}
Iterator lnArgItr = paramIndex2ln.entrySet().iterator();
while( lnArgItr.hasNext() ) {
- Map.Entry me = (Map.Entry) lnArgItr.next();
- Integer index = (Integer) me.getKey();
+ Map.Entry me = (Map.Entry)lnArgItr.next();
+ Integer index = (Integer) me.getKey();
LabelNode lnArg_i = (LabelNode) me.getValue();
-
+
Set<HeapRegionNode> dr = new HashSet<HeapRegionNode>();
Set<HeapRegionNode> r = new HashSet<HeapRegionNode>();
Set<HeapRegionNode> todo = new HashSet<HeapRegionNode>();
ReferenceEdge edge = edgeArgItr.next();
HeapRegionNode hrn = edge.getDst();
- dr.add( hrn );
+ dr.add(hrn);
if( lnArg_i.getNumReferencees() == 1 && hrn.isSingleObject() ) {
- defParamObj.add( hrn );
+ defParamObj.add(hrn);
}
Iterator<ReferenceEdge> edgeHrnItr = hrn.iteratorToReferencees();
while( edgeHrnItr.hasNext() ) {
ReferenceEdge edger = edgeHrnItr.next();
- todo.add( edger.getDst() );
+ todo.add(edger.getDst() );
}
// then follow links until all reachable nodes have been found
while( !todo.isEmpty() ) {
HeapRegionNode hrnr = todo.iterator().next();
- todo.remove( hrnr );
-
- r.add( hrnr );
-
+ todo.remove(hrnr);
+
+ r.add(hrnr);
+
Iterator<ReferenceEdge> edgeItr = hrnr.iteratorToReferencees();
while( edgeItr.hasNext() ) {
ReferenceEdge edger = edgeItr.next();
- if( !r.contains( edger.getDst() ) ) {
- todo.add( edger.getDst() );
+ if( !r.contains(edger.getDst() ) ) {
+ todo.add(edger.getDst() );
}
}
}
if( hrn.isSingleObject() ) {
- r.remove( hrn );
+ r.remove(hrn);
}
}
- pi2dr.put( index, dr );
- pi2r .put( index, r );
+ pi2dr.put(index, dr);
+ pi2r.put(index, r);
}
assert defParamObj.size() <= fm.numParameters();
// report primary parameter object mappings
mapItr = pi2dr.entrySet().iterator();
while( mapItr.hasNext() ) {
- Map.Entry me = (Map.Entry) mapItr.next();
- Integer paramIndex = (Integer) me.getKey();
- Set<HeapRegionNode> hrnAset = (Set<HeapRegionNode>) me.getValue();
+ Map.Entry me = (Map.Entry)mapItr.next();
+ Integer paramIndex = (Integer) me.getKey();
+ Set<HeapRegionNode> hrnAset = (Set<HeapRegionNode>)me.getValue();
Iterator<HeapRegionNode> hrnItr = hrnAset.iterator();
while( hrnItr.hasNext() ) {
HeapRegionNode hrnA = hrnItr.next();
- pd.mapRegionToParamObject( hrnA, paramIndex );
+ pd.mapRegionToParamObject(hrnA, paramIndex);
}
}
// report parameter-reachable mappings
mapItr = pi2r.entrySet().iterator();
while( mapItr.hasNext() ) {
- Map.Entry me = (Map.Entry) mapItr.next();
- Integer paramIndex = (Integer) me.getKey();
- Set<HeapRegionNode> hrnRset = (Set<HeapRegionNode>) me.getValue();
+ Map.Entry me = (Map.Entry)mapItr.next();
+ Integer paramIndex = (Integer) me.getKey();
+ Set<HeapRegionNode> hrnRset = (Set<HeapRegionNode>)me.getValue();
Iterator<HeapRegionNode> hrnItr = hrnRset.iterator();
while( hrnItr.hasNext() ) {
HeapRegionNode hrnR = hrnItr.next();
- pd.mapRegionToParamReachable( hrnR, paramIndex );
+ pd.mapRegionToParamReachable(hrnR, paramIndex);
}
}
// now iterate over reachable nodes to rewrite their alpha, and
- // classify edges found for beta rewrite
+ // classify edges found for beta rewrite
Hashtable<TokenTuple, ReachabilitySet> tokens2states = new Hashtable<TokenTuple, ReachabilitySet>();
Hashtable< Integer, Set<Vector> > edges_p2p = new Hashtable< Integer, Set<Vector> >();
// so again, with respect to some arg i...
lnArgItr = paramIndex2ln.entrySet().iterator();
while( lnArgItr.hasNext() ) {
- Map.Entry me = (Map.Entry) lnArgItr.next();
- Integer index = (Integer) me.getKey();
- LabelNode lnArg_i = (LabelNode) me.getValue();
-
- TokenTuple p_i = ogCallee.paramIndex2paramTokenPrimary.get( index );
- TokenTuple s_i = ogCallee.paramIndex2paramTokenSecondary.get( index );
- assert p_i != null;
-
- ensureEmptyEdgeIndexPair( edges_p2p, index );
- ensureEmptyEdgeIndexPair( edges_p2s, index );
- ensureEmptyEdgeIndexPair( edges_s2p, index );
- ensureEmptyEdgeIndexPair( edges_s2s, index );
- ensureEmptyEdgeIndexPair( edges_up_dr, index );
- ensureEmptyEdgeIndexPair( edges_up_r, index );
-
- Set<HeapRegionNode> dr = pi2dr.get( index );
+ Map.Entry me = (Map.Entry)lnArgItr.next();
+ Integer index = (Integer) me.getKey();
+ LabelNode lnArg_i = (LabelNode) me.getValue();
+
+ TokenTuple p_i = ogCallee.paramIndex2paramTokenPrimary.get(index);
+ TokenTuple s_i = ogCallee.paramIndex2paramTokenSecondary.get(index);
+ assert p_i != null;
+
+ ensureEmptyEdgeIndexPair(edges_p2p, index);
+ ensureEmptyEdgeIndexPair(edges_p2s, index);
+ ensureEmptyEdgeIndexPair(edges_s2p, index);
+ ensureEmptyEdgeIndexPair(edges_s2s, index);
+ ensureEmptyEdgeIndexPair(edges_up_dr, index);
+ ensureEmptyEdgeIndexPair(edges_up_r, index);
+
+ Set<HeapRegionNode> dr = pi2dr.get(index);
Iterator<HeapRegionNode> hrnItr = dr.iterator();
while( hrnItr.hasNext() ) {
// this heap region is definitely an "a_i" or primary by virtue of being in dr
HeapRegionNode hrn = hrnItr.next();
tokens2states.clear();
- tokens2states.put( p_i, hrn.getAlpha() );
-
- rewriteCallerReachability( index,
- hrn,
- null,
- paramIndex2rewriteH_p.get( index ),
- tokens2states,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- false,
- null );
-
- nodesWithNewAlpha.add( hrn );
+ tokens2states.put(p_i, hrn.getAlpha() );
+
+ rewriteCallerReachability(index,
+ hrn,
+ null,
+ paramIndex2rewriteH_p.get(index),
+ tokens2states,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ false,
+ null);
+
+ nodesWithNewAlpha.add(hrn);
// sort edges
Iterator<ReferenceEdge> edgeItr = hrn.iteratorToReferencers();
Iterator itr = pi2dr.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry mo = (Map.Entry) itr.next();
- Integer pi = (Integer) mo.getKey();
- Set<HeapRegionNode> dr_i = (Set<HeapRegionNode>) mo.getValue();
+ Map.Entry mo = (Map.Entry)itr.next();
+ Integer pi = (Integer) mo.getKey();
+ Set<HeapRegionNode> dr_i = (Set<HeapRegionNode>)mo.getValue();
- if( dr_i.contains( hrn0 ) ) {
- addEdgeIndexPair( edges_p2p, pi, edge, index );
+ if( dr_i.contains(hrn0) ) {
+ addEdgeIndexPair(edges_p2p, pi, edge, index);
edge_classified = true;
- }
+ }
}
itr = pi2r.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry mo = (Map.Entry) itr.next();
- Integer pi = (Integer) mo.getKey();
- Set<HeapRegionNode> r_i = (Set<HeapRegionNode>) mo.getValue();
+ Map.Entry mo = (Map.Entry)itr.next();
+ Integer pi = (Integer) mo.getKey();
+ Set<HeapRegionNode> r_i = (Set<HeapRegionNode>)mo.getValue();
- if( r_i.contains( hrn0 ) ) {
- addEdgeIndexPair( edges_s2p, pi, edge, index );
+ if( r_i.contains(hrn0) ) {
+ addEdgeIndexPair(edges_s2p, pi, edge, index);
edge_classified = true;
- }
+ }
}
}
// all of these edges are upstream of directly reachable objects
if( !edge_classified ) {
- addEdgeIndexPair( edges_up_dr, index, edge, index );
+ addEdgeIndexPair(edges_up_dr, index, edge, index);
}
}
}
- Set<HeapRegionNode> r = pi2r.get( index );
+ Set<HeapRegionNode> r = pi2r.get(index);
hrnItr = r.iterator();
while( hrnItr.hasNext() ) {
// this heap region is definitely an "r_i" or secondary by virtue of being in r
HeapRegionNode hrn = hrnItr.next();
-
- if( paramIndex2rewriteH_s.containsKey( index ) ) {
+
+ if( paramIndex2rewriteH_s.containsKey(index) ) {
tokens2states.clear();
- tokens2states.put( p_i, new ReachabilitySet().makeCanonical() );
- tokens2states.put( s_i, hrn.getAlpha() );
-
- rewriteCallerReachability( index,
- hrn,
- null,
- paramIndex2rewriteH_s.get( index ),
- tokens2states,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- false,
- null );
-
- nodesWithNewAlpha.add( hrn );
- }
+ tokens2states.put(p_i, new ReachabilitySet().makeCanonical() );
+ tokens2states.put(s_i, hrn.getAlpha() );
+
+ rewriteCallerReachability(index,
+ hrn,
+ null,
+ paramIndex2rewriteH_s.get(index),
+ tokens2states,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ false,
+ null);
+
+ nodesWithNewAlpha.add(hrn);
+ }
// sort edges
Iterator<ReferenceEdge> edgeItr = hrn.iteratorToReferencers();
Iterator itr = pi2dr.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry mo = (Map.Entry) itr.next();
- Integer pi = (Integer) mo.getKey();
- Set<HeapRegionNode> dr_i = (Set<HeapRegionNode>) mo.getValue();
+ Map.Entry mo = (Map.Entry)itr.next();
+ Integer pi = (Integer) mo.getKey();
+ Set<HeapRegionNode> dr_i = (Set<HeapRegionNode>)mo.getValue();
- if( dr_i.contains( hrn0 ) ) {
- addEdgeIndexPair( edges_p2s, pi, edge, index );
+ if( dr_i.contains(hrn0) ) {
+ addEdgeIndexPair(edges_p2s, pi, edge, index);
edge_classified = true;
- }
+ }
}
itr = pi2r.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry mo = (Map.Entry) itr.next();
- Integer pi = (Integer) mo.getKey();
- Set<HeapRegionNode> r_i = (Set<HeapRegionNode>) mo.getValue();
+ Map.Entry mo = (Map.Entry)itr.next();
+ Integer pi = (Integer) mo.getKey();
+ Set<HeapRegionNode> r_i = (Set<HeapRegionNode>)mo.getValue();
- if( r_i.contains( hrn0 ) ) {
- addEdgeIndexPair( edges_s2s, pi, edge, index );
+ if( r_i.contains(hrn0) ) {
+ addEdgeIndexPair(edges_s2s, pi, edge, index);
edge_classified = true;
- }
+ }
}
}
// these edges are all upstream of some reachable node
if( !edge_classified ) {
- addEdgeIndexPair( edges_up_r, index, edge, index );
+ addEdgeIndexPair(edges_up_r, index, edge, index);
}
}
}
// and again, with respect to some arg i...
lnArgItr = paramIndex2ln.entrySet().iterator();
while( lnArgItr.hasNext() ) {
- Map.Entry me = (Map.Entry) lnArgItr.next();
- Integer index = (Integer) me.getKey();
- LabelNode lnArg_i = (LabelNode) me.getValue();
+ Map.Entry me = (Map.Entry)lnArgItr.next();
+ Integer index = (Integer) me.getKey();
+ LabelNode lnArg_i = (LabelNode) me.getValue();
// update reachable edges
- Iterator edgeItr = edges_p2p.get( index ).iterator();
+ Iterator edgeItr = edges_p2p.get(index).iterator();
while( edgeItr.hasNext() ) {
- Vector mo = (Vector) edgeItr.next();
- ReferenceEdge edge = (ReferenceEdge) mo.get( 0 );
- Integer indexJ = (Integer) mo.get( 1 );
+ Vector mo = (Vector) edgeItr.next();
+ ReferenceEdge edge = (ReferenceEdge) mo.get(0);
+ Integer indexJ = (Integer) mo.get(1);
- if( !paramIndex2rewriteJ_p2p.containsKey( makeMapKey( index,
- indexJ,
- edge.getField() ) ) ) {
+ if( !paramIndex2rewriteJ_p2p.containsKey(makeMapKey(index,
+ indexJ,
+ edge.getField() ) ) ) {
continue;
}
- TokenTuple p_j = ogCallee.paramIndex2paramTokenPrimary.get( indexJ );
+ TokenTuple p_j = ogCallee.paramIndex2paramTokenPrimary.get(indexJ);
assert p_j != null;
-
+
tokens2states.clear();
- tokens2states.put( p_j, edge.getBeta() );
-
- rewriteCallerReachability( index,
- null,
- edge,
- paramIndex2rewriteJ_p2p.get( makeMapKey( index,
- indexJ,
- edge.getField() ) ),
- tokens2states,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- false,
- null );
-
- edgesWithNewBeta.add( edge );
+ tokens2states.put(p_j, edge.getBeta() );
+
+ rewriteCallerReachability(index,
+ null,
+ edge,
+ paramIndex2rewriteJ_p2p.get(makeMapKey(index,
+ indexJ,
+ edge.getField() ) ),
+ tokens2states,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ false,
+ null);
+
+ edgesWithNewBeta.add(edge);
}
- edgeItr = edges_p2s.get( index ).iterator();
+ edgeItr = edges_p2s.get(index).iterator();
while( edgeItr.hasNext() ) {
- Vector mo = (Vector) edgeItr.next();
- ReferenceEdge edge = (ReferenceEdge) mo.get( 0 );
- Integer indexJ = (Integer) mo.get( 1 );
+ Vector mo = (Vector) edgeItr.next();
+ ReferenceEdge edge = (ReferenceEdge) mo.get(0);
+ Integer indexJ = (Integer) mo.get(1);
- if( !paramIndex2rewriteJ_p2s.containsKey( makeMapKey( index,
- edge.getField() ) ) ) {
+ if( !paramIndex2rewriteJ_p2s.containsKey(makeMapKey(index,
+ edge.getField() ) ) ) {
continue;
}
- TokenTuple s_j = ogCallee.paramIndex2paramTokenSecondary.get( indexJ );
+ TokenTuple s_j = ogCallee.paramIndex2paramTokenSecondary.get(indexJ);
assert s_j != null;
tokens2states.clear();
- tokens2states.put( s_j, edge.getBeta() );
-
- rewriteCallerReachability( index,
- null,
- edge,
- paramIndex2rewriteJ_p2s.get( makeMapKey( index,
- edge.getField() ) ),
- tokens2states,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- false,
- null );
-
- edgesWithNewBeta.add( edge );
+ tokens2states.put(s_j, edge.getBeta() );
+
+ rewriteCallerReachability(index,
+ null,
+ edge,
+ paramIndex2rewriteJ_p2s.get(makeMapKey(index,
+ edge.getField() ) ),
+ tokens2states,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ false,
+ null);
+
+ edgesWithNewBeta.add(edge);
}
- edgeItr = edges_s2p.get( index ).iterator();
+ edgeItr = edges_s2p.get(index).iterator();
while( edgeItr.hasNext() ) {
- Vector mo = (Vector) edgeItr.next();
- ReferenceEdge edge = (ReferenceEdge) mo.get( 0 );
- Integer indexJ = (Integer) mo.get( 1 );
+ Vector mo = (Vector) edgeItr.next();
+ ReferenceEdge edge = (ReferenceEdge) mo.get(0);
+ Integer indexJ = (Integer) mo.get(1);
- if( !paramIndex2rewriteJ_s2p.containsKey( index ) ) {
+ if( !paramIndex2rewriteJ_s2p.containsKey(index) ) {
continue;
}
- TokenTuple p_j = ogCallee.paramIndex2paramTokenPrimary.get( indexJ );
+ TokenTuple p_j = ogCallee.paramIndex2paramTokenPrimary.get(indexJ);
assert p_j != null;
tokens2states.clear();
- tokens2states.put( p_j, edge.getBeta() );
-
- rewriteCallerReachability( index,
- null,
- edge,
- paramIndex2rewriteJ_s2p.get( index ),
- tokens2states,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- false,
- null );
-
- edgesWithNewBeta.add( edge );
+ tokens2states.put(p_j, edge.getBeta() );
+
+ rewriteCallerReachability(index,
+ null,
+ edge,
+ paramIndex2rewriteJ_s2p.get(index),
+ tokens2states,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ false,
+ null);
+
+ edgesWithNewBeta.add(edge);
}
- edgeItr = edges_s2s.get( index ).iterator();
+ edgeItr = edges_s2s.get(index).iterator();
while( edgeItr.hasNext() ) {
- Vector mo = (Vector) edgeItr.next();
- ReferenceEdge edge = (ReferenceEdge) mo.get( 0 );
- Integer indexJ = (Integer) mo.get( 1 );
+ Vector mo = (Vector) edgeItr.next();
+ ReferenceEdge edge = (ReferenceEdge) mo.get(0);
+ Integer indexJ = (Integer) mo.get(1);
- if( !paramIndex2rewriteJ_s2s.containsKey( index ) ) {
+ if( !paramIndex2rewriteJ_s2s.containsKey(index) ) {
continue;
}
- TokenTuple s_j = ogCallee.paramIndex2paramTokenSecondary.get( indexJ );
+ TokenTuple s_j = ogCallee.paramIndex2paramTokenSecondary.get(indexJ);
assert s_j != null;
tokens2states.clear();
- tokens2states.put( s_j, edge.getBeta() );
-
- rewriteCallerReachability( index,
- null,
- edge,
- paramIndex2rewriteJ_s2s.get( index ),
- tokens2states,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- false,
- null );
-
- edgesWithNewBeta.add( edge );
+ tokens2states.put(s_j, edge.getBeta() );
+
+ rewriteCallerReachability(index,
+ null,
+ edge,
+ paramIndex2rewriteJ_s2s.get(index),
+ tokens2states,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ false,
+ null);
+
+ edgesWithNewBeta.add(edge);
}
// update directly upstream edges
Hashtable<ReferenceEdge, ChangeTupleSet> edgeUpstreamPlannedChanges =
new Hashtable<ReferenceEdge, ChangeTupleSet>();
-
+
HashSet<ReferenceEdge> edgesDirectlyUpstream =
- new HashSet<ReferenceEdge>();
+ new HashSet<ReferenceEdge>();
- edgeItr = edges_up_dr.get( index ).iterator();
+ edgeItr = edges_up_dr.get(index).iterator();
while( edgeItr.hasNext() ) {
- Vector mo = (Vector) edgeItr.next();
- ReferenceEdge edge = (ReferenceEdge) mo.get( 0 );
- Integer indexJ = (Integer) mo.get( 1 );
+ Vector mo = (Vector) edgeItr.next();
+ ReferenceEdge edge = (ReferenceEdge) mo.get(0);
+ Integer indexJ = (Integer) mo.get(1);
- edgesDirectlyUpstream.add( edge );
+ edgesDirectlyUpstream.add(edge);
- TokenTuple p_j = ogCallee.paramIndex2paramTokenPrimary.get( indexJ );
+ TokenTuple p_j = ogCallee.paramIndex2paramTokenPrimary.get(indexJ);
assert p_j != null;
// start with K_p2 and p_j
tokens2states.clear();
- tokens2states.put( p_j, edge.getBeta() );
-
- rewriteCallerReachability( index,
- null,
- edge,
- paramIndex2rewriteK_p2.get( index ),
- tokens2states,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- true,
- edgeUpstreamPlannedChanges );
+ tokens2states.put(p_j, edge.getBeta() );
+
+ rewriteCallerReachability(index,
+ null,
+ edge,
+ paramIndex2rewriteK_p2.get(index),
+ tokens2states,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ true,
+ edgeUpstreamPlannedChanges);
// and add in s_j, if required, and do K_p
- TokenTuple s_j = ogCallee.paramIndex2paramTokenSecondary.get( indexJ );
+ TokenTuple s_j = ogCallee.paramIndex2paramTokenSecondary.get(indexJ);
if( s_j != null ) {
- tokens2states.put( s_j, edge.getBeta() );
+ tokens2states.put(s_j, edge.getBeta() );
}
- rewriteCallerReachability( index,
- null,
- edge,
- paramIndex2rewriteK_p.get( index ),
- tokens2states,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- true,
- edgeUpstreamPlannedChanges );
-
- edgesWithNewBeta.add( edge );
+ rewriteCallerReachability(index,
+ null,
+ edge,
+ paramIndex2rewriteK_p.get(index),
+ tokens2states,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ true,
+ edgeUpstreamPlannedChanges);
+
+ edgesWithNewBeta.add(edge);
}
- propagateTokensOverEdges( edgesDirectlyUpstream,
- edgeUpstreamPlannedChanges,
- edgesWithNewBeta );
-
+ propagateTokensOverEdges(edgesDirectlyUpstream,
+ edgeUpstreamPlannedChanges,
+ edgesWithNewBeta);
+
// update upstream edges
edgeUpstreamPlannedChanges =
new Hashtable<ReferenceEdge, ChangeTupleSet>();
HashSet<ReferenceEdge> edgesUpstream =
- new HashSet<ReferenceEdge>();
+ new HashSet<ReferenceEdge>();
- edgeItr = edges_up_r.get( index ).iterator();
+ edgeItr = edges_up_r.get(index).iterator();
while( edgeItr.hasNext() ) {
- Vector mo = (Vector) edgeItr.next();
- ReferenceEdge edge = (ReferenceEdge) mo.get( 0 );
- Integer indexJ = (Integer) mo.get( 1 );
+ Vector mo = (Vector) edgeItr.next();
+ ReferenceEdge edge = (ReferenceEdge) mo.get(0);
+ Integer indexJ = (Integer) mo.get(1);
- if( !paramIndex2rewriteK_s.containsKey( index ) ) {
+ if( !paramIndex2rewriteK_s.containsKey(index) ) {
continue;
}
- edgesUpstream.add( edge );
+ edgesUpstream.add(edge);
- TokenTuple p_j = ogCallee.paramIndex2paramTokenPrimary.get( indexJ );
+ TokenTuple p_j = ogCallee.paramIndex2paramTokenPrimary.get(indexJ);
assert p_j != null;
- TokenTuple s_j = ogCallee.paramIndex2paramTokenSecondary.get( indexJ );
+ TokenTuple s_j = ogCallee.paramIndex2paramTokenSecondary.get(indexJ);
assert s_j != null;
tokens2states.clear();
- tokens2states.put( p_j, rsWttsEmpty );
- tokens2states.put( s_j, edge.getBeta() );
-
- rewriteCallerReachability( index,
- null,
- edge,
- paramIndex2rewriteK_s.get( index ),
- tokens2states,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- true,
- edgeUpstreamPlannedChanges );
-
- edgesWithNewBeta.add( edge );
+ tokens2states.put(p_j, rsWttsEmpty);
+ tokens2states.put(s_j, edge.getBeta() );
+
+ rewriteCallerReachability(index,
+ null,
+ edge,
+ paramIndex2rewriteK_s.get(index),
+ tokens2states,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ true,
+ edgeUpstreamPlannedChanges);
+
+ edgesWithNewBeta.add(edge);
}
- propagateTokensOverEdges( edgesUpstream,
- edgeUpstreamPlannedChanges,
- edgesWithNewBeta );
+ propagateTokensOverEdges(edgesUpstream,
+ edgeUpstreamPlannedChanges,
+ edgesWithNewBeta);
} // end effects per argument/parameter map
edgeItr.next().applyBetaNew();
}
-
+
// verify the existence of allocation sites and their
// shadows from the callee in the context of this caller graph
// then map allocated nodes of callee onto the caller shadows
// grab the summary in the caller just to make sure
// the allocation site has nodes in the caller
- HeapRegionNode hrnSummary = getSummaryNode( allocSite );
+ HeapRegionNode hrnSummary = getSummaryNode(allocSite);
// assert that the shadow nodes have no reference edges
// because they're brand new to the graph, or last time
// they were used they should have been cleared of edges
- HeapRegionNode hrnShadowSummary = getShadowSummaryNode( allocSite );
+ HeapRegionNode hrnShadowSummary = getShadowSummaryNode(allocSite);
assert hrnShadowSummary.getNumReferencers() == 0;
assert hrnShadowSummary.getNumReferencees() == 0;
// then bring g_ij onto g'_ij and rewrite
- HeapRegionNode hrnSummaryCallee = ogCallee.getSummaryNode( allocSite );
- hrnShadowSummary.setAlpha( toShadowTokens( ogCallee, hrnSummaryCallee.getAlpha() ) );
+ HeapRegionNode hrnSummaryCallee = ogCallee.getSummaryNode(allocSite);
+ hrnShadowSummary.setAlpha(toShadowTokens(ogCallee, hrnSummaryCallee.getAlpha() ) );
// shadow nodes only are touched by a rewrite one time,
// so rewrite and immediately commit--and they don't belong
// to a particular parameter, so use a bogus param index
// that pulls a self-rewrite out of H
- rewriteCallerReachability( bogusIndex,
- hrnShadowSummary,
- null,
- funcScriptR( hrnShadowSummary.getAlpha(), ogCallee, mc ),
- tokens2statesEmpty,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- false,
- null );
+ rewriteCallerReachability(bogusIndex,
+ hrnShadowSummary,
+ null,
+ funcScriptR(hrnShadowSummary.getAlpha(), ogCallee, mc),
+ tokens2statesEmpty,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ false,
+ null);
hrnShadowSummary.applyAlphaNew();
HeapRegionNode hrnIthCallee = ogCallee.id2hrn.get(idIth);
hrnIthShadow.setAlpha(toShadowTokens(ogCallee, hrnIthCallee.getAlpha() ) );
- rewriteCallerReachability( bogusIndex,
- hrnIthShadow,
- null,
- funcScriptR( hrnIthShadow.getAlpha(), ogCallee, mc ),
- tokens2statesEmpty,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- false,
- null );
+ rewriteCallerReachability(bogusIndex,
+ hrnIthShadow,
+ null,
+ funcScriptR(hrnIthShadow.getAlpha(), ogCallee, mc),
+ tokens2statesEmpty,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ false,
+ null);
hrnIthShadow.applyAlphaNew();
}
// for every heap region->heap region edge in the
// callee graph, create the matching edge or edges
// in the caller graph
- Set sCallee = ogCallee.id2hrn.entrySet();
+ Set sCallee = ogCallee.id2hrn.entrySet();
Iterator iCallee = sCallee.iterator();
while( iCallee.hasNext() ) {
- Map.Entry meCallee = (Map.Entry) iCallee.next();
- Integer idCallee = (Integer) meCallee.getKey();
+ Map.Entry meCallee = (Map.Entry)iCallee.next();
+ Integer idCallee = (Integer) meCallee.getKey();
HeapRegionNode hrnCallee = (HeapRegionNode) meCallee.getValue();
Iterator<ReferenceEdge> heapRegionsItrCallee = hrnCallee.iteratorToReferencees();
while( heapRegionsItrCallee.hasNext() ) {
- ReferenceEdge edgeCallee = heapRegionsItrCallee.next();
+ ReferenceEdge edgeCallee = heapRegionsItrCallee.next();
HeapRegionNode hrnChildCallee = edgeCallee.getDst();
- Integer idChildCallee = hrnChildCallee.getID();
+ Integer idChildCallee = hrnChildCallee.getID();
// only address this edge if it is not a special initial edge
if( !edgeCallee.isInitialParam() ) {
// make the edge with src and dst so beta info is
// calculated once, then copy it for each new edge in caller
- ReferenceEdge edgeNewInCallerTemplate = new ReferenceEdge( null,
- null,
- edgeCallee.getType(),
- edgeCallee.getField(),
- false,
- funcScriptR( toShadowTokens( ogCallee,
- edgeCallee.getBeta()
- ),
- ogCallee,
- mc )
- );
-
- rewriteCallerReachability( bogusIndex,
- null,
- edgeNewInCallerTemplate,
- edgeNewInCallerTemplate.getBeta(),
- tokens2statesEmpty,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- false,
- null );
+ ReferenceEdge edgeNewInCallerTemplate = new ReferenceEdge(null,
+ null,
+ edgeCallee.getType(),
+ edgeCallee.getField(),
+ false,
+ funcScriptR(toShadowTokens(ogCallee,
+ edgeCallee.getBeta()
+ ),
+ ogCallee,
+ mc)
+ );
+
+ rewriteCallerReachability(bogusIndex,
+ null,
+ edgeNewInCallerTemplate,
+ edgeNewInCallerTemplate.getBeta(),
+ tokens2statesEmpty,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ false,
+ null);
edgeNewInCallerTemplate.applyBetaNew();
// and a set of destination heaps in the caller graph, and make
// a reference edge in the caller for every possible (src,dst) pair
HashSet<HeapRegionNode> possibleCallerSrcs =
- getHRNSetThatPossiblyMapToCalleeHRN( ogCallee,
- (HeapRegionNode) edgeCallee.getSrc(),
- pi2dr,
- pi2r );
+ getHRNSetThatPossiblyMapToCalleeHRN(ogCallee,
+ (HeapRegionNode) edgeCallee.getSrc(),
+ pi2dr,
+ pi2r);
HashSet<HeapRegionNode> possibleCallerDsts =
- getHRNSetThatPossiblyMapToCalleeHRN( ogCallee,
- edgeCallee.getDst(),
- pi2dr,
- pi2r );
+ getHRNSetThatPossiblyMapToCalleeHRN(ogCallee,
+ edgeCallee.getDst(),
+ pi2dr,
+ pi2r);
// make every possible pair of {srcSet} -> {dstSet} edges in the caller
Iterator srcItr = possibleCallerSrcs.iterator();
while( srcItr.hasNext() ) {
HeapRegionNode src = (HeapRegionNode) srcItr.next();
-
- if( !hasMatchingField( src, edgeCallee ) ) {
+
+ if( !hasMatchingField(src, edgeCallee) ) {
// prune this source node possibility
continue;
}
while( dstItr.hasNext() ) {
HeapRegionNode dst = (HeapRegionNode) dstItr.next();
- if( !hasMatchingType( edgeCallee, dst ) ) {
+ if( !hasMatchingType(edgeCallee, dst) ) {
// prune
continue;
}
-
+
/*
- //// KEEP THIS HACK AROUND FOR EXPERIMENTING WITH EDGE REMOVAL
- TypeDescriptor tdX = src.getType();
- TypeDescriptor tdY = dst.getType();
- if( tdX != null && tdY != null ) {
- if( tdX.toPrettyString().equals( "Object[]" ) &&
- tdY.toPrettyString().equals( "D2" ) ) {
- System.out.println( "Skipping an edge from Object[] -> D2 during call mapping" );
- continue;
- }
- if( tdX.toPrettyString().equals( "Object[]" ) &&
- tdY.toPrettyString().equals( "MessageList" ) ) {
- System.out.println( "Skipping an edge from Object[] -> MessageList during call mapping" );
- continue;
- }
- }
- */
+ //// KEEP THIS HACK AROUND FOR EXPERIMENTING WITH EDGE REMOVAL
+ TypeDescriptor tdX = src.getType();
+ TypeDescriptor tdY = dst.getType();
+ if( tdX != null && tdY != null ) {
+ if( tdX.toPrettyString().equals( "Object[]" ) &&
+ tdY.toPrettyString().equals( "D2" ) ) {
+ System.out.println( "Skipping an edge from Object[] -> D2 during call mapping" );
+ continue;
+ }
+ if( tdX.toPrettyString().equals( "Object[]" ) &&
+ tdY.toPrettyString().equals( "MessageList" ) ) {
+ System.out.println( "Skipping an edge from Object[] -> MessageList during call mapping" );
+ continue;
+ }
+ }
+ */
// otherwise the caller src and dst pair can match the edge, so make it
TypeDescriptor tdNewEdge =
- mostSpecificType( edgeCallee.getType(),
- hrnChildCallee.getType(),
- dst.getType()
- );
+ mostSpecificType(edgeCallee.getType(),
+ hrnChildCallee.getType(),
+ dst.getType()
+ );
ReferenceEdge edgeNewInCaller = edgeNewInCallerTemplate.copy();
- edgeNewInCaller.setSrc( src );
- edgeNewInCaller.setDst( dst );
- edgeNewInCaller.setType( tdNewEdge );
+ edgeNewInCaller.setSrc(src);
+ edgeNewInCaller.setDst(dst);
+ edgeNewInCaller.setType(tdNewEdge);
+
-
// handle taint info if callee created this edge
// added by eom
Set<Integer> pParamSet=idPrimary2paramIndexSet.get(dst.getID());
Set<Integer> sParamSet=idSecondary2paramIndexSet.get(dst.getID());
HashSet<Integer> paramSet=new HashSet<Integer>();
- if(pParamSet!=null){
- paramSet.addAll(pParamSet);
+ if(pParamSet!=null) {
+ paramSet.addAll(pParamSet);
}
- if(sParamSet!=null){
- paramSet.addAll(sParamSet);
+ if(sParamSet!=null) {
+ paramSet.addAll(sParamSet);
}
Iterator<Integer> paramIter=paramSet.iterator();
int newTaintIdentifier=0;
- while(paramIter.hasNext()){
- Integer paramIdx=paramIter.next();
- edgeNewInCaller.tainedBy(paramIdx);
+ while(paramIter.hasNext()) {
+ Integer paramIdx=paramIter.next();
+ edgeNewInCaller.tainedBy(paramIdx);
}
- ReferenceEdge edgeExisting = src.getReferenceTo( dst,
- edgeNewInCaller.getType(),
- edgeNewInCaller.getField() );
+ ReferenceEdge edgeExisting = src.getReferenceTo(dst,
+ edgeNewInCaller.getType(),
+ edgeNewInCaller.getField() );
if( edgeExisting == null ) {
// if this edge doesn't exist in the caller, create it
- addReferenceEdge( src, dst, edgeNewInCaller );
+ addReferenceEdge(src, dst, edgeNewInCaller);
} else {
// if it already exists, merge with it
- edgeExisting.setBeta( edgeExisting.getBeta().union( edgeNewInCaller.getBeta() ) );
+ edgeExisting.setBeta(edgeExisting.getBeta().union(edgeNewInCaller.getBeta() ) );
}
}
}
TempDescriptor returnTemp = fc.getReturnTemp();
if( returnTemp != null && !returnTemp.getType().isImmutable() ) {
- LabelNode lnLhsCaller = getLabelNodeFromTemp( returnTemp );
- clearReferenceEdgesFrom( lnLhsCaller, null, null, true );
+ LabelNode lnLhsCaller = getLabelNodeFromTemp(returnTemp);
+ clearReferenceEdgesFrom(lnLhsCaller, null, null, true);
- LabelNode lnReturnCallee = ogCallee.getLabelNodeFromTemp( tdReturn );
+ LabelNode lnReturnCallee = ogCallee.getLabelNodeFromTemp(tdReturn);
Iterator<ReferenceEdge> edgeCalleeItr = lnReturnCallee.iteratorToReferencees();
while( edgeCalleeItr.hasNext() ) {
- ReferenceEdge edgeCallee = edgeCalleeItr.next();
+ ReferenceEdge edgeCallee = edgeCalleeItr.next();
HeapRegionNode hrnChildCallee = edgeCallee.getDst();
// some edge types are not possible return values when we can
// see what type variable we are assigning it to
- if( !isSuperiorType( returnTemp.getType(), edgeCallee.getType() ) ) {
- System.out.println( "*** NOT EXPECTING TO SEE THIS: Throwing out "+edgeCallee+" for return temp "+returnTemp );
+ if( !isSuperiorType(returnTemp.getType(), edgeCallee.getType() ) ) {
+ System.out.println("*** NOT EXPECTING TO SEE THIS: Throwing out "+edgeCallee+" for return temp "+returnTemp);
// prune
continue;
- }
-
- ReferenceEdge edgeNewInCallerTemplate = new ReferenceEdge( null,
- null,
- edgeCallee.getType(),
- edgeCallee.getField(),
- false,
- funcScriptR( toShadowTokens(ogCallee,
- edgeCallee.getBeta() ),
- ogCallee,
- mc )
- );
- rewriteCallerReachability( bogusIndex,
- null,
- edgeNewInCallerTemplate,
- edgeNewInCallerTemplate.getBeta(),
- tokens2statesEmpty,
- paramIndex2rewrite_d_p,
- paramIndex2rewrite_d_s,
- paramIndex2rewriteD,
- ogCallee,
- false,
- null );
+ }
+
+ ReferenceEdge edgeNewInCallerTemplate = new ReferenceEdge(null,
+ null,
+ edgeCallee.getType(),
+ edgeCallee.getField(),
+ false,
+ funcScriptR(toShadowTokens(ogCallee,
+ edgeCallee.getBeta() ),
+ ogCallee,
+ mc)
+ );
+ rewriteCallerReachability(bogusIndex,
+ null,
+ edgeNewInCallerTemplate,
+ edgeNewInCallerTemplate.getBeta(),
+ tokens2statesEmpty,
+ paramIndex2rewrite_d_p,
+ paramIndex2rewrite_d_s,
+ paramIndex2rewriteD,
+ ogCallee,
+ false,
+ null);
edgeNewInCallerTemplate.applyBetaNew();
HashSet<HeapRegionNode> assignCallerRhs =
- getHRNSetThatPossiblyMapToCalleeHRN( ogCallee,
- edgeCallee.getDst(),
- pi2dr,
- pi2r );
+ getHRNSetThatPossiblyMapToCalleeHRN(ogCallee,
+ edgeCallee.getDst(),
+ pi2dr,
+ pi2r);
Iterator<HeapRegionNode> itrHrn = assignCallerRhs.iterator();
while( itrHrn.hasNext() ) {
HeapRegionNode hrnCaller = itrHrn.next();
// don't make edge in caller if it is disallowed by types
- if( !isSuperiorType( returnTemp.getType(), hrnCaller.getType() ) ) {
- // prune
+ if( !isSuperiorType(returnTemp.getType(), hrnCaller.getType() ) ) {
+ // prune
continue;
}
- if( !isSuperiorType( returnTemp.getType(), hrnChildCallee.getType() ) ) {
- // prune
+ if( !isSuperiorType(returnTemp.getType(), hrnChildCallee.getType() ) ) {
+ // prune
continue;
}
- if( !isSuperiorType( edgeCallee.getType(), hrnCaller.getType() ) ) {
+ if( !isSuperiorType(edgeCallee.getType(), hrnCaller.getType() ) ) {
// prune
continue;
}
-
+
TypeDescriptor tdNewEdge =
- mostSpecificType( edgeCallee.getType(),
- hrnChildCallee.getType(),
- hrnCaller.getType()
- );
+ mostSpecificType(edgeCallee.getType(),
+ hrnChildCallee.getType(),
+ hrnCaller.getType()
+ );
// otherwise caller node can match callee edge, so make it
ReferenceEdge edgeNewInCaller = edgeNewInCallerTemplate.copy();
- edgeNewInCaller.setSrc( lnLhsCaller );
- edgeNewInCaller.setDst( hrnCaller );
- edgeNewInCaller.setType( tdNewEdge );
+ edgeNewInCaller.setSrc(lnLhsCaller);
+ edgeNewInCaller.setDst(hrnCaller);
+ edgeNewInCaller.setType(tdNewEdge);
- ReferenceEdge edgeExisting = lnLhsCaller.getReferenceTo( hrnCaller,
- tdNewEdge,
- edgeNewInCaller.getField() );
+ ReferenceEdge edgeExisting = lnLhsCaller.getReferenceTo(hrnCaller,
+ tdNewEdge,
+ edgeNewInCaller.getField() );
if( edgeExisting == null ) {
// if this edge doesn't exist in the caller, create it
- addReferenceEdge( lnLhsCaller, hrnCaller, edgeNewInCaller );
+ addReferenceEdge(lnLhsCaller, hrnCaller, edgeNewInCaller);
} else {
// if it already exists, merge with it
- edgeExisting.setBeta( edgeExisting.getBeta().union( edgeNewInCaller.getBeta() ) );
+ edgeExisting.setBeta(edgeExisting.getBeta().union(edgeNewInCaller.getBeta() ) );
}
}
}
/*
- if( debugCallMap &&
- mc.getDescriptor().getSymbol().equals( debugCaller ) &&
- fm.getMethod().getSymbol().equals( debugCallee )
- ) {
-
- try {
- writeGraph("debug7JustBeforeMergeToKCapacity",
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
- } catch( IOException e ) {}
- }
- */
+ if( debugCallMap &&
+ mc.getDescriptor().getSymbol().equals( debugCaller ) &&
+ fm.getMethod().getSymbol().equals( debugCallee )
+ ) {
+
+ try {
+ writeGraph("debug7JustBeforeMergeToKCapacity",
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // show back edges to confirm graph validity
+ false, // show parameter indices (unmaintained!)
+ true, // hide subset reachability states
+ true); // hide edge taints
+ } catch( IOException e ) {}
+ }
+ */
// merge the shadow nodes of allocation sites back down to normal capacity
// first age each allocation site enough times to make room for the shadow nodes
for( int i = 0; i < as.getAllocationDepth(); ++i ) {
- age( as );
+ age(as);
}
// then merge the shadow summary into the normal summary
- HeapRegionNode hrnSummary = getSummaryNode( as );
+ HeapRegionNode hrnSummary = getSummaryNode(as);
assert hrnSummary != null;
- HeapRegionNode hrnSummaryShadow = getShadowSummaryNode( as );
+ HeapRegionNode hrnSummaryShadow = getShadowSummaryNode(as);
assert hrnSummaryShadow != null;
- mergeIntoSummary( hrnSummaryShadow, hrnSummary );
+ mergeIntoSummary(hrnSummaryShadow, hrnSummary);
// then clear off after merge
- clearReferenceEdgesFrom( hrnSummaryShadow, null, null, true );
- clearReferenceEdgesTo ( hrnSummaryShadow, null, null, true );
- hrnSummaryShadow.setAlpha( new ReachabilitySet().makeCanonical() );
+ clearReferenceEdgesFrom(hrnSummaryShadow, null, null, true);
+ clearReferenceEdgesTo(hrnSummaryShadow, null, null, true);
+ hrnSummaryShadow.setAlpha(new ReachabilitySet().makeCanonical() );
// then transplant shadow nodes onto the now clean normal nodes
for( int i = 0; i < as.getAllocationDepth(); ++i ) {
- Integer idIth = as.getIthOldest( i );
- HeapRegionNode hrnIth = id2hrn.get( idIth );
- Integer idIthShadow = as.getIthOldestShadow( i );
- HeapRegionNode hrnIthShadow = id2hrn.get( idIthShadow );
+ Integer idIth = as.getIthOldest(i);
+ HeapRegionNode hrnIth = id2hrn.get(idIth);
+ Integer idIthShadow = as.getIthOldestShadow(i);
+ HeapRegionNode hrnIthShadow = id2hrn.get(idIthShadow);
- transferOnto( hrnIthShadow, hrnIth );
+ transferOnto(hrnIthShadow, hrnIth);
// clear off shadow nodes after transfer
- clearReferenceEdgesFrom( hrnIthShadow, null, null, true );
- clearReferenceEdgesTo ( hrnIthShadow, null, null, true );
- hrnIthShadow.setAlpha( new ReachabilitySet().makeCanonical() );
+ clearReferenceEdgesFrom(hrnIthShadow, null, null, true);
+ clearReferenceEdgesTo(hrnIthShadow, null, null, true);
+ hrnIthShadow.setAlpha(new ReachabilitySet().makeCanonical() );
}
// finally, globally change shadow tokens into normal tokens
Iterator itrAllLabelNodes = td2ln.entrySet().iterator();
while( itrAllLabelNodes.hasNext() ) {
- Map.Entry me = (Map.Entry) itrAllLabelNodes.next();
+ Map.Entry me = (Map.Entry)itrAllLabelNodes.next();
LabelNode ln = (LabelNode) me.getValue();
Iterator<ReferenceEdge> itrEdges = ln.iteratorToReferencees();
while( itrEdges.hasNext() ) {
- unshadowTokens( as, itrEdges.next() );
+ unshadowTokens(as, itrEdges.next() );
}
}
Iterator itrAllHRNodes = id2hrn.entrySet().iterator();
while( itrAllHRNodes.hasNext() ) {
- Map.Entry me = (Map.Entry) itrAllHRNodes.next();
+ Map.Entry me = (Map.Entry)itrAllHRNodes.next();
HeapRegionNode hrnToAge = (HeapRegionNode) me.getValue();
- unshadowTokens( as, hrnToAge );
+ unshadowTokens(as, hrnToAge);
Iterator<ReferenceEdge> itrEdges = hrnToAge.iteratorToReferencees();
while( itrEdges.hasNext() ) {
- unshadowTokens( as, itrEdges.next() );
+ unshadowTokens(as, itrEdges.next() );
}
}
}
/*
- if( debugCallMap &&
- mc.getDescriptor().getSymbol().equals( debugCaller ) &&
- fm.getMethod().getSymbol().equals( debugCallee )
- ) {
-
- try {
- writeGraph( "debug8JustBeforeSweep",
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
- } catch( IOException e ) {}
- }
- */
+ if( debugCallMap &&
+ mc.getDescriptor().getSymbol().equals( debugCaller ) &&
+ fm.getMethod().getSymbol().equals( debugCallee )
+ ) {
+
+ try {
+ writeGraph( "debug8JustBeforeSweep",
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // show back edges to confirm graph validity
+ false, // show parameter indices (unmaintained!)
+ true, // hide subset reachability states
+ true); // hide edge taints
+ } catch( IOException e ) {}
+ }
+ */
// improve reachability as much as possible
if( debugCallMap &&
- mc.getDescriptor().getSymbol().equals( debugCaller ) &&
- fm.getMethod().getSymbol().equals( debugCallee )
- ) {
-
+ mc.getDescriptor().getSymbol().equals(debugCaller) &&
+ fm.getMethod().getSymbol().equals(debugCallee)
+ ) {
+
try {
- writeGraph( "debug9endResolveCall",
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
- } catch( IOException e ) {}
- System.out.println( " "+mc+" done calling "+fm );
+ writeGraph("debug9endResolveCall",
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // show back edges to confirm graph validity
+ false, // show parameter indices (unmaintained!)
+ true, // hide subset reachability states
+ true); // hide edge taints
+ } catch( IOException e ) {
+ }
+ System.out.println(" "+mc+" done calling "+fm);
++x;
if( x == debugCallMapCount ) {
- System.exit( 0 );
+ System.exit(0);
}
}
}
protected boolean hasMatchingField(HeapRegionNode src, ReferenceEdge edge) {
// if no type, then it's a match-everything region
- TypeDescriptor tdSrc = src.getType();
+ TypeDescriptor tdSrc = src.getType();
if( tdSrc == null ) {
return true;
}
TypeDescriptor tdSrcDeref = tdSrc.dereference();
assert tdSrcDeref != null;
- if( !typeUtil.isSuperorType( tdSrcDeref, td ) ) {
+ if( !typeUtil.isSuperorType(tdSrcDeref, td) ) {
return false;
}
- return edge.getField().equals( OwnershipAnalysis.arrayElementFieldName );
+ return edge.getField().equals(OwnershipAnalysis.arrayElementFieldName);
}
// if it's not a class, it doesn't have any fields to match
}
ClassDescriptor cd = tdSrc.getClassDesc();
- while( cd != null ) {
+ while( cd != null ) {
Iterator fieldItr = cd.getFields();
- while( fieldItr.hasNext() ) {
+ while( fieldItr.hasNext() ) {
FieldDescriptor fd = (FieldDescriptor) fieldItr.next();
- if( fd.getType().equals( edge.getType() ) &&
- fd.getSymbol().equals( edge.getField() ) ) {
+ if( fd.getType().equals(edge.getType() ) &&
+ fd.getSymbol().equals(edge.getField() ) ) {
return true;
}
}
-
+
cd = cd.getSuperDesc();
}
-
+
// otherwise it is a class with fields
// but we didn't find a match
return false;
protected boolean hasMatchingType(ReferenceEdge edge, HeapRegionNode dst) {
-
+
// if the region has no type, matches everything
TypeDescriptor tdDst = dst.getType();
if( tdDst == null ) {
return true;
}
-
+
// if the type is not a class or an array, don't
// match because primitives are copied, no aliases
ClassDescriptor cdDst = tdDst.getClassDesc();
if( cdDst == null && !tdDst.isArray() ) {
return false;
}
-
+
// if the edge type is null, it matches everything
TypeDescriptor tdEdge = edge.getType();
if( tdEdge == null ) {
return true;
}
-
+
return typeUtil.isSuperorType(tdEdge, tdDst);
}
HeapRegionNode hrn,
ReferenceEdge edge,
ReachabilitySet rules,
- Hashtable<TokenTuple, ReachabilitySet> tokens2states,
+ Hashtable<TokenTuple, ReachabilitySet> tokens2states,
Hashtable<Integer, ReachabilitySet> paramIndex2rewrite_d_p,
Hashtable<Integer, ReachabilitySet> paramIndex2rewrite_d_s,
Hashtable<Integer, ReachabilitySet> paramIndex2rewriteD,
- OwnershipGraph ogCallee,
+ OwnershipGraph ogCallee,
boolean makeChangeSet,
Hashtable<ReferenceEdge, ChangeTupleSet> edgePlannedChanges) {
assert(hrn == null && edge != null) ||
- (hrn != null && edge == null);
+ (hrn != null && edge == null);
assert rules != null;
assert tokens2states != null;
// caller-context token tuple sets that were used to generate it
Hashtable<TokenTupleSet, HashSet<TokenTupleSet> > rewritten2source =
new Hashtable<TokenTupleSet, HashSet<TokenTupleSet> >();
- rewritten2source.put( ttsEmpty, new HashSet<TokenTupleSet>() );
+ rewritten2source.put(ttsEmpty, new HashSet<TokenTupleSet>() );
+
-
Iterator<TokenTupleSet> rulesItr = rules.iterator();
while(rulesItr.hasNext()) {
TokenTupleSet rule = rulesItr.next();
Iterator<TokenTuple> ruleItr = rule.iterator();
while(ruleItr.hasNext()) {
- TokenTuple ttCallee = ruleItr.next();
+ TokenTuple ttCallee = ruleItr.next();
// compute the possibilities for rewriting this callee token
ReachabilitySet ttCalleeRewrites = null;
- boolean callerSourceUsed = false;
+ boolean callerSourceUsed = false;
- if( tokens2states.containsKey( ttCallee ) ) {
+ if( tokens2states.containsKey(ttCallee) ) {
callerSourceUsed = true;
- ttCalleeRewrites = tokens2states.get( ttCallee );
+ ttCalleeRewrites = tokens2states.get(ttCallee);
assert ttCalleeRewrites != null;
- } else if( ogCallee.paramTokenPrimary2paramIndex.containsKey( ttCallee ) ) {
+ } else if( ogCallee.paramTokenPrimary2paramIndex.containsKey(ttCallee) ) {
// use little d_p
- Integer paramIndex_j = ogCallee.paramTokenPrimary2paramIndex.get( ttCallee );
- assert paramIndex_j != null;
- ttCalleeRewrites = paramIndex2rewrite_d_p.get( paramIndex_j );
+ Integer paramIndex_j = ogCallee.paramTokenPrimary2paramIndex.get(ttCallee);
+ assert paramIndex_j != null;
+ ttCalleeRewrites = paramIndex2rewrite_d_p.get(paramIndex_j);
assert ttCalleeRewrites != null;
- } else if( ogCallee.paramTokenSecondary2paramIndex.containsKey( ttCallee ) ) {
+ } else if( ogCallee.paramTokenSecondary2paramIndex.containsKey(ttCallee) ) {
// use little d_s
- Integer paramIndex_j = ogCallee.paramTokenSecondary2paramIndex.get( ttCallee );
- assert paramIndex_j != null;
- ttCalleeRewrites = paramIndex2rewrite_d_s.get( paramIndex_j );
+ Integer paramIndex_j = ogCallee.paramTokenSecondary2paramIndex.get(ttCallee);
+ assert paramIndex_j != null;
+ ttCalleeRewrites = paramIndex2rewrite_d_s.get(paramIndex_j);
assert ttCalleeRewrites != null;
- } else if( ogCallee.paramTokenSecondaryPlus2paramIndex.containsKey( ttCallee ) ) {
+ } else if( ogCallee.paramTokenSecondaryPlus2paramIndex.containsKey(ttCallee) ) {
// worse, use big D
- Integer paramIndex_j = ogCallee.paramTokenSecondaryPlus2paramIndex.get( ttCallee );
- assert paramIndex_j != null;
- ttCalleeRewrites = paramIndex2rewriteD.get( paramIndex_j );
+ Integer paramIndex_j = ogCallee.paramTokenSecondaryPlus2paramIndex.get(ttCallee);
+ assert paramIndex_j != null;
+ ttCalleeRewrites = paramIndex2rewriteD.get(paramIndex_j);
assert ttCalleeRewrites != null;
- } else if( ogCallee.paramTokenSecondaryStar2paramIndex.containsKey( ttCallee ) ) {
+ } else if( ogCallee.paramTokenSecondaryStar2paramIndex.containsKey(ttCallee) ) {
// worse, use big D
- Integer paramIndex_j = ogCallee.paramTokenSecondaryStar2paramIndex.get( ttCallee );
- assert paramIndex_j != null;
- ttCalleeRewrites = paramIndex2rewriteD.get( paramIndex_j );
+ Integer paramIndex_j = ogCallee.paramTokenSecondaryStar2paramIndex.get(ttCallee);
+ assert paramIndex_j != null;
+ ttCalleeRewrites = paramIndex2rewriteD.get(paramIndex_j);
assert ttCalleeRewrites != null;
} else {
// otherwise there's no need for a rewrite, just pass this one on
- TokenTupleSet ttsCaller = new TokenTupleSet( ttCallee ).makeCanonical();
- ttCalleeRewrites = new ReachabilitySet( ttsCaller ).makeCanonical();
+ TokenTupleSet ttsCaller = new TokenTupleSet(ttCallee).makeCanonical();
+ ttCalleeRewrites = new ReachabilitySet(ttsCaller).makeCanonical();
}
// branch every version of the working rewritten rule with
while( ttCalleeRewritesItr.hasNext() ) {
TokenTupleSet ttsBranch = ttCalleeRewritesItr.next();
- TokenTupleSet ttsRewrittenNext = ttsRewritten.unionUpArity( ttsBranch );
+ TokenTupleSet ttsRewrittenNext = ttsRewritten.unionUpArity(ttsBranch);
if( makeChangeSet ) {
// in order to keep the list of source token tuple sets
// start with the sets used to make the partially rewritten
// rule up to this point
- HashSet<TokenTupleSet> sourceSets = rewritten2source.get( ttsRewritten );
+ HashSet<TokenTupleSet> sourceSets = rewritten2source.get(ttsRewritten);
assert sourceSets != null;
// make a shallow copy for possible modification
- sourceSets = (HashSet<TokenTupleSet>) sourceSets.clone();
+ sourceSets = (HashSet<TokenTupleSet>)sourceSets.clone();
// if we used something from the caller to rewrite it, remember
if( callerSourceUsed ) {
- sourceSets.add( ttsBranch );
+ sourceSets.add(ttsBranch);
}
// set mapping for the further rewritten rule
- rewritten2source.put( ttsRewrittenNext, sourceSets );
+ rewritten2source.put(ttsRewrittenNext, sourceSets);
}
rewrittenRuleWithTTCallee =
- rewrittenRuleWithTTCallee.union( ttsRewrittenNext );
+ rewrittenRuleWithTTCallee.union(ttsRewrittenNext);
}
}
// the rule has been entirely rewritten into the caller context
// now, so add it to the new reachability information
callerReachabilityNew =
- callerReachabilityNew.union( rewrittenRule );
+ callerReachabilityNew.union(rewrittenRule);
}
if( makeChangeSet ) {
Iterator<TokenTupleSet> callerReachabilityItr = callerReachabilityNew.iterator();
while( callerReachabilityItr.hasNext() ) {
TokenTupleSet ttsRewrittenFinal = callerReachabilityItr.next();
- HashSet<TokenTupleSet> sourceSets = rewritten2source.get( ttsRewrittenFinal );
+ HashSet<TokenTupleSet> sourceSets = rewritten2source.get(ttsRewrittenFinal);
assert sourceSets != null;
Iterator<TokenTupleSet> sourceSetsItr = sourceSets.iterator();
TokenTupleSet ttsSource = sourceSetsItr.next();
callerChangeSet =
- callerChangeSet.union( new ChangeTuple( ttsSource, ttsRewrittenFinal ) );
+ callerChangeSet.union(new ChangeTuple(ttsSource, ttsRewrittenFinal) );
}
}
assert edgePlannedChanges != null;
- edgePlannedChanges.put( edge, callerChangeSet );
+ edgePlannedChanges.put(edge, callerChangeSet);
}
if( hrn == null ) {
- edge.setBetaNew( edge.getBetaNew().union( callerReachabilityNew ) );
+ edge.setBetaNew(edge.getBetaNew().union(callerReachabilityNew) );
} else {
- hrn.setAlphaNew( hrn.getAlphaNew().union( callerReachabilityNew ) );
+ hrn.setAlphaNew(hrn.getAlphaNew().union(callerReachabilityNew) );
}
}
private HashSet<HeapRegionNode>
- getHRNSetThatPossiblyMapToCalleeHRN( OwnershipGraph ogCallee,
- HeapRegionNode hrnCallee,
- Hashtable<Integer, Set<HeapRegionNode> > pi2dr,
- Hashtable<Integer, Set<HeapRegionNode> > pi2r
- ) {
-
+ getHRNSetThatPossiblyMapToCalleeHRN(OwnershipGraph ogCallee,
+ HeapRegionNode hrnCallee,
+ Hashtable<Integer, Set<HeapRegionNode> > pi2dr,
+ Hashtable<Integer, Set<HeapRegionNode> > pi2r
+ ) {
+
HashSet<HeapRegionNode> possibleCallerHRNs = new HashSet<HeapRegionNode>();
- Set<Integer> paramIndicesCallee_p = ogCallee.idPrimary2paramIndexSet .get( hrnCallee.getID() );
- Set<Integer> paramIndicesCallee_s = ogCallee.idSecondary2paramIndexSet.get( hrnCallee.getID() );
+ Set<Integer> paramIndicesCallee_p = ogCallee.idPrimary2paramIndexSet.get(hrnCallee.getID() );
+ Set<Integer> paramIndicesCallee_s = ogCallee.idSecondary2paramIndexSet.get(hrnCallee.getID() );
if( paramIndicesCallee_p == null &&
- paramIndicesCallee_s == null ) {
+ paramIndicesCallee_s == null ) {
// this is a node allocated in the callee and it has
// exactly one shadow node in the caller to map to
AllocationSite as = hrnCallee.getAllocationSite();
assert as != null;
- int age = as.getAgeCategory( hrnCallee.getID() );
+ int age = as.getAgeCategory(hrnCallee.getID() );
assert age != AllocationSite.AGE_notInThisSite;
Integer idCaller;
} else {
assert age == AllocationSite.AGE_in_I;
- Integer I = as.getAge( hrnCallee.getID() );
+ Integer I = as.getAge(hrnCallee.getID() );
assert I != null;
- idCaller = as.getIthOldestShadow( I );
+ idCaller = as.getIthOldestShadow(I);
}
- assert id2hrn.containsKey( idCaller );
- possibleCallerHRNs.add( id2hrn.get( idCaller ) );
+ assert id2hrn.containsKey(idCaller);
+ possibleCallerHRNs.add(id2hrn.get(idCaller) );
return possibleCallerHRNs;
}
Iterator<Integer> itrIndex = paramIndicesCallee_p.iterator();
while( itrIndex.hasNext() ) {
Integer paramIndexCallee = itrIndex.next();
- assert pi2dr.containsKey( paramIndexCallee );
- possibleCallerHRNs.addAll( pi2dr.get( paramIndexCallee ) );
+ assert pi2dr.containsKey(paramIndexCallee);
+ possibleCallerHRNs.addAll(pi2dr.get(paramIndexCallee) );
}
}
Iterator<Integer> itrIndex = paramIndicesCallee_s.iterator();
while( itrIndex.hasNext() ) {
Integer paramIndexCallee = itrIndex.next();
- assert pi2r.containsKey( paramIndexCallee );
- possibleCallerHRNs.addAll( pi2r.get( paramIndexCallee ) );
+ assert pi2r.containsKey(paramIndexCallee);
+ possibleCallerHRNs.addAll(pi2r.get(paramIndexCallee) );
}
}
// boldB is part of the phase 1 sweep
Hashtable< Integer, Hashtable<ReferenceEdge, ReachabilitySet> > boldB =
- new Hashtable< Integer, Hashtable<ReferenceEdge, ReachabilitySet> >();
+ new Hashtable< Integer, Hashtable<ReferenceEdge, ReachabilitySet> >();
// visit every heap region to initialize alphaNew and calculate boldB
Set hrns = id2hrn.entrySet();
Map.Entry me = (Map.Entry)itrHrns.next();
Integer token = (Integer) me.getKey();
HeapRegionNode hrn = (HeapRegionNode) me.getValue();
-
+
// assert that this node and incoming edges have clean alphaNew
// and betaNew sets, respectively
- assert rsEmpty.equals( hrn.getAlphaNew() );
+ assert rsEmpty.equals(hrn.getAlphaNew() );
Iterator<ReferenceEdge> itrRers = hrn.iteratorToReferencers();
while( itrRers.hasNext() ) {
ReferenceEdge edge = itrRers.next();
- assert rsEmpty.equals( edge.getBetaNew() );
- }
+ assert rsEmpty.equals(edge.getBetaNew() );
+ }
// calculate boldB for this flagged node
if( hrn.isFlagged() || hrn.isParameter() ) {
-
+
Hashtable<ReferenceEdge, ReachabilitySet> boldB_f =
new Hashtable<ReferenceEdge, ReachabilitySet>();
-
+
Set<ReferenceEdge> workSetEdges = new HashSet<ReferenceEdge>();
// initial boldB_f constraints
while( itrRees.hasNext() ) {
ReferenceEdge edge = itrRees.next();
- assert !boldB.containsKey( edge );
- boldB_f.put( edge, edge.getBeta() );
+ assert !boldB.containsKey(edge);
+ boldB_f.put(edge, edge.getBeta() );
- assert !workSetEdges.contains( edge );
- workSetEdges.add( edge );
- }
+ assert !workSetEdges.contains(edge);
+ workSetEdges.add(edge);
+ }
// enforce the boldB_f constraint at edges until we reach a fixed point
while( !workSetEdges.isEmpty() ) {
ReferenceEdge edge = workSetEdges.iterator().next();
- workSetEdges.remove( edge );
-
+ workSetEdges.remove(edge);
+
Iterator<ReferenceEdge> itrPrime = edge.getDst().iteratorToReferencees();
while( itrPrime.hasNext() ) {
- ReferenceEdge edgePrime = itrPrime.next();
-
- ReachabilitySet prevResult = boldB_f.get( edgePrime );
- ReachabilitySet intersection = boldB_f.get( edge ).intersection( edgePrime.getBeta() );
-
- if( prevResult == null ||
- prevResult.union( intersection ).size() > prevResult.size() ) {
-
+ ReferenceEdge edgePrime = itrPrime.next();
+
+ ReachabilitySet prevResult = boldB_f.get(edgePrime);
+ ReachabilitySet intersection = boldB_f.get(edge).intersection(edgePrime.getBeta() );
+
+ if( prevResult == null ||
+ prevResult.union(intersection).size() > prevResult.size() ) {
+
if( prevResult == null ) {
- boldB_f.put( edgePrime, edgePrime.getBeta().union( intersection ) );
+ boldB_f.put(edgePrime, edgePrime.getBeta().union(intersection) );
} else {
- boldB_f.put( edgePrime, prevResult .union( intersection ) );
+ boldB_f.put(edgePrime, prevResult.union(intersection) );
}
- workSetEdges.add( edgePrime );
+ workSetEdges.add(edgePrime);
}
}
}
-
- boldB.put( token, boldB_f );
- }
+
+ boldB.put(token, boldB_f);
+ }
}
// never remove the identity token from a flagged region
// because it is trivially satisfied
- TokenTuple ttException = new TokenTuple( token,
- !hrn.isSingleObject(),
- TokenTuple.ARITY_ONE ).makeCanonical();
+ TokenTuple ttException = new TokenTuple(token,
+ !hrn.isSingleObject(),
+ TokenTuple.ARITY_ONE).makeCanonical();
ChangeTupleSet cts = new ChangeTupleSet().makeCanonical();
// never remove the identity token from a flagged region
// because it is trivially satisfied
- if( hrn.isFlagged() || hrn.isParameter() ) {
+ if( hrn.isFlagged() || hrn.isParameter() ) {
if( ttOld == ttException ) {
continue;
}
// if it isn't allowed, mark for removal
Integer idOld = ttOld.getToken();
- assert id2hrn.containsKey( idOld );
- Hashtable<ReferenceEdge, ReachabilitySet> B = boldB.get( idOld );
- ReachabilitySet boldB_ttOld_incident = B.get( incidentEdge );// B is NULL!
+ assert id2hrn.containsKey(idOld);
+ Hashtable<ReferenceEdge, ReachabilitySet> B = boldB.get(idOld);
+ ReachabilitySet boldB_ttOld_incident = B.get(incidentEdge); // B is NULL!
if( boldB_ttOld_incident != null &&
- boldB_ttOld_incident.contains( ttsOld ) ) {
+ boldB_ttOld_incident.contains(ttsOld) ) {
foundState = true;
}
}
if( !foundState ) {
- markedTokens = markedTokens.add( ttOld );
+ markedTokens = markedTokens.add(ttOld);
}
}
// if there is nothing marked, just move on
if( markedTokens.isEmpty() ) {
- hrn.setAlphaNew( hrn.getAlphaNew().union( ttsOld ) );
+ hrn.setAlphaNew(hrn.getAlphaNew().union(ttsOld) );
continue;
}
while( ttItr.hasNext() ) {
TokenTuple ttOld = ttItr.next();
- if( !markedTokens.containsTuple( ttOld ) ) {
- ttsPruned = ttsPruned.union( ttOld );
+ if( !markedTokens.containsTuple(ttOld) ) {
+ ttsPruned = ttsPruned.union(ttOld);
}
}
- assert !ttsOld.equals( ttsPruned );
+ assert !ttsOld.equals(ttsPruned);
- hrn.setAlphaNew( hrn.getAlphaNew().union( ttsPruned ) );
- ChangeTuple ct = new ChangeTuple( ttsOld, ttsPruned ).makeCanonical();
- cts = cts.union( ct );
+ hrn.setAlphaNew(hrn.getAlphaNew().union(ttsPruned) );
+ ChangeTuple ct = new ChangeTuple(ttsOld, ttsPruned).makeCanonical();
+ cts = cts.union(ct);
}
// throw change tuple set on all incident edges
Iterator<ReferenceEdge> incidentEdgeItr = hrn.iteratorToReferencers();
while( incidentEdgeItr.hasNext() ) {
ReferenceEdge incidentEdge = incidentEdgeItr.next();
-
- edgesForPropagation.add( incidentEdge );
-
- if( edgePlannedChanges.get( incidentEdge ) == null ) {
- edgePlannedChanges.put( incidentEdge, cts );
- } else {
- edgePlannedChanges.put(
- incidentEdge,
- edgePlannedChanges.get( incidentEdge ).union( cts )
- );
+
+ edgesForPropagation.add(incidentEdge);
+
+ if( edgePlannedChanges.get(incidentEdge) == null ) {
+ edgePlannedChanges.put(incidentEdge, cts);
+ } else {
+ edgePlannedChanges.put(
+ incidentEdge,
+ edgePlannedChanges.get(incidentEdge).union(cts)
+ );
}
}
}
}
-
+
HashSet<ReferenceEdge> edgesUpdated = new HashSet<ReferenceEdge>();
- propagateTokensOverEdges( edgesForPropagation,
- edgePlannedChanges,
- edgesUpdated );
+ propagateTokensOverEdges(edgesForPropagation,
+ edgePlannedChanges,
+ edgesUpdated);
// at the end of the 1st phase reference edges have
// beta, betaNew that correspond to beta and betaR
hrn.applyAlphaNew();
Iterator<ReferenceEdge> itrRes = hrn.iteratorToReferencers();
while( itrRes.hasNext() ) {
- res.add( itrRes.next() );
+ res.add(itrRes.next() );
}
}
- // 2nd phase
+ // 2nd phase
Iterator<ReferenceEdge> edgeItr = res.iterator();
while( edgeItr.hasNext() ) {
ReferenceEdge edge = edgeItr.next();
HeapRegionNode hrn = edge.getDst();
// commit results of last phase
- if( edgesUpdated.contains( edge ) ) {
+ if( edgesUpdated.contains(edge) ) {
edge.applyBetaNew();
}
// compute intial condition of 2nd phase
- edge.setBetaNew( edge.getBeta().intersection( hrn.getAlpha() ) );
+ edge.setBetaNew(edge.getBeta().intersection(hrn.getAlpha() ) );
}
-
+
// every edge in the graph is the initial workset
Set<ReferenceEdge> edgeWorkSet = (Set) res.clone();
while( !edgeWorkSet.isEmpty() ) {
ReferenceEdge edgePrime = edgeWorkSet.iterator().next();
- edgeWorkSet.remove( edgePrime );
+ edgeWorkSet.remove(edgePrime);
OwnershipNode on = edgePrime.getSrc();
if( !(on instanceof HeapRegionNode) ) {
Iterator<ReferenceEdge> itrEdge = hrn.iteratorToReferencers();
while( itrEdge.hasNext() ) {
- ReferenceEdge edge = itrEdge.next();
+ ReferenceEdge edge = itrEdge.next();
ReachabilitySet prevResult = edge.getBetaNew();
assert prevResult != null;
- ReachabilitySet intersection = edge.getBeta().intersection( edgePrime.getBetaNew() );
-
- if( prevResult.union( intersection ).size() > prevResult.size() ) {
- edge.setBetaNew( prevResult.union( intersection ) );
- edgeWorkSet.add( edge );
- }
- }
+ ReachabilitySet intersection = edge.getBeta().intersection(edgePrime.getBetaNew() );
+
+ if( prevResult.union(intersection).size() > prevResult.size() ) {
+ edge.setBetaNew(prevResult.union(intersection) );
+ edgeWorkSet.add(edge);
+ }
+ }
}
// commit beta' (beta<-betaNew)
edgeItr = res.iterator();
while( edgeItr.hasNext() ) {
edgeItr.next().applyBetaNew();
- }
- }
+ }
+ }
// don't use the ReferenceEdge.equals() here because
// we're talking about existence between graphs
- if( idChildB.equals( idChildA ) &&
- edgeB.typeAndFieldEquals( edgeA ) ) {
+ if( idChildB.equals(idChildA) &&
+ edgeB.typeAndFieldEquals(edgeA) ) {
edgeToMerge = edgeB;
}
edgeToMerge.setBeta(
edgeToMerge.getBeta().union(edgeA.getBeta() )
);
- //TODO eom
- edgeToMerge.unionTaintIdentifier(edgeA.getTaintIdentifier());
+ //TODO eom
+ edgeToMerge.unionTaintIdentifier(edgeA.getTaintIdentifier());
if( !edgeA.isInitialParam() ) {
edgeToMerge.setIsInitialParam(false);
}
while( heapRegionsItrB.hasNext() &&
edgeToMerge == null ) {
- ReferenceEdge edgeB = heapRegionsItrB.next();
+ ReferenceEdge edgeB = heapRegionsItrB.next();
HeapRegionNode hrnChildB = edgeB.getDst();
- Integer idChildB = hrnChildB.getID();
+ Integer idChildB = hrnChildB.getID();
// don't use the ReferenceEdge.equals() here because
// we're talking about existence between graphs
- if( idChildB.equals( idChildA ) &&
- edgeB.typeAndFieldEquals( edgeA ) ) {
+ if( idChildB.equals(idChildA) &&
+ edgeB.typeAndFieldEquals(edgeA) ) {
edgeToMerge = edgeB;
}
edgeToMerge.setBeta(
edgeToMerge.getBeta().union(edgeA.getBeta() )
);
- edgeToMerge.unionTaintIdentifier(edgeA.getTaintIdentifier());
+ edgeToMerge.unionTaintIdentifier(edgeA.getTaintIdentifier());
if( !edgeA.isInitialParam() ) {
edgeToMerge.setIsInitialParam(false);
}
// same number of parameters, or if one or both parameter
// index tables are empty
protected void mergeParamIndexMappings(OwnershipGraph og) {
-
+
if( idPrimary2paramIndexSet.size() == 0 ) {
idPrimary2paramIndexSet = og.idPrimary2paramIndexSet;
paramIndex2tdR = og.paramIndex2tdR;
paramTokenPrimary2paramIndex = og.paramTokenPrimary2paramIndex;
- paramIndex2paramTokenPrimary = og.paramIndex2paramTokenPrimary;
+ paramIndex2paramTokenPrimary = og.paramIndex2paramTokenPrimary;
- paramTokenSecondary2paramIndex = og.paramTokenSecondary2paramIndex;
- paramIndex2paramTokenSecondary = og.paramIndex2paramTokenSecondary;
+ paramTokenSecondary2paramIndex = og.paramTokenSecondary2paramIndex;
+ paramIndex2paramTokenSecondary = og.paramIndex2paramTokenSecondary;
paramTokenSecondaryPlus2paramIndex = og.paramTokenSecondaryPlus2paramIndex;
paramIndex2paramTokenSecondaryPlus = og.paramIndex2paramTokenSecondaryPlus;
paramTokenSecondaryStar2paramIndex = og.paramTokenSecondaryStar2paramIndex;
- paramIndex2paramTokenSecondaryStar = og.paramIndex2paramTokenSecondaryStar;
+ paramIndex2paramTokenSecondaryStar = og.paramIndex2paramTokenSecondaryStar;
return;
}
og.idPrimary2paramIndexSet = idPrimary2paramIndexSet;
og.paramIndex2idPrimary = paramIndex2idPrimary;
-
+
og.idSecondary2paramIndexSet = idSecondary2paramIndexSet;
og.paramIndex2idSecondary = paramIndex2idSecondary;
-
+
og.paramIndex2tdQ = paramIndex2tdQ;
og.paramIndex2tdR = paramIndex2tdR;
-
+
og.paramTokenPrimary2paramIndex = paramTokenPrimary2paramIndex;
- og.paramIndex2paramTokenPrimary = paramIndex2paramTokenPrimary;
-
- og.paramTokenSecondary2paramIndex = paramTokenSecondary2paramIndex;
- og.paramIndex2paramTokenSecondary = paramIndex2paramTokenSecondary;
+ og.paramIndex2paramTokenPrimary = paramIndex2paramTokenPrimary;
+
+ og.paramTokenSecondary2paramIndex = paramTokenSecondary2paramIndex;
+ og.paramIndex2paramTokenSecondary = paramIndex2paramTokenSecondary;
og.paramTokenSecondaryPlus2paramIndex = paramTokenSecondaryPlus2paramIndex;
og.paramIndex2paramTokenSecondaryPlus = paramIndex2paramTokenSecondaryPlus;
og.paramTokenSecondaryStar2paramIndex = paramTokenSecondaryStar2paramIndex;
- og.paramIndex2paramTokenSecondaryStar = paramIndex2paramTokenSecondaryStar;
+ og.paramIndex2paramTokenSecondaryStar = paramIndex2paramTokenSecondaryStar;
return;
}
protected void mergeAccessPaths(OwnershipGraph og) {
UtilAlgorithms.mergeHashtablesWithHashSetValues(temp2accessPaths,
- og.temp2accessPaths);
+ og.temp2accessPaths);
}
protected void mergeTempAndLabelCategories(OwnershipGraph og) {
// if everything is equal up to this point,
// assert that allocationSites is also equal--
// this data is redundant and kept for efficiency
- assert allocationSites .equals(og.allocationSites );
- assert outOfScopeTemps .equals(og.outOfScopeTemps );
+ assert allocationSites.equals(og.allocationSites);
+ assert outOfScopeTemps.equals(og.outOfScopeTemps);
assert outOfScopeLabels.equals(og.outOfScopeLabels);
- assert parameterTemps .equals(og.parameterTemps );
- assert parameterLabels .equals(og.parameterLabels );
+ assert parameterTemps.equals(og.parameterTemps);
+ assert parameterLabels.equals(og.parameterLabels);
return true;
}
HeapRegionNode hrnChildB = edgeB.getDst();
Integer idChildB = hrnChildB.getID();
- if( idChildA.equals( idChildB ) &&
- edgeA.typeAndFieldEquals( edgeB ) ) {
+ if( idChildA.equals(idChildB) &&
+ edgeA.typeAndFieldEquals(edgeB) ) {
// there is an edge in the right place with the right field,
// but do they have the same attributes?
protected boolean areAccessPathsEqual(OwnershipGraph og) {
- return temp2accessPaths.equals( og.temp2accessPaths );
+ return temp2accessPaths.equals(og.temp2accessPaths);
}
- public Set<HeapRegionNode> hasPotentialAlias( HeapRegionNode hrn1, HeapRegionNode hrn2 ) {
+ public Set<HeapRegionNode> hasPotentialAlias(HeapRegionNode hrn1, HeapRegionNode hrn2) {
assert hrn1 != null;
assert hrn2 != null;
// then get the various tokens for these heap regions
TokenTuple h1 = new TokenTuple(hrn1.getID(),
- !hrn1.isSingleObject(),
+ !hrn1.isSingleObject(),
TokenTuple.ARITY_ONE).makeCanonical();
TokenTuple h1plus = new TokenTuple(hrn1.getID(),
TokenTuple.ARITY_ZEROORMORE).makeCanonical();
TokenTuple h2 = new TokenTuple(hrn2.getID(),
- !hrn2.isSingleObject(),
+ !hrn2.isSingleObject(),
TokenTuple.ARITY_ONE).makeCanonical();
TokenTuple h2plus = new TokenTuple(hrn2.getID(),
Iterator<ReferenceEdge> itrEdge = hrn1.iteratorToReferencees();
while( itrEdge.hasNext() ) {
ReferenceEdge edge = itrEdge.next();
- beta1 = beta1.union( edge.getBeta() );
+ beta1 = beta1.union(edge.getBeta() );
}
ReachabilitySet beta2 = new ReachabilitySet().makeCanonical();
itrEdge = hrn2.iteratorToReferencees();
while( itrEdge.hasNext() ) {
ReferenceEdge edge = itrEdge.next();
- beta2 = beta2.union( edge.getBeta() );
+ beta2 = beta2.union(edge.getBeta() );
}
boolean aliasDetected = false;
}
if( h1 != h2 &&
- beta2.containsTupleSetWithBoth(h1, h2) ) {
+ beta2.containsTupleSetWithBoth(h1, h2) ) {
aliasDetected = true;
}
if( beta2.containsTupleSetWithBoth(h1plus, h2) ) {
Set<HeapRegionNode> common = new HashSet<HeapRegionNode>();
if( aliasDetected ) {
- common = findCommonReachableNodes( hrn1, hrn2 );
+ common = findCommonReachableNodes(hrn1, hrn2);
if( !(DISABLE_STRONG_UPDATES || DISABLE_GLOBAL_SWEEP) ) {
- assert !common.isEmpty();
+ assert !common.isEmpty();
}
}
- return common;
+ return common;
}
}
Set<HeapRegionNode> common = new HashSet<HeapRegionNode>();
- common.addAll( hasPotentialAlias( hrnParamPri1, hrnParamPri2 ) );
+ common.addAll(hasPotentialAlias(hrnParamPri1, hrnParamPri2) );
if( hrnParamSec1 != null ) {
- common.addAll( hasPotentialAlias( hrnParamSec1, hrnParamPri2 ) );
+ common.addAll(hasPotentialAlias(hrnParamSec1, hrnParamPri2) );
}
if( hrnParamSec2 != null ) {
- common.addAll( hasPotentialAlias( hrnParamSec2, hrnParamPri1 ) );
+ common.addAll(hasPotentialAlias(hrnParamSec2, hrnParamPri1) );
}
if( hrnParamSec1 != null && hrnParamSec2 != null ) {
- common.addAll( hasPotentialAlias( hrnParamSec1, hrnParamSec2 ) );
+ common.addAll(hasPotentialAlias(hrnParamSec1, hrnParamSec2) );
}
return common;
}
// get summary node
- assert id2hrn.containsKey( as.getSummary() );
- HeapRegionNode hrnSummary = id2hrn.get( as.getSummary() );
+ assert id2hrn.containsKey(as.getSummary() );
+ HeapRegionNode hrnSummary = id2hrn.get(as.getSummary() );
assert hrnSummary != null;
- Set<HeapRegionNode> common = hasPotentialAlias( hrnParamPri, hrnSummary );
-
+ Set<HeapRegionNode> common = hasPotentialAlias(hrnParamPri, hrnSummary);
+
if( hrnParamSec != null ) {
- common.addAll( hasPotentialAlias( hrnParamSec, hrnSummary ) );
+ common.addAll(hasPotentialAlias(hrnParamSec, hrnSummary) );
}
// check for other nodes
for( int i = 0; i < as.getAllocationDepth(); ++i ) {
- assert id2hrn.containsKey( as.getIthOldest( i ) );
- HeapRegionNode hrnIthOldest = id2hrn.get( as.getIthOldest( i ) );
+ assert id2hrn.containsKey(as.getIthOldest(i) );
+ HeapRegionNode hrnIthOldest = id2hrn.get(as.getIthOldest(i) );
assert hrnIthOldest != null;
- common = hasPotentialAlias( hrnParamPri, hrnIthOldest );
-
+ common = hasPotentialAlias(hrnParamPri, hrnIthOldest);
+
if( hrnParamSec != null ) {
- common.addAll( hasPotentialAlias( hrnParamSec, hrnIthOldest ) );
+ common.addAll(hasPotentialAlias(hrnParamSec, hrnIthOldest) );
}
}
-
+
return common;
}
- public Set<HeapRegionNode> hasPotentialAlias(AllocationSite as1, AllocationSite as2) {
+ public Set<HeapRegionNode> hasPotentialAlias(AllocationSite as1, AllocationSite as2) {
// get summary node 1's alpha
Integer idSum1 = as1.getSummary();
HeapRegionNode hrnSum2 = id2hrn.get(idSum2);
assert hrnSum2 != null;
- Set<HeapRegionNode> common = hasPotentialAlias( hrnSum1, hrnSum2 );
+ Set<HeapRegionNode> common = hasPotentialAlias(hrnSum1, hrnSum2);
// check sum2 against alloc1 nodes
for( int i = 0; i < as1.getAllocationDepth(); ++i ) {
HeapRegionNode hrnI1 = id2hrn.get(idI1);
assert hrnI1 != null;
- common.addAll( hasPotentialAlias( hrnI1, hrnSum2 ) );
+ common.addAll(hasPotentialAlias(hrnI1, hrnSum2) );
}
// check sum1 against alloc2 nodes
HeapRegionNode hrnI2 = id2hrn.get(idI2);
assert hrnI2 != null;
- common.addAll( hasPotentialAlias( hrnSum1, hrnI2 ) );
+ common.addAll(hasPotentialAlias(hrnSum1, hrnI2) );
// while we're at it, do an inner loop for alloc2 vs alloc1 nodes
for( int j = 0; j < as1.getAllocationDepth(); ++j ) {
// if these are the same site, don't look for the same token, no alias.
// different tokens of the same site could alias together though
- if( idI1.equals( idI2 ) ) {
+ if( idI1.equals(idI2) ) {
continue;
}
HeapRegionNode hrnI1 = id2hrn.get(idI1);
- common.addAll( hasPotentialAlias( hrnI1, hrnI2 ) );
+ common.addAll(hasPotentialAlias(hrnI1, hrnI2) );
}
}
}
- public Set<HeapRegionNode> findCommonReachableNodes( HeapRegionNode hrn1,
- HeapRegionNode hrn2 ) {
+ public Set<HeapRegionNode> findCommonReachableNodes(HeapRegionNode hrn1,
+ HeapRegionNode hrn2) {
Set<HeapRegionNode> reachableNodes1 = new HashSet<HeapRegionNode>();
Set<HeapRegionNode> reachableNodes2 = new HashSet<HeapRegionNode>();
Set<HeapRegionNode> todoNodes1 = new HashSet<HeapRegionNode>();
- todoNodes1.add( hrn1 );
+ todoNodes1.add(hrn1);
- Set<HeapRegionNode> todoNodes2 = new HashSet<HeapRegionNode>();
- todoNodes2.add( hrn2 );
+ Set<HeapRegionNode> todoNodes2 = new HashSet<HeapRegionNode>();
+ todoNodes2.add(hrn2);
// follow links until all reachable nodes have been found
while( !todoNodes1.isEmpty() ) {
HeapRegionNode hrn = todoNodes1.iterator().next();
- todoNodes1.remove( hrn );
+ todoNodes1.remove(hrn);
reachableNodes1.add(hrn);
-
+
Iterator<ReferenceEdge> edgeItr = hrn.iteratorToReferencees();
while( edgeItr.hasNext() ) {
ReferenceEdge edge = edgeItr.next();
-
- if( !reachableNodes1.contains( edge.getDst() ) ) {
- todoNodes1.add( edge.getDst() );
+
+ if( !reachableNodes1.contains(edge.getDst() ) ) {
+ todoNodes1.add(edge.getDst() );
}
}
}
while( !todoNodes2.isEmpty() ) {
HeapRegionNode hrn = todoNodes2.iterator().next();
- todoNodes2.remove( hrn );
+ todoNodes2.remove(hrn);
reachableNodes2.add(hrn);
-
+
Iterator<ReferenceEdge> edgeItr = hrn.iteratorToReferencees();
while( edgeItr.hasNext() ) {
ReferenceEdge edge = edgeItr.next();
-
- if( !reachableNodes2.contains( edge.getDst() ) ) {
- todoNodes2.add( edge.getDst() );
+
+ if( !reachableNodes2.contains(edge.getDst() ) ) {
+ todoNodes2.add(edge.getDst() );
}
}
}
-
- Set<HeapRegionNode> intersection =
- new HashSet<HeapRegionNode>( reachableNodes1 );
- intersection.retainAll( reachableNodes2 );
-
+ Set<HeapRegionNode> intersection =
+ new HashSet<HeapRegionNode>(reachableNodes1);
+
+ intersection.retainAll(reachableNodes2);
+
return intersection;
}
-
+
public void writeGraph(String graphName,
boolean writeLabels,
boolean labelSelect,
boolean writeReferencers,
boolean writeParamMappings,
boolean hideSubsetReachability,
- boolean hideEdgeTaints
+ boolean hideEdgeTaints
) throws java.io.IOException {
// remove all non-word characters from the graph name so
Iterator i = s.iterator();
while( i.hasNext() ) {
Map.Entry me = (Map.Entry)i.next();
- HeapRegionNode hrn = (HeapRegionNode) me.getValue();
+ HeapRegionNode hrn = (HeapRegionNode) me.getValue();
if( !pruneGarbage ||
(hrn.isFlagged() && hrn.getID() > 0) ||
null,
visited,
writeReferencers,
- hideSubsetReachability,
- hideEdgeTaints);
+ hideSubsetReachability,
+ hideEdgeTaints);
}
}
}
if( writeParamMappings ) {
/* UNMAINTAINED
- Set df = paramIndex2id.entrySet();
- Iterator ih = df.iterator();
- while( ih.hasNext() ) {
- Map.Entry meh = (Map.Entry)ih.next();
- Integer pi = (Integer) meh.getKey();
- Integer id = (Integer) meh.getValue();
- bw.write(" pindex"+pi+"[label=\""+pi+" to "+id+"\",shape=box];\n");
- }
- */
+ Set df = paramIndex2id.entrySet();
+ Iterator ih = df.iterator();
+ while( ih.hasNext() ) {
+ Map.Entry meh = (Map.Entry)ih.next();
+ Integer pi = (Integer) meh.getKey();
+ Integer id = (Integer) meh.getValue();
+ bw.write(" pindex"+pi+"[label=\""+pi+" to "+id+"\",shape=box];\n");
+ }
+ */
}
// then visit every label node, useful for debugging
null,
visited,
writeReferencers,
- hideSubsetReachability,
- hideEdgeTaints);
+ hideSubsetReachability,
+ hideEdgeTaints);
}
bw.write(" " + ln.toString() +
" -> " + hrn.toString() +
"[label=\"" + edge.toGraphEdgeString(hideSubsetReachability,
- hideEdgeTaints) +
+ hideEdgeTaints) +
"\",decorate];\n");
}
}
HashSet<HeapRegionNode> visited,
boolean writeReferencers,
boolean hideSubsetReachability,
- boolean hideEdgeTaints
+ boolean hideEdgeTaints
) throws java.io.IOException {
if( visited.contains(hrn) ) {
"\\n";
if( hrn.getType() != null ) {
- attributes += hrn.getType().toPrettyString() + "\\n";
+ attributes += hrn.getType().toPrettyString() + "\\n";
}
-
+
attributes += hrn.getDescription() +
- "\\n" +
+ "\\n" +
hrn.getAlphaString(hideSubsetReachability) +
"\"]";
// useful for debugging
// UNMAINTAINED
/*
- if( writeReferencers ) {
- OwnershipNode onRef = null;
- Iterator refItr = hrn.iteratorToReferencers();
- while( refItr.hasNext() ) {
- onRef = (OwnershipNode) refItr.next();
-
- switch( mode ) {
- case VISIT_HRN_WRITE_FULL:
- bw.write(" " + hrn.toString() +
- " -> " + onRef.toString() +
- "[color=lightgray];\n");
- break;
- }
- }
- }
- */
+ if( writeReferencers ) {
+ OwnershipNode onRef = null;
+ Iterator refItr = hrn.iteratorToReferencers();
+ while( refItr.hasNext() ) {
+ onRef = (OwnershipNode) refItr.next();
+
+ switch( mode ) {
+ case VISIT_HRN_WRITE_FULL:
+ bw.write(" " + hrn.toString() +
+ " -> " + onRef.toString() +
+ "[color=lightgray];\n");
+ break;
+ }
+ }
+ }
+ */
Iterator<ReferenceEdge> childRegionsItr = hrn.iteratorToReferencees();
while( childRegionsItr.hasNext() ) {
bw.write(" " + hrn.toString() +
" -> " + hrnChild.toString() +
"[label=\"" + edge.toGraphEdgeString(hideSubsetReachability,
- hideEdgeTaints) +
+ hideEdgeTaints) +
"\",decorate];\n");
break;
}
visited,
writeReferencers,
hideSubsetReachability,
- hideEdgeTaints);
+ hideEdgeTaints);
}
}
-
- public int getTaintIdentifierFromHRN(HeapRegionNode hrn){
- HashSet<ReferenceEdge> referenceEdges=hrn.referencers;
- Iterator<ReferenceEdge> iter=referenceEdges.iterator();
-
- int taintIdentifier=0;
- while(iter.hasNext()){
- ReferenceEdge edge=iter.next();
- taintIdentifier=taintIdentifier | edge.getTaintIdentifier();
- }
-
- return taintIdentifier;
-
+
+ public int getTaintIdentifierFromHRN(HeapRegionNode hrn) {
+ HashSet<ReferenceEdge> referenceEdges=hrn.referencers;
+ Iterator<ReferenceEdge> iter=referenceEdges.iterator();
+
+ int taintIdentifier=0;
+ while(iter.hasNext()) {
+ ReferenceEdge edge=iter.next();
+ taintIdentifier=taintIdentifier | edge.getTaintIdentifier();
+ }
+
+ return taintIdentifier;
+
}
-
- public void propagateTaintIdentifier(HeapRegionNode hrn, int newTaintIdentifier, HashSet<HeapRegionNode> visitedSet){
-
- HashSet<ReferenceEdge> setEdge=hrn.referencers;
- Iterator<ReferenceEdge> iter=setEdge.iterator();
- while(iter.hasNext()){
- ReferenceEdge edge= iter.next();
- edge.unionTaintIdentifier(newTaintIdentifier);
- if(edge.getSrc() instanceof HeapRegionNode){
-
- HeapRegionNode refHRN=(HeapRegionNode)edge.getSrc();
- //check whether it is reflexive edge
- if(!refHRN.equals(hrn) && !visitedSet.contains(refHRN)){
- visitedSet.add(refHRN);
- propagateTaintIdentifier((HeapRegionNode)edge.getSrc(),newTaintIdentifier,visitedSet);
- }
-
- }
- }
-
+
+ public void propagateTaintIdentifier(HeapRegionNode hrn, int newTaintIdentifier, HashSet<HeapRegionNode> visitedSet) {
+
+ HashSet<ReferenceEdge> setEdge=hrn.referencers;
+ Iterator<ReferenceEdge> iter=setEdge.iterator();
+ while(iter.hasNext()) {
+ ReferenceEdge edge= iter.next();
+ edge.unionTaintIdentifier(newTaintIdentifier);
+ if(edge.getSrc() instanceof HeapRegionNode) {
+
+ HeapRegionNode refHRN=(HeapRegionNode)edge.getSrc();
+ //check whether it is reflexive edge
+ if(!refHRN.equals(hrn) && !visitedSet.contains(refHRN)) {
+ visitedSet.add(refHRN);
+ propagateTaintIdentifier((HeapRegionNode)edge.getSrc(),newTaintIdentifier,visitedSet);
+ }
+
+ }
+ }
+
}
-
- public void depropagateTaintIdentifier(HeapRegionNode hrn, int newTaintIdentifier, HashSet<HeapRegionNode> visitedSet){
-
- HashSet<ReferenceEdge> setEdge=hrn.referencers;
- Iterator<ReferenceEdge> iter=setEdge.iterator();
- while(iter.hasNext()){
- ReferenceEdge edge= iter.next();
- edge.minusTaintIdentifier(newTaintIdentifier);
- if(edge.getSrc() instanceof HeapRegionNode){
-
- HeapRegionNode refHRN=(HeapRegionNode)edge.getSrc();
- //check whether it is reflexive edge
- if(!refHRN.equals(hrn) && !visitedSet.contains(refHRN)){
- visitedSet.add(refHRN);
- depropagateTaintIdentifier((HeapRegionNode)edge.getSrc(),newTaintIdentifier,visitedSet);
- }
-
- }
- }
-
+
+ public void depropagateTaintIdentifier(HeapRegionNode hrn, int newTaintIdentifier, HashSet<HeapRegionNode> visitedSet) {
+
+ HashSet<ReferenceEdge> setEdge=hrn.referencers;
+ Iterator<ReferenceEdge> iter=setEdge.iterator();
+ while(iter.hasNext()) {
+ ReferenceEdge edge= iter.next();
+ edge.minusTaintIdentifier(newTaintIdentifier);
+ if(edge.getSrc() instanceof HeapRegionNode) {
+
+ HeapRegionNode refHRN=(HeapRegionNode)edge.getSrc();
+ //check whether it is reflexive edge
+ if(!refHRN.equals(hrn) && !visitedSet.contains(refHRN)) {
+ visitedSet.add(refHRN);
+ depropagateTaintIdentifier((HeapRegionNode)edge.getSrc(),newTaintIdentifier,visitedSet);
+ }
+
+ }
+ }
+
}
// in this analysis specifically:
// we have a notion that a null type is the "match any" type,
// so wrap calls to the utility methods that deal with null
- public TypeDescriptor mostSpecificType( TypeDescriptor td1,
- TypeDescriptor td2 ) {
+ public TypeDescriptor mostSpecificType(TypeDescriptor td1,
+ TypeDescriptor td2) {
if( td1 == null ) {
return td2;
}
if( td2.isNull() ) {
return td1;
}
- return typeUtil.mostSpecific( td1, td2 );
+ return typeUtil.mostSpecific(td1, td2);
+ }
+
+ public TypeDescriptor mostSpecificType(TypeDescriptor td1,
+ TypeDescriptor td2,
+ TypeDescriptor td3) {
+
+ return mostSpecificType(td1,
+ mostSpecificType(td2, td3)
+ );
+ }
+
+ public TypeDescriptor mostSpecificType(TypeDescriptor td1,
+ TypeDescriptor td2,
+ TypeDescriptor td3,
+ TypeDescriptor td4) {
+
+ return mostSpecificType(mostSpecificType(td1, td2),
+ mostSpecificType(td3, td4)
+ );
}
-
- public TypeDescriptor mostSpecificType( TypeDescriptor td1,
- TypeDescriptor td2,
- TypeDescriptor td3 ) {
-
- return mostSpecificType( td1,
- mostSpecificType( td2, td3 )
- );
- }
-
- public TypeDescriptor mostSpecificType( TypeDescriptor td1,
- TypeDescriptor td2,
- TypeDescriptor td3,
- TypeDescriptor td4 ) {
-
- return mostSpecificType( mostSpecificType( td1, td2 ),
- mostSpecificType( td3, td4 )
- );
- }
// remember, in this analysis a null type means "any type"
- public boolean isSuperiorType( TypeDescriptor possibleSuper,
- TypeDescriptor possibleChild ) {
+ public boolean isSuperiorType(TypeDescriptor possibleSuper,
+ TypeDescriptor possibleChild) {
if( possibleSuper == null ||
- possibleChild == null ) {
+ possibleChild == null ) {
return true;
}
if( possibleSuper.isNull() ||
- possibleChild.isNull() ) {
+ possibleChild.isNull() ) {
return true;
}
- return typeUtil.isSuperorType( possibleSuper, possibleChild );
+ return typeUtil.isSuperorType(possibleSuper, possibleChild);
}
- public String generateUniqueIdentifier(FlatMethod fm, int paramIdx, String type){
-
- //type: A->aliapsed parameter heap region
- // P -> primary paramter heap region
- // S -> secondary paramter heap region
-
- String identifier;
- if(type.equals("A")){
- //aliased param
- identifier="FM"+fm.hashCode()+".A";
- }else{
- identifier="FM"+fm.hashCode()+"."+paramIdx+"."+type;
- }
- return identifier;
-
+ public String generateUniqueIdentifier(FlatMethod fm, int paramIdx, String type) {
+
+ //type: A->aliapsed parameter heap region
+ // P -> primary paramter heap region
+ // S -> secondary paramter heap region
+
+ String identifier;
+ if(type.equals("A")) {
+ //aliased param
+ identifier="FM"+fm.hashCode()+".A";
+ } else {
+ identifier="FM"+fm.hashCode()+"."+paramIdx+"."+type;
+ }
+ return identifier;
+
}
-
- public String generateUniqueIdentifier(AllocationSite as, int age, boolean isSummary){
-
- String identifier;
-
- FlatNew fn=as.getFlatNew();
-
- if(isSummary){
- identifier="FN"+fn.hashCode()+".S";
- }else{
- identifier="FN"+fn.hashCode()+"."+age;
- }
-
- return identifier;
-
+
+ public String generateUniqueIdentifier(AllocationSite as, int age, boolean isSummary) {
+
+ String identifier;
+
+ FlatNew fn=as.getFlatNew();
+
+ if(isSummary) {
+ identifier="FN"+fn.hashCode()+".S";
+ } else {
+ identifier="FN"+fn.hashCode()+"."+age;
+ }
+
+ return identifier;
+
+ }
+
+ public HeapRegionNode getHRNbyUniqueID(String id) {
+
+ Enumeration<HeapRegionNode> elements = id2hrn.elements();
+ while (elements.hasMoreElements()) {
+ HeapRegionNode hrn = elements.nextElement();
+ if (hrn.getGloballyUniqueIdentifier().equals(id)) {
+ return hrn;
+ }
+ }
+
+ return null;
+
}
-
- public HeapRegionNode getHRNbyUniqueID(String id) {
-
- Enumeration<HeapRegionNode> elements = id2hrn.elements();
- while (elements.hasMoreElements()) {
- HeapRegionNode hrn = elements.nextElement();
- if (hrn.getGloballyUniqueIdentifier().equals(id)) {
- return hrn;
- }
- }
-
- return null;
- }
-
}
public ReferenceEdge getReferenceTo(HeapRegionNode hrn,
TypeDescriptor type,
- String field) {
+ String field) {
assert hrn != null;
Iterator<ReferenceEdge> itrEdge = referencees.iterator();
while( itrEdge.hasNext() ) {
ReferenceEdge edge = itrEdge.next();
if( edge.getDst().equals(hrn) &&
- edge.typeEquals( type ) &&
- edge.fieldEquals( field ) ) {
+ edge.typeEquals(type) &&
+ edge.fieldEquals(field) ) {
return edge;
}
}
// info needed to use OwnershipGraph.resolveMethodCall()
// to do the parameter decomp mapping itself
- protected FlatCall fcInCaller;
- protected FlatMethod fmPossible;
- protected MethodContext mcCallSite;
+ protected FlatCall fcInCaller;
+ protected FlatMethod fmPossible;
+ protected MethodContext mcCallSite;
protected OwnershipGraph ogCallee;
protected OwnershipGraph ogCaller;
protected Hashtable<Integer, Set<TypeDescriptor> > pi2r_td;
- public ParameterDecomposition( OwnershipAnalysis oa,
- FlatCall fc,
- FlatMethod fm,
- MethodContext mc,
- OwnershipGraph cee,
- OwnershipGraph cer ) {
+ public ParameterDecomposition(OwnershipAnalysis oa,
+ FlatCall fc,
+ FlatMethod fm,
+ MethodContext mc,
+ OwnershipGraph cee,
+ OwnershipGraph cer) {
oa.checkAnalysisComplete();
this.oa = oa;
// make copies of the graphs so that resolveMethodCall can
// destroy the graph while calculating the stuff we want
this.ogCallee = new OwnershipGraph();
- this.ogCallee.merge( cee );
+ this.ogCallee.merge(cee);
this.ogCaller = new OwnershipGraph();
- this.ogCaller.merge( cer );
+ this.ogCaller.merge(cer);
allocOutputStructs();
}
/*
- public ParameterDecomposition( ParameterDecomposition pd,
- FlatCall fc ) {
- this.oa = pd.oa;
+ public ParameterDecomposition( ParameterDecomposition pd,
+ FlatCall fc ) {
+ this.oa = pd.oa;
- // the call site should be calling the caller of
- // the input parameter decomposition object
- assert fc.getMethod() == pd.mdCaller;
+ // the call site should be calling the caller of
+ // the input parameter decomposition object
+ assert fc.getMethod() == pd.mdCaller;
- mdCallee = pd.mdCaller;
- mdCaller = getCaller( fc );
- }
- */
+ mdCallee = pd.mdCaller;
+ mdCaller = getCaller( fc );
+ }
+ */
protected void allocOutputStructs() {
pi2a_id = new Hashtable<Integer, Set<Integer> >();
}
protected void computeDecompositon() {
- MethodDescriptor mdCallee = (MethodDescriptor) mcCallSite.getDescriptor();
-
- ogCaller.resolveMethodCall( fcInCaller,
- mdCallee.isStatic(),
- fmPossible,
- ogCallee,
- mcCallSite,
- this );
+ MethodDescriptor mdCallee = (MethodDescriptor) mcCallSite.getDescriptor();
+
+ ogCaller.resolveMethodCall(fcInCaller,
+ mdCallee.isStatic(),
+ fmPossible,
+ ogCallee,
+ mcCallSite,
+ this);
}
// called by resolveMethodCall in decomp mode
// to report mapping results
- protected void mapRegionToParamObject( HeapRegionNode hrn, Integer paramIndex ) {
+ protected void mapRegionToParamObject(HeapRegionNode hrn, Integer paramIndex) {
// extract region's intergraph ID
- Set<Integer> hrnIDs = pi2a_id.get( paramIndex );
+ Set<Integer> hrnIDs = pi2a_id.get(paramIndex);
if( hrnIDs == null ) {
hrnIDs = new HashSet<Integer>();
}
- hrnIDs.add( hrn.getID() );
- pi2a_id.put( paramIndex, hrnIDs );
+ hrnIDs.add(hrn.getID() );
+ pi2a_id.put(paramIndex, hrnIDs);
// the regions allocation site (if any)
AllocationSite as = hrn.getAllocationSite();
if( as != null ) {
- Set<AllocationSite> asSet = pi2a_as.get( paramIndex );
+ Set<AllocationSite> asSet = pi2a_as.get(paramIndex);
if( asSet == null ) {
asSet = new HashSet<AllocationSite>();
}
- asSet.add( as );
- pi2a_as.put( paramIndex, asSet );
+ asSet.add(as);
+ pi2a_as.put(paramIndex, asSet);
// and if there is an allocation site, grab type
- Set<TypeDescriptor> tdSet = pi2a_td.get( paramIndex );
+ Set<TypeDescriptor> tdSet = pi2a_td.get(paramIndex);
if( tdSet == null ) {
tdSet = new HashSet<TypeDescriptor>();
}
- tdSet.add( as.getType() );
- pi2a_td.put( paramIndex, tdSet );
+ tdSet.add(as.getType() );
+ pi2a_td.put(paramIndex, tdSet);
}
}
- protected void mapRegionToParamReachable( HeapRegionNode hrn, Integer paramIndex ) {
+ protected void mapRegionToParamReachable(HeapRegionNode hrn, Integer paramIndex) {
// extract region's intergraph ID
- Set<Integer> hrnIDs = pi2r_id.get( paramIndex );
+ Set<Integer> hrnIDs = pi2r_id.get(paramIndex);
if( hrnIDs == null ) {
hrnIDs = new HashSet<Integer>();
}
- hrnIDs.add( hrn.getID() );
- pi2r_id.put( paramIndex, hrnIDs );
+ hrnIDs.add(hrn.getID() );
+ pi2r_id.put(paramIndex, hrnIDs);
// the regions allocation site (if any)
AllocationSite as = hrn.getAllocationSite();
if( as != null ) {
- Set<AllocationSite> asSet = pi2r_as.get( paramIndex );
+ Set<AllocationSite> asSet = pi2r_as.get(paramIndex);
if( asSet == null ) {
asSet = new HashSet<AllocationSite>();
}
- asSet.add( as );
- pi2r_as.put( paramIndex, asSet );
+ asSet.add(as);
+ pi2r_as.put(paramIndex, asSet);
// and if there is an allocation site, grab type
- Set<TypeDescriptor> tdSet = pi2r_td.get( paramIndex );
+ Set<TypeDescriptor> tdSet = pi2r_td.get(paramIndex);
if( tdSet == null ) {
tdSet = new HashSet<TypeDescriptor>();
}
- tdSet.add( as.getType() );
- pi2r_td.put( paramIndex, tdSet );
+ tdSet.add(as.getType() );
+ pi2r_td.put(paramIndex, tdSet);
}
}
- // this family of "gets" returns, for some
+ // this family of "gets" returns, for some
// parameter index, all of the associated data
// that parameter might decompose into
- public Set<Integer> getParamObject_hrnIDs( Integer paramIndex ) {
- Set<Integer> hrnIDs = pi2a_id.get( paramIndex );
+ public Set<Integer> getParamObject_hrnIDs(Integer paramIndex) {
+ Set<Integer> hrnIDs = pi2a_id.get(paramIndex);
if( hrnIDs == null ) {
hrnIDs = new HashSet<Integer>();
}
return hrnIDs;
}
- public Set<AllocationSite> getParamObject_allocSites( Integer paramIndex ) {
- Set<AllocationSite> asSet = pi2a_as.get( paramIndex );
+ public Set<AllocationSite> getParamObject_allocSites(Integer paramIndex) {
+ Set<AllocationSite> asSet = pi2a_as.get(paramIndex);
if( asSet == null ) {
asSet = new HashSet<AllocationSite>();
}
return asSet;
}
- public Set<TypeDescriptor> getParamObject_TypeDescs( Integer paramIndex ) {
- Set<TypeDescriptor> tdSet = pi2a_td.get( paramIndex );
+ public Set<TypeDescriptor> getParamObject_TypeDescs(Integer paramIndex) {
+ Set<TypeDescriptor> tdSet = pi2a_td.get(paramIndex);
if( tdSet == null ) {
tdSet = new HashSet<TypeDescriptor>();
}
}
- public Set<Integer> getParamReachable_hrnIDs( Integer paramIndex ) {
- Set<Integer> hrnIDs = pi2r_id.get( paramIndex );
+ public Set<Integer> getParamReachable_hrnIDs(Integer paramIndex) {
+ Set<Integer> hrnIDs = pi2r_id.get(paramIndex);
if( hrnIDs == null ) {
hrnIDs = new HashSet<Integer>();
}
return hrnIDs;
}
- public Set<AllocationSite> getParamReachable_allocSites( Integer paramIndex ) {
- Set<AllocationSite> asSet = pi2r_as.get( paramIndex );
+ public Set<AllocationSite> getParamReachable_allocSites(Integer paramIndex) {
+ Set<AllocationSite> asSet = pi2r_as.get(paramIndex);
if( asSet == null ) {
asSet = new HashSet<AllocationSite>();
}
return asSet;
}
- public Set<TypeDescriptor> getParamReachable_TypeDescs( Integer paramIndex ) {
- Set<TypeDescriptor> tdSet = pi2r_td.get( paramIndex );
+ public Set<TypeDescriptor> getParamReachable_TypeDescs(Integer paramIndex) {
+ Set<TypeDescriptor> tdSet = pi2r_td.get(paramIndex);
if( tdSet == null ) {
tdSet = new HashSet<TypeDescriptor>();
}
package Analysis.OwnershipAnalysis;
public class ReachOperation {
- Canonical a;
- Canonical b;
- public Canonical c;
+ Canonical a;
+ Canonical b;
+ public Canonical c;
- public ReachOperation(Canonical a, Canonical b) {
- assert a.canonicalvalue!=0;
- assert b.canonicalvalue!=0;
- this.a=a;
- this.b=b;
- }
-
- public int hashCode() {
- return a.canonicalvalue^(b.canonicalvalue<<1);
- }
- public boolean equals(Object o) {
- ReachOperation ro=(ReachOperation)o;
- return ro.a.canonicalvalue==a.canonicalvalue&&
- ro.b.canonicalvalue==b.canonicalvalue;
- }
+ public ReachOperation(Canonical a, Canonical b) {
+ assert a.canonicalvalue!=0;
+ assert b.canonicalvalue!=0;
+ this.a=a;
+ this.b=b;
+ }
+
+ public int hashCode() {
+ return a.canonicalvalue^(b.canonicalvalue<<1);
+ }
+ public boolean equals(Object o) {
+ ReachOperation ro=(ReachOperation)o;
+ return ro.a.canonicalvalue==a.canonicalvalue&&
+ ro.b.canonicalvalue==b.canonicalvalue;
+ }
}
\ No newline at end of file
}
}
- return false;
+ return false;
}
public boolean containsSuperSet(TokenTupleSet tts) {
- return containsSuperSet( tts, false );
+ return containsSuperSet(tts, false);
}
public boolean containsStrictSuperSet(TokenTupleSet tts) {
- return containsSuperSet( tts, true );
+ return containsSuperSet(tts, true);
}
public boolean containsSuperSet(TokenTupleSet tts, boolean strict) {
while( itr.hasNext() ) {
TokenTupleSet ttsThis = (TokenTupleSet) itr.next();
if( strict ) {
- if( !tts.equals(ttsThis) && tts.isSubset(ttsThis) ) {
- return true;
- }
+ if( !tts.equals(ttsThis) && tts.isSubset(ttsThis) ) {
+ return true;
+ }
} else {
- if( tts.isSubset(ttsThis) ) {
- return true;
- }
+ if( tts.isSubset(ttsThis) ) {
+ return true;
+ }
}
}
- return false;
+ return false;
}
return false;
}
- public static ReachabilitySet factory(TokenTupleSet tts) {
- CanonicalWrapper cw=new CanonicalWrapper(tts);
- if (lookuphash.containsKey(cw))
- return (ReachabilitySet)lookuphash.get(cw).b;
- ReachabilitySet rs=new ReachabilitySet(tts);
- rs=rs.makeCanonical();
- cw.b=rs;
- lookuphash.put(cw,cw);
- return rs;
+ public static ReachabilitySet factory(TokenTupleSet tts) {
+ CanonicalWrapper cw=new CanonicalWrapper(tts);
+ if (lookuphash.containsKey(cw))
+ return (ReachabilitySet)lookuphash.get(cw).b;
+ ReachabilitySet rs=new ReachabilitySet(tts);
+ rs=rs.makeCanonical();
+ cw.b=rs;
+ lookuphash.put(cw,cw);
+ return rs;
}
- public ReachabilitySet union(TokenTupleSet ttsIn) {
- ReachOperation ro=new ReachOperation(this, ttsIn);
- if (unionhash.containsKey(ro)) {
- return (ReachabilitySet) unionhash.get(ro).c;
- } else {
- ReachabilitySet rsOut = new ReachabilitySet(this);
- rsOut.possibleReachabilities.add(ttsIn);
- ro.c=rsOut=rsOut.makeCanonical();
- unionhash.put(ro,ro);
- return rsOut;
- }
+ public ReachabilitySet union(TokenTupleSet ttsIn) {
+ ReachOperation ro=new ReachOperation(this, ttsIn);
+ if (unionhash.containsKey(ro)) {
+ return (ReachabilitySet) unionhash.get(ro).c;
+ } else {
+ ReachabilitySet rsOut = new ReachabilitySet(this);
+ rsOut.possibleReachabilities.add(ttsIn);
+ ro.c=rsOut=rsOut.makeCanonical();
+ unionhash.put(ro,ro);
+ return rsOut;
}
+ }
public ReachabilitySet union(ReachabilitySet rsIn) {
- // assert rsIn != null;
-
- // assert can.containsKey(this);
- // assert can.containsKey(rsIn);
+ // assert rsIn != null;
+
+ // assert can.containsKey(this);
+ // assert can.containsKey(rsIn);
ReachOperation ro=new ReachOperation(this, rsIn);
if (unionhash.containsKey(ro))
- return (ReachabilitySet) unionhash.get(ro).c;
+ return (ReachabilitySet) unionhash.get(ro).c;
else {
- ReachabilitySet rsOut = new ReachabilitySet(this);
- rsOut.possibleReachabilities.addAll(rsIn.possibleReachabilities);
- ro.c=rsOut=rsOut.makeCanonical();
- unionhash.put(ro, ro);
- return rsOut;
+ ReachabilitySet rsOut = new ReachabilitySet(this);
+ rsOut.possibleReachabilities.addAll(rsIn.possibleReachabilities);
+ ro.c=rsOut=rsOut.makeCanonical();
+ unionhash.put(ro, ro);
+ return rsOut;
}
}
public ReachabilitySet intersection(ReachabilitySet rsIn) {
- // assert rsIn != null;
+ // assert rsIn != null;
// assert can.containsKey(this);
// assert can.containsKey(rsIn);
ReachOperation ro=new ReachOperation(this, rsIn);
if (interhash.containsKey(ro))
- return (ReachabilitySet) interhash.get(ro).c;
+ return (ReachabilitySet) interhash.get(ro).c;
else {
- ReachabilitySet rsOut = new ReachabilitySet();
- Iterator i = this.iterator();
- while( i.hasNext() ) {
- TokenTupleSet tts = (TokenTupleSet) i.next();
- if( rsIn.possibleReachabilities.contains(tts) ) {
- rsOut.possibleReachabilities.add(tts);
- }
+ ReachabilitySet rsOut = new ReachabilitySet();
+ Iterator i = this.iterator();
+ while( i.hasNext() ) {
+ TokenTupleSet tts = (TokenTupleSet) i.next();
+ if( rsIn.possibleReachabilities.contains(tts) ) {
+ rsOut.possibleReachabilities.add(tts);
}
- ro.c=rsOut=rsOut.makeCanonical();
- interhash.put(ro,ro);
- return rsOut;
+ }
+ ro.c=rsOut=rsOut.makeCanonical();
+ interhash.put(ro,ro);
+ return rsOut;
}
}
}
public ReachabilitySet removeTokenAIfTokenB(TokenTuple ttA,
- TokenTuple ttB) {
+ TokenTuple ttB) {
assert ttA != null;
assert ttB != null;
Iterator i = this.iterator();
while( i.hasNext() ) {
TokenTupleSet tts = (TokenTupleSet) i.next();
- if( tts.containsTuple( ttB ) ) {
- rsOut.possibleReachabilities.add( tts.remove(ttA) );
+ if( tts.containsTuple(ttB) ) {
+ rsOut.possibleReachabilities.add(tts.remove(ttA) );
} else {
- rsOut.possibleReachabilities.add( tts );
+ rsOut.possibleReachabilities.add(tts);
}
- }
+ }
- return rsOut.makeCanonical();
+ return rsOut.makeCanonical();
}
while( itrC.hasNext() ) {
ChangeTuple c = itrC.next();
- if( tts.equals( c.getSetToMatch() ) ) {
- rsOut.possibleReachabilities.add( c.getSetToAdd() );
+ if( tts.equals(c.getSetToMatch() ) ) {
+ rsOut.possibleReachabilities.add(c.getSetToAdd() );
changeFound = true;
}
}
if( keepSourceState || !changeFound ) {
- rsOut.possibleReachabilities.add( tts );
+ rsOut.possibleReachabilities.add(tts);
}
}
-
+
return rsOut.makeCanonical();
}
TokenTuple ttO = o.containsToken(ttR.getToken() );
if( ttO != null ) {
- theUnion = theUnion.union((new TokenTupleSet(ttR.unionArity(ttO)).makeCanonical() ) );
+ theUnion = theUnion.union((new TokenTupleSet(ttR.unionArity(ttO)).makeCanonical() ) );
} else {
- theUnion = theUnion.union((new TokenTupleSet(ttR)).makeCanonical() );
+ theUnion = theUnion.union((new TokenTupleSet(ttR)).makeCanonical() );
}
}
TokenTuple ttR = theUnion.containsToken(ttO.getToken() );
if( ttR == null ) {
- theUnion = theUnion.union(new TokenTupleSet(ttO).makeCanonical() );
+ theUnion = theUnion.union(new TokenTupleSet(ttO).makeCanonical() );
}
}
if( !theUnion.isEmpty() ) {
- ctsOut = ctsOut.union((new ChangeTupleSet(new ChangeTuple(o, theUnion) )).makeCanonical() );
+ ctsOut = ctsOut.union((new ChangeTupleSet(new ChangeTuple(o, theUnion) )).makeCanonical() );
}
}
}
public ReachabilitySet exhaustiveArityCombinations() {
- ReachabilitySet rsOut = (new ReachabilitySet()).makeCanonical();
+ ReachabilitySet rsOut = (new ReachabilitySet()).makeCanonical();
int numDimensions = this.possibleReachabilities.size();
}
- public String toStringEscapeNewline( boolean hideSubsetReachability ) {
+ public String toStringEscapeNewline(boolean hideSubsetReachability) {
String s = "[";
Iterator<TokenTupleSet> i = this.iterator();
// skip this if there is a superset already
if( hideSubsetReachability &&
- containsStrictSuperSet( tts ) ) {
- continue;
+ containsStrictSuperSet(tts) ) {
+ continue;
}
s += tts;
s += "]";
return s;
}
-
+
public String toString() {
- return toString( false );
+ return toString(false);
}
- public String toString( boolean hideSubsetReachability ) {
+ public String toString(boolean hideSubsetReachability) {
String s = "[";
Iterator<TokenTupleSet> i = this.iterator();
// skip this if there is a superset already
if( hideSubsetReachability &&
- containsStrictSuperSet( tts ) ) {
- continue;
+ containsStrictSuperSet(tts) ) {
+ continue;
}
s += tts;
public ReferenceEdge(OwnershipNode src,
HeapRegionNode dst,
- TypeDescriptor type,
- String field,
+ TypeDescriptor type,
+ String field,
boolean isInitialParam,
ReachabilitySet beta) {
public ReferenceEdge copy() {
- ReferenceEdge copy= new ReferenceEdge(src,
- dst,
- type,
- field,
- isInitialParam,
- beta);
- copy.setTaintIdentifier(this.taintIdentifier);
- return copy;
+ ReferenceEdge copy= new ReferenceEdge(src,
+ dst,
+ type,
+ field,
+ isInitialParam,
+ beta);
+ copy.setTaintIdentifier(this.taintIdentifier);
+ return copy;
}
ReferenceEdge edge = (ReferenceEdge) o;
- if( !typeEquals( edge.type ) ) {
+ if( !typeEquals(edge.type) ) {
return false;
}
- if( !fieldEquals( edge.field ) ) {
+ if( !fieldEquals(edge.field) ) {
return false;
}
return type;
}
- public void setType( TypeDescriptor td ) {
+ public void setType(TypeDescriptor td) {
type = td;
}
return field;
}
- public void setField( String s ) {
+ public void setField(String s) {
field = s;
}
- public boolean typeEquals( TypeDescriptor td ) {
+ public boolean typeEquals(TypeDescriptor td) {
if( type == null && td == null ) {
return true;
}
if( type == null ) {
return false;
}
- return type.equals( td );
+ return type.equals(td);
}
- public boolean fieldEquals( String s ) {
+ public boolean fieldEquals(String s) {
if( field == null && s == null ) {
return true;
}
if( field == null ) {
return false;
}
- return field.equals( s );
+ return field.equals(s);
}
- public boolean typeAndFieldEquals( ReferenceEdge e ) {
- return typeEquals ( e.getType() ) &&
- fieldEquals( e.getField() );
+ public boolean typeAndFieldEquals(ReferenceEdge e) {
+ return typeEquals(e.getType() ) &&
+ fieldEquals(e.getField() );
}
public String toGraphEdgeString(boolean hideSubsetReachability,
- boolean hideEdgeTaints) {
+ boolean hideEdgeTaints) {
String edgeLabel = "";
if (type != null) {
if( !hideEdgeTaints ) {
edgeLabel += "*taint*=" + Integer.toBinaryString(taintIdentifier)
- + "\\n*SESE*=" + Integer.toBinaryString(SESEtaintIdentifier)
- + "\\n";
+ + "\\n*SESE*=" + Integer.toBinaryString(SESEtaintIdentifier)
+ + "\\n";
}
edgeLabel += beta.toStringEscapeNewline(hideSubsetReachability);
-
+
return edgeLabel;
}
return new String("("+src+"->"+type+" "+field+"->"+dst+")");
}
-
- public void tainedBy(Integer paramIdx){
- int newTaint=(int) Math.pow(2, paramIdx.intValue());
- taintIdentifier=taintIdentifier | newTaint;
+
+ public void tainedBy(Integer paramIdx) {
+ int newTaint=(int) Math.pow(2, paramIdx.intValue());
+ taintIdentifier=taintIdentifier | newTaint;
}
-
- public void setTaintIdentifier(int newTaint){
- taintIdentifier=newTaint;
+
+ public void setTaintIdentifier(int newTaint) {
+ taintIdentifier=newTaint;
}
-
- public void unionTaintIdentifier(int newTaint){
- taintIdentifier=taintIdentifier | newTaint;
+
+ public void unionTaintIdentifier(int newTaint) {
+ taintIdentifier=taintIdentifier | newTaint;
}
-
- public void minusTaintIdentifier(int removedTaint){
- taintIdentifier = taintIdentifier & (~removedTaint);
+
+ public void minusTaintIdentifier(int removedTaint) {
+ taintIdentifier = taintIdentifier & (~removedTaint);
}
-
- public int getTaintIdentifier(){
- return taintIdentifier;
+
+ public int getTaintIdentifier() {
+ return taintIdentifier;
}
-
- public int getSESETaintIdentifier(){
- return SESEtaintIdentifier;
+
+ public int getSESETaintIdentifier() {
+ return SESEtaintIdentifier;
}
-
- public void setSESETaintIdentifier(int newTaint){
- SESEtaintIdentifier=newTaint;
+
+ public void setSESETaintIdentifier(int newTaint) {
+ SESEtaintIdentifier=newTaint;
}
-
- public void unionSESETaintIdentifier(int newTaint){
- SESEtaintIdentifier=SESEtaintIdentifier | newTaint;
+
+ public void unionSESETaintIdentifier(int newTaint) {
+ SESEtaintIdentifier=SESEtaintIdentifier | newTaint;
}
-
-
+
+
}
}
private void fixStuff() {
- //This is an evil hack...we should fix this stuff elsewhere...
- if (!isMultiObject) {
- arity=ARITY_ONE;
- } else {
- if (arity==ARITY_ONEORMORE)
- arity=ARITY_ZEROORMORE;
- }
+ //This is an evil hack...we should fix this stuff elsewhere...
+ if (!isMultiObject) {
+ arity=ARITY_ONE;
+ } else {
+ if (arity==ARITY_ONEORMORE)
+ arity=ARITY_ZEROORMORE;
+ }
}
}
public int hashCode() {
- return (token.intValue() << 2) ^ arity;
+ return (token.intValue() << 2) ^ arity;
}
if( ttThis == null ) {
return false;
}
- }
-
+ }
+
// then establish that anything in this set that is
- // not in tts is a zero-arity token tuple, which is okay
+ // not in tts is a zero-arity token tuple, which is okay
Iterator<TokenTuple> ttItrThis = this.iterator();
while( ttItrThis.hasNext() ) {
TokenTuple ttThis = ttItrThis.next();
TokenTuple ttIn = tts.containsToken(ttThis.getToken() );
- if( ttIn == null &&
- ttThis.getArity() != TokenTuple.ARITY_ZEROORMORE ) {
+ if( ttIn == null &&
+ ttThis.getArity() != TokenTuple.ARITY_ZEROORMORE ) {
return false;
}
- }
+ }
// if so this set contains tts with zeroes
return true;
assert ttIn != null;
ReachOperation ro=new ReachOperation(this, ttIn);
if (unionhash.containsKey(ro))
- return (TokenTupleSet) unionhash.get(ro).c;
+ return (TokenTupleSet) unionhash.get(ro).c;
else {
- TokenTupleSet ttsOut = new TokenTupleSet(this);
- ttsOut.tokenTuples.add(ttIn);
- ro.c=ttsOut=ttsOut.makeCanonical();
- unionhash.put(ro,ro);
- return ttsOut;
+ TokenTupleSet ttsOut = new TokenTupleSet(this);
+ ttsOut.tokenTuples.add(ttIn);
+ ro.c=ttsOut=ttsOut.makeCanonical();
+ unionhash.put(ro,ro);
+ return ttsOut;
}
}
assert ttsIn != null;
ReachOperation ro=new ReachOperation(this, ttsIn);
if (unionhash.containsKey(ro)) {
- return (TokenTupleSet) unionhash.get(ro).c;
+ return (TokenTupleSet) unionhash.get(ro).c;
} else {
- TokenTupleSet ttsOut = new TokenTupleSet(this);
- ttsOut.tokenTuples.addAll(ttsIn.tokenTuples);
- ro.c=ttsOut=ttsOut.makeCanonical();
- unionhash.put(ro,ro);
- return ttsOut;
+ TokenTupleSet ttsOut = new TokenTupleSet(this);
+ ttsOut.tokenTuples.addAll(ttsIn.tokenTuples);
+ ro.c=ttsOut=ttsOut.makeCanonical();
+ unionhash.put(ro,ro);
+ return ttsOut;
}
}
return tokenTuples.equals(tts.tokenTuples);
}
- boolean hashcodecomputed=false;
- int ourhashcode=0;
+ boolean hashcodecomputed=false;
+ int ourhashcode=0;
public int hashCode() {
- if (hashcodecomputed)
- return ourhashcode;
- else {
- ourhashcode=tokenTuples.hashCode();
- hashcodecomputed=true;
- return ourhashcode;
- }
+ if (hashcodecomputed)
+ return ourhashcode;
+ else {
+ ourhashcode=tokenTuples.hashCode();
+ hashcodecomputed=true;
+ return ourhashcode;
+ }
}
boolean summary;
FlatNew node;
AllocSiteNode as;
-
+
public AllocNode(int allocsite, FlatNew node, boolean summary) {
this.allocsite=allocsite;
this.summary=summary;
public boolean isSummary() {
return summary;
}
-
+
public int hashCode() {
return allocsite<<1^(summary?0:1);
}
-
+
public boolean equals(Object o) {
if (o instanceof AllocNode) {
AllocNode an=(AllocNode)o;
public static class AllocSiteNode implements Alloc {
int allocsite;
FlatNew node;
-
+
public AllocSiteNode(int allocsite, FlatNew node) {
this.allocsite=allocsite;
this.node=node;
public int hashCode() {
return allocsite;
}
-
+
public boolean equals(Object o) {
if (o instanceof AllocSiteNode) {
AllocSiteNode an=(AllocSiteNode)o;
public String toStringBrief() {
return getID();
}
-
+
public String toString() {
return getID();
}
exit=block;
do {
if (pm.numNext(fn)!=1) {
- for(int i=0;i<pm.numNext(fn);i++) {
+ for(int i=0; i<pm.numNext(fn); i++) {
FlatNode fnext=pm.getNext(fn,i);
if (!map.containsKey(fnext)) {
BBlock newb=new BBlock();
HashMap<AllocNode, Boolean> addOldNodes;
public Delta check() {
- for(Map.Entry<AllocNode, MySet<Edge>> entry:heapedgeadd.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : heapedgeadd.entrySet()) {
AllocNode node=entry.getKey();
if (node==null)
throw new Error("null node key");
- for(Edge e:entry.getValue())
+ for(Edge e : entry.getValue())
if (e.src!=node)
throw new Error(e.src+" is not equal to "+node);
}
- for(Map.Entry<TempDescriptor, MySet<Edge>> entry:varedgeadd.entrySet()) {
+ for(Map.Entry<TempDescriptor, MySet<Edge>> entry : varedgeadd.entrySet()) {
TempDescriptor tmp=entry.getKey();
if (tmp==null)
throw new Error("null temp key");
- for(Edge e:entry.getValue())
+ for(Edge e : entry.getValue())
if (e.srcvar!=tmp)
throw new Error(e.srcvar+" is not equal to "+tmp);
}
/* Init is set for false for delta propagations inside of one basic block.
*/
-
+
public Delta(PPoint block, boolean init) {
this.init=init;
this.baseheapedge=new HashMap<AllocNode, MySet<Edge>>();
newdelta.heapedgeremove=heapedgeremove;
//Update variable edge mappings
newdelta.varedgeadd=new HashMap<TempDescriptor, MySet<Edge>>();
- for(Map.Entry<TempDescriptor, MySet<Edge>> entry:varedgeadd.entrySet()) {
+ for(Map.Entry<TempDescriptor, MySet<Edge>> entry : varedgeadd.entrySet()) {
TempDescriptor origTmp=entry.getKey();
TempDescriptor newTmp=tmpMap.get(entry.getKey());
MySet<Edge> edgeset=entry.getValue();
if (!edgeset.isEmpty()) {
newdelta.varedgeadd.put(newTmp, new MySet<Edge>());
- for(Edge e:edgeset) {
+ for(Edge e : edgeset) {
newdelta.varedgeadd.get(newTmp).add(e.rewrite(origTmp, newTmp));
}
}
newdelta.addOldNodes=addOldNodes;
newdelta.baseOldNodes=baseOldNodes;
- for (Map.Entry<AllocNode, MySet<Edge>> entry:heapedgeadd.entrySet()) {
+ for (Map.Entry<AllocNode, MySet<Edge>> entry : heapedgeadd.entrySet()) {
newdelta.heapedgeadd.put(entry.getKey(), new MySet<Edge>(entry.getValue()));
}
- for (Map.Entry<TempDescriptor, MySet<Edge>> entry:varedgeadd.entrySet()) {
+ for (Map.Entry<TempDescriptor, MySet<Edge>> entry : varedgeadd.entrySet()) {
newdelta.varedgeadd.put(entry.getKey(), new MySet<Edge>(entry.getValue()));
}
- for(Edge e:edges) {
+ for(Edge e : edges) {
if (e.srcvar!=null) {
if (!newdelta.varedgeadd.containsKey(e.srcvar)) {
newdelta.varedgeadd.put(e.srcvar, new MySet<Edge>());
}
public void removeEdges(MySet<Edge> eset) {
- for(Edge e:eset) {
+ for(Edge e : eset) {
removeEdge(e);
}
}
this.dst=dst;
this.statuspredicate=statuspredicate;
}
-
+
public Edge(TempDescriptor tmp, AllocNode dst) {
this.srcvar=tmp;
this.dst=dst;
}
-
+
public AllocNode getDst() {
return dst;
}
return this;
Edge newe=copy();
newe.statuspredicate=mergeStatus(statuspredicate, e.statuspredicate);
- if (e.taints!=null) {
+ if (e.taints!=null) {
if (newe.taints==null)
newe.taints=e.taints;
else
Edge[] earray=new Edge[numedges];
int mask=1;
int edgeindex=0;
- for(int count=0;count<4;count++) {
+ for(int count=0; count<4; count++) {
if ((mask&statuspredicate)==mask) {
Edge e=new Edge();
e.fd=fd;
public static MySet<Edge> makeOld(MySet<Edge> old) {
MySet<Edge> newedge=new MySet<Edge>();
- for(Edge eold:old) {
+ for(Edge eold : old) {
newedge.add(eold.makeOld());
}
return newedge;
}
public static void mergeEdgesInto(MySet<Edge> orig, MySet<Edge> merge) {
- for(Edge e:merge) {
+ for(Edge e : merge) {
if (orig.contains(e)) {
Edge old=orig.get(e);
e=e.merge(old);
public static MySet<Edge> taintAll(MySet<Edge> orig, Taint t) {
MySet<Edge> taintedEdges=new MySet<Edge>();
- for(Edge e:orig) {
+ for(Edge e : orig) {
taintedEdges.add(e.addTaint(t));
}
return taintedEdges;
public static MySet<Edge> taintAll(MySet<Edge> orig, TaintSet t) {
MySet<Edge> taintedEdges=new MySet<Edge>();
- for(Edge e:orig) {
+ for(Edge e : orig) {
taintedEdges.add(e.addTaintSet(t));
}
return taintedEdges;
}
orig.add(e);
}
-
+
public AllocNode getSrcAlloc() {
return src;
}
-
+
public AllocNode getDstAlloc() {
return dst;
}
-
+
public FieldDescriptor getFieldDesc() {
return fd;
}
}
public void check() {
- for(Map.Entry<AllocNode, MySet<Edge>> entry:nodeMap.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : nodeMap.entrySet()) {
AllocNode node=entry.getKey();
if (node==null)
throw new Error("Null node key");
- for(Edge e:entry.getValue())
+ for(Edge e : entry.getValue())
if (e.src!=node)
throw new Error();
}
- for(Map.Entry<TempDescriptor, MySet<Edge>> entry:varMap.entrySet()) {
+ for(Map.Entry<TempDescriptor, MySet<Edge>> entry : varMap.entrySet()) {
TempDescriptor tmp=entry.getKey();
if (tmp==null)
throw new Error("Null tmp key");
- for(Edge e:entry.getValue())
+ for(Edge e : entry.getValue())
if (e.srcvar!=tmp)
throw new Error();
}
return edges.get(old);
}
}
-
+
public MySet<Edge> getEdges(TempDescriptor tmp) {
if (varMap.containsKey(tmp))
return varMap.get(tmp);
output.println("}\n");
}
- private void outputTempEdges(PrintWriter output, HashMap<TempDescriptor, MySet<Edge>> varMap,
- HashMap<TempDescriptor, MySet<Edge>> childvarMap) {
- for(Map.Entry<TempDescriptor, MySet<Edge>> entry:varMap.entrySet()) {
+ private void outputTempEdges(PrintWriter output, HashMap<TempDescriptor, MySet<Edge>> varMap,
+ HashMap<TempDescriptor, MySet<Edge>> childvarMap) {
+ for(Map.Entry<TempDescriptor, MySet<Edge>> entry : varMap.entrySet()) {
TempDescriptor tmp=entry.getKey();
if (childvarMap!=null&&childvarMap.containsKey(tmp))
continue;
output.println(tmp.getSymbol()+"[shape=rectangle];");
- for(Edge e:entry.getValue()) {
+ for(Edge e : entry.getValue()) {
if (e.srcvar!=tmp)
throw new Error(e.srcvar +" is not equal to "+tmp);
AllocNode n=e.dst;
}
}
- private void outputHeapEdges(PrintWriter output, HashMap<AllocNode, MySet<Edge>> nodeMap,
- HashMap<AllocNode, MySet<Edge>> childNodeMap) {
- for(Map.Entry<AllocNode, MySet<Edge>> entry:nodeMap.entrySet()) {
+ private void outputHeapEdges(PrintWriter output, HashMap<AllocNode, MySet<Edge>> nodeMap,
+ HashMap<AllocNode, MySet<Edge>> childNodeMap) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : nodeMap.entrySet()) {
AllocNode node=entry.getKey();
if (childNodeMap!=null&&childNodeMap.containsKey(node))
continue;
- for(Edge e:entry.getValue()) {
+ for(Edge e : entry.getValue()) {
if (e.src!=node)
throw new Error(e.src+" is not equal to "+node);
AllocNode n=e.dst;
public class GraphManip {
static MySet<Edge> genEdges(TempDescriptor tmp, HashSet<AllocNode> dstSet) {
MySet<Edge> edgeset=new MySet<Edge>();
- for(AllocNode node:dstSet) {
+ for(AllocNode node : dstSet) {
edgeset.add(new Edge(tmp, node));
}
return edgeset;
static MySet<Edge> genEdges(TempDescriptor tmp, MySet<Edge> dstSet) {
MySet<Edge> edgeset=new MySet<Edge>();
- for(Edge e:dstSet) {
+ for(Edge e : dstSet) {
edgeset.add(e.changeSrcVar(tmp, null));
}
return edgeset;
static MySet<Edge> genEdges(HashSet<AllocNode> srcSet, FieldDescriptor fd, HashSet<AllocNode> dstSet) {
MySet<Edge> edgeset=new MySet<Edge>();
- for(AllocNode srcnode:srcSet) {
- for(AllocNode dstnode:dstSet) {
+ for(AllocNode srcnode : srcSet) {
+ for(AllocNode dstnode : dstSet) {
edgeset.add(new Edge(srcnode, fd, dstnode, Edge.NEW));
}
}
static MySet<Edge> genEdges(MySet<Edge> srcSet, FieldDescriptor fd, MySet<Edge> dstSet) {
MySet<Edge> edgeset=new MySet<Edge>();
- for(Edge srcedge:srcSet) {
- for(Edge dstedge:dstSet) {
+ for(Edge srcedge : srcSet) {
+ for(Edge dstedge : dstSet) {
edgeset.add(dstedge.changeSrc(fd, srcedge.dst));
}
}
static MySet<Edge> genEdges(HashSet<AllocNode> srcSet, FieldDescriptor fd, MySet<Edge> dstSet) {
MySet<Edge> edgeset=new MySet<Edge>();
- for(AllocNode srcnode:srcSet) {
- for(Edge dstedge:dstSet) {
+ for(AllocNode srcnode : srcSet) {
+ for(Edge dstedge : dstSet) {
edgeset.add(dstedge.changeSrc(fd, srcnode));
}
}
static MySet<Edge> getDiffEdges(Delta delta, TempDescriptor tmp) {
MySet<Edge> edges=new MySet<Edge>();
MySet<Edge> removeedges=delta.varedgeremove.get(tmp);
-
+
MySet<Edge> baseedges=delta.basevaredge.get(tmp);
if (baseedges!=null) {
- for(Edge e:baseedges) {
+ for(Edge e : baseedges) {
if (removeedges==null||!removeedges.contains(e))
edges.add(e);
}
}
if (delta.varedgeadd.containsKey(tmp))
- for(Edge e:delta.varedgeadd.get(tmp)) {
+ for(Edge e : delta.varedgeadd.get(tmp)) {
edges.add(e);
}
return edges;
MySet<Edge> edges=new MySet<Edge>();
MySet<Edge> removeedges=delta.varedgeremove.get(tmp);
- for(Edge e:graph.getEdges(tmp)) {
+ for(Edge e : graph.getEdges(tmp)) {
if (removeedges==null||!removeedges.contains(e))
edges.add(e);
}
if (delta.varedgeadd.containsKey(tmp))
- for(Edge e:delta.varedgeadd.get(tmp)) {
+ for(Edge e : delta.varedgeadd.get(tmp)) {
edges.add(e);
}
return edges;
static MySet<Edge> getEdges(Graph graph, Delta delta, MySet<Edge> srcNodes, FieldDescriptor fd) {
MySet<Edge> nodes=new MySet<Edge>();
- for(Edge node:srcNodes) {
+ for(Edge node : srcNodes) {
MySet<Edge> removeedges=delta.heapedgeremove.get(node.dst);
- for(Edge e:graph.getEdges(node.dst)) {
+ for(Edge e : graph.getEdges(node.dst)) {
if (e.fd==fd&&(removeedges==null||!removeedges.contains(e)))
nodes.add(e);
}
if (delta.heapedgeadd.containsKey(node.dst))
- for(Edge e:delta.heapedgeadd.get(node.dst)) {
+ for(Edge e : delta.heapedgeadd.get(node.dst)) {
if (e.fd==fd)
nodes.add(e);
}
static MySet<Edge> getEdges(Graph graph, Delta delta, HashSet<AllocNode> srcNodes, FieldDescriptor fd) {
MySet<Edge> nodes=new MySet<Edge>();
- for(AllocNode node:srcNodes) {
+ for(AllocNode node : srcNodes) {
MySet<Edge> removeedges=delta.heapedgeremove.get(node);
- for(Edge e:graph.getEdges(node)) {
+ for(Edge e : graph.getEdges(node)) {
if (e.fd==fd&&(removeedges==null||!removeedges.contains(e)))
nodes.add(e);
}
if (delta.heapedgeadd.containsKey(node))
- for(Edge e:delta.heapedgeadd.get(node)) {
+ for(Edge e : delta.heapedgeadd.get(node)) {
if (e.fd==fd)
nodes.add(e);
}
static MySet<Edge> getEdges(Graph graph, Delta delta, AllocNode node) {
MySet<Edge> nodes=new MySet<Edge>();
MySet<Edge> removeedges=delta.heapedgeremove.get(node);
- for(Edge e:graph.getEdges(node)) {
+ for(Edge e : graph.getEdges(node)) {
if ((removeedges==null||!removeedges.contains(e)))
nodes.add(e);
}
if (delta.heapedgeadd.containsKey(node))
- for(Edge e:delta.heapedgeadd.get(node)) {
+ for(Edge e : delta.heapedgeadd.get(node)) {
nodes.add(e);
}
-
+
return nodes;
}
static HashSet<AllocNode> getDiffNodes(Delta delta, TempDescriptor tmp) {
HashSet<AllocNode> nodes=new HashSet<AllocNode>();
MySet<Edge> removeedges=delta.varedgeremove.get(tmp);
-
+
MySet<Edge> baseEdges=delta.basevaredge.get(tmp);
if (baseEdges!=null)
- for(Edge e:baseEdges) {
+ for(Edge e : baseEdges) {
if (removeedges==null||!removeedges.contains(e))
nodes.add(e.dst);
}
if (delta.varedgeadd.containsKey(tmp))
- for(Edge e:delta.varedgeadd.get(tmp)) {
+ for(Edge e : delta.varedgeadd.get(tmp)) {
nodes.add(e.dst);
}
return nodes;
HashSet<AllocNode> nodes=new HashSet<AllocNode>();
MySet<Edge> removeedges=delta.varedgeremove.get(tmp);
- for(Edge e:graph.getEdges(tmp)) {
+ for(Edge e : graph.getEdges(tmp)) {
if (removeedges==null||!removeedges.contains(e))
nodes.add(e.dst);
}
if (delta.varedgeadd.containsKey(tmp))
- for(Edge e:delta.varedgeadd.get(tmp)) {
+ for(Edge e : delta.varedgeadd.get(tmp)) {
nodes.add(e.dst);
}
return nodes;
static HashSet<AllocNode> getDiffNodes(Delta delta, HashSet<AllocNode> srcNodes, FieldDescriptor fd) {
HashSet<AllocNode> nodes=new HashSet<AllocNode>();
- for(AllocNode node:srcNodes) {
+ for(AllocNode node : srcNodes) {
MySet<Edge> removeedges=delta.heapedgeremove.get(node);
MySet<Edge> baseEdges=delta.baseheapedge.get(node);
if (baseEdges!=null)
- for(Edge e:baseEdges) {
+ for(Edge e : baseEdges) {
if (e.fd==fd&&(removeedges==null||!removeedges.contains(e)))
nodes.add(e.dst);
}
if (delta.heapedgeadd.containsKey(node))
- for(Edge e:delta.heapedgeadd.get(node)) {
+ for(Edge e : delta.heapedgeadd.get(node)) {
if (e.fd==fd)
nodes.add(e.dst);
}
static MySet<Edge> getDiffEdges(Delta delta, HashSet<AllocNode> srcNodes) {
MySet<Edge> newedges=new MySet<Edge>();
- for(Map.Entry<AllocNode, MySet<Edge>> entry:delta.baseheapedge.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : delta.baseheapedge.entrySet()) {
AllocNode node=entry.getKey();
if (srcNodes.contains(node)) {
MySet<Edge> edges=entry.getValue();
MySet<Edge> removeedges=delta.heapedgeremove.get(node);
- for(Edge e:edges) {
+ for(Edge e : edges) {
if (removeedges==null||!removeedges.contains(e)) {
newedges.add(e);
}
}
}
}
- for(Map.Entry<AllocNode, MySet<Edge>> entry:delta.heapedgeadd.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : delta.heapedgeadd.entrySet()) {
AllocNode node=entry.getKey();
if (srcNodes.contains(node)) {
MySet<Edge> edges=entry.getValue();
static MySet<Edge> getDiffEdges(Delta delta, HashSet<AllocNode> srcNodes, FieldDescriptor fd) {
MySet<Edge> newedges=new MySet<Edge>();
- for(Map.Entry<AllocNode, MySet<Edge>> entry:delta.baseheapedge.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : delta.baseheapedge.entrySet()) {
AllocNode node=entry.getKey();
if (srcNodes.contains(node)) {
MySet<Edge> edges=entry.getValue();
MySet<Edge> removeedges=delta.heapedgeremove.get(node);
- for(Edge e:edges) {
+ for(Edge e : edges) {
if ((removeedges==null||!removeedges.contains(e))&&(e.fd==fd)) {
newedges.add(e);
}
}
}
}
- for(Map.Entry<AllocNode, MySet<Edge>> entry:delta.heapedgeadd.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : delta.heapedgeadd.entrySet()) {
AllocNode node=entry.getKey();
if (srcNodes.contains(node)) {
MySet<Edge> edges=entry.getValue();
- for(Edge e:edges) {
+ for(Edge e : edges) {
if (e.fd==fd)
newedges.add(e);
}
static MySet<Edge> makeOld(MySet<Edge> edgesin) {
MySet<Edge> edgeset=new MySet<Edge>();
- for(Edge e:edgesin) {
+ for(Edge e : edgesin) {
edgeset.add(e.makeOld());
}
return edgeset;
static MySet<Edge> dereference(Graph graph, Delta delta, TempDescriptor dst, MySet<Edge> srcEdges, FieldDescriptor fd, FlatNode fn) {
MySet<Edge> edgeset=new MySet<Edge>();
- for(Edge edge:srcEdges) {
+ for(Edge edge : srcEdges) {
TaintSet ts=edge.getTaints();
if (ts!=null) {
ts=ts.reTaint(fn);
}
MySet<Edge> removeedges=delta.heapedgeremove.get(edge.dst);
- for(Edge e:graph.getEdges(edge.dst)) {
+ for(Edge e : graph.getEdges(edge.dst)) {
if (e.fd==fd&&(removeedges==null||!removeedges.contains(e))) {
e=e.changeSrcVar(dst, ts);
if (!edgeset.contains(e))
}
}
if (delta.heapedgeadd.containsKey(edge.dst))
- for(Edge e:delta.heapedgeadd.get(edge.dst)) {
+ for(Edge e : delta.heapedgeadd.get(edge.dst)) {
if (e.fd==fd) {
e=e.changeSrcVar(dst, ts);
if (!edgeset.contains(e))
static MySet<Edge> diffDereference(Delta delta, TempDescriptor dst, MySet<Edge> srcEdges, FieldDescriptor fd, FlatNode fn) {
MySet<Edge> edgeset=new MySet<Edge>();
- for(Edge edge:srcEdges) {
+ for(Edge edge : srcEdges) {
TaintSet ts=edge.getTaints();
if (ts!=null) {
ts=ts.reTaint(fn);
}
MySet<Edge> removeedges=delta.heapedgeremove.get(edge.dst);
if (delta.baseheapedge.containsKey(edge.dst)) {
- for(Edge e:delta.baseheapedge.get(edge.dst)) {
+ for(Edge e : delta.baseheapedge.get(edge.dst)) {
if (e.fd==fd&&(removeedges==null||!removeedges.contains(e))) {
e=e.changeSrcVar(dst, ts);
if (!edgeset.contains(e))
}
}
if (delta.heapedgeadd.containsKey(edge.dst))
- for(Edge e:delta.heapedgeadd.get(edge.dst)) {
+ for(Edge e : delta.heapedgeadd.get(edge.dst)) {
if (e.fd==fd) {
e=e.changeSrcVar(dst, ts);
if (!edgeset.contains(e))
static HashSet<AllocNode> getNodes(Graph graph, Delta delta, HashSet<AllocNode> srcNodes, FieldDescriptor fd) {
HashSet<AllocNode> nodes=new HashSet<AllocNode>();
- for(AllocNode node:srcNodes) {
+ for(AllocNode node : srcNodes) {
MySet<Edge> removeedges=delta.heapedgeremove.get(node);
- for(Edge e:graph.getEdges(node)) {
+ for(Edge e : graph.getEdges(node)) {
if (e.fd==fd&&(removeedges==null||!removeedges.contains(e)))
nodes.add(e.dst);
}
if (delta.heapedgeadd.containsKey(node))
- for(Edge e:delta.heapedgeadd.get(node)) {
+ for(Edge e : delta.heapedgeadd.get(node)) {
if (e.fd==fd)
nodes.add(e.dst);
}
public boolean remove(Object obj) {
if (locked)
throw new Error();
- return map.remove(obj)!=null;
+ return map.remove(obj)!=null;
}
-
+
public boolean add(T obj) {
if (locked)
throw new Error();
public boolean isEmpty() {
return map.isEmpty();
}
-
+
public Iterator<T> iterator() {
return map.keySet().iterator();
}
import java.io.*;
-public class Pointer implements HeapAnalysis{
+public class Pointer implements HeapAnalysis {
HashMap<FlatMethod, BasicBlock> blockMap;
HashMap<BBlock, Graph> bbgraphMap;
HashMap<FlatNode, Graph> graphMap;
if (!blockMap.containsKey(fm)) {
blockMap.put(fm, BasicBlock.getBBlock(fm));
Hashtable<FlatNode, Set<TempDescriptor>> livemap=Liveness.computeLiveTemps(fm);
- for(BBlock bblock:blockMap.get(fm).getBlocks()) {
+ for(BBlock bblock : blockMap.get(fm).getBlocks()) {
FlatNode fn=bblock.nodes.get(0);
if (fn==fm) {
HashSet<TempDescriptor> fmset=new HashSet<TempDescriptor>();
}
return blockMap.get(fm);
}
-
+
Delta buildInitialContext() {
MethodDescriptor md=typeUtil.getMain();
FlatMethod fm=state.getMethodFlat(md);
public void doAnalysis() {
toprocess.add(buildInitialContext());
- nextdelta:
+nextdelta:
while(!toprocess.isEmpty()) {
Delta delta=toprocess.remove();
PPoint ppoint=delta.getBlock();
}
Graph graph=bbgraphMap.get(bblock);
Graph nodeGraph=null;
-
+
int lasti=-1;
//Compute delta at exit of each node
- for(int i=startindex; i<nodes.size();i++) {
+ for(int i=startindex; i<nodes.size(); i++) {
FlatNode currNode=nodes.get(i);
//System.out.println("Start Processing "+currNode);
boolean init=delta.getInit();
boolean fallthru=true;
if (isINACC(currNode)&&((lasti==-1)||(lasti==i))) {
if (lasti==-1) {
- for(lasti=nodes.size()-1;lasti>=i;lasti--) {
+ for(lasti=nodes.size()-1; lasti>=i; lasti--) {
FlatNode scurrNode=nodes.get(lasti);
if (isNEEDED(scurrNode)||isINACC(scurrNode)) {
break;
//DEBUG
if (false) {
int debugindex=0;
- for(Map.Entry<BBlock, Graph> e:bbgraphMap.entrySet()) {
+ for(Map.Entry<BBlock, Graph> e : bbgraphMap.entrySet()) {
Graph g=e.getValue();
plotGraph(g,"BB"+e.getKey().nodes.get(0).toString().replace(' ','_'));
debugindex++;
}
-
- for(FlatMethod fm:blockMap.keySet()) {
+
+ for(FlatMethod fm : blockMap.keySet()) {
System.out.println(fm.printMethod());
}
- for(Map.Entry<FlatNode, Graph> e:graphMap.entrySet()) {
+ for(Map.Entry<FlatNode, Graph> e : graphMap.entrySet()) {
FlatNode fn=e.getKey();
Graph g=e.getValue();
plotGraph(g,"FN"+fn.toString()+debugindex);
debugindex++;
- }
+ }
}
State.logEvent("Done With Pointer Analysis");
ex.printStackTrace();
}
}
-
+
/* This function builds the last delta for a basic block. It
* handles the case for the first time the basic block is
HashSet<TempDescriptor> tmpSet=new HashSet<TempDescriptor>();
tmpSet.addAll(graph.varMap.keySet());
tmpSet.addAll(graph.parent.varMap.keySet());
-
+
//Next build the temp map part of the delta
- for(TempDescriptor tmp:tmpSet) {
+ for(TempDescriptor tmp : tmpSet) {
MySet<Edge> edgeSet=new MySet<Edge>();
/* Get target set */
if (graph.varMap.containsKey(tmp))
edgeSet.addAll(graph.parent.varMap.get(tmp));
newDelta.varedgeadd.put(tmp, edgeSet);
}
-
+
//Next compute the set of src allocnodes
HashSet<AllocNode> nodeSet=new HashSet<AllocNode>();
nodeSet.addAll(graph.nodeMap.keySet());
nodeSet.addAll(graph.parent.nodeMap.keySet());
-
- for(AllocNode node:nodeSet) {
+
+ for(AllocNode node : nodeSet) {
MySet<Edge> edgeSet=new MySet<Edge>();
/* Get edge set */
if (graph.nodeMap.containsKey(node))
else
edgeSet.addAll(graph.parent.nodeMap.get(node));
newDelta.heapedgeadd.put(node, edgeSet);
-
+
/* Compute ages */
if (graph.oldNodes.containsKey(node)) {
if (graph.oldNodes.get(node).booleanValue())
newDelta.addOldNodes.put(node, Boolean.TRUE);
}
}
-
+
newDelta.addNodeAges.addAll(graph.nodeAges);
newDelta.addNodeAges.addAll(graph.parent.nodeAges);
}
HashSet<TempDescriptor> tmpSet=new HashSet<TempDescriptor>();
tmpSet.addAll(delta.basevaredge.keySet());
tmpSet.addAll(delta.varedgeadd.keySet());
- for(TempDescriptor tmp:tmpSet) {
+ for(TempDescriptor tmp : tmpSet) {
/* Start with the new incoming edges */
MySet<Edge> newbaseedge=delta.basevaredge.get(tmp);
/* Remove the remove set */
nodeSet.addAll(delta.baseheapedge.keySet());
nodeSet.addAll(delta.heapedgeadd.keySet());
nodeSet.addAll(delta.heapedgeremove.keySet());
- for(AllocNode node:nodeSet) {
+ for(AllocNode node : nodeSet) {
/* Start with the new incoming edges */
MySet<Edge> newheapedge=new MySet<Edge>(delta.baseheapedge.get(node));
/* Remove the remove set */
/* Compute whether old nodes survive */
oldNodes.addAll(delta.baseOldNodes.keySet());
oldNodes.addAll(delta.addOldNodes.keySet());
- for(AllocNode node:oldNodes) {
+ for(AllocNode node : oldNodes) {
if (delta.addOldNodes.containsKey(node)) {
if (delta.addOldNodes.get(node).booleanValue()) {
newDelta.addOldNodes.put(node, Boolean.TRUE);
if (returnMap.containsKey(bblock)) {
//exit of call block
boolean first=true;
-
- for(PPoint caller:returnMap.get(bblock)) {
+
+ for(PPoint caller : returnMap.get(bblock)) {
//System.out.println("Sending Return BBlock to "+caller.getBBlock().nodes.get(caller.getIndex()).toString().replace(' ','_'));
//newDelta.print();
if (first) {
} else {
//normal block
Vector<BBlock> blockvector=bblock.next();
- for(int i=0;i<blockvector.size();i++) {
+ for(int i=0; i<blockvector.size(); i++) {
//System.out.println("Sending BBlock to "+blockvector.get(i).nodes.get(0).toString().replace(' ','_'));
//newDelta.print();
if (i==0) {
FlatSetFieldNode n=(FlatSetFieldNode)node;
return n.getSrc().getType().isPtr();
}
+
case FKind.FlatSetElementNode: {
FlatSetElementNode n=(FlatSetElementNode)node;
return n.getSrc().getType().isPtr();
}
+
case FKind.FlatFieldNode: {
FlatFieldNode n=(FlatFieldNode)node;
return n.getDst().getType().isPtr();
}
+
case FKind.FlatElementNode: {
FlatElementNode n=(FlatElementNode)node;
return n.getDst().getType().isPtr();
switch(node.kind()) {
case FKind.FlatNew:
return processNewNode((FlatNew)node, delta, newgraph);
+
case FKind.FlatFieldNode:
case FKind.FlatElementNode:
return processFieldElementNode(node, delta, newgraph);
+
case FKind.FlatCastNode:
case FKind.FlatOpNode:
case FKind.FlatReturnNode:
return processCopyNode(node, delta, newgraph);
+
case FKind.FlatSetFieldNode:
case FKind.FlatSetElementNode:
return processSetFieldElementNode(node, delta, newgraph);
+
case FKind.FlatSESEEnterNode:
return processSESEEnterNode((FlatSESEEnterNode) node, delta, newgraph);
+
case FKind.FlatSESEExitNode:
return processSESEExitNode((FlatSESEExitNode) node, delta, newgraph);
+
case FKind.FlatMethod:
case FKind.FlatExit:
case FKind.FlatBackEdge:
case FKind.FlatGenReachNode:
return processFlatNop(node, delta, newgraph);
+
case FKind.FlatCall:
return processFlatCall(bblock, index, (FlatCall) node, delta, newgraph);
+
default:
throw new Error("Unrecognized node:"+node);
}
return processFlatNop(sese, delta, graph);
if (delta.getInit()) {
removeInitTaints(null, delta, graph);
- for (TempDescriptor tmp:sese.getInVarSet()) {
+ for (TempDescriptor tmp : sese.getInVarSet()) {
Taint taint=Taint.factory(sese, null, tmp, AllocFactory.dummySite, null, ReachGraph.predsEmpty);
MySet<Edge> edges=GraphManip.getEdges(graph, delta, tmp);
- for(Edge e:edges) {
+ for(Edge e : edges) {
Edge newe=e.addTaint(taint);
delta.addVarEdge(newe);
}
}
} else {
removeDiffTaints(null, delta);
- for (TempDescriptor tmp:sese.getInVarSet()) {
+ for (TempDescriptor tmp : sese.getInVarSet()) {
Taint taint=Taint.factory(sese, null, tmp, AllocFactory.dummySite, null, ReachGraph.predsEmpty);
MySet<Edge> edges=GraphManip.getDiffEdges(delta, tmp);
- for(Edge e:edges) {
+ for(Edge e : edges) {
Edge newe=e.addTaint(taint);
delta.addVarEdge(newe);
}
applyDiffs(graph, delta);
return delta;
}
-
+
private boolean isRecursive(FlatSESEEnterNode sese) {
MethodDescriptor md=sese.getmdEnclosing();
boolean isrecursive=callGraph.getCalleeSet(md).contains(md);
applyDiffs(graph, delta);
return delta;
}
-
+
void removeDiffTaints(FlatSESEEnterNode sese, Delta delta) {
//Start with variable edges
{
MySet<Edge> edgestoadd=new MySet<Edge>();
MySet<Edge> edgestoremove=new MySet<Edge>();
-
+
//Process base diff edges
- processEdgeMap(sese, delta.basevaredge, null, delta.varedgeremove, edgestoremove, edgestoadd);
+ processEdgeMap(sese, delta.basevaredge, null, delta.varedgeremove, edgestoremove, edgestoadd);
//Process delta edges
- processEdgeMap(sese, delta.varedgeadd, null, null, edgestoremove, edgestoadd);
- for(Edge e:edgestoremove) {
+ processEdgeMap(sese, delta.varedgeadd, null, null, edgestoremove, edgestoadd);
+ for(Edge e : edgestoremove) {
delta.removeVarEdge(e);
}
- for(Edge e:edgestoadd) {
+ for(Edge e : edgestoadd) {
delta.addVarEdge(e);
}
}
MySet<Edge> edgestoremove=new MySet<Edge>();
//Process base diff edges
- processEdgeMap(sese, delta.baseheapedge, null, delta.heapedgeremove, edgestoremove, edgestoadd);
+ processEdgeMap(sese, delta.baseheapedge, null, delta.heapedgeremove, edgestoremove, edgestoadd);
//Process delta edges
- processEdgeMap(sese, delta.heapedgeadd, null, null, edgestoremove, edgestoadd);
- for(Edge e:edgestoremove) {
+ processEdgeMap(sese, delta.heapedgeadd, null, null, edgestoremove, edgestoadd);
+ for(Edge e : edgestoremove) {
delta.removeHeapEdge(e);
}
- for(Edge e:edgestoadd) {
+ for(Edge e : edgestoadd) {
delta.addHeapEdge(e);
}
}
{
MySet<Edge> edgestoadd=new MySet<Edge>();
MySet<Edge> edgestoremove=new MySet<Edge>();
-
+
//Process parent edges
processEdgeMap(sese, graph.parent.varMap, graph.varMap, delta.varedgeremove, edgestoremove, edgestoadd);
//Process graph edges
- processEdgeMap(sese, graph.varMap, null, delta.varedgeremove, edgestoremove, edgestoadd);
+ processEdgeMap(sese, graph.varMap, null, delta.varedgeremove, edgestoremove, edgestoadd);
//Process delta edges
- processEdgeMap(sese, delta.varedgeadd, null, null, edgestoremove, edgestoadd);
- for(Edge e:edgestoremove) {
+ processEdgeMap(sese, delta.varedgeadd, null, null, edgestoremove, edgestoadd);
+ for(Edge e : edgestoremove) {
delta.removeVarEdge(e);
}
- for(Edge e:edgestoadd) {
+ for(Edge e : edgestoadd) {
delta.addVarEdge(e);
}
}
//Process parent edges
processEdgeMap(sese, graph.parent.nodeMap, graph.nodeMap, delta.heapedgeremove, edgestoremove, edgestoadd);
//Process graph edges
- processEdgeMap(sese, graph.nodeMap, null, delta.heapedgeremove, edgestoremove, edgestoadd);
+ processEdgeMap(sese, graph.nodeMap, null, delta.heapedgeremove, edgestoremove, edgestoadd);
//Process delta edges
- processEdgeMap(sese, delta.heapedgeadd, null, null, edgestoremove, edgestoadd);
- for(Edge e:edgestoremove) {
+ processEdgeMap(sese, delta.heapedgeadd, null, null, edgestoremove, edgestoadd);
+ for(Edge e : edgestoremove) {
delta.removeHeapEdge(e);
}
- for(Edge e:edgestoadd) {
+ for(Edge e : edgestoadd) {
delta.addHeapEdge(e);
}
}
//Handle the this temp
if (tmpthis!=null) {
MySet<Edge> edges=(oldnodeset!=null)?GraphManip.getDiffEdges(delta, tmpthis):GraphManip.getEdges(graph, delta, tmpthis);
- newDelta.varedgeadd.put(tmpthis, (MySet<Edge>) edges.clone());
+ newDelta.varedgeadd.put(tmpthis, (MySet<Edge>)edges.clone());
edgeset.addAll(edges);
for(Edge e:edges) {
AllocNode dstnode=e.dst;
void processParams(Graph graph, Delta delta, Delta newDelta, HashSet<AllocNode> nodeset, Stack<AllocNode> tovisit, MySet<Edge> edgeset, FlatCall fcall, boolean diff) {
//Go through each temp
- for(int i=0;i<fcall.numArgs();i++) {
+ for(int i=0; i<fcall.numArgs(); i++) {
TempDescriptor tmp=fcall.getArg(i);
MySet<Edge> edges=diff?GraphManip.getDiffEdges(delta, tmp):GraphManip.getEdges(graph, delta, tmp);
- newDelta.varedgeadd.put(tmp, (MySet<Edge>) edges.clone());
+ newDelta.varedgeadd.put(tmp, (MySet<Edge>)edges.clone());
edgeset.addAll(edges);
for(Edge e:edges) {
if (!nodeset.contains(e.dst)) {
/* This function computes the reachable nodes for a callee. */
void computeReachableNodes(Graph graph, Delta delta, Delta newDelta, HashSet<AllocNode> nodeset, Stack<AllocNode> tovisit, MySet<Edge> edgeset, HashSet<AllocNode> oldnodeset) {
- while(!tovisit.isEmpty()) {
- AllocNode node=tovisit.pop();
- MySet<Edge> edges=GraphManip.getEdges(graph, delta, node);
- if (!edges.isEmpty()) {
- newDelta.heapedgeadd.put(node, Edge.makeOld(edges));
- edgeset.addAll(edges);
- for(Edge e:edges) {
- if (!nodeset.contains(e.dst)&&(oldnodeset==null||!oldnodeset.contains(e.dst))) {
- nodeset.add(e.dst);
- tovisit.add(e.dst);
- }
+ while(!tovisit.isEmpty()) {
+ AllocNode node=tovisit.pop();
+ MySet<Edge> edges=GraphManip.getEdges(graph, delta, node);
+ if (!edges.isEmpty()) {
+ newDelta.heapedgeadd.put(node, Edge.makeOld(edges));
+ edgeset.addAll(edges);
+ for(Edge e : edges) {
+ if (!nodeset.contains(e.dst)&&(oldnodeset==null||!oldnodeset.contains(e.dst))) {
+ nodeset.add(e.dst);
+ tovisit.add(e.dst);
}
}
}
+ }
}
HashSet<MethodDescriptor> computeTargets(FlatCall fcall, Delta newDelta) {
targets.add(md);
} else {
//Compute Edges
- for(Edge e:newDelta.varedgeadd.get(tmpthis)) {
+ for(Edge e : newDelta.varedgeadd.get(tmpthis)) {
AllocNode node=e.dst;
ClassDescriptor cd=node.getType().getClassDesc();
//Figure out exact method called and add to set
Delta basedelta=null;
TempDescriptor tmpthis=fcall.getThis();
- for(MethodDescriptor calledmd:targets) {
+ for(MethodDescriptor calledmd : targets) {
FlatMethod fm=state.getMethodFlat(calledmd);
boolean newmethod=false;
-
+
//Build tmpMap
HashMap<TempDescriptor, TempDescriptor> tmpMap=new HashMap<TempDescriptor, TempDescriptor>();
int offset=0;
if(tmpthis!=null) {
tmpMap.put(tmpthis, fm.getParameter(offset++));
}
- for(int i=0;i<fcall.numArgs();i++) {
+ for(int i=0; i<fcall.numArgs(); i++) {
TempDescriptor tmp=fcall.getArg(i);
tmpMap.put(tmp,fm.getParameter(i+offset));
}
//Get basicblock for the method
BasicBlock block=getBBlock(fm);
-
+
//Hook up exits
if (!callMap.containsKey(fcall)) {
callMap.put(fcall, new HashSet<BBlock>());
}
-
+
Delta returnDelta=null;
if (!callMap.get(fcall).contains(block.getStart())) {
callMap.get(fcall).add(block.getStart());
newmethod=true;
-
+
//Hook up return
if (!returnMap.containsKey(block.getExit())) {
returnMap.put(block.getExit(), new HashSet<PPoint>());
}
returnMap.get(block.getExit()).add(new PPoint(callblock, callindex));
-
+
if (bbgraphMap.containsKey(block.getExit())) {
//Need to push existing results to current node
if (returnDelta==null) {
}
}
}
-
+
if (oldedgeset==null) {
//First build of this graph
//Build and enqueue delta...safe to just use existing delta
externalnodes.addAll(delta.heapedgeremove.keySet());
//remove allinternal nodes
externalnodes.removeAll(nodeset);
- for(AllocNode extNode:externalnodes) {
+ for(AllocNode extNode : externalnodes) {
//Compute set of edges from given node
MySet<Edge> edges=new MySet<Edge>(delta.baseheapedge.get(extNode));
edges.removeAll(delta.heapedgeremove.get(extNode));
edges.addAll(delta.heapedgeadd.get(extNode));
-
- for(Edge e:edges) {
+
+ for(Edge e : edges) {
if (nodeset.contains(e.dst))
externaledgeset.add(e);
}
temps.addAll(delta.varedgeremove.keySet());
//remove allinternal nodes
temps.removeAll(nodeset);
-
- for(TempDescriptor tmp:temps) {
+
+ for(TempDescriptor tmp : temps) {
//Compute set of edges from given node
MySet<Edge> edges=new MySet<Edge>(delta.basevaredge.get(tmp));
-
+
edges.removeAll(delta.varedgeremove.get(tmp));
edges.addAll(delta.varedgeadd.get(tmp));
-
- for(Edge e:edges) {
+
+ for(Edge e : edges) {
if (nodeset.contains(e.dst))
externaledgeset.add(e);
}
/* This function removes the caller reachable edges from the
* callee's heap. */
-
+
void removeEdges(Graph graph, Delta delta, HashSet<AllocNode> nodeset, MySet<Edge> edgeset, MySet<Edge> externaledgeset) {
//Want to remove the set of internal edges
- for(Edge e:edgeset) {
+ for(Edge e : edgeset) {
if (e.src!=null&&!graph.callerEdges.contains(e)) {
delta.removeHeapEdge(e);
}
}
//Want to remove the set of external edges
- for(Edge e:externaledgeset) {
+ for(Edge e : externaledgeset) {
//want to remove the set of internal edges
if (!graph.callerEdges.contains(e))
delta.removeEdge(e);
//Go through each temp
processParams(graph, delta, newDelta, nodeset, tovisit, edgeset, fcall, false);
-
+
//Traverse all reachable nodes
computeReachableNodes(graph, delta, newDelta, nodeset, tovisit, edgeset, null);
graph.externalEdgeSet=externaledgeset;
graph.reachNode=nodeset;
graph.reachEdge=edgeset;
-
+
graph.callTargets=newtargets;
graph.callNodeAges=new HashSet<AllocNode>();
graph.callOldNodes=new HashSet<AllocNode>();
Stack<AllocNode> tovisit=new Stack<AllocNode>();
TempDescriptor tmpthis=fcall.getThis();
//Fix up delta to get rid of unnecessary heap edge removals
- for(Map.Entry<AllocNode, MySet<Edge>> entry:delta.heapedgeremove.entrySet()) {
- for(Iterator<Edge> eit=entry.getValue().iterator();eit.hasNext();) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : delta.heapedgeremove.entrySet()) {
+ for(Iterator<Edge> eit=entry.getValue().iterator(); eit.hasNext(); ) {
Edge e=eit.next();
if (graph.callerEdges.contains(e))
eit.remove();
}
//Fix up delta to get rid of unnecessary var edge removals
- for(Map.Entry<TempDescriptor, MySet<Edge>> entry:delta.varedgeremove.entrySet()) {
- for(Iterator<Edge> eit=entry.getValue().iterator();eit.hasNext();) {
+ for(Map.Entry<TempDescriptor, MySet<Edge>> entry : delta.varedgeremove.entrySet()) {
+ for(Iterator<Edge> eit=entry.getValue().iterator(); eit.hasNext(); ) {
Edge e=eit.next();
if (graph.callerEdges.contains(e))
eit.remove();
}
}
-
+
//Handle the this temp
processThisTargets(targetSet, graph, delta, newDelta, nodeset, tovisit, edgeset, tmpthis, oldnodeset);
//Go through each new heap edge that starts from old node
MySet<Edge> newedges=GraphManip.getDiffEdges(delta, oldnodeset);
edgeset.addAll(newedges);
- for(Edge e:newedges) {
+ for(Edge e : newedges) {
//Add new edges that start from old node to newDelta
AllocNode src=e.src;
if (!newDelta.heapedgeadd.containsKey(src)) {
Set<FlatSESEEnterNode> seseCallers=OoOJava?taskAnalysis.getTransitiveExecutingRBlocks(fcall):null;
//Check if the new nodes allow us to insert a new edge
- for(AllocNode node:nodeset) {
+ for(AllocNode node : nodeset) {
if (graph.callNewEdges.containsKey(node)) {
- for(Iterator<Edge> eit=graph.callNewEdges.get(node).iterator();eit.hasNext();) {
+ for(Iterator<Edge> eit=graph.callNewEdges.get(node).iterator(); eit.hasNext(); ) {
Edge e=eit.next();
if ((graph.callNodeAges.contains(e.src)||graph.reachNode.contains(e.src))&&
- (graph.callNodeAges.contains(e.dst)||graph.reachNode.contains(e.dst))) {
- Edge edgetoadd=e.copy();//we need our own copy to modify below
+ (graph.callNodeAges.contains(e.dst)||graph.reachNode.contains(e.dst))) {
+ Edge edgetoadd=e.copy(); //we need our own copy to modify below
eit.remove();
if (seseCallers!=null)
edgetoadd.taintModify(seseCallers);
}
}
- for(Edge e:edgeset) {
+ for(Edge e : edgeset) {
//See if these edges would allow an old edge to be added
if (graph.callOldEdges.containsKey(e)) {
- for(Edge adde:graph.callOldEdges.get(e)) {
+ for(Edge adde : graph.callOldEdges.get(e)) {
Edge ecopy=adde.copy();
ecopy.statuspredicate=e.statuspredicate;
mergeCallEdge(graph, delta, ecopy);
void processSumVarEdgeSet(HashMap<TempDescriptor, MySet<Edge>> map, Delta delta, Graph graph) {
MySet<Edge> edgestoadd=new MySet<Edge>();
MySet<Edge> edgestoremove=new MySet<Edge>();
- for(Iterator<Map.Entry<TempDescriptor, MySet<Edge>>> eit=map.entrySet().iterator();eit.hasNext();) {
+ for(Iterator<Map.Entry<TempDescriptor, MySet<Edge>>> eit=map.entrySet().iterator(); eit.hasNext(); ) {
Map.Entry<TempDescriptor, MySet<Edge>> entry=eit.next();
MySet<Edge> edgeset=entry.getValue();
- for(Edge e:edgeset) {
+ for(Edge e : edgeset) {
Edge copy=e.copy();
boolean rewrite=false;
if (copy.dst!=null&&graph.callNodeAges.contains(copy.dst)) {
}
}
}
- for(Edge e:edgestoremove) {
+ for(Edge e : edgestoremove) {
if (!graph.callerEdges.contains(e))
delta.removeVarEdge(e);
}
- for(Edge e:edgestoadd) {
+ for(Edge e : edgestoadd) {
delta.addVarEdge(e);
}
}
-
+
public Alloc getAllocationSiteFromFlatNew(FlatNew node) {
return allocFactory.getAllocNode(node, false).getAllocSite();
}
-
+
void processSumHeapEdgeSet(HashMap<AllocNode, MySet<Edge>> map, Delta delta, Graph graph) {
MySet<Edge> edgestoadd=new MySet<Edge>();
MySet<Edge> edgestoremove=new MySet<Edge>();
- for(Iterator<Map.Entry<AllocNode, MySet<Edge>>> eit=map.entrySet().iterator();eit.hasNext();) {
+ for(Iterator<Map.Entry<AllocNode, MySet<Edge>>> eit=map.entrySet().iterator(); eit.hasNext(); ) {
Map.Entry<AllocNode, MySet<Edge>> entry=eit.next();
AllocNode node=entry.getKey();
MySet<Edge> edgeset=entry.getValue();
- for(Edge e:edgeset) {
+ for(Edge e : edgeset) {
Edge copy=e.copy();
boolean rewrite=false;
if (copy.src!=null&&graph.callNodeAges.contains(copy.src)) {
}
}
}
- for(Edge e:edgestoremove) {
+ for(Edge e : edgestoremove) {
if (!graph.callerEdges.contains(e))
delta.removeHeapEdge(e);
}
- for(Edge e:edgestoadd) {
+ for(Edge e : edgestoadd) {
delta.addHeapEdge(e);
}
}
//Handle external edges
void processCallExternal(Graph graph, Delta newDelta, MySet<Edge> externalEdgeSet) {
//Add external edges in
- for(Edge e:externalEdgeSet) {
+ for(Edge e : externalEdgeSet) {
//First did we age the source
Edge newedge=e.copy();
if (newedge.src!=null&&!e.src.isSummary()&&graph.callNodeAges.contains(e.src)) {
FlatCall fcall=(FlatCall)nodes.get(ppoint.getIndex());
Graph graph=graphMap.get(fcall);
Graph oldgraph=(ppoint.getIndex()==0)?
- bbgraphMap.get(bblock):
- graphMap.get(nodes.get(ppoint.getIndex()-1));
+ bbgraphMap.get(bblock):
+ graphMap.get(nodes.get(ppoint.getIndex()-1));
Set<FlatSESEEnterNode> seseCallers=OoOJava?taskAnalysis.getTransitiveExecutingRBlocks(fcall):null;
//Age outside nodes if necessary
- for(Iterator<AllocNode> nodeit=delta.addNodeAges.iterator();nodeit.hasNext();) {
+ for(Iterator<AllocNode> nodeit=delta.addNodeAges.iterator(); nodeit.hasNext(); ) {
AllocNode node=nodeit.next();
if (!graph.callNodeAges.contains(node)) {
graph.callNodeAges.add(node);
AllocNode summaryAdd=null;
if (!graph.reachNode.contains(node)&&!node.isSummary()) {
/* Need to age node in existing graph*/
-
+
AllocNode summaryNode=allocFactory.getAllocNode(node, true);
-
+
if (!graph.callNodeAges.contains(summaryNode)) {
graph.callNodeAges.add(summaryNode);
newDelta.addNodeAges.add(summaryNode);
}
do {
if (graph.callNewEdges.containsKey(node)) {
- for(Iterator<Edge> eit=graph.callNewEdges.get(node).iterator();eit.hasNext();) {
+ for(Iterator<Edge> eit=graph.callNewEdges.get(node).iterator(); eit.hasNext(); ) {
Edge e=eit.next();
if ((graph.callNodeAges.contains(e.src)||graph.reachNode.contains(e.src))&&
- (graph.callNodeAges.contains(e.dst)||graph.reachNode.contains(e.dst))) {
- Edge edgetoadd=e.copy();//we need our own copy to modify below
+ (graph.callNodeAges.contains(e.dst)||graph.reachNode.contains(e.dst))) {
+ Edge edgetoadd=e.copy(); //we need our own copy to modify below
eit.remove();
if (seseCallers!=null)
edgetoadd.taintModify(seseCallers);
}
//Add heap edges in
- for(Map.Entry<AllocNode, MySet<Edge>> entry:delta.heapedgeadd.entrySet()) {
- for(Edge e:entry.getValue()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : delta.heapedgeadd.entrySet()) {
+ for(Edge e : entry.getValue()) {
boolean addedge=false;
Edge edgetoadd=null;
if (e.statuspredicate==Edge.NEW) {
if ((graph.callNodeAges.contains(e.src)||graph.reachNode.contains(e.src))&&
(graph.callNodeAges.contains(e.dst)||graph.reachNode.contains(e.dst))) {
- edgetoadd=e.copy();//we need our own copy to modify below
+ edgetoadd=e.copy(); //we need our own copy to modify below
} else {
graph.addCallEdge(e);
}
Edge[] edgeArray=e.makeStatus(allocFactory);
int statuspredicate=0;
- for(int i=0;i<edgeArray.length;i++) {
+ for(int i=0; i<edgeArray.length; i++) {
Edge origEdgeKey=edgeArray[i];
if (graph.reachEdge.contains(origEdgeKey)) {
Edge origEdge=graph.reachEdge.get(origEdgeKey);
mergeCallEdge(graph, newDelta, edgetoadd);
}
}
-
+
processCallExternal(graph, newDelta, graph.externalEdgeSet);
//Add edge for return value
if (fcall.getReturnTemp()!=null) {
MySet<Edge> returnedge=delta.varedgeadd.get(returntmp);
if (returnedge!=null)
- for(Edge e:returnedge) {
+ for(Edge e : returnedge) {
//skip the edge if types don't allow it...
if (!typeUtil.isSuperorType(fcall.getReturnTemp().getType(), e.dst.getType()))
continue;
applyDiffs(graph, newDelta);
return newDelta;
}
-
+
public void mergeEdge(Graph graph, Delta newDelta, Edge edgetoadd) {
if (edgetoadd!=null) {
Edge match=graph.getMatch(edgetoadd);
newDelta.addEdgeClear(edgetoadd);
Edge match=graph.getMatch(edgetoadd);
-
+
if (match==null||!match.subsumes(edgetoadd)) {
Edge mergededge=edgetoadd.merge(match);
newDelta.addEdge(mergededge);
//Handle outgoing heap edges
MySet<Edge> edgeset=graph.getEdges(singleNode);
- for(Edge e:edgeset) {
+ for(Edge e : edgeset) {
Edge rewrite=e.rewrite(singleNode, summaryNode);
//Remove old edge
newDelta.removeHeapEdge(e);
mergeCallEdge(graph, newDelta, rewrite);
}
-
+
//Handle incoming edges
MySet<Edge> backedges=graph.getBackEdges(singleNode);
- for(Edge e:backedges) {
+ for(Edge e : backedges) {
if (e.dst==singleNode) {
//Need to get original edge so that predicate will be correct
Edge match=graph.getMatch(e);
graph.backMap=new HashMap<AllocNode, MySet<Edge>>();
if (graph.parent.backMap==null) {
graph.parent.backMap=new HashMap<AllocNode, MySet<Edge>>();
- for(Map.Entry<AllocNode, MySet<Edge>> entry:graph.nodeMap.entrySet()) {
- for(Edge e:entry.getValue()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : graph.nodeMap.entrySet()) {
+ for(Edge e : entry.getValue()) {
if (!graph.parent.backMap.containsKey(e.dst))
graph.parent.backMap.put(e.dst, new MySet<Edge>());
graph.parent.backMap.get(e.dst).add(e);
}
}
- for(Map.Entry<TempDescriptor, MySet<Edge>> entry:graph.varMap.entrySet()) {
- for(Edge e:entry.getValue()) {
+ for(Map.Entry<TempDescriptor, MySet<Edge>> entry : graph.varMap.entrySet()) {
+ for(Edge e : entry.getValue()) {
if (!graph.parent.backMap.containsKey(e.dst))
graph.parent.backMap.put(e.dst, new MySet<Edge>());
graph.parent.backMap.get(e.dst).add(e);
}
//Add hidden base edges
- for(Map.Entry<AllocNode, MySet<Edge>> e: delta.baseheapedge.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> e : delta.baseheapedge.entrySet()) {
AllocNode node=e.getKey();
MySet<Edge> edges=e.getValue();
if (graph.nodeMap.containsKey(node)) {
}
//Remove heap edges
- for(Map.Entry<AllocNode, MySet<Edge>> e: delta.heapedgeremove.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> e : delta.heapedgeremove.entrySet()) {
AllocNode node=e.getKey();
MySet<Edge> edgestoremove=e.getValue();
if (graph.nodeMap.containsKey(node)) {
}
//Add heap edges
- for(Map.Entry<AllocNode, MySet<Edge>> e: delta.heapedgeadd.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> e : delta.heapedgeadd.entrySet()) {
AllocNode node=e.getKey();
MySet<Edge> edgestoadd=e.getValue();
- //If we have not done a subtract, then
+ //If we have not done a subtract, then
if (!graph.nodeMap.containsKey(node)) {
//Copy the parent entry
if (graph.parent.nodeMap.containsKey(node))
}
Edge.mergeEdgesInto(graph.nodeMap.get(node),edgestoadd);
if (genbackwards) {
- for(Edge eadd:edgestoadd) {
+ for(Edge eadd : edgestoadd) {
if (!graph.backMap.containsKey(eadd.dst))
graph.backMap.put(eadd.dst, new MySet<Edge>());
graph.backMap.get(eadd.dst).add(eadd);
}
//Remove var edges
- for(Map.Entry<TempDescriptor, MySet<Edge>> e: delta.varedgeremove.entrySet()) {
+ for(Map.Entry<TempDescriptor, MySet<Edge>> e : delta.varedgeremove.entrySet()) {
TempDescriptor tmp=e.getKey();
MySet<Edge> edgestoremove=e.getValue();
}
//Add var edges
- for(Map.Entry<TempDescriptor, MySet<Edge>> e: delta.varedgeadd.entrySet()) {
+ for(Map.Entry<TempDescriptor, MySet<Edge>> e : delta.varedgeadd.entrySet()) {
TempDescriptor tmp=e.getKey();
MySet<Edge> edgestoadd=e.getValue();
if (graph.varMap.containsKey(tmp)) {
graph.varMap.put(tmp, new MySet<Edge>(graph.parent.varMap.get(tmp)));
Edge.mergeEdgesInto(graph.varMap.get(tmp), edgestoadd);
} else
- graph.varMap.put(tmp, (MySet<Edge>) edgestoadd.clone());
+ graph.varMap.put(tmp, (MySet<Edge>)edgestoadd.clone());
if (genbackwards) {
- for(Edge eadd:edgestoadd) {
+ for(Edge eadd : edgestoadd) {
if (!graph.backMap.containsKey(eadd.dst))
graph.backMap.put(eadd.dst, new MySet<Edge>());
graph.backMap.get(eadd.dst).add(eadd);
}
//Add node additions
- for(AllocNode node:delta.addNodeAges) {
+ for(AllocNode node : delta.addNodeAges) {
graph.nodeAges.add(node);
}
-
- for(Map.Entry<AllocNode, Boolean> nodeentry:delta.addOldNodes.entrySet()) {
+
+ for(Map.Entry<AllocNode, Boolean> nodeentry : delta.addOldNodes.entrySet()) {
AllocNode node=nodeentry.getKey();
Boolean ispresent=nodeentry.getValue();
graph.oldNodes.put(node, ispresent);
FlatSetFieldNode n=(FlatSetFieldNode)node;
return !accessible.isAccessible(n, n.getDst());
}
+
case FKind.FlatSetElementNode: {
FlatSetElementNode n=(FlatSetElementNode)node;
return !accessible.isAccessible(n, n.getDst());
}
+
case FKind.FlatFieldNode: {
FlatFieldNode n=(FlatFieldNode)node;
return !accessible.isAccessible(n, n.getSrc());
}
+
case FKind.FlatElementNode: {
FlatElementNode n=(FlatElementNode)node;
return !accessible.isAccessible(n, n.getSrc());
Delta processCopyNode(FlatNode node, Delta delta, Graph graph) {
TempDescriptor src;
TempDescriptor dst;
-
+
if (node.kind()==FKind.FlatOpNode) {
FlatOpNode fon=(FlatOpNode) node;
src=fon.getLeft();
/* Compute the union, and then the set of edges */
MySet<Edge> edgesToAdd=GraphManip.genEdges(dst, newSrcEdges);
-
+
/* Compute set of edges to remove */
- MySet<Edge> edgesToRemove=GraphManip.getDiffEdges(delta, dst);
+ MySet<Edge> edgesToRemove=GraphManip.getDiffEdges(delta, dst);
/* Update diff */
updateVarDelta(graph, delta, dst, edgesToAdd, edgesToRemove);
/* Compute the union, and then the set of edges */
Edge.mergeEdgesInto(edgesToAdd, newfdedges);
-
+
/* Compute set of edges to remove */
- MySet<Edge> edgesToRemove=GraphManip.getDiffEdges(delta, dst);
+ MySet<Edge> edgesToRemove=GraphManip.getDiffEdges(delta, dst);
+
-
/* Update diff */
updateVarDelta(graph, delta, dst, edgesToAdd, edgesToRemove);
applyDiffs(graph, delta);
MySet<Edge> edgeRemove=delta.varedgeremove.get(tmp);
MySet<Edge> existingEdges=graph.getEdges(tmp);
if (edgestoRemove!=null)
- for(Edge e: edgestoRemove) {
+ for(Edge e : edgestoRemove) {
//remove edge from delta
if (edgeAdd!=null)
edgeAdd.remove(e);
if (existingEdges.contains(e))
delta.removeVarEdge(e);
}
- for(Edge e: edgestoAdd) {
+ for(Edge e : edgestoAdd) {
//Remove the edge from the remove set
if (edgeRemove!=null)
edgeRemove.remove(e);
void updateHeapDelta(Graph graph, Delta delta, MySet<Edge> edgestoAdd, MySet<Edge> edgestoRemove) {
if (edgestoRemove!=null)
- for(Edge e: edgestoRemove) {
+ for(Edge e : edgestoRemove) {
AllocNode src=e.src;
MySet<Edge> edgeAdd=delta.heapedgeadd.get(src);
MySet<Edge> existingEdges=graph.getEdges(src);
}
}
if (edgestoAdd!=null)
- for(Edge e: edgestoAdd) {
+ for(Edge e : edgestoAdd) {
AllocNode src=e.src;
MySet<Edge> edgeRemove=delta.heapedgeremove.get(src);
MySet<Edge> existingEdges=graph.getEdges(src);
applyDiffs(graph, delta);
return delta;
}
-
+
Delta processNewNode(FlatNew node, Delta delta, Graph graph) {
AllocNode summary=allocFactory.getAllocNode(node, true);
AllocNode single=allocFactory.getAllocNode(node, false);
//Remove the old edges
MySet<Edge> oldedges=graph.getEdges(tmp);
if (!oldedges.isEmpty())
- delta.varedgeremove.put(tmp, (MySet<Edge>) oldedges);
+ delta.varedgeremove.put(tmp, (MySet<Edge>)oldedges);
//Note that we create a single node
delta.addNodeAges.add(single);
//Kill the old node
}
} else {
/* 1. Fix up the variable edge additions */
- for(Iterator<Map.Entry<TempDescriptor, MySet<Edge>>> entryIt=delta.varedgeadd.entrySet().iterator();entryIt.hasNext();) {
+ for(Iterator<Map.Entry<TempDescriptor, MySet<Edge>>> entryIt=delta.varedgeadd.entrySet().iterator(); entryIt.hasNext(); ) {
Map.Entry<TempDescriptor, MySet<Edge>> entry=entryIt.next();
if (entry.getKey()==tmp) {
/* 2. Fix up the base variable edges */
- for(Iterator<Map.Entry<TempDescriptor, MySet<Edge>>> entryIt=delta.basevaredge.entrySet().iterator();entryIt.hasNext();) {
+ for(Iterator<Map.Entry<TempDescriptor, MySet<Edge>>> entryIt=delta.basevaredge.entrySet().iterator(); entryIt.hasNext(); ) {
Map.Entry<TempDescriptor, MySet<Edge>> entry=entryIt.next();
TempDescriptor entrytmp=entry.getKey();
if (entrytmp==tmp) {
/* Check is this is the tmp we overwrite, if so add to remove set */
Util.relationUpdate(delta.varedgeremove, tmp, null, entry.getValue());
} else if (graph.varMap.containsKey(entrytmp)) {
- /* Check if the target of the edge is changed */
+ /* Check if the target of the edge is changed */
MySet<Edge> newset=(MySet<Edge>)entry.getValue().clone();
MySet<Edge> removeset=shrinkSet(newset, graph.varMap.get(entrytmp), single, summary);
Util.relationUpdate(delta.varedgeremove, entrytmp, newset, removeset);
Util.relationUpdate(delta.varedgeadd, entrytmp, null, newset);
} else {
- /* Check if the target of the edge is changed */
+ /* Check if the target of the edge is changed */
MySet<Edge> newset=(MySet<Edge>)entry.getValue().clone();
MySet<Edge> removeset=shrinkSet(newset, graph.parent.varMap.get(entrytmp), single, summary);
Util.relationUpdate(delta.varedgeremove, entrytmp, newset, removeset);
/* 3. Fix up heap edge additions */
HashMap<AllocNode, MySet<Edge>> addheapedge=new HashMap<AllocNode, MySet<Edge>>();
- for(Iterator<Map.Entry<AllocNode, MySet<Edge>>> entryIt=delta.heapedgeadd.entrySet().iterator();entryIt.hasNext();) {
+ for(Iterator<Map.Entry<AllocNode, MySet<Edge>>> entryIt=delta.heapedgeadd.entrySet().iterator(); entryIt.hasNext(); ) {
Map.Entry<AllocNode, MySet<Edge>> entry=entryIt.next();
MySet<Edge> edgeset=entry.getValue();
AllocNode allocnode=entry.getKey();
summarizeSet(edgeset, graph.nodeMap.get(allocnode), single, summary);
}
}
-
+
/* Merge in diffs */
- for(Map.Entry<AllocNode, MySet<Edge>> entry:addheapedge.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : addheapedge.entrySet()) {
AllocNode allocnode=entry.getKey();
Util.relationUpdate(delta.heapedgeadd, allocnode, null, entry.getValue());
}
/* 4. Fix up the base heap edges */
- for(Iterator<Map.Entry<AllocNode, MySet<Edge>>> entryIt=delta.baseheapedge.entrySet().iterator();entryIt.hasNext();) {
+ for(Iterator<Map.Entry<AllocNode, MySet<Edge>>> entryIt=delta.baseheapedge.entrySet().iterator(); entryIt.hasNext(); ) {
Map.Entry<AllocNode, MySet<Edge>> entry=entryIt.next();
MySet<Edge> edgeset=entry.getValue();
AllocNode allocnode=entry.getKey();
}
AllocNode addnode=(allocnode==single)?summary:allocnode;
- MySet<Edge> newset=(MySet<Edge>) edgeset.clone();
+ MySet<Edge> newset=(MySet<Edge>)edgeset.clone();
MySet<Edge> removeset=shrinkSet(newset, graph.nodeMap.get(addnode), single, summary);
Util.relationUpdate(delta.heapedgeadd, addnode, null, newset);
Util.relationUpdate(delta.heapedgeremove, allocnode, null, removeset);
if (delta.addOldNodes.containsKey(single)||delta.baseOldNodes.containsKey(single)) {
delta.addOldNodes.put(single, Boolean.FALSE);
}
-
+
}
//Apply incoming diffs to graph
- applyDiffs(graph, delta);
+ applyDiffs(graph, delta);
return delta;
}
void summarizeSet(MySet<Edge> edgeset, MySet<Edge> oldedgeset, AllocNode oldnode, AllocNode sumnode) {
MySet<Edge> newSet=null;
- for(Iterator<Edge> edgeit=edgeset.iterator();edgeit.hasNext();) {
+ for(Iterator<Edge> edgeit=edgeset.iterator(); edgeit.hasNext(); ) {
Edge e=edgeit.next();
if (e.dst==oldnode||e.src==oldnode) {
if (newSet==null) {
MySet<Edge> shrinkSet(MySet<Edge> edgeset, MySet<Edge> oldedgeset, AllocNode oldnode, AllocNode newnode) {
MySet<Edge> newSet=null;
MySet<Edge> removeSet=null;
- for(Iterator<Edge> edgeit=edgeset.iterator();edgeit.hasNext();) {
+ for(Iterator<Edge> edgeit=edgeset.iterator(); edgeit.hasNext(); ) {
Edge e=edgeit.next();
edgeit.remove();
if (e.dst==oldnode||e.src==oldnode) {
if (newSet!=null)
edgeset.addAll(newSet);
return removeSet;
- }
+ }
/* This function returns a completely new Delta... It is safe to
* modify this */
Delta newdelta=new Delta(null, true);
//Add in heap edges and throw away original diff
- for(Map.Entry<AllocNode, MySet<Edge>> entry:delta.heapedgeadd.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> entry : delta.heapedgeadd.entrySet()) {
graph.nodeMap.put(entry.getKey(), new MySet<Edge>(entry.getValue()));
}
//Add in var edges and throw away original diff
Set<TempDescriptor> livetemps=bblivetemps.get(block);
- for(Map.Entry<TempDescriptor, MySet<Edge>> entry:delta.varedgeadd.entrySet()) {
+ for(Map.Entry<TempDescriptor, MySet<Edge>> entry : delta.varedgeadd.entrySet()) {
if (livetemps.contains(entry.getKey()))
graph.varMap.put(entry.getKey(), new MySet<Edge>(entry.getValue()));
}
//Record that this is initial set...
graph.nodeAges.addAll(delta.addNodeAges);
//Add old nodes
- for(Map.Entry<AllocNode, Boolean> oldentry:delta.addOldNodes.entrySet()) {
+ for(Map.Entry<AllocNode, Boolean> oldentry : delta.addOldNodes.entrySet()) {
if (oldentry.getValue().booleanValue()) {
graph.oldNodes.put(oldentry.getKey(), Boolean.TRUE);
}
void mergeHeapEdges(Graph graph, Delta delta, Delta newdelta) {
//Merge in edges
- for(Map.Entry<AllocNode, MySet<Edge>> heapedge:delta.heapedgeadd.entrySet()) {
+ for(Map.Entry<AllocNode, MySet<Edge>> heapedge : delta.heapedgeadd.entrySet()) {
AllocNode nsrc=heapedge.getKey();
MySet<Edge> edges=heapedge.getValue();
if (graph.backMap!=null) {
- for(Edge e:edges) {
+ for(Edge e : edges) {
if (!graph.backMap.containsKey(e.dst))
graph.backMap.put(e.dst, new MySet<Edge>());
graph.backMap.get(e.dst).add(e);
}
MySet<Edge> dstedges=graph.nodeMap.get(nsrc);
MySet<Edge> diffedges=new MySet<Edge>();
- for(Edge e:edges) {
+ for(Edge e : edges) {
if (!dstedges.contains(e)) {
//We have a new edge
diffedges.add(e);
void mergeVarEdges(Graph graph, Delta delta, Delta newdelta, BBlock block) {
//Merge in edges
Set<TempDescriptor> livetemps=bblivetemps.get(block);
-
- for(Map.Entry<TempDescriptor, MySet<Edge>> varedge:delta.varedgeadd.entrySet()) {
+
+ for(Map.Entry<TempDescriptor, MySet<Edge>> varedge : delta.varedgeadd.entrySet()) {
TempDescriptor tmpsrc=varedge.getKey();
if (livetemps.contains(tmpsrc)) {
MySet<Edge> edges=varedge.getValue();
if (graph.backMap!=null) {
- for(Edge e:edges) {
+ for(Edge e : edges) {
if (!graph.backMap.containsKey(e.dst))
graph.backMap.put(e.dst, new MySet<Edge>());
graph.backMap.get(e.dst).add(e);
}
}
-
+
if (!graph.varMap.containsKey(tmpsrc)) {
graph.varMap.put(tmpsrc, new MySet<Edge>());
}
MySet<Edge> dstedges=graph.varMap.get(tmpsrc);
MySet<Edge> diffedges=new MySet<Edge>();
- for(Edge e:edges) {
+ for(Edge e : edges) {
if (!dstedges.contains(e)) {
//We have a new edge
diffedges.add(e);
void mergeAges(Graph graph, Delta delta, Delta newDelta) {
//Merge in edges
- for(AllocNode node:delta.addNodeAges) {
+ for(AllocNode node : delta.addNodeAges) {
if (!graph.nodeAges.contains(node)) {
graph.nodeAges.add(node);
newDelta.baseNodeAges.add(node);
}
}
- for(Map.Entry<AllocNode, Boolean> oldentry:delta.addOldNodes.entrySet()) {
+ for(Map.Entry<AllocNode, Boolean> oldentry : delta.addOldNodes.entrySet()) {
AllocNode node=oldentry.getKey();
boolean ispresent=oldentry.getValue().booleanValue();
if (ispresent&&!graph.oldNodes.containsKey(node)) {
public class Util {
public static <T> MySet<T> setSubtract(Set <T> orig, Set<T> sub) {
MySet<T> newset=new MySet<T>();
- for(T e: orig) {
+ for(T e : orig) {
if (!sub.contains(e))
newset.add(e);
}
map.get(key).addAll(toadd);
} else {
if (toadd!=null)
- map.put(key, (MySet<V>) toadd.clone());
+ map.put(key, (MySet<V>)toadd.clone());
}
}
return label;
} else {
ListIterator lit = getTempDesc().listIterator();
- for(; lit.hasNext();) {
+ for(; lit.hasNext(); ) {
TempDescriptor td = (TempDescriptor) lit.next();
label += td.toString()+"+";
}
Set mapping = mappair.entrySet();
Iterator it = mapping.iterator();
label = "Mappings are: ";
- for(; it.hasNext();) {
+ for(; it.hasNext(); ) {
Object o = it.next();
label += o.toString() + " , ";
}
/** This function starts the prefetch analysis */
private void DoPrefetch() {
- for (Iterator methodit=locality.getMethods().iterator(); methodit.hasNext();) {
+ for (Iterator methodit=locality.getMethods().iterator(); methodit.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor)methodit.next();
if (state.excprefetch.contains(md.getClassMethodName()))
continue; //Skip this method
if (oldPrefetchSet.size()!=newPrefetchSet.size())
return true;
- for(Enumeration e = newPrefetchSet.keys(); e.hasMoreElements();) {
+ for(Enumeration e = newPrefetchSet.keys(); e.hasMoreElements(); ) {
PrefetchPair pp = (PrefetchPair) e.nextElement();
double newprob = newPrefetchSet.get(pp).doubleValue();
if (!oldPrefetchSet.containsKey(pp))
/* Get each prefetch pair of the child and match it with the destination temp descriptor of curr FlatFieldNode */
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
if (childpp.base == currffn.getDst() && (childpp.getDesc()!= null)) {
if (currffn.getField().getType().isPtr()) {
}
}
- for(Iterator<PrefetchPair> it=tocompare.keySet().iterator(); it.hasNext();) {
+ for(Iterator<PrefetchPair> it=tocompare.keySet().iterator(); it.hasNext(); ) {
PrefetchPair pp=it.next();
if (tocompare.get(pp)<ANALYSIS_THRESHOLD_PROB)
it.remove();
/* Get each prefetch pair of the child and match it with the destination temp descriptor of curr FlatFieldNode */
PrefetchPair currpp = null;
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
if (childpp.base == currfen.getDst() && (childpp.getDesc()!= null)) {
if (currfen.getDst().getType().isPtr()) {
}
/* Check if curr prefetch set and the child prefetch set have same prefetch pairs
* if so calculate the new probability */
- for(Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for(Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
- for(Enumeration e = currcopy.keys(); e.hasMoreElements();) {
+ for(Enumeration e = currcopy.keys(); e.hasMoreElements(); ) {
currpp = (PrefetchPair) e.nextElement();
if(currpp.equals(childpp)) {
pm.addPair(childpp, currpp);
}
/* Merge child prefetch pairs */
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
tocompare.put(childpp, child_prefetch_set_copy.get(childpp).doubleValue());
pm.addPair(childpp, childpp);
}
/* Merge curr prefetch pairs */
- for (Enumeration e = currcopy.keys(); e.hasMoreElements();) {
+ for (Enumeration e = currcopy.keys(); e.hasMoreElements(); ) {
currpp = (PrefetchPair) e.nextElement();
tocompare.put(currpp, currcopy.get(currpp).doubleValue());
currcopy.remove(currpp);
FlatSetFieldNode currfsfn = (FlatSetFieldNode) curr;
PairMap pm = new PairMap();
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
if(childpp.base == currfsfn.getDst()) {
int size = childpp.desc.size();
}
/* Merge child prefetch pairs */
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
tocompare.put(childpp, child_prefetch_set_copy.get(childpp).doubleValue());
pm.addPair(childpp, childpp);
FlatSetElementNode currfsen = (FlatSetElementNode) curr;
PairMap pm = new PairMap();
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
if (childpp.base == currfsen.getDst()) {
int sizedesc = childpp.desc.size();
}
}
/* Merge child prefetch pairs */
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
tocompare.put(childpp, child_prefetch_set_copy.get(childpp).doubleValue());
pm.addPair(childpp, childpp);
PairMap pm = new PairMap();
if(currfopn.getOp().getOp() == Operation.ASSIGN) {
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
PrefetchPair copyofchildpp = (PrefetchPair) childpp.clone();
}
//case i = i+z with child prefetch set a[i].x
} else if(currfopn.getRight()!=null && (currfopn.getOp().getOp() == Operation.ADD)) {
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
PrefetchPair copyofchildpp = (PrefetchPair) childpp.clone();
}
}
} else if(currfopn.getRight()!=null && (currfopn.getOp().getOp() == Operation.SUB)) {
- for(Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for(Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
if(childpp.containsTemp(currfopn.getDest())) {
child_prefetch_set_copy.remove(childpp);
}
/* Merge child prefetch pairs */
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
tocompare.put(childpp, child_prefetch_set_copy.get(childpp).doubleValue());
pm.addPair(childpp, childpp);
PairMap pm = new PairMap();
if(currfln.getType().isIntegerType()) {
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
PrefetchPair copyofchildpp = (PrefetchPair) childpp.clone();
if(copyofchildpp.containsTemp(currfln.getDst())) {
ArrayList<Descriptor> copychilddesc = (ArrayList<Descriptor>)copyofchildpp.getDesc();
int sizetempdesc = copychilddesc.size();
- for(ListIterator it = copychilddesc.listIterator(); it.hasNext();) {
+ for(ListIterator it = copychilddesc.listIterator(); it.hasNext(); ) {
Object o = it.next();
if(o instanceof IndexDescriptor) {
ArrayList<TempDescriptor> td = (ArrayList<TempDescriptor>)((IndexDescriptor)o).tddesc;
}
/* Merge child prefetch pairs */
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
tocompare.put(childpp, child_prefetch_set_copy.get(childpp).doubleValue());
pm.addPair(childpp, childpp);
PairMap pm = new PairMap();
/* Merge child prefetch pairs */
- for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements();) {
+ for (Enumeration ecld = child_prefetch_set_copy.keys(); ecld.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) ecld.nextElement();
tocompare.put(childpp, child_prefetch_set_copy.get(childpp).doubleValue());
pm.addPair(childpp, childpp);
allpp.addAll(truechild.keySet());
allpp.addAll(falsechild.keySet());
- for(Iterator<PrefetchPair> ppit=allpp.iterator(); ppit.hasNext();) {
+ for(Iterator<PrefetchPair> ppit=allpp.iterator(); ppit.hasNext(); ) {
PrefetchPair pp=ppit.next();
double trueprob=0,falseprob=0;
if (truechild.containsKey(pp))
newprob<falseprob) {
newprob=falseprob;
}
-
+
if(newprob < ANALYSIS_THRESHOLD_PROB) //Skip pp that are below threshold
continue;
-
+
tocompare.put(pp, newprob);
if (truechild.containsKey(pp))
truepm.addPair(pp, pp);
/* Propagate all child nodes */
nexttemp:
- for(Enumeration e = child_prefetch_set_copy.keys(); e.hasMoreElements();) {
+ for(Enumeration e = child_prefetch_set_copy.keys(); e.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) e.nextElement();
TempDescriptor[] writearray=curr.writesTemps();
for(int i=0; i<writearray.length; i++) {
curr.getMethod().getClassMethodName().equals("Barrier.enterBarrier"))) {
/* Propagate all child nodes */
nexttemp:
- for(Enumeration e = child_prefetch_set_copy.keys(); e.hasMoreElements();) {
+ for(Enumeration e = child_prefetch_set_copy.keys(); e.hasMoreElements(); ) {
PrefetchPair childpp = (PrefetchPair) e.nextElement();
TempDescriptor[] writearray=curr.writesTemps();
for(int i=0; i<writearray.length; i++) {
if(prefetch_hash.containsKey(fn)) {
System.out.print("Prefetch" + "(");
Hashtable<PrefetchPair, Double> currhash = (Hashtable) prefetch_hash.get(fn);
- for(Enumeration pphash= currhash.keys(); pphash.hasMoreElements();) {
+ for(Enumeration pphash= currhash.keys(); pphash.hasMoreElements(); ) {
PrefetchPair pp = (PrefetchPair) pphash.nextElement();
double v=currhash.get(pp).doubleValue();
if (v>.2)
- System.out.print(pp.toString() +"-"+v + ", ");
+ System.out.print(pp.toString() +"-"+v + ", ");
}
System.out.println(")");
} else {
* for e.g. if there are 2 prefetch pairs a.b.c.d and a.b.c for a given flatnode
* then this function drops a.b.c from the prefetch set of the flatnode */
private void delSubsetPPairs(Hashtable<FlatNode, HashSet<PrefetchPair>> newprefetchset) {
- for (Enumeration e = newprefetchset.keys(); e.hasMoreElements();) {
+ for (Enumeration e = newprefetchset.keys(); e.hasMoreElements(); ) {
FlatNode fn = (FlatNode) e.nextElement();
Set<PrefetchPair> ppairs = newprefetchset.get(fn);
Set<PrefetchPair> toremove=new HashSet<PrefetchPair>();
- for(Iterator<PrefetchPair> it1=ppairs.iterator(); it1.hasNext();) {
+ for(Iterator<PrefetchPair> it1=ppairs.iterator(); it1.hasNext(); ) {
PrefetchPair pp1=it1.next();
if (toremove.contains(pp1))
continue;
int l1=pp1.desc.size()+1;
- for(Iterator<PrefetchPair> it2=ppairs.iterator(); it2.hasNext();) {
+ for(Iterator<PrefetchPair> it2=ppairs.iterator(); it2.hasNext(); ) {
PrefetchPair pp2=it2.next();
int l2=pp2.desc.size()+1;
if(oldPSet.size() != newPSet.size()) {
return true;
} else {
- for(Iterator it = newPSet.iterator(); it.hasNext();) {
+ for(Iterator it = newPSet.iterator(); it.hasNext(); ) {
if(!oldPSet.contains((PrefetchPair)it.next())) {
return true;
}
if(fn.kind() == FKind.FlatMethod) {
HashSet<PrefetchPair> pset1 = new HashSet<PrefetchPair>();
Hashtable<PrefetchPair, Double> prefetchset = prefetch_hash.get(fn);
- for(Enumeration e = prefetchset.keys(); e.hasMoreElements();) {
+ for(Enumeration e = prefetchset.keys(); e.hasMoreElements(); ) {
PrefetchPair pp = (PrefetchPair) e.nextElement();
/* Apply initial rule */
if(prefetchset.get(pp).doubleValue() >= PREFETCH_THRESHOLD_PROB) {
HashSet<PrefetchPair> newpset = new HashSet<PrefetchPair>();
Hashtable<PrefetchPair, Double> prefetchset = prefetch_hash.get(fn);
Hashtable<FlatNode, PairMap> ppairmaphash = pmap_hash.get(fn);
- for(Enumeration epset = prefetchset.keys(); epset.hasMoreElements();) {
+ for(Enumeration epset = prefetchset.keys(); epset.hasMoreElements(); ) {
PrefetchPair pp = (PrefetchPair) epset.nextElement();
boolean pprobIsGreater = (prefetchset.get(pp).doubleValue() >= PREFETCH_THRESHOLD_PROB);
boolean mapprobIsLess=false;
private void addFlatPrefetchNode(Hashtable<FlatNode, HashSet<PrefetchPair>> newprefetchset) {
/* This modifies the Flat representation graph */
- for(Enumeration e = newprefetchset.keys(); e.hasMoreElements();) {
+ for(Enumeration e = newprefetchset.keys(); e.hasMoreElements(); ) {
FlatNode fn = (FlatNode) e.nextElement();
FlatPrefetchNode fpn = new FlatPrefetchNode();
if(newprefetchset.get(fn).size() > 0) {
if(getDesc() == null)
return label;
ListIterator it=getDesc().listIterator();
- for(; it.hasNext();) {
+ for(; it.hasNext(); ) {
Object o = it.next();
if(o instanceof FieldDescriptor) {
FieldDescriptor fd = (FieldDescriptor) o;
* for a given prefetch pair else returns false*/
public boolean containsTemp(TempDescriptor td) {
ArrayList<Descriptor> desc = (ArrayList<Descriptor>)getDesc();
- for(ListIterator it = desc.listIterator(); it.hasNext();) {
+ for(ListIterator it = desc.listIterator(); it.hasNext(); ) {
Object o = it.next();
if(o instanceof IndexDescriptor) {
ArrayList<TempDescriptor> tdarray = (ArrayList<TempDescriptor>)((IndexDescriptor)o).tddesc;
public PrefetchPair replaceTemp(TempDescriptor td, TempDescriptor[] newtd) {
PrefetchPair npp=(PrefetchPair)clone();
ArrayList<Descriptor> desc = (ArrayList<Descriptor>)npp.getDesc();
- for(ListIterator it = desc.listIterator(); it.hasNext();) {
+ for(ListIterator it = desc.listIterator(); it.hasNext(); ) {
Object currdesc = it.next();
if(currdesc instanceof IndexDescriptor) {
ArrayList<TempDescriptor> tdarray = (ArrayList<TempDescriptor>)((IndexDescriptor)currdesc).tddesc;
import IR.ClassDescriptor;
-public class CompositeLocation extends Location{
+public class CompositeLocation extends Location {
protected NTuple<Location> locTuple;
public void addLocationSet(Set<Location> set) {
- for (Iterator iterator = set.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = set.iterator(); iterator.hasNext(); ) {
Location location = (Location) iterator.next();
locTuple.addElement(location);
}
// need to get more optimization version later
Set<Location> locSet = getBaseLocationSet();
- for (Iterator iterator = locSet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = locSet.iterator(); iterator.hasNext(); ) {
Location location = (Location) iterator.next();
if (location.getClassDescriptor().equals(cd)) {
- return location;
+ return location;
}
}
Map<ClassDescriptor, Location> cd2loc = new Hashtable<ClassDescriptor, Location>();
Set<Location> baseLocSet = getBaseLocationSet();
- for (Iterator iterator = baseLocSet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = baseLocSet.iterator(); iterator.hasNext(); ) {
Location location = (Location) iterator.next();
cd2loc.put(location.getClassDescriptor(), location);
}
Location locElement = locTuple.at(i);
if (locElement instanceof DeltaLocation) {
- // baseLocationSet.addAll(((DeltaLocation)
- // locElement).getDeltaOperandLocationVec());
- baseLocationTuple.addAll(((DeltaLocation) locElement).getBaseLocationTuple());
+ // baseLocationSet.addAll(((DeltaLocation)
+ // locElement).getDeltaOperandLocationVec());
+ baseLocationTuple.addAll(((DeltaLocation) locElement).getBaseLocationTuple());
} else {
- baseLocationTuple.addElement(locElement);
+ baseLocationTuple.addElement(locElement);
}
}
return baseLocationTuple;
Location locElement = locTuple.at(i);
if (locElement instanceof DeltaLocation) {
- // baseLocationSet.addAll(((DeltaLocation)
- // locElement).getDeltaOperandLocationVec());
- baseLocationSet.addAll(((DeltaLocation) locElement).getBaseLocationSet());
+ // baseLocationSet.addAll(((DeltaLocation)
+ // locElement).getDeltaOperandLocationVec());
+ baseLocationSet.addAll(((DeltaLocation) locElement).getBaseLocationSet());
} else {
- baseLocationSet.add(locElement);
+ baseLocationSet.add(locElement);
}
}
return baseLocationSet;
if (locTuple.size() == 1) {
Location locElement = locTuple.at(0);
if (locElement instanceof DeltaLocation) {
- result++;
- result += getNumofDelta((DeltaLocation) locElement);
+ result++;
+ result += getNumofDelta((DeltaLocation) locElement);
}
}
return result;
if (delta.getDeltaOperandLocationVec().size() == 1) {
Location locElement = delta.getDeltaOperandLocationVec().at(0);
if (locElement instanceof DeltaLocation) {
- result++;
- result += getNumofDelta((DeltaLocation) locElement);
+ result++;
+ result += getNumofDelta((DeltaLocation) locElement);
}
}
for (int i = 0; i < tupleSize; i++) {
Location locElement = locTuple.at(i);
if (i != 0) {
- rtr += ",";
+ rtr += ",";
}
rtr += locElement;
}
this.state = state;
this.toanalyze = new HashSet();
this.definitelyWrittenResults =
- new Hashtable<FlatNode, Hashtable<Descriptor, Hashtable<FlatNode, Boolean>>>();
+ new Hashtable<FlatNode, Hashtable<Descriptor, Hashtable<FlatNode, Boolean>>>();
}
public void definitelyWrittenCheck() {
toanalyze.remove(cd);
// if (cd.isClassLibrary()) {
- // doesn't care about class libraries now
+ // doesn't care about class libraries now
// continue;
// }
- for (Iterator method_it = cd.getMethods(); method_it.hasNext();) {
- MethodDescriptor md = (MethodDescriptor) method_it.next();
- FlatMethod fm = state.getMethodFlat(md);
- if (fm != null) {
-
- }
+ for (Iterator method_it = cd.getMethods(); method_it.hasNext(); ) {
+ MethodDescriptor md = (MethodDescriptor) method_it.next();
+ FlatMethod fm = state.getMethodFlat(md);
+ if (fm != null) {
+
+ }
}
}
-
-
- /*
- // creating map
- SymbolTable classtable = state.getClassSymbolTable();
- toanalyze.addAll(classtable.getValueSet());
- toanalyze.addAll(state.getTaskSymbolTable().getValueSet());
- while (!toanalyze.isEmpty()) {
- Object obj = toanalyze.iterator().next();
- ClassDescriptor cd = (ClassDescriptor) obj;
- toanalyze.remove(cd);
- if (cd.isClassLibrary()) {
+
+ /*
+ // creating map
+ SymbolTable classtable = state.getClassSymbolTable();
+ toanalyze.addAll(classtable.getValueSet());
+ toanalyze.addAll(state.getTaskSymbolTable().getValueSet());
+ while (!toanalyze.isEmpty()) {
+ Object obj = toanalyze.iterator().next();
+ ClassDescriptor cd = (ClassDescriptor) obj;
+ toanalyze.remove(cd);
+
+ if (cd.isClassLibrary()) {
// doesn't care about class libraries now
continue;
- }
- for (Iterator method_it = cd.getMethods(); method_it.hasNext();) {
+ }
+ for (Iterator method_it = cd.getMethods(); method_it.hasNext();) {
MethodDescriptor md = (MethodDescriptor) method_it.next();
FlatMethod fm = state.getMethodFlat(md);
if (fm != null) {
}
}
- }
- }
-
- // check if there is a read statement with flag=TRUE
- toanalyze.addAll(classtable.getValueSet());
- toanalyze.addAll(state.getTaskSymbolTable().getValueSet());
- while (!toanalyze.isEmpty()) {
- Object obj = toanalyze.iterator().next();
- ClassDescriptor cd = (ClassDescriptor) obj;
- toanalyze.remove(cd);
- if (cd.isClassLibrary()) {
+ }
+ }
+
+ // check if there is a read statement with flag=TRUE
+ toanalyze.addAll(classtable.getValueSet());
+ toanalyze.addAll(state.getTaskSymbolTable().getValueSet());
+ while (!toanalyze.isEmpty()) {
+ Object obj = toanalyze.iterator().next();
+ ClassDescriptor cd = (ClassDescriptor) obj;
+ toanalyze.remove(cd);
+ if (cd.isClassLibrary()) {
// doesn't care about class libraries now
continue;
- }
- for (Iterator method_it = cd.getMethods(); method_it.hasNext();) {
+ }
+ for (Iterator method_it = cd.getMethods(); method_it.hasNext();) {
MethodDescriptor md = (MethodDescriptor) method_it.next();
FlatMethod fm = state.getMethodFlat(md);
try {
System.out.println("Error in " + md);
throw e;
}
- }
- }
- */
+ }
+ }
+ */
}
-
-
+
+
private void checkMethodBody(FlatMethod fm) {
// if a new result, schedule forward nodes for analysis
for (int i = 0; i < fn.numNext(); i++) {
- FlatNode nn = fn.getNext(i);
- if (!visited.contains(nn)) {
- flatNodesToVisit.add(nn);
- }
+ FlatNode nn = fn.getNext(i);
+ if (!visited.contains(nn)) {
+ flatNodesToVisit.add(nn);
+ }
}
}
FlatOpNode fon = (FlatOpNode) fn;
if (fon.getOp().getOp() == Operation.ASSIGN) {
- lhs = fon.getDest();
- rhs = fon.getLeft();
- // read(rhs)
- Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> map = definitelyWrittenResults.get(fn);
- if (map != null) {
- if (map.get(rhs).get(fn).booleanValue()) {
- // throw new Error("variable " + rhs
- // +
- // " was not overwritten in-between the same read statement by the out-most loop.");
- }
- }
+ lhs = fon.getDest();
+ rhs = fon.getLeft();
+ // read(rhs)
+ Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> map = definitelyWrittenResults.get(fn);
+ if (map != null) {
+ if (map.get(rhs).get(fn).booleanValue()) {
+ // throw new Error("variable " + rhs
+ // +
+ // " was not overwritten in-between the same read statement by the out-most loop.");
+ }
+ }
}
}
- break;
+ break;
case FKind.FlatFieldNode: {
fld = ffn.getField();
}
- break;
+ break;
case FKind.FlatElementNode: {
}
- break;
+ break;
case FKind.FlatSetFieldNode: {
}
- break;
+ break;
case FKind.FlatSetElementNode: {
}
- break;
+ break;
case FKind.FlatCall: {
}
- break;
+ break;
}
Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> prev = definitelyWrittenResults.get(fn);
Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> curr =
- new Hashtable<Descriptor, Hashtable<FlatNode, Boolean>>();
+ new Hashtable<Descriptor, Hashtable<FlatNode, Boolean>>();
for (int i = 0; i < fn.numPrev(); i++) {
- FlatNode nn = fn.getPrev(i);
- Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> dwIn = definitelyWrittenResults.get(nn);
- if (dwIn != null) {
- mergeResults(curr, dwIn);
- }
+ FlatNode nn = fn.getPrev(i);
+ Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> dwIn = definitelyWrittenResults.get(nn);
+ if (dwIn != null) {
+ mergeResults(curr, dwIn);
+ }
}
definitelyWritten_nodeActions(fn, curr, entrance);
// if a new result, schedule forward nodes for analysis
if (!curr.equals(prev)) {
- definitelyWrittenResults.put(fn, curr);
+ definitelyWrittenResults.put(fn, curr);
- for (int i = 0; i < fn.numNext(); i++) {
- FlatNode nn = fn.getNext(i);
- flatNodesToVisit.add(nn);
- }
+ for (int i = 0; i < fn.numNext(); i++) {
+ FlatNode nn = fn.getNext(i);
+ flatNodesToVisit.add(nn);
+ }
}
}
}
private void mergeResults(Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> curr,
- Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> in) {
+ Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> in) {
Set<Descriptor> inKeySet = in.keySet();
- for (Iterator iterator = inKeySet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = inKeySet.iterator(); iterator.hasNext(); ) {
Descriptor inKey = (Descriptor) iterator.next();
Hashtable<FlatNode, Boolean> inPair = in.get(inKey);
Set<FlatNode> pairKeySet = inPair.keySet();
- for (Iterator iterator2 = pairKeySet.iterator(); iterator2.hasNext();) {
- FlatNode pairKey = (FlatNode) iterator2.next();
- Boolean inFlag = inPair.get(pairKey);
-
- Hashtable<FlatNode, Boolean> currPair = curr.get(inKey);
- if (currPair == null) {
- currPair = new Hashtable<FlatNode, Boolean>();
- curr.put(inKey, currPair);
- }
-
- Boolean currFlag = currPair.get(pairKey);
- // by default, flag is set by false
- if (currFlag == null) {
- currFlag = Boolean.FALSE;
- }
- currFlag = Boolean.valueOf(inFlag.booleanValue() | currFlag.booleanValue());
- currPair.put(pairKey, currFlag);
+ for (Iterator iterator2 = pairKeySet.iterator(); iterator2.hasNext(); ) {
+ FlatNode pairKey = (FlatNode) iterator2.next();
+ Boolean inFlag = inPair.get(pairKey);
+
+ Hashtable<FlatNode, Boolean> currPair = curr.get(inKey);
+ if (currPair == null) {
+ currPair = new Hashtable<FlatNode, Boolean>();
+ curr.put(inKey, currPair);
+ }
+
+ Boolean currFlag = currPair.get(pairKey);
+ // by default, flag is set by false
+ if (currFlag == null) {
+ currFlag = Boolean.FALSE;
+ }
+ currFlag = Boolean.valueOf(inFlag.booleanValue() | currFlag.booleanValue());
+ currPair.put(pairKey, currFlag);
}
}
}
private void definitelyWritten_nodeActions(FlatNode fn,
- Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> curr, FlatNode entrance) {
+ Hashtable<Descriptor, Hashtable<FlatNode, Boolean>> curr, FlatNode entrance) {
if (fn == entrance) {
Set<Descriptor> keySet = curr.keySet();
- for (Iterator iterator = keySet.iterator(); iterator.hasNext();) {
- Descriptor key = (Descriptor) iterator.next();
- Hashtable<FlatNode, Boolean> pair = curr.get(key);
- if (pair != null) {
- Set<FlatNode> pairKeySet = pair.keySet();
- for (Iterator iterator2 = pairKeySet.iterator(); iterator2.hasNext();) {
- FlatNode pairKey = (FlatNode) iterator2.next();
- pair.put(pairKey, Boolean.TRUE);
- }
- }
+ for (Iterator iterator = keySet.iterator(); iterator.hasNext(); ) {
+ Descriptor key = (Descriptor) iterator.next();
+ Hashtable<FlatNode, Boolean> pair = curr.get(key);
+ if (pair != null) {
+ Set<FlatNode> pairKeySet = pair.keySet();
+ for (Iterator iterator2 = pairKeySet.iterator(); iterator2.hasNext(); ) {
+ FlatNode pairKey = (FlatNode) iterator2.next();
+ pair.put(pairKey, Boolean.TRUE);
+ }
+ }
}
} else {
case FKind.FlatOpNode: {
- FlatOpNode fon = (FlatOpNode) fn;
- lhs = fon.getDest();
- rhs = fon.getLeft();
- System.out.println("\nfon=" + fon);
-
- if (fon.getOp().getOp() == Operation.ASSIGN) {
-
- // read(rhs)
- Hashtable<FlatNode, Boolean> gen = curr.get(rhs);
- if (gen == null) {
- gen = new Hashtable<FlatNode, Boolean>();
- curr.put(rhs, gen);
- }
- System.out.println("READ LOC=" + rhs.getType().getExtension());
-
- Boolean currentStatus = gen.get(fn);
- if (currentStatus == null) {
- gen.put(fn, Boolean.FALSE);
- }
- }
- // write(lhs)
- curr.put(lhs, new Hashtable<FlatNode, Boolean>());
- System.out.println("WRITING LOC=" + lhs.getType().getExtension());
+ FlatOpNode fon = (FlatOpNode) fn;
+ lhs = fon.getDest();
+ rhs = fon.getLeft();
+ System.out.println("\nfon=" + fon);
+
+ if (fon.getOp().getOp() == Operation.ASSIGN) {
+
+ // read(rhs)
+ Hashtable<FlatNode, Boolean> gen = curr.get(rhs);
+ if (gen == null) {
+ gen = new Hashtable<FlatNode, Boolean>();
+ curr.put(rhs, gen);
+ }
+ System.out.println("READ LOC=" + rhs.getType().getExtension());
+
+ Boolean currentStatus = gen.get(fn);
+ if (currentStatus == null) {
+ gen.put(fn, Boolean.FALSE);
+ }
+ }
+ // write(lhs)
+ curr.put(lhs, new Hashtable<FlatNode, Boolean>());
+ System.out.println("WRITING LOC=" + lhs.getType().getExtension());
}
- break;
+ break;
case FKind.FlatLiteralNode: {
- FlatLiteralNode fln = (FlatLiteralNode) fn;
- lhs = fln.getDst();
+ FlatLiteralNode fln = (FlatLiteralNode) fn;
+ lhs = fln.getDst();
- // write(lhs)
- curr.put(lhs, new Hashtable<FlatNode, Boolean>());
+ // write(lhs)
+ curr.put(lhs, new Hashtable<FlatNode, Boolean>());
- System.out.println("WRITING LOC=" + lhs.getType().getExtension());
+ System.out.println("WRITING LOC=" + lhs.getType().getExtension());
}
- break;
+ break;
case FKind.FlatFieldNode:
case FKind.FlatElementNode: {
- FlatFieldNode ffn = (FlatFieldNode) fn;
- lhs = ffn.getSrc();
- fld = ffn.getField();
-
- // read field
- Hashtable<FlatNode, Boolean> gen = curr.get(fld);
- if (gen == null) {
- gen = new Hashtable<FlatNode, Boolean>();
- curr.put(fld, gen);
- }
- Boolean currentStatus = gen.get(fn);
- if (currentStatus == null) {
- gen.put(fn, Boolean.FALSE);
- }
-
- System.out.println("\nffn=" + ffn);
- System.out.println("READ LOCfld=" + fld.getType().getExtension());
- System.out.println("READ LOClhs=" + lhs.getType().getExtension());
+ FlatFieldNode ffn = (FlatFieldNode) fn;
+ lhs = ffn.getSrc();
+ fld = ffn.getField();
+
+ // read field
+ Hashtable<FlatNode, Boolean> gen = curr.get(fld);
+ if (gen == null) {
+ gen = new Hashtable<FlatNode, Boolean>();
+ curr.put(fld, gen);
+ }
+ Boolean currentStatus = gen.get(fn);
+ if (currentStatus == null) {
+ gen.put(fn, Boolean.FALSE);
+ }
+
+ System.out.println("\nffn=" + ffn);
+ System.out.println("READ LOCfld=" + fld.getType().getExtension());
+ System.out.println("READ LOClhs=" + lhs.getType().getExtension());
}
- break;
+ break;
case FKind.FlatSetFieldNode:
case FKind.FlatSetElementNode: {
- FlatSetFieldNode fsfn = (FlatSetFieldNode) fn;
- fld = fsfn.getField();
+ FlatSetFieldNode fsfn = (FlatSetFieldNode) fn;
+ fld = fsfn.getField();
- // write(field)
- curr.put(fld, new Hashtable<FlatNode, Boolean>());
+ // write(field)
+ curr.put(fld, new Hashtable<FlatNode, Boolean>());
- System.out.println("\nfsfn=" + fsfn);
- System.out.println("WRITELOC LOC=" + fld.getType().getExtension());
+ System.out.println("\nfsfn=" + fsfn);
+ System.out.println("WRITELOC LOC=" + fld.getType().getExtension());
}
- break;
+ break;
case FKind.FlatCall: {
}
- break;
+ break;
}
}
if (locTuple.size() != 0) {
int tupleSize = locTuple.size();
for (int i = 0; i < tupleSize; i++) {
- Location locElement = locTuple.at(i);
- if (i != 0) {
- rtr += ",";
- }
- rtr += locElement;
+ Location locElement = locTuple.at(i);
+ if (i != 0) {
+ rtr += ",";
+ }
+ rtr += locElement;
}
} else {
rtr += "LOC_REF";
Hashtable cd2lattice = state.getCd2LocationOrder();
Set cdSet = cd2lattice.keySet();
- for (Iterator iterator = cdSet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = cdSet.iterator(); iterator.hasNext(); ) {
ClassDescriptor cd = (ClassDescriptor) iterator.next();
- Lattice<String> lattice = (Lattice<String>) cd2lattice.get(cd);
+ Lattice<String> lattice = (Lattice<String>)cd2lattice.get(cd);
Set<String> locIdSet = lattice.getKeySet();
- for (Iterator iterator2 = locIdSet.iterator(); iterator2.hasNext();) {
- String locID = (String) iterator2.next();
- id2cd.put(locID, cd);
+ for (Iterator iterator2 = locIdSet.iterator(); iterator2.hasNext(); ) {
+ String locID = (String) iterator2.next();
+ id2cd.put(locID, cd);
}
}
ClassDescriptor cd = (ClassDescriptor) obj;
toanalyze.remove(cd);
// if (cd.isClassLibrary()) {
- // doesn't care about class libraries now
+ // doesn't care about class libraries now
// continue;
// }
checkDeclarationInClass(cd);
- for (Iterator method_it = cd.getMethods(); method_it.hasNext();) {
- MethodDescriptor md = (MethodDescriptor) method_it.next();
- try {
- checkDeclarationInMethodBody(cd, md);
- } catch (Error e) {
- System.out.println("Error in " + md);
- throw e;
- }
+ for (Iterator method_it = cd.getMethods(); method_it.hasNext(); ) {
+ MethodDescriptor md = (MethodDescriptor) method_it.next();
+ try {
+ checkDeclarationInMethodBody(cd, md);
+ } catch (Error e) {
+ System.out.println("Error in " + md);
+ throw e;
+ }
}
}
// for a nested delta location, assigning a concrete reference to delta
// operand
Set<Descriptor> tdSet = td2loc.keySet();
- for (Iterator iterator = tdSet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = tdSet.iterator(); iterator.hasNext(); ) {
Descriptor td = (Descriptor) iterator.next();
Location loc = td2loc.get(td);
if (loc.getType() == Location.DELTA) {
- // if it contains delta reference pointing to another location element
- CompositeLocation compLoc = (CompositeLocation) loc;
+ // if it contains delta reference pointing to another location element
+ CompositeLocation compLoc = (CompositeLocation) loc;
- Location locElement = compLoc.getTuple().at(0);
- assert (locElement instanceof DeltaLocation);
+ Location locElement = compLoc.getTuple().at(0);
+ assert(locElement instanceof DeltaLocation);
- DeltaLocation delta = (DeltaLocation) locElement;
- Descriptor refType = delta.getRefLocationId();
- if (refType != null) {
- Location refLoc = td2loc.get(refType);
+ DeltaLocation delta = (DeltaLocation) locElement;
+ Descriptor refType = delta.getRefLocationId();
+ if (refType != null) {
+ Location refLoc = td2loc.get(refType);
- assert (refLoc instanceof CompositeLocation);
- CompositeLocation refCompLoc = (CompositeLocation) refLoc;
+ assert(refLoc instanceof CompositeLocation);
+ CompositeLocation refCompLoc = (CompositeLocation) refLoc;
- assert (refCompLoc.getTuple().at(0) instanceof DeltaLocation);
- DeltaLocation refDelta = (DeltaLocation) refCompLoc.getTuple().at(0);
+ assert(refCompLoc.getTuple().at(0) instanceof DeltaLocation);
+ DeltaLocation refDelta = (DeltaLocation) refCompLoc.getTuple().at(0);
- delta.addDeltaOperand(refDelta);
- // compLoc.addLocation(refDelta);
- }
+ delta.addDeltaOperand(refDelta);
+ // compLoc.addLocation(refDelta);
+ }
}
}
ClassDescriptor cd = (ClassDescriptor) obj;
toanalyze.remove(cd);
// if (cd.isClassLibrary()) {
- // doesn't care about class libraries now
+ // doesn't care about class libraries now
// continue;
// }
checkClass(cd);
- for (Iterator method_it = cd.getMethods(); method_it.hasNext();) {
- MethodDescriptor md = (MethodDescriptor) method_it.next();
- try {
- checkMethodBody(cd, md);
- } catch (Error e) {
- System.out.println("Error in " + md);
- throw e;
- }
+ for (Iterator method_it = cd.getMethods(); method_it.hasNext(); ) {
+ MethodDescriptor md = (MethodDescriptor) method_it.next();
+ try {
+ checkMethodBody(cd, md);
+ } catch (Error e) {
+ System.out.println("Error in " + md);
+ throw e;
+ }
}
}
}
private void checkDeclarationInBlockStatementNode(MethodDescriptor md, SymbolTable nametable,
- BlockStatementNode bsn) {
+ BlockStatementNode bsn) {
switch (bsn.kind()) {
case Kind.SubBlockNode:
checkDeclarationInSubBlockNode(md, nametable, (SubBlockNode) bsn);
return;
+
case Kind.DeclarationNode:
checkDeclarationNode(md, nametable, (DeclarationNode) bsn);
break;
+
case Kind.LoopNode:
checkDeclarationInLoopNode(md, nametable, (LoopNode) bsn);
break;
ClassDescriptor cd = md.getClassDesc();
BlockNode bn = ln.getInitializer();
for (int i = 0; i < bn.size(); i++) {
- BlockStatementNode bsn = bn.get(i);
- checkDeclarationInBlockStatementNode(md, nametable, bsn);
+ BlockStatementNode bsn = bn.get(i);
+ checkDeclarationInBlockStatementNode(md, nametable, bsn);
}
}
}
private CompositeLocation checkLocationFromBlockNode(MethodDescriptor md, SymbolTable nametable,
- BlockNode bn) {
+ BlockNode bn) {
// it will return the lowest location in the block node
CompositeLocation lowestLoc = null;
for (int i = 0; i < bn.size(); i++) {
CompositeLocation bLoc = checkLocationFromBlockStatementNode(md, bn.getVarTable(), bsn);
if (lowestLoc == null) {
- lowestLoc = bLoc;
+ lowestLoc = bLoc;
} else {
- if (CompositeLattice.isGreaterThan(lowestLoc, bLoc, md.getClassDesc())) {
- lowestLoc = bLoc;
- }
+ if (CompositeLattice.isGreaterThan(lowestLoc, bLoc, md.getClassDesc())) {
+ lowestLoc = bLoc;
+ }
}
}
return lowestLoc;
}
private CompositeLocation checkLocationFromBlockStatementNode(MethodDescriptor md,
- SymbolTable nametable, BlockStatementNode bsn) {
+ SymbolTable nametable, BlockStatementNode bsn) {
CompositeLocation compLoc = null;
switch (bsn.kind()) {
compLoc = checkLocationFromSubBlockNode(md, nametable, (SubBlockNode) bsn);
break;
- // case Kind.ContinueBreakNode:
- // checkLocationFromContinueBreakNode(md, nametable,(ContinueBreakNode)
- // bsn);
- // return null;
+ // case Kind.ContinueBreakNode:
+ // checkLocationFromContinueBreakNode(md, nametable,(ContinueBreakNode)
+ // bsn);
+ // return null;
}
return compLoc;
}
private CompositeLocation checkLocationFromLoopNode(MethodDescriptor md, SymbolTable nametable,
- LoopNode ln) {
+ LoopNode ln) {
ClassDescriptor cd = md.getClassDesc();
if (ln.getType() == LoopNode.WHILELOOP || ln.getType() == LoopNode.DOWHILELOOP) {
CompositeLocation condLoc =
- checkLocationFromExpressionNode(md, nametable, ln.getCondition(), new CompositeLocation(
- cd));
+ checkLocationFromExpressionNode(md, nametable, ln.getCondition(), new CompositeLocation(
+ cd));
addTypeLocation(ln.getCondition().getType(), (condLoc));
CompositeLocation bodyLoc = checkLocationFromBlockNode(md, nametable, ln.getBody());
if (!CompositeLattice.isGreaterThan(condLoc, bodyLoc, cd)) {
- // loop condition should be higher than loop body
- throw new Error(
- "The location of the while-condition statement is lower than the loop body at "
- + cd.getSourceFileName() + ":" + ln.getCondition().getNumLine());
+ // loop condition should be higher than loop body
+ throw new Error(
+ "The location of the while-condition statement is lower than the loop body at "
+ + cd.getSourceFileName() + ":" + ln.getCondition().getNumLine());
}
return bodyLoc;
// calculate glb location of condition and update statements
CompositeLocation condLoc =
- checkLocationFromExpressionNode(md, bn.getVarTable(), ln.getCondition(),
- new CompositeLocation(cd));
+ checkLocationFromExpressionNode(md, bn.getVarTable(), ln.getCondition(),
+ new CompositeLocation(cd));
addTypeLocation(ln.getCondition().getType(), condLoc);
CompositeLocation updateLoc =
- checkLocationFromBlockNode(md, bn.getVarTable(), ln.getUpdate());
+ checkLocationFromBlockNode(md, bn.getVarTable(), ln.getUpdate());
Set<CompositeLocation> glbInputSet = new HashSet<CompositeLocation>();
glbInputSet.add(condLoc);
CompositeLocation blockLoc = checkLocationFromBlockNode(md, bn.getVarTable(), ln.getBody());
if (blockLoc == null) {
- // when there is no statement in the loop body
- return glbLocOfForLoopCond;
+ // when there is no statement in the loop body
+ return glbLocOfForLoopCond;
}
if (!CompositeLattice.isGreaterThan(glbLocOfForLoopCond, blockLoc, cd)) {
- throw new Error(
- "The location of the for-condition statement is lower than the for-loop body at "
- + cd.getSourceFileName() + ":" + ln.getCondition().getNumLine());
+ throw new Error(
+ "The location of the for-condition statement is lower than the for-loop body at "
+ + cd.getSourceFileName() + ":" + ln.getCondition().getNumLine());
}
return blockLoc;
}
}
private CompositeLocation checkLocationFromSubBlockNode(MethodDescriptor md,
- SymbolTable nametable, SubBlockNode sbn) {
+ SymbolTable nametable, SubBlockNode sbn) {
CompositeLocation compLoc = checkLocationFromBlockNode(md, nametable, sbn.getBlockNode());
return compLoc;
}
private CompositeLocation checkLocationFromIfStatementNode(MethodDescriptor md,
- SymbolTable nametable, IfStatementNode isn) {
+ SymbolTable nametable, IfStatementNode isn) {
ClassDescriptor localCD = md.getClassDesc();
Set<CompositeLocation> glbInputSet = new HashSet<CompositeLocation>();
CompositeLocation condLoc =
- checkLocationFromExpressionNode(md, nametable, isn.getCondition(), new CompositeLocation(
- localCD));
+ checkLocationFromExpressionNode(md, nametable, isn.getCondition(), new CompositeLocation(
+ localCD));
addTypeLocation(isn.getCondition().getType(), condLoc);
glbInputSet.add(condLoc);
if (!CompositeLattice.isGreaterThan(condLoc, locTrueBlock, localCD)) {
// error
throw new Error(
- "The location of the if-condition statement is lower than the conditional block at "
- + localCD.getSourceFileName() + ":" + isn.getCondition().getNumLine());
+ "The location of the if-condition statement is lower than the conditional block at "
+ + localCD.getSourceFileName() + ":" + isn.getCondition().getNumLine());
}
if (isn.getFalseBlock() != null) {
CompositeLocation locFalseBlock =
- checkLocationFromBlockNode(md, nametable, isn.getFalseBlock());
+ checkLocationFromBlockNode(md, nametable, isn.getFalseBlock());
glbInputSet.add(locFalseBlock);
if (!CompositeLattice.isGreaterThan(condLoc, locFalseBlock, localCD)) {
- // error
- throw new Error(
- "The location of the if-condition statement is lower than the conditional block at "
- + localCD.getSourceFileName() + ":" + isn.getCondition().getNumLine());
+ // error
+ throw new Error(
+ "The location of the if-condition statement is lower than the conditional block at "
+ + localCD.getSourceFileName() + ":" + isn.getCondition().getNumLine());
}
}
}
private CompositeLocation checkLocationFromDeclarationNode(MethodDescriptor md,
- SymbolTable nametable, DeclarationNode dn) {
+ SymbolTable nametable, DeclarationNode dn) {
VarDescriptor vd = dn.getVarDescriptor();
Location destLoc = td2loc.get(vd);
ClassDescriptor localCD = md.getClassDesc();
if (dn.getExpression() != null) {
CompositeLocation expressionLoc =
- checkLocationFromExpressionNode(md, nametable, dn.getExpression(), new CompositeLocation(
- localCD));
+ checkLocationFromExpressionNode(md, nametable, dn.getExpression(), new CompositeLocation(
+ localCD));
addTypeLocation(dn.getExpression().getType(), expressionLoc);
if (expressionLoc != null) {
- // checking location order
- if (!CompositeLattice.isGreaterThan(expressionLoc, destLoc, localCD)) {
- throw new Error("The value flow from " + expressionLoc + " to " + destLoc
- + " does not respect location hierarchy on the assignment " + dn.printNode(0));
- }
+ // checking location order
+ if (!CompositeLattice.isGreaterThan(expressionLoc, destLoc, localCD)) {
+ throw new Error("The value flow from " + expressionLoc + " to " + destLoc
+ + " does not respect location hierarchy on the assignment " + dn.printNode(0));
+ }
}
return expressionLoc;
}
private void checkDeclarationInSubBlockNode(MethodDescriptor md, SymbolTable nametable,
- SubBlockNode sbn) {
+ SubBlockNode sbn) {
checkDeclarationInBlockNode(md, nametable, sbn.getBlockNode());
}
private CompositeLocation checkLocationFromBlockExpressionNode(MethodDescriptor md,
- SymbolTable nametable, BlockExpressionNode ben) {
+ SymbolTable nametable, BlockExpressionNode ben) {
CompositeLocation compLoc =
- checkLocationFromExpressionNode(md, nametable, ben.getExpression(), null);
+ checkLocationFromExpressionNode(md, nametable, ben.getExpression(), null);
addTypeLocation(ben.getExpression().getType(), compLoc);
return compLoc;
}
private CompositeLocation checkLocationFromExpressionNode(MethodDescriptor md,
- SymbolTable nametable, ExpressionNode en, CompositeLocation loc) {
+ SymbolTable nametable, ExpressionNode en, CompositeLocation loc) {
CompositeLocation compLoc = null;
switch (en.kind()) {
}
private CompositeLocation checkLocationFromTertiaryNode(MethodDescriptor md,
- SymbolTable nametable, TertiaryNode tn) {
+ SymbolTable nametable, TertiaryNode tn) {
ClassDescriptor cd = md.getClassDesc();
CompositeLocation condLoc =
- checkLocationFromExpressionNode(md, nametable, tn.getCond(), new CompositeLocation(cd));
+ checkLocationFromExpressionNode(md, nametable, tn.getCond(), new CompositeLocation(cd));
addTypeLocation(tn.getCond().getType(), condLoc);
CompositeLocation trueLoc =
- checkLocationFromExpressionNode(md, nametable, tn.getTrueExpr(), new CompositeLocation(cd));
+ checkLocationFromExpressionNode(md, nametable, tn.getTrueExpr(), new CompositeLocation(cd));
addTypeLocation(tn.getTrueExpr().getType(), trueLoc);
CompositeLocation falseLoc =
- checkLocationFromExpressionNode(md, nametable, tn.getFalseExpr(), new CompositeLocation(cd));
+ checkLocationFromExpressionNode(md, nametable, tn.getFalseExpr(), new CompositeLocation(cd));
addTypeLocation(tn.getFalseExpr().getType(), falseLoc);
// check if condLoc is higher than trueLoc & falseLoc
if (!CompositeLattice.isGreaterThan(condLoc, trueLoc, cd)) {
throw new Error(
- "The location of the condition expression is lower than the true expression at "
- + cd.getSourceFileName() + ":" + tn.getCond().getNumLine());
+ "The location of the condition expression is lower than the true expression at "
+ + cd.getSourceFileName() + ":" + tn.getCond().getNumLine());
}
if (!CompositeLattice.isGreaterThan(condLoc, falseLoc, cd)) {
throw new Error(
- "The location of the condition expression is lower than the true expression at "
- + cd.getSourceFileName() + ":" + tn.getCond().getNumLine());
+ "The location of the condition expression is lower than the true expression at "
+ + cd.getSourceFileName() + ":" + tn.getCond().getNumLine());
}
// then, return glb of trueLoc & falseLoc
}
private CompositeLocation checkLocationFromMethodInvokeNode(MethodDescriptor md,
- SymbolTable nametable, MethodInvokeNode min) {
+ SymbolTable nametable, MethodInvokeNode min) {
ClassDescriptor cd = md.getClassDesc();
// callee's hierarchy
for (int i = 0; i < min.numArgs(); i++) {
- ExpressionNode en = min.getArg(i);
- CompositeLocation callerArg1 =
- checkLocationFromExpressionNode(md, nametable, en, new CompositeLocation(cd));
-
- ClassDescriptor calleecd = min.getMethod().getClassDesc();
- VarDescriptor calleevd = (VarDescriptor) min.getMethod().getParameter(i);
- Location calleeLoc1 = td2loc.get(calleevd);
-
- if (!callerArg1.getLocation(cd).isTop()) {
- // here, check if ordering relations among caller's args respect
- // ordering relations in-between callee's args
- for (int currentIdx = 0; currentIdx < min.numArgs(); currentIdx++) {
- if (currentIdx != i) {// skip itself
- ExpressionNode argExp = min.getArg(currentIdx);
- CompositeLocation callerArg2 =
- checkLocationFromExpressionNode(md, nametable, argExp, new CompositeLocation(cd));
-
- VarDescriptor calleevd2 = (VarDescriptor) min.getMethod().getParameter(currentIdx);
- Location calleeLoc2 = td2loc.get(calleevd2);
- boolean callerResult = CompositeLattice.isGreaterThan(callerArg1, callerArg2, cd);
- boolean calleeResult =
- CompositeLattice.isGreaterThan(calleeLoc1, calleeLoc2, calleecd);
-
- if (calleeResult && !callerResult) {
- // in callee, calleeLoc1 is higher than calleeLoc2
- // then, caller should have same ordering relation in-bet
- // callerLoc1 & callerLoc2
-
- throw new Error("Caller doesn't respect ordering relations among method arguments:"
- + cd.getSourceFileName() + ":" + min.getNumLine());
- }
-
- }
- }
- }
+ ExpressionNode en = min.getArg(i);
+ CompositeLocation callerArg1 =
+ checkLocationFromExpressionNode(md, nametable, en, new CompositeLocation(cd));
+
+ ClassDescriptor calleecd = min.getMethod().getClassDesc();
+ VarDescriptor calleevd = (VarDescriptor) min.getMethod().getParameter(i);
+ Location calleeLoc1 = td2loc.get(calleevd);
+
+ if (!callerArg1.getLocation(cd).isTop()) {
+ // here, check if ordering relations among caller's args respect
+ // ordering relations in-between callee's args
+ for (int currentIdx = 0; currentIdx < min.numArgs(); currentIdx++) {
+ if (currentIdx != i) { // skip itself
+ ExpressionNode argExp = min.getArg(currentIdx);
+ CompositeLocation callerArg2 =
+ checkLocationFromExpressionNode(md, nametable, argExp, new CompositeLocation(cd));
+
+ VarDescriptor calleevd2 = (VarDescriptor) min.getMethod().getParameter(currentIdx);
+ Location calleeLoc2 = td2loc.get(calleevd2);
+ boolean callerResult = CompositeLattice.isGreaterThan(callerArg1, callerArg2, cd);
+ boolean calleeResult =
+ CompositeLattice.isGreaterThan(calleeLoc1, calleeLoc2, calleecd);
+
+ if (calleeResult && !callerResult) {
+ // in callee, calleeLoc1 is higher than calleeLoc2
+ // then, caller should have same ordering relation in-bet
+ // callerLoc1 & callerLoc2
+
+ throw new Error("Caller doesn't respect ordering relations among method arguments:"
+ + cd.getSourceFileName() + ":" + min.getNumLine());
+ }
+
+ }
+ }
+ }
}
for (int i = 0; i < min.numArgs(); i++) {
ExpressionNode en = min.getArg(i);
CompositeLocation argLoc =
- checkLocationFromExpressionNode(md, nametable, en, new CompositeLocation(cd));
+ checkLocationFromExpressionNode(md, nametable, en, new CompositeLocation(cd));
addTypeLocation(en.getType(), argLoc);
argLocSet.add(argLoc);
}
}
private CompositeLocation checkLocationFromArrayAccessNode(MethodDescriptor md,
- SymbolTable nametable, ArrayAccessNode aan) {
+ SymbolTable nametable, ArrayAccessNode aan) {
// return glb location of array itself and index
Set<CompositeLocation> glbInputSet = new HashSet<CompositeLocation>();
CompositeLocation arrayLoc =
- checkLocationFromExpressionNode(md, nametable, aan.getExpression(), new CompositeLocation(
- cd));
+ checkLocationFromExpressionNode(md, nametable, aan.getExpression(), new CompositeLocation(
+ cd));
addTypeLocation(aan.getExpression().getType(), arrayLoc);
glbInputSet.add(arrayLoc);
CompositeLocation indexLoc =
- checkLocationFromExpressionNode(md, nametable, aan.getIndex(), new CompositeLocation(cd));
+ checkLocationFromExpressionNode(md, nametable, aan.getIndex(), new CompositeLocation(cd));
glbInputSet.add(indexLoc);
addTypeLocation(aan.getIndex().getType(), indexLoc);
}
private CompositeLocation checkLocationFromCreateObjectNode(MethodDescriptor md,
- SymbolTable nametable, CreateObjectNode con) {
+ SymbolTable nametable, CreateObjectNode con) {
ClassDescriptor cd = md.getClassDesc();
for (int i = 0; i < con.numArgs(); i++) {
ExpressionNode en = con.getArg(i);
CompositeLocation argLoc =
- checkLocationFromExpressionNode(md, nametable, en, new CompositeLocation(cd));
+ checkLocationFromExpressionNode(md, nametable, en, new CompositeLocation(cd));
glbInputSet.add(argLoc);
addTypeLocation(en.getType(), argLoc);
}
}
private CompositeLocation checkLocationFromOpNode(MethodDescriptor md, SymbolTable nametable,
- OpNode on) {
+ OpNode on) {
ClassDescriptor cd = md.getClassDesc();
CompositeLocation leftLoc = new CompositeLocation(cd);
}
private CompositeLocation checkLocationFromLiteralNode(MethodDescriptor md,
- SymbolTable nametable, LiteralNode en, CompositeLocation loc) {
+ SymbolTable nametable, LiteralNode en, CompositeLocation loc) {
// literal value has the top location so that value can be flowed into any
// location
}
private CompositeLocation checkLocationFromNameNode(MethodDescriptor md, SymbolTable nametable,
- NameNode nn, CompositeLocation loc) {
+ NameNode nn, CompositeLocation loc) {
NameDescriptor nd = nn.getName();
if (nd.getBase() != null) {
Location localLoc = null;
if (d instanceof VarDescriptor) {
- VarDescriptor vd = (VarDescriptor) d;
- localLoc = td2loc.get(vd);
+ VarDescriptor vd = (VarDescriptor) d;
+ localLoc = td2loc.get(vd);
} else if (d instanceof FieldDescriptor) {
- FieldDescriptor fd = (FieldDescriptor) d;
- localLoc = td2loc.get(fd);
+ FieldDescriptor fd = (FieldDescriptor) d;
+ localLoc = td2loc.get(fd);
}
- assert (localLoc != null);
+ assert(localLoc != null);
if (localLoc instanceof CompositeLocation) {
- loc = (CompositeLocation) localLoc;
+ loc = (CompositeLocation) localLoc;
} else {
- loc.addLocation(localLoc);
+ loc.addLocation(localLoc);
}
}
}
private CompositeLocation checkLocationFromFieldAccessNode(MethodDescriptor md,
- SymbolTable nametable, FieldAccessNode fan, CompositeLocation loc) {
+ SymbolTable nametable, FieldAccessNode fan, CompositeLocation loc) {
FieldDescriptor fd = fan.getField();
Location fieldLoc = td2loc.get(fd);
}
private CompositeLocation checkLocationFromAssignmentNode(MethodDescriptor md,
- SymbolTable nametable, AssignmentNode an, CompositeLocation loc) {
+ SymbolTable nametable, AssignmentNode an, CompositeLocation loc) {
ClassDescriptor cd = md.getClassDesc();
boolean postinc = true;
postinc = false;
CompositeLocation destLocation =
- checkLocationFromExpressionNode(md, nametable, an.getDest(), new CompositeLocation(cd));
+ checkLocationFromExpressionNode(md, nametable, an.getDest(), new CompositeLocation(cd));
CompositeLocation srcLocation = new CompositeLocation(cd);
if (!postinc) {
srcLocation = checkLocationFromExpressionNode(md, nametable, an.getSrc(), srcLocation);
if (!CompositeLattice.isGreaterThan(srcLocation, destLocation, cd)) {
- throw new Error("The value flow from " + srcLocation + " to " + destLocation
- + " does not respect location hierarchy on the assignment " + an.printNode(0));
+ throw new Error("The value flow from " + srcLocation + " to " + destLocation
+ + " does not respect location hierarchy on the assignment " + an.printNode(0));
}
} else {
destLocation =
- srcLocation = checkLocationFromExpressionNode(md, nametable, an.getDest(), srcLocation);
+ srcLocation = checkLocationFromExpressionNode(md, nametable, an.getDest(), srcLocation);
- if (!((Set<String>) state.getCd2LocationPropertyMap().get(cd)).contains(destLocation
- .getLocation(cd).getLocIdentifier())) {
- throw new Error("Location " + destLocation + " is not allowed to have spinning values at "
- + cd.getSourceFileName() + ":" + an.getNumLine());
+ if (!((Set<String>)state.getCd2LocationPropertyMap().get(cd)).contains(destLocation
+ .getLocation(cd).getLocIdentifier())) {
+ throw new Error("Location " + destLocation + " is not allowed to have spinning values at "
+ + cd.getSourceFileName() + ":" + an.getNumLine());
}
}
- if(an.getSrc()!=null){
+ if(an.getSrc()!=null) {
addTypeLocation(an.getSrc().getType(), srcLocation);
}
addTypeLocation(an.getDest().getType(), destLocation);
}
private void assignLocationOfVarDescriptor(VarDescriptor vd, MethodDescriptor md,
- SymbolTable nametable, TreeNode n) {
+ SymbolTable nametable, TreeNode n) {
ClassDescriptor cd = md.getClassDesc();
Vector<AnnotationDescriptor> annotationVec = vd.getType().getAnnotationMarkers();
// currently enforce every variable to have corresponding location
if (annotationVec.size() == 0) {
throw new Error("Location is not assigned to variable " + vd.getSymbol() + " in the method "
- + md.getSymbol() + " of the class " + cd.getSymbol());
+ + md.getSymbol() + " of the class " + cd.getSymbol());
}
if (annotationVec.size() > 1) {
// check if location is defined
String locationID = ad.getMarker();
- Lattice<String> lattice = (Lattice<String>) state.getCd2LocationOrder().get(cd);
+ Lattice<String> lattice = (Lattice<String>)state.getCd2LocationOrder().get(cd);
if (lattice == null || (!lattice.containsKey(locationID))) {
- throw new Error("Location " + locationID
- + " is not defined in the location hierarchy of class " + cd.getSymbol() + ".");
+ throw new Error("Location " + locationID
+ + " is not defined in the location hierarchy of class " + cd.getSymbol() + ".");
}
Location loc = new Location(cd, locationID);
} else if (ad.getType() == AnnotationDescriptor.SINGLE_ANNOTATION) {
if (ad.getMarker().equals(SSJavaAnalysis.DELTA)) {
- CompositeLocation compLoc = new CompositeLocation(cd);
-
- if (ad.getData().length() == 0) {
- throw new Error("Delta function of " + vd.getSymbol() + " does not have any locations: "
- + cd.getSymbol() + ".");
- }
-
- String deltaStr = ad.getData();
- if (deltaStr.startsWith("LOC(")) {
-
- if (!deltaStr.endsWith(")")) {
- throw new Error("The declaration of the delta location is wrong at "
- + cd.getSourceFileName() + ":" + n.getNumLine());
- }
- String locationOperand = deltaStr.substring(4, deltaStr.length() - 1);
-
- nametable.get(locationOperand);
- Descriptor d = (Descriptor) nametable.get(locationOperand);
-
- if (d instanceof VarDescriptor) {
- VarDescriptor varDescriptor = (VarDescriptor) d;
- DeltaLocation deltaLoc = new DeltaLocation(cd, varDescriptor);
- // td2loc.put(vd.getType(), compLoc);
- compLoc.addLocation(deltaLoc);
- } else if (d instanceof FieldDescriptor) {
- throw new Error("Applying delta operation to the field " + locationOperand
- + " is not allowed at " + cd.getSourceFileName() + ":" + n.getNumLine());
- }
- } else {
- StringTokenizer token = new StringTokenizer(deltaStr, ",");
- DeltaLocation deltaLoc = new DeltaLocation(cd);
-
- while (token.hasMoreTokens()) {
- String deltaOperand = token.nextToken();
- ClassDescriptor deltaCD = id2cd.get(deltaOperand);
- if (deltaCD == null) {
- // delta operand is not defined in the location hierarchy
- throw new Error("Delta operand '" + deltaOperand + "' of declaration node '" + vd
- + "' is not defined by location hierarchies.");
- }
-
- Location loc = new Location(deltaCD, deltaOperand);
- deltaLoc.addDeltaOperand(loc);
- }
- compLoc.addLocation(deltaLoc);
-
- }
-
- td2loc.put(vd, compLoc);
- addTypeLocation(vd.getType(), compLoc);
+ CompositeLocation compLoc = new CompositeLocation(cd);
+
+ if (ad.getData().length() == 0) {
+ throw new Error("Delta function of " + vd.getSymbol() + " does not have any locations: "
+ + cd.getSymbol() + ".");
+ }
+
+ String deltaStr = ad.getData();
+ if (deltaStr.startsWith("LOC(")) {
+
+ if (!deltaStr.endsWith(")")) {
+ throw new Error("The declaration of the delta location is wrong at "
+ + cd.getSourceFileName() + ":" + n.getNumLine());
+ }
+ String locationOperand = deltaStr.substring(4, deltaStr.length() - 1);
+
+ nametable.get(locationOperand);
+ Descriptor d = (Descriptor) nametable.get(locationOperand);
+
+ if (d instanceof VarDescriptor) {
+ VarDescriptor varDescriptor = (VarDescriptor) d;
+ DeltaLocation deltaLoc = new DeltaLocation(cd, varDescriptor);
+ // td2loc.put(vd.getType(), compLoc);
+ compLoc.addLocation(deltaLoc);
+ } else if (d instanceof FieldDescriptor) {
+ throw new Error("Applying delta operation to the field " + locationOperand
+ + " is not allowed at " + cd.getSourceFileName() + ":" + n.getNumLine());
+ }
+ } else {
+ StringTokenizer token = new StringTokenizer(deltaStr, ",");
+ DeltaLocation deltaLoc = new DeltaLocation(cd);
+
+ while (token.hasMoreTokens()) {
+ String deltaOperand = token.nextToken();
+ ClassDescriptor deltaCD = id2cd.get(deltaOperand);
+ if (deltaCD == null) {
+ // delta operand is not defined in the location hierarchy
+ throw new Error("Delta operand '" + deltaOperand + "' of declaration node '" + vd
+ + "' is not defined by location hierarchies.");
+ }
+
+ Location loc = new Location(deltaCD, deltaOperand);
+ deltaLoc.addDeltaOperand(loc);
+ }
+ compLoc.addLocation(deltaLoc);
+
+ }
+
+ td2loc.put(vd, compLoc);
+ addTypeLocation(vd.getType(), compLoc);
}
}
private void checkClass(ClassDescriptor cd) {
// Check to see that methods respects ss property
- for (Iterator method_it = cd.getMethods(); method_it.hasNext();) {
+ for (Iterator method_it = cd.getMethods(); method_it.hasNext(); ) {
MethodDescriptor md = (MethodDescriptor) method_it.next();
checkMethodDeclaration(cd, md);
}
private void checkDeclarationInClass(ClassDescriptor cd) {
// Check to see that fields are okay
- for (Iterator field_it = cd.getFields(); field_it.hasNext();) {
+ for (Iterator field_it = cd.getFields(); field_it.hasNext(); ) {
FieldDescriptor fd = (FieldDescriptor) field_it.next();
checkFieldDeclaration(cd, fd);
}
// currently enforce every variable to have corresponding location
if (annotationVec.size() == 0) {
throw new Error("Location is not assigned to the field " + fd.getSymbol() + " of the class "
- + cd.getSymbol());
+ + cd.getSymbol());
}
if (annotationVec.size() > 1) {
// variable can have at most one location
throw new Error("Field " + fd.getSymbol() + " of class " + cd
- + " has more than one location.");
+ + " has more than one location.");
}
// check if location is defined
AnnotationDescriptor ad = annotationVec.elementAt(0);
if (ad.getType() == AnnotationDescriptor.MARKER_ANNOTATION) {
String locationID = annotationVec.elementAt(0).getMarker();
- Lattice<String> lattice = (Lattice<String>) state.getCd2LocationOrder().get(cd);
+ Lattice<String> lattice = (Lattice<String>)state.getCd2LocationOrder().get(cd);
if (lattice == null || (!lattice.containsKey(locationID))) {
- throw new Error("Location " + locationID
- + " is not defined in the location hierarchy of class " + cd.getSymbol() + ".");
+ throw new Error("Location " + locationID
+ + " is not defined in the location hierarchy of class " + cd.getSymbol() + ".");
}
Location localLoc = new Location(cd, locationID);
} else if (ad.getType() == AnnotationDescriptor.SINGLE_ANNOTATION) {
if (ad.getMarker().equals(SSJavaAnalysis.DELTA)) {
- if (ad.getData().length() == 0) {
- throw new Error("Delta function of " + fd.getSymbol() + " does not have any locations: "
- + cd.getSymbol() + ".");
- }
-
- CompositeLocation compLoc = new CompositeLocation(cd);
- DeltaLocation deltaLoc = new DeltaLocation(cd);
-
- StringTokenizer token = new StringTokenizer(ad.getData(), ",");
- while (token.hasMoreTokens()) {
- String deltaOperand = token.nextToken();
- ClassDescriptor deltaCD = id2cd.get(deltaOperand);
- if (deltaCD == null) {
- // delta operand is not defined in the location hierarchy
- throw new Error("Delta operand '" + deltaOperand + "' of field node '" + fd
- + "' is not defined by location hierarchies.");
- }
-
- Location loc = new Location(deltaCD, deltaOperand);
- deltaLoc.addDeltaOperand(loc);
- }
- compLoc.addLocation(deltaLoc);
- td2loc.put(fd, compLoc);
- addTypeLocation(fd.getType(), compLoc);
+ if (ad.getData().length() == 0) {
+ throw new Error("Delta function of " + fd.getSymbol() + " does not have any locations: "
+ + cd.getSymbol() + ".");
+ }
+
+ CompositeLocation compLoc = new CompositeLocation(cd);
+ DeltaLocation deltaLoc = new DeltaLocation(cd);
+
+ StringTokenizer token = new StringTokenizer(ad.getData(), ",");
+ while (token.hasMoreTokens()) {
+ String deltaOperand = token.nextToken();
+ ClassDescriptor deltaCD = id2cd.get(deltaOperand);
+ if (deltaCD == null) {
+ // delta operand is not defined in the location hierarchy
+ throw new Error("Delta operand '" + deltaOperand + "' of field node '" + fd
+ + "' is not defined by location hierarchies.");
+ }
+
+ Location loc = new Location(deltaCD, deltaOperand);
+ deltaLoc.addDeltaOperand(loc);
+ }
+ compLoc.addLocation(deltaLoc);
+ td2loc.put(fd, compLoc);
+ addTypeLocation(fd.getType(), compLoc);
}
}
CompositeLocation compLoc2;
if (loc1 instanceof CompositeLocation) {
- compLoc1 = (CompositeLocation) loc1;
+ compLoc1 = (CompositeLocation) loc1;
} else {
- // create a bogus composite location for a single location
- compLoc1 = new CompositeLocation(loc1.getClassDescriptor());
- compLoc1.addLocation(loc1);
+ // create a bogus composite location for a single location
+ compLoc1 = new CompositeLocation(loc1.getClassDescriptor());
+ compLoc1.addLocation(loc1);
}
if (loc2 instanceof CompositeLocation) {
- compLoc2 = (CompositeLocation) loc2;
+ compLoc2 = (CompositeLocation) loc2;
} else {
- // create a bogus composite location for a single location
- compLoc2 = new CompositeLocation(loc2.getClassDescriptor());
- compLoc2.addLocation(loc2);
+ // create a bogus composite location for a single location
+ compLoc2 = new CompositeLocation(loc2.getClassDescriptor());
+ compLoc2.addLocation(loc2);
}
// comparing two composite locations
int baseCompareResult = compareBaseLocationSet(compLoc1, compLoc2, priorityCD);
if (baseCompareResult == ComparisonResult.EQUAL) {
- if (compareDelta(compLoc1, compLoc2) == ComparisonResult.GREATER) {
- return true;
- } else {
- return false;
- }
+ if (compareDelta(compLoc1, compLoc2) == ComparisonResult.GREATER) {
+ return true;
+ } else {
+ return false;
+ }
} else if (baseCompareResult == ComparisonResult.GREATER) {
- return true;
+ return true;
} else {
- return false;
+ return false;
}
}
private static int compareDelta(CompositeLocation compLoc1, CompositeLocation compLoc2) {
if (compLoc1.getNumofDelta() < compLoc2.getNumofDelta()) {
- return ComparisonResult.GREATER;
+ return ComparisonResult.GREATER;
} else {
- return ComparisonResult.LESS;
+ return ComparisonResult.LESS;
}
}
private static int compareBaseLocationSet(CompositeLocation compLoc1,
- CompositeLocation compLoc2, ClassDescriptor priorityCD) {
+ CompositeLocation compLoc2, ClassDescriptor priorityCD) {
// if compLoc1 is greater than compLoc2, return true
// else return false;
Location priorityLoc1 = cd2loc1.get(priorityCD);
Location priorityLoc2 = cd2loc2.get(priorityCD);
- assert (priorityLoc1.getClassDescriptor().equals(priorityLoc2.getClassDescriptor()));
+ assert(priorityLoc1.getClassDescriptor().equals(priorityLoc2.getClassDescriptor()));
ClassDescriptor cd = priorityLoc1.getClassDescriptor();
- Lattice<String> locationOrder = (Lattice<String>) state.getCd2LocationOrder().get(cd);
+ Lattice<String> locationOrder = (Lattice<String>)state.getCd2LocationOrder().get(cd);
if (priorityLoc1.getLocIdentifier().equals(priorityLoc2.getLocIdentifier())) {
- // have the same level of local hierarchy
+ // have the same level of local hierarchy
- Set<String> spinSet = (Set<String>) state.getCd2LocationPropertyMap().get(cd);
- if (spinSet != null && spinSet.contains(priorityLoc1.getLocIdentifier())) {
- // this location can be spinning
- return ComparisonResult.GREATER;
- }
+ Set<String> spinSet = (Set<String>)state.getCd2LocationPropertyMap().get(cd);
+ if (spinSet != null && spinSet.contains(priorityLoc1.getLocIdentifier())) {
+ // this location can be spinning
+ return ComparisonResult.GREATER;
+ }
} else if (locationOrder.isGreaterThan(priorityLoc1.getLocIdentifier(),
- priorityLoc2.getLocIdentifier())) {
- // if priority loc of compLoc1 is higher than compLoc2
- // then, compLoc 1 is higher than compLoc2
- return ComparisonResult.GREATER;
+ priorityLoc2.getLocIdentifier())) {
+ // if priority loc of compLoc1 is higher than compLoc2
+ // then, compLoc 1 is higher than compLoc2
+ return ComparisonResult.GREATER;
} else {
- // if priority loc of compLoc1 is NOT higher than compLoc2
- // then, compLoc 1 is NOT higher than compLoc2
- return ComparisonResult.LESS;
+ // if priority loc of compLoc1 is NOT higher than compLoc2
+ // then, compLoc 1 is NOT higher than compLoc2
+ return ComparisonResult.LESS;
}
// compare base locations except priority by class descriptor
Set<ClassDescriptor> keySet1 = cd2loc1.keySet();
int numEqualLoc = 0;
- for (Iterator iterator = keySet1.iterator(); iterator.hasNext();) {
- ClassDescriptor cd1 = (ClassDescriptor) iterator.next();
-
- Location loc1 = cd2loc1.get(cd1);
- Location loc2 = cd2loc2.get(cd1);
-
- if (priorityLoc1.equals(loc1)) {
- continue;
- }
-
- if (loc2 == null) {
- // if comploc2 doesn't have corresponding location,
- // then we determines that comploc1 is lower than comploc 2
- return ComparisonResult.LESS;
- }
-
- System.out.println("lattice comparison:" + loc1.getLocIdentifier() + " ? "
- + loc2.getLocIdentifier());
- locationOrder = (Lattice<String>) state.getCd2LocationOrder().get(cd1);
- if (loc1.getLocIdentifier().equals(loc2.getLocIdentifier())) {
- // have the same level of local hierarchy
- numEqualLoc++;
- } else if (!locationOrder.isGreaterThan(loc1.getLocIdentifier(), loc2.getLocIdentifier())) {
- // if one element of composite location 1 is not higher than composite
- // location 2
- // then, composite loc 1 is not higher than composite loc 2
-
- System.out.println(compLoc1 + " < " + compLoc2);
- return ComparisonResult.LESS;
- }
+ for (Iterator iterator = keySet1.iterator(); iterator.hasNext(); ) {
+ ClassDescriptor cd1 = (ClassDescriptor) iterator.next();
+
+ Location loc1 = cd2loc1.get(cd1);
+ Location loc2 = cd2loc2.get(cd1);
+
+ if (priorityLoc1.equals(loc1)) {
+ continue;
+ }
+
+ if (loc2 == null) {
+ // if comploc2 doesn't have corresponding location,
+ // then we determines that comploc1 is lower than comploc 2
+ return ComparisonResult.LESS;
+ }
+
+ System.out.println("lattice comparison:" + loc1.getLocIdentifier() + " ? "
+ + loc2.getLocIdentifier());
+ locationOrder = (Lattice<String>)state.getCd2LocationOrder().get(cd1);
+ if (loc1.getLocIdentifier().equals(loc2.getLocIdentifier())) {
+ // have the same level of local hierarchy
+ numEqualLoc++;
+ } else if (!locationOrder.isGreaterThan(loc1.getLocIdentifier(), loc2.getLocIdentifier())) {
+ // if one element of composite location 1 is not higher than composite
+ // location 2
+ // then, composite loc 1 is not higher than composite loc 2
+
+ System.out.println(compLoc1 + " < " + compLoc2);
+ return ComparisonResult.LESS;
+ }
}
if (numEqualLoc == (compLoc1.getBaseLocationSize() - 1)) {
- return ComparisonResult.EQUAL;
+ return ComparisonResult.EQUAL;
}
System.out.println(compLoc1 + " > " + compLoc2);
}
public static CompositeLocation calculateGLB(ClassDescriptor cd,
- Set<CompositeLocation> inputSet, ClassDescriptor priorityCD) {
+ Set<CompositeLocation> inputSet, ClassDescriptor priorityCD) {
CompositeLocation glbCompLoc = new CompositeLocation(cd);
int maxDeltaFunction = 0;
// calculate GLB of priority element first
Hashtable<ClassDescriptor, Set<Location>> cd2locSet =
- new Hashtable<ClassDescriptor, Set<Location>>();
+ new Hashtable<ClassDescriptor, Set<Location>>();
// creating mapping from class to set of locations
- for (Iterator iterator = inputSet.iterator(); iterator.hasNext();) {
- CompositeLocation compLoc = (CompositeLocation) iterator.next();
+ for (Iterator iterator = inputSet.iterator(); iterator.hasNext(); ) {
+ CompositeLocation compLoc = (CompositeLocation) iterator.next();
- int numOfDelta = compLoc.getNumofDelta();
- if (numOfDelta > maxDeltaFunction) {
- maxDeltaFunction = numOfDelta;
- }
+ int numOfDelta = compLoc.getNumofDelta();
+ if (numOfDelta > maxDeltaFunction) {
+ maxDeltaFunction = numOfDelta;
+ }
- Set<Location> baseLocationSet = compLoc.getBaseLocationSet();
- for (Iterator iterator2 = baseLocationSet.iterator(); iterator2.hasNext();) {
- Location locElement = (Location) iterator2.next();
- ClassDescriptor locCD = locElement.getClassDescriptor();
+ Set<Location> baseLocationSet = compLoc.getBaseLocationSet();
+ for (Iterator iterator2 = baseLocationSet.iterator(); iterator2.hasNext(); ) {
+ Location locElement = (Location) iterator2.next();
+ ClassDescriptor locCD = locElement.getClassDescriptor();
- Set<Location> locSet = cd2locSet.get(locCD);
- if (locSet == null) {
- locSet = new HashSet<Location>();
- }
- locSet.add(locElement);
+ Set<Location> locSet = cd2locSet.get(locCD);
+ if (locSet == null) {
+ locSet = new HashSet<Location>();
+ }
+ locSet.add(locElement);
- cd2locSet.put(locCD, locSet);
+ cd2locSet.put(locCD, locSet);
- }
+ }
}
Set<Location> locSetofClass = cd2locSet.get(priorityCD);
Set<String> locIdentifierSet = new HashSet<String>();
- for (Iterator<Location> locIterator = locSetofClass.iterator(); locIterator.hasNext();) {
- Location locElement = locIterator.next();
- locIdentifierSet.add(locElement.getLocIdentifier());
+ for (Iterator<Location> locIterator = locSetofClass.iterator(); locIterator.hasNext(); ) {
+ Location locElement = locIterator.next();
+ locIdentifierSet.add(locElement.getLocIdentifier());
}
- Lattice<String> locOrder = (Lattice<String>) state.getCd2LocationOrder().get(priorityCD);
+ Lattice<String> locOrder = (Lattice<String>)state.getCd2LocationOrder().get(priorityCD);
String glbLocIdentifer = locOrder.getGLB(locIdentifierSet);
Location priorityGLB = new Location(priorityCD, glbLocIdentifer);
Set<CompositeLocation> sameGLBLoc = new HashSet<CompositeLocation>();
- for (Iterator<CompositeLocation> iterator = inputSet.iterator(); iterator.hasNext();) {
- CompositeLocation inputComploc = iterator.next();
- Location locElement = inputComploc.getLocation(priorityCD);
+ for (Iterator<CompositeLocation> iterator = inputSet.iterator(); iterator.hasNext(); ) {
+ CompositeLocation inputComploc = iterator.next();
+ Location locElement = inputComploc.getLocation(priorityCD);
- if (locElement.equals(priorityGLB)) {
- sameGLBLoc.add(inputComploc);
- }
+ if (locElement.equals(priorityGLB)) {
+ sameGLBLoc.add(inputComploc);
+ }
}
glbCompLoc.addLocation(priorityGLB);
if (sameGLBLoc.size() > 0) {
- // if more than one location shares the same priority GLB
- // need to calculate the rest of GLB loc
-
- Set<Location> glbElementSet = new HashSet<Location>();
-
- for (Iterator<ClassDescriptor> iterator = cd2locSet.keySet().iterator(); iterator.hasNext();) {
- ClassDescriptor localCD = iterator.next();
- if (!localCD.equals(priorityCD)) {
- Set<Location> localLocSet = cd2locSet.get(localCD);
- Set<String> LocalLocIdSet = new HashSet<String>();
-
- for (Iterator<Location> locIterator = localLocSet.iterator(); locIterator.hasNext();) {
- Location locElement = locIterator.next();
- LocalLocIdSet.add(locElement.getLocIdentifier());
- }
-
- Lattice<String> localOrder = (Lattice<String>) state.getCd2LocationOrder().get(localCD);
- Location localGLBLoc = new Location(localCD, localOrder.getGLB(LocalLocIdSet));
- glbCompLoc.addLocation(localGLBLoc);
- }
- }
+ // if more than one location shares the same priority GLB
+ // need to calculate the rest of GLB loc
+
+ Set<Location> glbElementSet = new HashSet<Location>();
+
+ for (Iterator<ClassDescriptor> iterator = cd2locSet.keySet().iterator(); iterator.hasNext(); ) {
+ ClassDescriptor localCD = iterator.next();
+ if (!localCD.equals(priorityCD)) {
+ Set<Location> localLocSet = cd2locSet.get(localCD);
+ Set<String> LocalLocIdSet = new HashSet<String>();
+
+ for (Iterator<Location> locIterator = localLocSet.iterator(); locIterator.hasNext(); ) {
+ Location locElement = locIterator.next();
+ LocalLocIdSet.add(locElement.getLocIdentifier());
+ }
+
+ Lattice<String> localOrder = (Lattice<String>)state.getCd2LocationOrder().get(localCD);
+ Location localGLBLoc = new Location(localCD, localOrder.getGLB(LocalLocIdSet));
+ glbCompLoc.addLocation(localGLBLoc);
+ }
+ }
} else {
- // if priority glb loc is lower than all of input loc
- // assign top location to the rest of loc element
+ // if priority glb loc is lower than all of input loc
+ // assign top location to the rest of loc element
- for (Iterator<ClassDescriptor> iterator = cd2locSet.keySet().iterator(); iterator.hasNext();) {
- ClassDescriptor localCD = iterator.next();
- if (!localCD.equals(priorityCD)) {
- Location localGLBLoc = Location.createTopLocation(localCD);
- glbCompLoc.addLocation(localGLBLoc);
- }
+ for (Iterator<ClassDescriptor> iterator = cd2locSet.keySet().iterator(); iterator.hasNext(); ) {
+ ClassDescriptor localCD = iterator.next();
+ if (!localCD.equals(priorityCD)) {
+ Location localGLBLoc = Location.createTopLocation(localCD);
+ glbCompLoc.addLocation(localGLBLoc);
+ }
- }
+ }
}
if (loc.getClassDescriptor().equals(getClassDescriptor())) {
if (loc.getLocIdentifier() == null || getLocIdentifier() == null) {
- if (loc.getType() == getType()) {
- return true;
- }
+ if (loc.getType() == getType()) {
+ return true;
+ }
} else {
- if (loc.getLocIdentifier().equals(getLocIdentifier())) {
- return true;
- }
+ if (loc.getLocIdentifier().equals(getLocIdentifier())) {
+ return true;
+ }
}
}
bottomLoc.loc = "_bottom_";
return bottomLoc;
}
-
- public boolean isTop(){
+
+ public boolean isTop() {
return type==TOP;
}
private List<T> elements;
/**
 * Builds an n-tuple over the given elements.
 *
 * NOTE(review): Arrays.asList returns a fixed-size list backed directly by
 * the varargs array — a caller that keeps a reference to the array can
 * mutate this tuple's contents, and structural add/remove on the list
 * throws UnsupportedOperationException. Confirm whether a defensive copy
 * is wanted here before changing it.
 */
public NTuple(T... elements) {
  this.elements = Arrays.asList(elements);
}
private int cid;
private static int nodeID=0;
private static int colorID = 1;
- private static Hashtable<ClassDescriptor, Integer> cd2cid =
- new Hashtable<ClassDescriptor, Integer>();
+ private static Hashtable<ClassDescriptor, Integer> cd2cid =
+ new Hashtable<ClassDescriptor, Integer>();
private final ClassDescriptor cd;
private ScheduleNode sn;
* @param cd ClassDescriptor
* @param fStates
*/
- public ClassNode(ClassDescriptor cd,
- Vector<FlagState> fStates) {
+ public ClassNode(ClassDescriptor cd,
+ Vector<FlagState> fStates) {
this.cd=cd;
this.flagStates = fStates;
this.sn = null;
this.uid=ClassNode.nodeID++;
// TODO: potential bug here
// DO NOT consider splitting a class node here.
- // need to fix: 1. when a class node is splitted, the pieces should have
+ // need to fix: 1. when a class node is splitted, the pieces should have
// different cid
// 2. when two pieces merged, it should have right cid as have
// never been splitted
// 3. NOTE: a piece could be splitted further
if(this.cd2cid.containsKey(cd)) {
- this.cid = this.cd2cid.get(cd);
+ this.cid = this.cd2cid.get(cd);
} else {
- this.cid = ClassNode.colorID++;
- this.cd2cid.put(this.cd, this.cid);
+ this.cid = ClassNode.colorID++;
+ this.cd2cid.put(this.cd, this.cid);
}
this.transTime = 0;
}
}
public int getCid() {
- return cid;
+ return cid;
}
public ScheduleNode getScheduleNode() {
if (o instanceof ClassNode) {
ClassNode fs=(ClassNode)o;
if ((fs.getClassDescriptor()!= cd) ||
- (fs.getuid()!= uid) ||
- (fs.getCid()!= cid) ||
+ (fs.getuid()!= uid) ||
+ (fs.getCid()!= cid) ||
(fs.isSorted() != sorted) ||
(fs.clone != this.clone) ||
(fs.transTime != this.transTime)) {
return cu;
}
- public static RootsGenerator allocateRootsGenerator(Vector<Vector<ScheduleNode>> snodevecs,
- int rootNum) {
+ public static RootsGenerator allocateRootsGenerator(Vector<Vector<ScheduleNode>> snodevecs,
+ int rootNum) {
return CombinationUtil.allocateCombinationUtil().new RootsGenerator(snodevecs, rootNum);
}
- public static CombineGenerator allocateCombineGenerator(Vector<Vector<ScheduleNode>> rootnodes,
- Vector<Vector<ScheduleNode>> node2combine) {
+ public static CombineGenerator allocateCombineGenerator(Vector<Vector<ScheduleNode>> rootnodes,
+ Vector<Vector<ScheduleNode>> node2combine) {
return CombinationUtil.allocateCombinationUtil().new CombineGenerator(rootnodes, node2combine);
}
-
- public static RandomGenerator allocateRandomGenerator(Vector<Vector<ScheduleNode>> snodevecs,
- int rootNum) {
- return CombinationUtil.allocateCombinationUtil().new RandomGenerator(snodevecs, rootNum);
+
+ public static RandomGenerator allocateRandomGenerator(Vector<Vector<ScheduleNode>> snodevecs,
+ int rootNum) {
+ return CombinationUtil.allocateCombinationUtil().new RandomGenerator(snodevecs, rootNum);
}
public class RootsGenerator {
Vector<Vector<ScheduleNode>> rootNodes;
int rootNum;
/**
 * @param snodevecs candidate schedule-node groups to pick roots from
 * @param rootNum   number of root nodes to generate
 */
public RootsGenerator(Vector<Vector<ScheduleNode>> snodevecs, int rootNum) {
  this.sNodeVecs = snodevecs;
  this.rootNum = rootNum;
  // derived state is produced lazily by nextGen()
  this.rootNodes = null;
  this.node2Combine = null;
}
-
+
public void clear() {
- this.sNodeVecs = null;
- this.node2Combine.clear();
- this.node2Combine = null;
- this.rootNodes.clear();
- this.rootNodes = null;
- this.rootNum = 0;
+ this.sNodeVecs = null;
+ this.node2Combine.clear();
+ this.node2Combine = null;
+ this.rootNodes.clear();
+ this.rootNodes = null;
+ this.rootNum = 0;
}
public boolean nextGen() {
return trial;
}
- private boolean trial(int num2choose,
- int next) {
+ private boolean trial(int num2choose,
+ int next) {
int index = 0;
boolean first = true;
while(num2choose > 0) {
int limit;
int[][] rootLoads;
- public CombineGenerator(Vector<Vector<ScheduleNode>> rootnodes,
- Vector<Vector<ScheduleNode>> node2combine) {
+ public CombineGenerator(Vector<Vector<ScheduleNode>> rootnodes,
+ Vector<Vector<ScheduleNode>> node2combine) {
this.rootNodes = rootnodes;
this.node2Combine = node2combine;
this.rootNStates = new Vector<Vector<int[]>>();
this.lastchoices = null;
this.first4choice = false;
this.rand = new Random();
-
+
this.limit = (tomapnum-1)/rootnum+1;
this.rootLoads = null;
}
-
+
public void clear() {
- this.rootNodes = null;
- this.rootNStates.clear();
- this.rootNStates = null;
- this.node2Combine = null;
- this.combine.clear();
- this.combine = null;
- this.lastchoices = null;
- this.first4choice = false;
+ this.rootNodes = null;
+ this.rootNStates.clear();
+ this.rootNStates = null;
+ this.node2Combine = null;
+ this.combine.clear();
+ this.combine = null;
+ this.lastchoices = null;
+ this.first4choice = false;
}
public Vector<Vector<Combine>> getCombine() {
return combine;
}
-
+
// generate next mapping randomly evenly
public boolean randomGenE() {
- this.rootLoads = new int[this.rootNodes.size()][];
- for(int i = 0; i < this.rootNodes.size(); i++) {
- this.rootLoads[i] = new int[this.rootNodes.elementAt(i).size()];
- }
- int rootx = this.rootNodes.size();
- for(int i = 0; i < this.node2Combine.size(); i++) {
- for(int j = 0; j < this.node2Combine.elementAt(i).size(); j++) {
- Combine tmp = this.combine.elementAt(i).elementAt(j);
- do {
- int x = Math.abs(rand.nextInt()) % rootx;
- int y = Math.abs(rand.nextInt()) % this.rootNodes.elementAt(x).size();
- if(this.rootLoads[x][y] < this.limit) {
- tmp.root = x;
- tmp.index = y;
- this.rootLoads[tmp.root][tmp.index]++;
- break;
- }
- }while(true);
+ this.rootLoads = new int[this.rootNodes.size()][];
+ for(int i = 0; i < this.rootNodes.size(); i++) {
+ this.rootLoads[i] = new int[this.rootNodes.elementAt(i).size()];
+ }
+ int rootx = this.rootNodes.size();
+ for(int i = 0; i < this.node2Combine.size(); i++) {
+ for(int j = 0; j < this.node2Combine.elementAt(i).size(); j++) {
+ Combine tmp = this.combine.elementAt(i).elementAt(j);
+ do {
+ int x = Math.abs(rand.nextInt()) % rootx;
+ int y = Math.abs(rand.nextInt()) % this.rootNodes.elementAt(x).size();
+ if(this.rootLoads[x][y] < this.limit) {
+ tmp.root = x;
+ tmp.index = y;
+ this.rootLoads[tmp.root][tmp.index]++;
+ break;
}
+ } while(true);
}
- return true;
+ }
+ return true;
}
-
+
public boolean randomGen() {
- int rootx = this.rootNodes.size();
- for(int i = 0; i < this.node2Combine.size(); i++) {
- for(int j = 0; j < this.node2Combine.elementAt(i).size(); j++) {
- Combine tmp = this.combine.elementAt(i).elementAt(j);
- tmp.root = Math.abs(rand.nextInt()) % rootx;
- tmp.index = Math.abs(rand.nextInt()) % this.rootNodes.elementAt(tmp.root).size();
- }
+ int rootx = this.rootNodes.size();
+ for(int i = 0; i < this.node2Combine.size(); i++) {
+ for(int j = 0; j < this.node2Combine.elementAt(i).size(); j++) {
+ Combine tmp = this.combine.elementAt(i).elementAt(j);
+ tmp.root = Math.abs(rand.nextInt()) % rootx;
+ tmp.index = Math.abs(rand.nextInt()) % this.rootNodes.elementAt(tmp.root).size();
}
- return true;
+ }
+ return true;
}
public boolean nextGen() {
return suc;
}
- private boolean firstexpand(int next,
- boolean first) {
+ private boolean firstexpand(int next,
+ boolean first) {
for(int i = next; i < this.node2Combine.size(); i++) {
if(this.node2Combine.elementAt(i) != null) {
int choice = this.lastchoices[i];
return true;
}
- private boolean innertrial(int next,
- int layer) {
+ private boolean innertrial(int next,
+ int layer) {
if((this.combine.elementAt(next) == null) ||
(this.combine.elementAt(next).size() < 2)) {
// skip over empty buckets and bucket with only one obj ( make sure
}
}
- private boolean propagateOne(int next,
- int rooti,
- int indexi,
- int ti,
- Combine tmp) {
+ private boolean propagateOne(int next,
+ int rooti,
+ int indexi,
+ int ti,
+ Combine tmp) {
int root = rooti;
int index = indexi;
int t = ti;
}
}
}
-
+
public class RandomGenerator {
- Vector<Vector<ScheduleNode>> sNodeVecs;
- Vector<Vector<ScheduleNode>> mapping;
- int rootNum;
- Random rand;
-
- public RandomGenerator(Vector<Vector<ScheduleNode>> snodevecs,
- int rootNum) {
- this.sNodeVecs = snodevecs;
- this.rootNum = rootNum;
-
- this.mapping = new Vector<Vector<ScheduleNode>>();
- for(int i = 0; i < this.rootNum; i++) {
- this.mapping.add(null);
- }
- this.rand = new Random();
+ Vector<Vector<ScheduleNode>> sNodeVecs;
+ Vector<Vector<ScheduleNode>> mapping;
+ int rootNum;
+ Random rand;
+
+ public RandomGenerator(Vector<Vector<ScheduleNode>> snodevecs,
+ int rootNum) {
+ this.sNodeVecs = snodevecs;
+ this.rootNum = rootNum;
+
+ this.mapping = new Vector<Vector<ScheduleNode>>();
+ for(int i = 0; i < this.rootNum; i++) {
+ this.mapping.add(null);
}
-
- public void clear() {
- this.sNodeVecs = null;
- this.rootNum = 0;
- this.mapping = null;
+ this.rand = new Random();
+ }
+
+ public void clear() {
+ this.sNodeVecs = null;
+ this.rootNum = 0;
+ this.mapping = null;
+ }
+
+ public boolean nextGen() {
+ this.mapping = null;
+ this.mapping = new Vector<Vector<ScheduleNode>>();
+ for(int i = 0; i < this.rootNum; i++) {
+ this.mapping.add(null);
}
- public boolean nextGen() {
- this.mapping = null;
- this.mapping = new Vector<Vector<ScheduleNode>>();
- for(int i = 0; i < this.rootNum; i++) {
- this.mapping.add(null);
+ // randomly choose a core for each node in sNodeVecs
+ for(int i = 0; i < this.sNodeVecs.size(); i++) {
+ Vector<ScheduleNode> sNodes = this.sNodeVecs.elementAt(i);
+ for(int j = 0; j < sNodes.size(); j++) {
+ ScheduleNode snode = sNodes.elementAt(j);
+ int core = Math.abs(rand.nextInt()) % this.rootNum;
+ if(this.mapping.elementAt(core) == null) {
+ this.mapping.setElementAt(new Vector<ScheduleNode>(), core);
}
-
- // randomly choose a core for each node in sNodeVecs
- for(int i = 0; i < this.sNodeVecs.size(); i++) {
- Vector<ScheduleNode> sNodes = this.sNodeVecs.elementAt(i);
- for(int j = 0; j < sNodes.size(); j++) {
- ScheduleNode snode = sNodes.elementAt(j);
- int core = Math.abs(rand.nextInt()) % this.rootNum;
- if(this.mapping.elementAt(core) == null) {
- this.mapping.setElementAt(new Vector<ScheduleNode>(), core);
- }
- this.mapping.elementAt(core).add(snode);
- }
- }
- return true;
- }
-
- public Vector<Vector<ScheduleNode>> getMapping() {
- return this.mapping;
+ this.mapping.elementAt(core).add(snode);
+ }
}
+ return true;
+ }
+
+ public Vector<Vector<ScheduleNode>> getMapping() {
+ return this.mapping;
+ }
}
}
\ No newline at end of file
int coreNum;
long activeTime;
- public CoreSimulator(RuntimeSchedule schedule,
- int coreNum) {
+ public CoreSimulator(RuntimeSchedule schedule,
+ int coreNum) {
super();
reset();
this.rSchedule = schedule;
return targetCSimulator.get(fstate);
}
- public void setTargetCSimulator(Hashtable<FlagState,
- Queue<Integer>> targetCSimulator) {
+ public void setTargetCSimulator(Hashtable<FlagState,
+ Queue<Integer>> targetCSimulator) {
this.targetCSimulator = targetCSimulator;
}
return allyCSimulator.get(fstate);
}
- public void setAllyCSimulator(Hashtable<FlagState,
- Vector<Integer>> allyCSimulator) {
+ public void setAllyCSimulator(Hashtable<FlagState,
+ Vector<Integer>> allyCSimulator) {
this.allyCSimulator = allyCSimulator;
}
}
}
- public void addObject(ObjectSimulator newObj,
- FlagState fs,
- int version) {
+ public void addObject(ObjectSimulator newObj,
+ FlagState fs,
+ int version) {
if(this.tasks == null) {
return;
}
ObjectSimulator obj = paraQueues.elementAt(i).poll();
obj.setHold(false);
boolean remove = false;
- if((this.targetFState != null)
- && (this.targetFState.containsKey(obj.getCurrentFS()))) {
+ if((this.targetFState != null)
+ && (this.targetFState.containsKey(obj.getCurrentFS()))) {
if(transObjs == null) {
transObjs = new Vector<ObjectSimulator>();
}
if(allycores != null) {
obj.setShared(true);
//for(int k = 0; k < allycores.size(); ++k) {
- //Integer allyCore = allycores.elementAt(k);
- if(transObjs == null) {
- transObjs = new Vector<ObjectSimulator>();
- }
- if(!transObjs.contains(obj)) {
- transObjs.add(obj);
- }
- remove = false;
+ //Integer allyCore = allycores.elementAt(k);
+ if(transObjs == null) {
+ transObjs = new Vector<ObjectSimulator>();
+ }
+ if(!transObjs.contains(obj)) {
+ transObjs.add(obj);
+ }
+ remove = false;
//}
allycores = null;
}
// check if need to transfer to other cores
Queue<Integer> targetcores = this.getTargetCores(obj.getCurrentFS());
if(targetcores != null) {
- if(transObjs == null) {
- transObjs = new Vector<ObjectSimulator>();
- }
- if(!transObjs.contains(obj)) {
- transObjs.add(obj);
- }
- remove = true;
+ if(transObjs == null) {
+ transObjs = new Vector<ObjectSimulator>();
+ }
+ if(!transObjs.contains(obj)) {
+ transObjs.add(obj);
+ }
+ remove = true;
}
for(int j = 0; j < this.tasks.size(); j++) {
this.tasks.elementAt(j).refreshPara(obj, remove);
int coreNum;
int scheduleThreshold; // # of starting points generated by schedule analysis
- int probThreshold; // the probability to stop when no accelaration achieved
+ int probThreshold; // the probability to stop when no accelaration achieved
// in the directed simulated annealing
- int generateThreshold; // how many optimized implementation generated in
+ int generateThreshold; // how many optimized implementation generated in
// each iteration of the directed simulated annealing
- int skipThreshold; // the probability to skip to producing more optimization
+ int skipThreshold; // the probability to skip to producing more optimization
// with the same root sets(see ScheduleAnalysis.coremapping)
- public MCImplSynthesis(State state,
+ public MCImplSynthesis(State state,
TaskAnalysis ta,
OwnershipAnalysis oa) {
this.state = state;
this.taskAnalysis = ta;
this.ownershipAnalysis = oa;
this.scheduleAnalysis = new ScheduleAnalysis(state,
- ta);
+ ta);
this.scheduleAnalysis.setCoreNum(this.coreNum);
this.scheduleSimulator = new ScheduleSimulator(this.coreNum,
- state,
- ta);
+ state,
+ ta);
this.scheduleThreshold = 1000;
this.probThreshold = 0;
this.generateThreshold = 30;
PrintStream stdout = null;
try {
if(!state.BAMBOOCOMPILETIME) {
- stdout = new PrintStream(
- new FileOutputStream(this.state.outputdir + "SimulatorResult_"
- + this.coreNum + ".out"));
+ stdout = new PrintStream(
+ new FileOutputStream(this.state.outputdir + "SimulatorResult_"
+ + this.coreNum + ".out"));
}
} catch (Exception e) {
// Sigh. Couldn't open the file.
Vector<Schedule> scheduling = null;
Vector<ScheduleNode> schedulinggraph = null;
int gid = 1;
-
+
// check all multi-parameter tasks
Vector<TaskDescriptor> multiparamtds = new Vector<TaskDescriptor>();
- Iterator it_tasks =
+ Iterator it_tasks =
this.state.getTaskSymbolTable().getDescriptorsIterator();
while(it_tasks.hasNext()) {
TaskDescriptor td = (TaskDescriptor)it_tasks.next();
if(td.numParameters() > 1) {
- multiparamtds.addElement(td);
+ multiparamtds.addElement(td);
}
}
it_tasks = null;
// generate multiple schedulings
this.scheduleAnalysis.setScheduleThreshold(this.scheduleThreshold);
- boolean tooptimize =
- this.scheduleAnalysis.schedule(this.generateThreshold,
+ boolean tooptimize =
+ this.scheduleAnalysis.schedule(this.generateThreshold,
this.skipThreshold,
multiparamtds);
if(this.generateThreshold > 5) {
this.scheduleSimulator.init();
Vector<Vector<ScheduleNode>> scheduleGraphs = null;
- Vector<Vector<ScheduleNode>> newscheduleGraphs =
+ Vector<Vector<ScheduleNode>> newscheduleGraphs =
this.scheduleAnalysis.getScheduleGraphs();
- Hashtable<TaskDescriptor, ClassDescriptor> td2maincd =
+ Hashtable<TaskDescriptor, ClassDescriptor> td2maincd =
this.scheduleAnalysis.getTd2maincd();
Vector<Vector<Schedule>> schedulings = new Vector<Vector<Schedule>>();
Vector<Integer> selectedSchedulings = new Vector<Integer>();
- Vector<SimExecutionNode> selectedSimExeGraphs =
+ Vector<SimExecutionNode> selectedSimExeGraphs =
new Vector<SimExecutionNode>();
SimExecutionNode selectedSimExeGraph_bk = null;
int threshold = this.scheduleThreshold;
// simulate the generated schedulings and try to optimize it
do {
- if(!state.BAMBOOCOMPILETIME) {
- System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
- System.out.print("Simulate and optimize round: #" + tryindex + ": \n");
- }
+ if(!state.BAMBOOCOMPILETIME) {
+ System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ System.out.print("Simulate and optimize round: #" + tryindex + ": \n");
+ }
gid += newscheduleGraphs.size();
if(scheduleGraphs != null) {
- for(int i = 0; i < scheduleGraphs.size(); i++) {
- Vector<ScheduleNode> tmpgraph = scheduleGraphs.elementAt(i);
- for(int j = 0; j < tmpgraph.size(); j++) {
- ScheduleNode snode = tmpgraph.elementAt(j);
- snode.getEdgeVector().clear();
- snode.getInedgeVector().clear();
- snode.getScheduleEdges().clear();
- snode.getClassNodes().clear();
- }
- tmpgraph.clear();
- tmpgraph = null;
- }
- scheduleGraphs.clear();
+ for(int i = 0; i < scheduleGraphs.size(); i++) {
+ Vector<ScheduleNode> tmpgraph = scheduleGraphs.elementAt(i);
+ for(int j = 0; j < tmpgraph.size(); j++) {
+ ScheduleNode snode = tmpgraph.elementAt(j);
+ snode.getEdgeVector().clear();
+ snode.getInedgeVector().clear();
+ snode.getScheduleEdges().clear();
+ snode.getClassNodes().clear();
+ }
+ tmpgraph.clear();
+ tmpgraph = null;
+ }
+ scheduleGraphs.clear();
}
scheduleGraphs = newscheduleGraphs;
schedulings.clear();
// get scheduling layouts from schedule graphs
for(int i = 0; i < scheduleGraphs.size(); i++) {
- Vector<ScheduleNode> scheduleGraph = scheduleGraphs.elementAt(i);
- Vector<Schedule> tmpscheduling =
- generateScheduling(scheduleGraph, td2maincd);
- schedulings.add(tmpscheduling);
- scheduleGraph = null;
- tmpscheduling = null;
+ Vector<ScheduleNode> scheduleGraph = scheduleGraphs.elementAt(i);
+ Vector<Schedule> tmpscheduling =
+ generateScheduling(scheduleGraph, td2maincd);
+ schedulings.add(tmpscheduling);
+ scheduleGraph = null;
+ tmpscheduling = null;
}
selectedSchedulings.clear();
selectedSimExeGraphs.clear();
- long tmpexetime = this.scheduleSimulator.simulate(schedulings,
- selectedSchedulings,
- selectedSimExeGraphs);
+ long tmpexetime = this.scheduleSimulator.simulate(schedulings,
+ selectedSchedulings,
+ selectedSimExeGraphs);
boolean remove = false;
if(tmpexetime < bestexetime) {
- remove = true;
- bestexetime = tmpexetime;
- if(scheduling != null) {
- scheduling.clear();
- for(int j = 0; j < schedulinggraph.size(); j++) {
- ScheduleNode snode = schedulinggraph.elementAt(j);
- snode.getEdgeVector().clear();
- snode.getInedgeVector().clear();
- snode.getScheduleEdges().clear();
- snode.getClassNodes().clear();
- }
- schedulinggraph.clear();
- selectedSimExeGraph_bk = null;
- }
- scheduling = schedulings.elementAt(selectedSchedulings.elementAt(0));
- schedulinggraph = scheduleGraphs.elementAt(
- selectedSchedulings.elementAt(0));
- selectedSimExeGraph_bk = selectedSimExeGraphs.elementAt(0);
-
- if(!state.BAMBOOCOMPILETIME) {
- System.out.print("end of: #" + tryindex + " (bestexetime: "
- + bestexetime + ")\n");
- System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
- }
- tryindex++;
- threshold = this.scheduleThreshold;
+ remove = true;
+ bestexetime = tmpexetime;
+ if(scheduling != null) {
+ scheduling.clear();
+ for(int j = 0; j < schedulinggraph.size(); j++) {
+ ScheduleNode snode = schedulinggraph.elementAt(j);
+ snode.getEdgeVector().clear();
+ snode.getInedgeVector().clear();
+ snode.getScheduleEdges().clear();
+ snode.getClassNodes().clear();
+ }
+ schedulinggraph.clear();
+ selectedSimExeGraph_bk = null;
+ }
+ scheduling = schedulings.elementAt(selectedSchedulings.elementAt(0));
+ schedulinggraph = scheduleGraphs.elementAt(
+ selectedSchedulings.elementAt(0));
+ selectedSimExeGraph_bk = selectedSimExeGraphs.elementAt(0);
+
+ if(!state.BAMBOOCOMPILETIME) {
+ System.out.print("end of: #" + tryindex + " (bestexetime: "
+ + bestexetime + ")\n");
+ System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ }
+ tryindex++;
+ threshold = this.scheduleThreshold;
} else if(tmpexetime == bestexetime) {
- if(!state.BAMBOOCOMPILETIME) {
- System.out.print("end of: #" + tryindex + " (bestexetime: "
- + bestexetime + ")\n");
- System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
- }
- tryindex++;
- threshold += 10;
- if((threshold > 40) ||
- ((Math.abs(rand.nextInt()) % 100) < this.probThreshold + 10)) {
- break;
- }
+ if(!state.BAMBOOCOMPILETIME) {
+ System.out.print("end of: #" + tryindex + " (bestexetime: "
+ + bestexetime + ")\n");
+ System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ }
+ tryindex++;
+ threshold += 10;
+ if((threshold > 40) ||
+ ((Math.abs(rand.nextInt()) % 100) < this.probThreshold + 10)) {
+ break;
+ }
} else {
- if(!state.BAMBOOCOMPILETIME) {
- System.out.print("end of: #" + tryindex + " (bestexetime: "
- + bestexetime + ")\n");
- System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
- }
- tryindex++;
- if(threshold == this.scheduleThreshold) {
- if(scheduleGraphs != null) {
- scheduleGraphs.clear();
- }
- scheduleGraphs.addElement(schedulinggraph);
- if(selectedSchedulings != null) {
- selectedSchedulings.clear();
- }
- selectedSchedulings.addElement(Integer.valueOf(0));
- if(selectedSimExeGraphs != null) {
- selectedSimExeGraphs.clear();
- }
- selectedSimExeGraphs.addElement(selectedSimExeGraph_bk);
- }
- threshold += 10;
- if( (threshold > 40) ||
- ((Math.abs(rand.nextInt()) % 100) < this.probThreshold + 1)) {
- break;
- }
- break;
+ if(!state.BAMBOOCOMPILETIME) {
+ System.out.print("end of: #" + tryindex + " (bestexetime: "
+ + bestexetime + ")\n");
+ System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ }
+ tryindex++;
+ if(threshold == this.scheduleThreshold) {
+ if(scheduleGraphs != null) {
+ scheduleGraphs.clear();
+ }
+ scheduleGraphs.addElement(schedulinggraph);
+ if(selectedSchedulings != null) {
+ selectedSchedulings.clear();
+ }
+ selectedSchedulings.addElement(Integer.valueOf(0));
+ if(selectedSimExeGraphs != null) {
+ selectedSimExeGraphs.clear();
+ }
+ selectedSimExeGraphs.addElement(selectedSimExeGraph_bk);
+ }
+ threshold += 10;
+ if( (threshold > 40) ||
+ ((Math.abs(rand.nextInt()) % 100) < this.probThreshold + 1)) {
+ break;
+ }
+ break;
}
if(tooptimize) {
- // try to optimize the best one scheduling
- //do {
- newscheduleGraphs = optimizeScheduling(scheduleGraphs,
- selectedSchedulings,
- selectedSimExeGraphs,
- gid,
- threshold);
- /*if(newscheduleGraphs != null) {
- if(this.generateThreshold < 30) {
- this.generateThreshold = 30;
- }
- break;
+ // try to optimize the best one scheduling
+ //do {
+ newscheduleGraphs = optimizeScheduling(scheduleGraphs,
+ selectedSchedulings,
+ selectedSimExeGraphs,
+ gid,
+ threshold);
+ /*if(newscheduleGraphs != null) {
+ if(this.generateThreshold < 30) {
+ this.generateThreshold = 30;
+ }
+ break;
+ } else {
+ threshold += 10;
+ if(this.generateThreshold > 0) {
+ this.generateThreshold -= 3;
+ }
+ if((Math.abs(rand.nextInt()) % 10000) < this.probThreshold + 1) {
+ break;
+ }
+ }
+ }while(true);*/
+ if(remove) {
+ scheduleGraphs.removeElementAt(selectedSchedulings.elementAt(0));
+ selectedSimExeGraphs.removeElementAt(0);
+ }
} else {
- threshold += 10;
- if(this.generateThreshold > 0) {
- this.generateThreshold -= 3;
- }
- if((Math.abs(rand.nextInt()) % 10000) < this.probThreshold + 1) {
- break;
- }
+ break;
}
- }while(true);*/
- if(remove) {
- scheduleGraphs.removeElementAt(selectedSchedulings.elementAt(0));
- selectedSimExeGraphs.removeElementAt(0);
- }
- } else {
- break;
- }
- }while(newscheduleGraphs != null); // TODO: could it possibly lead to endless loop?
+ } while(newscheduleGraphs != null); // TODO: could it possibly lead to endless loop?
if(scheduleGraphs != null) {
scheduleGraphs.clear();
td2maincd = null;
if(!state.BAMBOOCOMPILETIME) {
- System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
}
System.out.print("selected bestexetime: " + bestexetime + "\n");
if(!state.BAMBOOCOMPILETIME) {
- String path = this.state.outputdir + "scheduling_selected.dot";
- SchedulingUtil.printScheduleGraph(path, schedulinggraph);
+ String path = this.state.outputdir + "scheduling_selected.dot";
+ SchedulingUtil.printScheduleGraph(path, schedulinggraph);
}
// Close the streams.
try {
if(!state.BAMBOOCOMPILETIME) {
- stdout.close();
- stdout = null;
- System.setOut(origOut);
+ stdout.close();
+ stdout = null;
+ System.setOut(origOut);
}
} catch (Exception e) {
origOut.println("Redirect: Unable to close files!");
PrintStream stdout = null;
try {
stdout = new PrintStream(
- new FileOutputStream(this.state.outputdir + "SimulatorResult_"
- + this.coreNum + ".out"));
+ new FileOutputStream(this.state.outputdir + "SimulatorResult_"
+ + this.coreNum + ".out"));
} catch (Exception e) {
// Sigh. Couldn't open the file.
System.out.println("Redirect: Unable to open output file!");
if(isall) {
// check all multi-parameter tasks
Vector<TaskDescriptor> multiparamtds = new Vector<TaskDescriptor>();
- Iterator it_tasks =
+ Iterator it_tasks =
this.state.getTaskSymbolTable().getDescriptorsIterator();
while(it_tasks.hasNext()) {
- TaskDescriptor td = (TaskDescriptor)it_tasks.next();
- if(td.numParameters() > 1) {
- multiparamtds.addElement(td);
- }
+ TaskDescriptor td = (TaskDescriptor)it_tasks.next();
+ if(td.numParameters() > 1) {
+ multiparamtds.addElement(td);
+ }
}
it_tasks = null;
-
+
// Generate all possible schedulings
//this.scheduleAnalysis.setScheduleThreshold(Integer.MAX_VALUE);
//this.scheduleAnalysis.schedule(-1, multiparamtds);
this.scheduleAnalysis.setScheduleThreshold(10000);
- this.scheduleAnalysis.schedule(80,
+ this.scheduleAnalysis.schedule(80,
20, // might skip
multiparamtds);
this.scheduleSimulator.init();
- Vector<Vector<ScheduleNode>> totestscheduleGraphs =
+ Vector<Vector<ScheduleNode>> totestscheduleGraphs =
this.scheduleAnalysis.getScheduleGraphs();
- Hashtable<TaskDescriptor, ClassDescriptor> td2maincd =
+ Hashtable<TaskDescriptor, ClassDescriptor> td2maincd =
this.scheduleAnalysis.getTd2maincd();
Vector<Vector<Schedule>> schedulings = new Vector<Vector<Schedule>>();
Vector<Integer> selectedSchedulings = new Vector<Integer>();
- Vector<SimExecutionNode> selectedSimExeGraphs =
+ Vector<SimExecutionNode> selectedSimExeGraphs =
new Vector<SimExecutionNode>();
-
+
File file=new File(this.state.outputdir+"distributeinfo_s_"+this.coreNum
+".out");
- FileOutputStream dotstream = null;
+ FileOutputStream dotstream = null;
try {
- dotstream = new FileOutputStream(file,false);
+ dotstream = new FileOutputStream(file,false);
} catch (Exception e) {
- e.printStackTrace();
- System.exit(-1);
+ e.printStackTrace();
+ System.exit(-1);
}
PrintWriter output = new java.io.PrintWriter(dotstream, true);
- output.println("start time(1,000,000 cycles): "
+ output.println("start time(1,000,000 cycles): "
+ totestscheduleGraphs.size());
for(int ii = 0; ii < totestscheduleGraphs.size(); ii++) {
- Vector<Vector<ScheduleNode>> newscheduleGraphs =
- new Vector<Vector<ScheduleNode>>();
- newscheduleGraphs.add(totestscheduleGraphs.elementAt(ii));
- // simulate the generated schedulings and try to optimize it
- schedulings.clear();
- // get scheduling layouts from schedule graphs
- for(int i = 0; i < newscheduleGraphs.size(); i++) {
- Vector<ScheduleNode> scheduleGraph = newscheduleGraphs.elementAt(i);
- Vector<Schedule> tmpscheduling =
- generateScheduling(scheduleGraph, td2maincd);
- schedulings.add(tmpscheduling);
- scheduleGraph = null;
- tmpscheduling = null;
- }
- selectedSchedulings.clear();
- selectedSimExeGraphs.clear();
- long tmpexetime = this.scheduleSimulator.simulate(schedulings,
- selectedSchedulings,
- selectedSimExeGraphs);
- output.println(((float)tmpexetime/100000000));
+ Vector<Vector<ScheduleNode>> newscheduleGraphs =
+ new Vector<Vector<ScheduleNode>>();
+ newscheduleGraphs.add(totestscheduleGraphs.elementAt(ii));
+ // simulate the generated schedulings and try to optimize it
+ schedulings.clear();
+ // get scheduling layouts from schedule graphs
+ for(int i = 0; i < newscheduleGraphs.size(); i++) {
+ Vector<ScheduleNode> scheduleGraph = newscheduleGraphs.elementAt(i);
+ Vector<Schedule> tmpscheduling =
+ generateScheduling(scheduleGraph, td2maincd);
+ schedulings.add(tmpscheduling);
+ scheduleGraph = null;
+ tmpscheduling = null;
+ }
+ selectedSchedulings.clear();
+ selectedSimExeGraphs.clear();
+ long tmpexetime = this.scheduleSimulator.simulate(schedulings,
+ selectedSchedulings,
+ selectedSimExeGraphs);
+ output.println(((float)tmpexetime/100000000));
}
} else {
// check all multi-parameter tasks
Vector<TaskDescriptor> multiparamtds = new Vector<TaskDescriptor>();
- Iterator it_tasks =
+ Iterator it_tasks =
this.state.getTaskSymbolTable().getDescriptorsIterator();
while(it_tasks.hasNext()) {
- TaskDescriptor td = (TaskDescriptor)it_tasks.next();
- if(td.numParameters() > 1) {
- multiparamtds.addElement(td);
- }
+ TaskDescriptor td = (TaskDescriptor)it_tasks.next();
+ if(td.numParameters() > 1) {
+ multiparamtds.addElement(td);
+ }
}
it_tasks = null;
-
+
// generate multiple schedulings
this.scheduleThreshold = 20;
this.generateThreshold = 30;
this.probThreshold = 0;
this.scheduleAnalysis.setScheduleThreshold(1000);
- boolean tooptimize =
- this.scheduleAnalysis.schedule(this.generateThreshold,
+ boolean tooptimize =
+ this.scheduleAnalysis.schedule(this.generateThreshold,
60, // might skip
multiparamtds);
this.scheduleSimulator.init();
Vector<Vector<ScheduleNode>> scheduleGraphs = null;
- Vector<Vector<ScheduleNode>> totestscheduleGraphs =
+ Vector<Vector<ScheduleNode>> totestscheduleGraphs =
this.scheduleAnalysis.getScheduleGraphs();
- Hashtable<TaskDescriptor, ClassDescriptor> td2maincd =
+ Hashtable<TaskDescriptor, ClassDescriptor> td2maincd =
this.scheduleAnalysis.getTd2maincd();
Vector<Vector<Schedule>> schedulings = new Vector<Vector<Schedule>>();
Vector<Integer> selectedSchedulings = new Vector<Integer>();
- Vector<SimExecutionNode> selectedSimExeGraphs =
+ Vector<SimExecutionNode> selectedSimExeGraphs =
new Vector<SimExecutionNode>();
SimExecutionNode selectedSimExeGraph_bk = null;
- File file=new File(this.state.outputdir + "distributeinfo_s_"
+ File file=new File(this.state.outputdir + "distributeinfo_s_"
+ this.coreNum + ".out");
- FileOutputStream dotstream = null;
- File file2=new File(this.state.outputdir + "distributeinfo_o_"
+ FileOutputStream dotstream = null;
+ File file2=new File(this.state.outputdir + "distributeinfo_o_"
+ this.coreNum + ".out");
- FileOutputStream dotstream2 = null;
+ FileOutputStream dotstream2 = null;
try {
- dotstream = new FileOutputStream(file,false);
- dotstream2 = new FileOutputStream(file2,false);
+ dotstream = new FileOutputStream(file,false);
+ dotstream2 = new FileOutputStream(file2,false);
} catch (Exception e) {
- e.printStackTrace();
- System.exit(-1);
+ e.printStackTrace();
+ System.exit(-1);
}
PrintWriter output = new java.io.PrintWriter(dotstream, true);
PrintWriter output2 = new java.io.PrintWriter(dotstream2, true);
- output.println("start time(100,000,000 cycles): "
+ output.println("start time(100,000,000 cycles): "
+ totestscheduleGraphs.size());
- output2.println("optimized time(100,000,000 cycles): "
+ output2.println("optimized time(100,000,000 cycles): "
+ totestscheduleGraphs.size());
for(int ii = startnum; ii < totestscheduleGraphs.size(); ii++) {
- Vector<Vector<ScheduleNode>> newscheduleGraphs =
- new Vector<Vector<ScheduleNode>>();
- newscheduleGraphs.add(totestscheduleGraphs.elementAt(ii));
- int tryindex = 1;
- long bestexetime = Long.MAX_VALUE;
- int gid = 1;
- Vector<Schedule> scheduling = null;
- Vector<ScheduleNode> schedulinggraph = null;
- boolean isfirst = true;
- Random rand = new Random();
- int threshold = this.scheduleThreshold;
- // simulate the generated schedulings and try to optimize it
- System.out.print("=========================================================\n");
- System.out.print("# " + ii + ": \n");
- do {
- System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
- System.out.print("Simulate and optimize round: #" + tryindex + ": \n");
- gid += newscheduleGraphs.size();
- if(scheduleGraphs != null) {
- for(int i = 0; i < scheduleGraphs.size(); i++) {
- Vector<ScheduleNode> tmpgraph = scheduleGraphs.elementAt(i);
- for(int j = 0; j < tmpgraph.size(); j++) {
- ScheduleNode snode = tmpgraph.elementAt(j);
- snode.getEdgeVector().clear();
- snode.getInedgeVector().clear();
- snode.getScheduleEdges().clear();
- snode.getClassNodes().clear();
- }
- tmpgraph.clear();
- tmpgraph = null;
- }
- scheduleGraphs.clear();
- }
- scheduleGraphs = newscheduleGraphs;
- schedulings.clear();
- // get scheduling layouts from schedule graphs
- for(int i = 0; i < scheduleGraphs.size(); i++) {
- Vector<ScheduleNode> scheduleGraph = scheduleGraphs.elementAt(i);
- Vector<Schedule> tmpscheduling =
- generateScheduling(scheduleGraph, td2maincd);
- schedulings.add(tmpscheduling);
- scheduleGraph = null;
- tmpscheduling = null;
- }
- selectedSchedulings.clear();
- selectedSimExeGraphs.clear();
- long tmpexetime = this.scheduleSimulator.simulate(schedulings,
- selectedSchedulings,
- selectedSimExeGraphs);
- if(isfirst) {
- output.println(((float)tmpexetime/100000000));
- isfirst = false;
- }
- if(tmpexetime < bestexetime) {
- bestexetime = tmpexetime;
- if(scheduling != null) {
- scheduling.clear();
- for(int j = 0; j < schedulinggraph.size(); j++) {
- ScheduleNode snode = schedulinggraph.elementAt(j);
- snode.getEdgeVector().clear();
- snode.getInedgeVector().clear();
- snode.getScheduleEdges().clear();
- snode.getClassNodes().clear();
- }
- schedulinggraph.clear();
- selectedSimExeGraph_bk = null;
- }
- scheduling = schedulings.elementAt(selectedSchedulings.elementAt(0));
- schedulinggraph = scheduleGraphs.elementAt(
- selectedSchedulings.elementAt(0));
- selectedSimExeGraph_bk = selectedSimExeGraphs.elementAt(0);
- tryindex++;
- threshold = this.scheduleThreshold;
- System.out.print("end of: #" + tryindex + " (bestexetime: "
- + bestexetime + ")\n");
- System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
- } else if(tmpexetime == bestexetime) {
- System.out.print("end of: #" + tryindex + " (bestexetime: "
- + bestexetime + ")\n");
- System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
- tryindex++;
- threshold = this.scheduleThreshold;
- if((Math.abs(rand.nextInt()) % 100) < this.probThreshold) {
- break;
- }
- } else {
- System.out.print("end of: #" + tryindex + " (bestexetime: "
- + bestexetime + ")\n");
- System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
- tryindex++;
- if(threshold == this.scheduleThreshold) {
- if(scheduleGraphs != null) {
- scheduleGraphs.clear();
- }
- scheduleGraphs.addElement(schedulinggraph);
- if(selectedSchedulings != null) {
- selectedSchedulings.clear();
- }
- selectedSchedulings.addElement(Integer.valueOf(0));
- if(selectedSimExeGraphs != null) {
- selectedSimExeGraphs.clear();
- }
- selectedSimExeGraphs.addElement(selectedSimExeGraph_bk);
- }
- threshold += 10;
- if((Math.abs(rand.nextInt()) % 100) < this.probThreshold + 1) {
- break;
- }
- //break;
- }
-
- if(tooptimize) {
- // try to optimize theschedulings best one scheduling
- newscheduleGraphs = optimizeScheduling(scheduleGraphs,
- selectedSchedulings,
- selectedSimExeGraphs,
- gid,
- this.scheduleThreshold);
- if(tmpexetime < bestexetime) {
- scheduleGraphs.remove(selectedSchedulings.elementAt(0));
- }
- } else {
- break;
- }
- }while(newscheduleGraphs != null); // TODO: could it possibly lead to endless loop?
-
- scheduleGraphs.clear();
- scheduleGraphs = null;
- scheduling = null;
- schedulinggraph = null;
- if(newscheduleGraphs != null) {
- newscheduleGraphs.clear();
- }
- newscheduleGraphs = null;
- totestscheduleGraphs.elementAt(ii).clear();
- for(int i = 0; i < schedulings.size(); i++) {
- schedulings.elementAt(i).clear();
- }
- schedulings.clear();
- selectedSchedulings.clear();
- selectedSimExeGraphs.clear();
-
- output2.println(((float)bestexetime/100000000));
- System.out.print("=========================================================\n");
+ Vector<Vector<ScheduleNode>> newscheduleGraphs =
+ new Vector<Vector<ScheduleNode>>();
+ newscheduleGraphs.add(totestscheduleGraphs.elementAt(ii));
+ int tryindex = 1;
+ long bestexetime = Long.MAX_VALUE;
+ int gid = 1;
+ Vector<Schedule> scheduling = null;
+ Vector<ScheduleNode> schedulinggraph = null;
+ boolean isfirst = true;
+ Random rand = new Random();
+ int threshold = this.scheduleThreshold;
+ // simulate the generated schedulings and try to optimize it
+ System.out.print("=========================================================\n");
+ System.out.print("# " + ii + ": \n");
+ do {
+ System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ System.out.print("Simulate and optimize round: #" + tryindex + ": \n");
+ gid += newscheduleGraphs.size();
+ if(scheduleGraphs != null) {
+ for(int i = 0; i < scheduleGraphs.size(); i++) {
+ Vector<ScheduleNode> tmpgraph = scheduleGraphs.elementAt(i);
+ for(int j = 0; j < tmpgraph.size(); j++) {
+ ScheduleNode snode = tmpgraph.elementAt(j);
+ snode.getEdgeVector().clear();
+ snode.getInedgeVector().clear();
+ snode.getScheduleEdges().clear();
+ snode.getClassNodes().clear();
+ }
+ tmpgraph.clear();
+ tmpgraph = null;
+ }
+ scheduleGraphs.clear();
+ }
+ scheduleGraphs = newscheduleGraphs;
+ schedulings.clear();
+ // get scheduling layouts from schedule graphs
+ for(int i = 0; i < scheduleGraphs.size(); i++) {
+ Vector<ScheduleNode> scheduleGraph = scheduleGraphs.elementAt(i);
+ Vector<Schedule> tmpscheduling =
+ generateScheduling(scheduleGraph, td2maincd);
+ schedulings.add(tmpscheduling);
+ scheduleGraph = null;
+ tmpscheduling = null;
+ }
+ selectedSchedulings.clear();
+ selectedSimExeGraphs.clear();
+ long tmpexetime = this.scheduleSimulator.simulate(schedulings,
+ selectedSchedulings,
+ selectedSimExeGraphs);
+ if(isfirst) {
+ output.println(((float)tmpexetime/100000000));
+ isfirst = false;
+ }
+ if(tmpexetime < bestexetime) {
+ bestexetime = tmpexetime;
+ if(scheduling != null) {
+ scheduling.clear();
+ for(int j = 0; j < schedulinggraph.size(); j++) {
+ ScheduleNode snode = schedulinggraph.elementAt(j);
+ snode.getEdgeVector().clear();
+ snode.getInedgeVector().clear();
+ snode.getScheduleEdges().clear();
+ snode.getClassNodes().clear();
+ }
+ schedulinggraph.clear();
+ selectedSimExeGraph_bk = null;
+ }
+ scheduling = schedulings.elementAt(selectedSchedulings.elementAt(0));
+ schedulinggraph = scheduleGraphs.elementAt(
+ selectedSchedulings.elementAt(0));
+ selectedSimExeGraph_bk = selectedSimExeGraphs.elementAt(0);
+ tryindex++;
+ threshold = this.scheduleThreshold;
+ System.out.print("end of: #" + tryindex + " (bestexetime: "
+ + bestexetime + ")\n");
+ System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ } else if(tmpexetime == bestexetime) {
+ System.out.print("end of: #" + tryindex + " (bestexetime: "
+ + bestexetime + ")\n");
+ System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ tryindex++;
+ threshold = this.scheduleThreshold;
+ if((Math.abs(rand.nextInt()) % 100) < this.probThreshold) {
+ break;
+ }
+ } else {
+ System.out.print("end of: #" + tryindex + " (bestexetime: "
+ + bestexetime + ")\n");
+ System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ tryindex++;
+ if(threshold == this.scheduleThreshold) {
+ if(scheduleGraphs != null) {
+ scheduleGraphs.clear();
+ }
+ scheduleGraphs.addElement(schedulinggraph);
+ if(selectedSchedulings != null) {
+ selectedSchedulings.clear();
+ }
+ selectedSchedulings.addElement(Integer.valueOf(0));
+ if(selectedSimExeGraphs != null) {
+ selectedSimExeGraphs.clear();
+ }
+ selectedSimExeGraphs.addElement(selectedSimExeGraph_bk);
+ }
+ threshold += 10;
+ if((Math.abs(rand.nextInt()) % 100) < this.probThreshold + 1) {
+ break;
+ }
+ //break;
+ }
+
+ if(tooptimize) {
+ // try to optimize the best one scheduling
+ newscheduleGraphs = optimizeScheduling(scheduleGraphs,
+ selectedSchedulings,
+ selectedSimExeGraphs,
+ gid,
+ this.scheduleThreshold);
+ if(tmpexetime < bestexetime) {
+ scheduleGraphs.remove(selectedSchedulings.elementAt(0));
+ }
+ } else {
+ break;
+ }
+ } while(newscheduleGraphs != null); // TODO: could it possibly lead to endless loop?
+
+ scheduleGraphs.clear();
+ scheduleGraphs = null;
+ scheduling = null;
+ schedulinggraph = null;
+ if(newscheduleGraphs != null) {
+ newscheduleGraphs.clear();
+ }
+ newscheduleGraphs = null;
+ totestscheduleGraphs.elementAt(ii).clear();
+ for(int i = 0; i < schedulings.size(); i++) {
+ schedulings.elementAt(i).clear();
+ }
+ schedulings.clear();
+ selectedSchedulings.clear();
+ selectedSimExeGraphs.clear();
+
+ output2.println(((float)bestexetime/100000000));
+ System.out.print("=========================================================\n");
}
if(scheduleGraphs != null) {
- scheduleGraphs.clear();
+ scheduleGraphs.clear();
}
scheduleGraphs = null;
totestscheduleGraphs = null;
for(int i = 0; i < schedulings.size(); i++) {
- schedulings.elementAt(i).clear();
+ schedulings.elementAt(i).clear();
}
schedulings.clear();
schedulings = null;
// Close the streams.
try {
- output.close();
- stdout.close();
- output = null;
- stdout = null;
- System.setOut(origOut);
+ output.close();
+ stdout.close();
+ output = null;
+ stdout = null;
+ System.setOut(origOut);
} catch (Exception e) {
- origOut.println("Redirect: Unable to close files!");
+ origOut.println("Redirect: Unable to close files!");
}
}
return;
}
- private Vector<Vector<ScheduleNode>>
+ private Vector<Vector<ScheduleNode>>
optimizeScheduling(Vector<Vector<ScheduleNode>> scheduleGraphs,
Vector<Integer> selectedScheduleGraphs,
Vector<SimExecutionNode> selectedSimExeGraphs,
for(int i = 0; i < selectedScheduleGraphs.size(); i++) {
Vector<ScheduleNode> schedulegraph = scheduleGraphs.elementAt(
- selectedScheduleGraphs.elementAt(i));
+ selectedScheduleGraphs.elementAt(i));
SimExecutionNode startnode = selectedSimExeGraphs.elementAt(i);
- Vector<SimExecutionEdge> criticalPath = analyzeCriticalPath(startnode);
+ Vector<SimExecutionEdge> criticalPath = analyzeCriticalPath(startnode);
// for Test
if(this.state.PRINTCRITICALPATH) {
- System.err.println("gid: " + lgid + " endpoint: " + startnode.getTimepoint());
+ System.err.println("gid: " + lgid + " endpoint: " + startnode.getTimepoint());
}
- Vector<Vector<ScheduleNode>> tmposchedulegraphs =
- optimizeCriticalPath(schedulegraph,
+ Vector<Vector<ScheduleNode>> tmposchedulegraphs =
+ optimizeCriticalPath(schedulegraph,
criticalPath,
lgid,
left);
if(tmposchedulegraphs != null) {
- if(optimizeschedulegraphs == null) {
- optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
- }
- optimizeschedulegraphs.addAll(tmposchedulegraphs);
- lgid += tmposchedulegraphs.size();
- left -= tmposchedulegraphs.size();
- if(left == 0) {
- schedulegraph = null;
- criticalPath = null;
- tmposchedulegraphs = null;
- break;
- }
- }
+ if(optimizeschedulegraphs == null) {
+ optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
+ }
+ optimizeschedulegraphs.addAll(tmposchedulegraphs);
+ lgid += tmposchedulegraphs.size();
+ left -= tmposchedulegraphs.size();
+ if(left == 0) {
+ schedulegraph = null;
+ criticalPath = null;
+ tmposchedulegraphs = null;
+ break;
+ }
+ }
schedulegraph = null;
criticalPath = null;
tmposchedulegraphs = null;
return optimizeschedulegraphs;
}
- private Vector<SimExecutionEdge>
+ private Vector<SimExecutionEdge>
analyzeCriticalPath(SimExecutionNode startnode) {
// first figure out the critical path
Vector<SimExecutionEdge> criticalPath = new Vector<SimExecutionEdge>();
// TODO: currently only get one critical path. It's possible that there are
// multiple critical paths and some of them can not be optimized while others
// can. Need to fix up for this situation.
- private long getCriticalPath(SimExecutionNode startnode,
+ private long getCriticalPath(SimExecutionNode startnode,
Vector<SimExecutionEdge> criticalPath) {
long sum = 0;
SimExecutionNode snode = startnode;
// go reversely to find the critical path
while(snode != null) {
SimExecutionNode nsnode = null;
- Iterator<SimExecutionEdge> it_iedges =
+ Iterator<SimExecutionEdge> it_iedges =
(Iterator<SimExecutionEdge>)snode.inedges();
while(it_iedges.hasNext()) {
- SimExecutionEdge sedge = it_iedges.next();
- //if(sedge.getWeight() != 0) {
- SimExecutionNode tsnode = (SimExecutionNode)(sedge.getSource());
- if(tsnode.getTimepoint() + sedge.getWeight() == snode.getTimepoint()) {
- nsnode = tsnode;
- criticalPath.insertElementAt(sedge, 0);
- sum += sedge.getWeight();
- break;
- }
- //}
+ SimExecutionEdge sedge = it_iedges.next();
+ //if(sedge.getWeight() != 0) {
+ SimExecutionNode tsnode = (SimExecutionNode)(sedge.getSource());
+ if(tsnode.getTimepoint() + sedge.getWeight() == snode.getTimepoint()) {
+ nsnode = tsnode;
+ criticalPath.insertElementAt(sedge, 0);
+ sum += sedge.getWeight();
+ break;
+ }
+ //}
}
it_iedges = null;
snode = nsnode;
SimExecutionEdge seedge = criticalPath.elementAt(i);
Vector<SimExecutionEdge> predicates = seedge.getPredicates();
if(predicates != null) {
- // have predicates
- long starttime = 0;
- // check the latest finish time of all the predicates
- for(int j = 0; j < predicates.size(); j++) {
- SimExecutionEdge predicate = predicates.elementAt(j);
- long tmptime = predicate.getBestStartPoint() + predicate.getWeight();
- if(tmptime > starttime) {
- starttime = tmptime;
- seedge.setLastpredicateEdge(predicate);
- if(predicate.getTd() != null) {
- seedge.setLastpredicateNode(
- (SimExecutionNode)predicate.getTarget());
- } else {
- // transfer edge
- seedge.setLastpredicateNode(
- (SimExecutionNode)predicate.getSource());
- }
- }
- }
- seedge.setBestStartPoint(starttime);
+ // have predicates
+ long starttime = 0;
+ // check the latest finish time of all the predicates
+ for(int j = 0; j < predicates.size(); j++) {
+ SimExecutionEdge predicate = predicates.elementAt(j);
+ long tmptime = predicate.getBestStartPoint() + predicate.getWeight();
+ if(tmptime > starttime) {
+ starttime = tmptime;
+ seedge.setLastpredicateEdge(predicate);
+ if(predicate.getTd() != null) {
+ seedge.setLastpredicateNode(
+ (SimExecutionNode)predicate.getTarget());
+ } else {
+ // transfer edge
+ seedge.setLastpredicateNode(
+ (SimExecutionNode)predicate.getSource());
+ }
+ }
+ }
+ seedge.setBestStartPoint(starttime);
} else if(seedge.getSource().getInedgeVector().size() > 0) {
- // should have only one in edge
- long starttime = ((SimExecutionNode)seedge.getSource()).getTimepoint();
- seedge.setBestStartPoint(starttime);
+ // should have only one in edge
+ long starttime = ((SimExecutionNode)seedge.getSource()).getTimepoint();
+ seedge.setBestStartPoint(starttime);
} else {
- // no predicates
- seedge.setBestStartPoint(0);
+ // no predicates
+ seedge.setBestStartPoint(0);
}
predicates = null;
}
}
- private Vector<Vector<ScheduleNode>>
+ private Vector<Vector<ScheduleNode>>
optimizeCriticalPath(Vector<ScheduleNode> scheduleGraph,
Vector<SimExecutionEdge> criticalPath,
int gid,
// for test, print out the criticalPath
if(this.state.PRINTCRITICALPATH) {
- SchedulingUtil.printCriticalPath(this.state.outputdir + "criticalpath_"
+ SchedulingUtil.printCriticalPath(this.state.outputdir + "criticalpath_"
+ lgid + ".dot", criticalPath);
}
long opcheckpoint = Long.MAX_VALUE;
Vector<Integer> sparecores = null;
// group according to core index
- Hashtable<Long, Hashtable<Integer, Vector<SimExecutionEdge>>> toselects =
+ Hashtable<Long, Hashtable<Integer, Vector<SimExecutionEdge>>> toselects =
new Hashtable<Long, Hashtable<Integer, Vector<SimExecutionEdge>>>();
Random rand = new Random();
for(int i = 0; i < criticalPath.size(); i++) {
SimExecutionEdge seedge = criticalPath.elementAt(i);
long starttime = seedge.getBestStartPoint();
- if((starttime < ((SimExecutionNode)seedge.getSource()).getTimepoint())
- && (seedge.getTd() != null)){
- // Note: must be a task related edge, can not be an object transfer edge
- // no restrictions due to data dependencies
- // have potential to be parallelled and start execution earlier
- seedge.setFixedTime(false);
- // consider to optimize it only when its predicates can NOT
- // be optimized, otherwise first considering optimize its predicates
- //SimExecutionEdge lastpredicateedge = seedge.getLastpredicateEdge();
- // TODO
- //if(lastpredicateedge.isFixedTime()) {
- int corenum = seedge.getCoreNum();
- if(!toselects.containsKey(starttime)) {
- toselects.put(starttime,
- new Hashtable<Integer, Vector<SimExecutionEdge>>());
- }
- if(!toselects.get(starttime).containsKey(corenum)) {
- toselects.get(starttime).put(corenum,
- new Vector<SimExecutionEdge>());
- }
- toselects.get(starttime).get(corenum).add(seedge);
- //}
+ if((starttime < ((SimExecutionNode)seedge.getSource()).getTimepoint())
+ && (seedge.getTd() != null)) {
+ // Note: must be a task related edge, can not be an object transfer edge
+ // no restrictions due to data dependencies
+ // have potential to be parallelized and start execution earlier
+ seedge.setFixedTime(false);
+ // consider to optimize it only when its predicates can NOT
+ // be optimized, otherwise first considering optimize its predicates
+ //SimExecutionEdge lastpredicateedge = seedge.getLastpredicateEdge();
+ // TODO
+ //if(lastpredicateedge.isFixedTime()) {
+ int corenum = seedge.getCoreNum();
+ if(!toselects.containsKey(starttime)) {
+ toselects.put(starttime,
+ new Hashtable<Integer, Vector<SimExecutionEdge>>());
+ }
+ if(!toselects.get(starttime).containsKey(corenum)) {
+ toselects.get(starttime).put(corenum,
+ new Vector<SimExecutionEdge>());
+ }
+ toselects.get(starttime).get(corenum).add(seedge);
+ //}
}
}
- // Randomly choose the tasks to optimize(previously only
- // Randomly choose the tasks to optimize(previously only
+ // Randomly choose the tasks to optimize (previously only
// consider the tasks with smallest best start time)
Vector<Long> keys = new Vector<Long>(toselects.keySet());
- do{
+ do {
int length = keys.size();
if(length == 0) {
- return optimizeschedulegraphs;
+ return optimizeschedulegraphs;
}
int tochoose = Math.abs(rand.nextInt()) % length;
opcheckpoint = (keys.elementAt(tochoose)).longValue();
keys.removeElementAt(tochoose);
- Hashtable<Integer, Vector<SimExecutionEdge>> tooptimize =
+ Hashtable<Integer, Vector<SimExecutionEdge>> tooptimize =
toselects.get(opcheckpoint);
- SimExecutionEdge seedge =
+ SimExecutionEdge seedge =
tooptimize.values().iterator().next().elementAt(0);
SimExecutionNode lastpredicatenode = seedge.getLastpredicateNode();
SimExecutionEdge lastpredicateedge = seedge.getLastpredicateEdge();
long timepoint = lastpredicatenode.getTimepoint();
if(lastpredicateedge.getTd() == null) {
- // transfer edge
- timepoint += lastpredicateedge.getWeight();
+ // transfer edge
+ timepoint += lastpredicateedge.getWeight();
}
// mapping to critical path
for(int index = 0; index < criticalPath.size(); index++) {
- SimExecutionEdge tmpseedge = criticalPath.elementAt(index);
- SimExecutionNode tmpsenode =
- (SimExecutionNode)tmpseedge.getTarget();
- if(tmpsenode.getTimepoint() > timepoint) {
- // get the spare core info
- sparecores = tmpsenode.getSpareCores();
- break;
- }
+ SimExecutionEdge tmpseedge = criticalPath.elementAt(index);
+ SimExecutionNode tmpsenode =
+ (SimExecutionNode)tmpseedge.getTarget();
+ if(tmpsenode.getTimepoint() > timepoint) {
+ // get the spare core info
+ sparecores = tmpsenode.getSpareCores();
+ break;
+ }
}
if(tooptimize.size() > 0) {
- Iterator<Integer> it_cores = tooptimize.keySet().iterator();
- // check if it is possible to optimize these tasks
- if((sparecores == null) || (sparecores.size() == 0)) {
- // lack of spare cores
- while(it_cores.hasNext()) {
- int corenum = it_cores.next();
- Vector<SimExecutionEdge> tmptasks = tooptimize.get(corenum);
- // group the task instantiations according to whether it
- // has backward data dependences or not
- Vector<SimExecutionEdge> candidatetasks = new Vector();
- for(int ii= 0; ii < tmptasks.size(); ii++) {
- SimExecutionEdge tmpseedge = tmptasks.elementAt(ii);
- SimExecutionNode target = (SimExecutionNode)tmpseedge.getTarget();
- Vector<SimExecutionEdge> children =
- (Vector<SimExecutionEdge>)target.getEdgeVector();
- int jj = 0;
- for(; jj < children.size(); jj++) {
- SimExecutionEdge tmpedge = children.elementAt(jj);
- if(tmpedge.getTd() != null) {
- Vector<SimExecutionEdge> predicates =
- tmpedge.getPredicates();
- if((predicates != null) &&
- (predicates.contains(tmpseedge))) {
- break;
- }
- predicates = null;
- } else if(tmpedge.getWeight() != 0) {
- // transfer edge
- if(((SimExecutionNode)tmpedge.getTarget()).getTimepoint()
- == tmpedge.getWeight() + target.getTimepoint()) {
- break;
- }
- }
- }
- if(jj == children.size()) {
- candidatetasks.add(tmpseedge);
- }
- }
- if((candidatetasks.size() > 0) &&
- (candidatetasks.size() < tmptasks.size())) {
- // there are less important tasks which have no backward
- // data dependences at this timepoint, try to change
- // original execution order
- Hashtable<Integer, Vector<SimExecutionEdge>> tooptimize2 =
- new Hashtable<Integer, Vector<SimExecutionEdge>>();
- tooptimize2.put(corenum, candidatetasks);
- Vector<Vector<ScheduleNode>> ops =
- innerOptimizeCriticalPath(scheduleGraph,
- tooptimize2,
- null,
- lgid,
- left);
- if(ops != null) {
- if(optimizeschedulegraphs == null) {
- optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
- }
- optimizeschedulegraphs.addAll(ops);
- lgid += ops.size();
- left -= ops.size();
- }
- tooptimize2 = null;
- ops = null;
- }
- tmptasks = null;
- candidatetasks = null;
- }
-
- if(left == 0) {
- it_cores = null;
- return optimizeschedulegraphs;
- }
-
- // flush the dependences and earliest start time
- if(!state.BAMBOOCOMPILETIME) {
- it_cores = tooptimize.keySet().iterator();
- while(it_cores.hasNext()) {
- int corenum = it_cores.next();
- Vector<SimExecutionEdge> edgevec =
- tooptimize.get(corenum);
- for(int j = 0; j < edgevec.size(); j++) {
- SimExecutionEdge edge = edgevec.elementAt(j);
- lastpredicateedge = edge.getLastpredicateEdge();
- lastpredicatenode = edge.getLastpredicateNode();
- // if(edge.getCoreNum() != lastpredicate.getCoreNum()) // should never hit this
- timepoint = lastpredicatenode.getTimepoint();
- if(lastpredicateedge.getTd() == null) {
- // transfer edge
- timepoint += lastpredicateedge.getWeight();
- }
- // mapping to critical path
- for(int index = 0; index < criticalPath.size(); index++) {
- SimExecutionEdge tmpseedge = criticalPath.elementAt(index);
- SimExecutionNode tmpsenode =
- (SimExecutionNode)tmpseedge.getTarget();
- if(tmpsenode.getTimepoint() > timepoint) {
- // update the predicate info
- if(edge.getPredicates() != null) {
- edge.getPredicates().remove(lastpredicateedge);
- }
- edge.addPredicate(criticalPath.elementAt(index));
- break;
- }
- }
- }
- edgevec = null;
- }
- it_cores = null;
- computeBestStartPoint(criticalPath);
- Vector<Vector<ScheduleNode>> ops = optimizeCriticalPath(scheduleGraph,
- criticalPath,
- lgid,
- left);
- if(ops != null) {
- if(optimizeschedulegraphs == null) {
- optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
- }
- optimizeschedulegraphs.addAll(ops);
- lgid += ops.size();
- left -= ops.size();
- }
- ops = null;
- }
- } else {
- // there are spare cores, try to reorganize the tasks to the spare
- // cores
- Vector<Vector<ScheduleNode>> ops =
- innerOptimizeCriticalPath(scheduleGraph,
- tooptimize,
- sparecores,
- lgid,
- left);
- if(ops != null) {
- if(optimizeschedulegraphs == null) {
- optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
- }
- optimizeschedulegraphs.addAll(ops);
- lgid += ops.size();
- left -= ops.size();
- }
- ops = null;
- }
+ Iterator<Integer> it_cores = tooptimize.keySet().iterator();
+ // check if it is possible to optimize these tasks
+ if((sparecores == null) || (sparecores.size() == 0)) {
+ // lack of spare cores
+ while(it_cores.hasNext()) {
+ int corenum = it_cores.next();
+ Vector<SimExecutionEdge> tmptasks = tooptimize.get(corenum);
+ // group the task instantiations according to whether they
+ // have backward data dependences or not
+ Vector<SimExecutionEdge> candidatetasks = new Vector();
+ for(int ii= 0; ii < tmptasks.size(); ii++) {
+ SimExecutionEdge tmpseedge = tmptasks.elementAt(ii);
+ SimExecutionNode target = (SimExecutionNode)tmpseedge.getTarget();
+ Vector<SimExecutionEdge> children =
+ (Vector<SimExecutionEdge>)target.getEdgeVector();
+ int jj = 0;
+ for(; jj < children.size(); jj++) {
+ SimExecutionEdge tmpedge = children.elementAt(jj);
+ if(tmpedge.getTd() != null) {
+ Vector<SimExecutionEdge> predicates =
+ tmpedge.getPredicates();
+ if((predicates != null) &&
+ (predicates.contains(tmpseedge))) {
+ break;
+ }
+ predicates = null;
+ } else if(tmpedge.getWeight() != 0) {
+ // transfer edge
+ if(((SimExecutionNode)tmpedge.getTarget()).getTimepoint()
+ == tmpedge.getWeight() + target.getTimepoint()) {
+ break;
+ }
+ }
+ }
+ if(jj == children.size()) {
+ candidatetasks.add(tmpseedge);
+ }
+ }
+ if((candidatetasks.size() > 0) &&
+ (candidatetasks.size() < tmptasks.size())) {
+ // there are less important tasks which have no backward
+ // data dependences at this timepoint, try to change
+ // original execution order
+ Hashtable<Integer, Vector<SimExecutionEdge>> tooptimize2 =
+ new Hashtable<Integer, Vector<SimExecutionEdge>>();
+ tooptimize2.put(corenum, candidatetasks);
+ Vector<Vector<ScheduleNode>> ops =
+ innerOptimizeCriticalPath(scheduleGraph,
+ tooptimize2,
+ null,
+ lgid,
+ left);
+ if(ops != null) {
+ if(optimizeschedulegraphs == null) {
+ optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
+ }
+ optimizeschedulegraphs.addAll(ops);
+ lgid += ops.size();
+ left -= ops.size();
+ }
+ tooptimize2 = null;
+ ops = null;
+ }
+ tmptasks = null;
+ candidatetasks = null;
+ }
+
+ if(left == 0) {
+ it_cores = null;
+ return optimizeschedulegraphs;
+ }
+
+ // flush the dependences and earliest start time
+ if(!state.BAMBOOCOMPILETIME) {
+ it_cores = tooptimize.keySet().iterator();
+ while(it_cores.hasNext()) {
+ int corenum = it_cores.next();
+ Vector<SimExecutionEdge> edgevec =
+ tooptimize.get(corenum);
+ for(int j = 0; j < edgevec.size(); j++) {
+ SimExecutionEdge edge = edgevec.elementAt(j);
+ lastpredicateedge = edge.getLastpredicateEdge();
+ lastpredicatenode = edge.getLastpredicateNode();
+ // if(edge.getCoreNum() != lastpredicate.getCoreNum()) // should never hit this
+ timepoint = lastpredicatenode.getTimepoint();
+ if(lastpredicateedge.getTd() == null) {
+ // transfer edge
+ timepoint += lastpredicateedge.getWeight();
+ }
+ // mapping to critical path
+ for(int index = 0; index < criticalPath.size(); index++) {
+ SimExecutionEdge tmpseedge = criticalPath.elementAt(index);
+ SimExecutionNode tmpsenode =
+ (SimExecutionNode)tmpseedge.getTarget();
+ if(tmpsenode.getTimepoint() > timepoint) {
+ // update the predicate info
+ if(edge.getPredicates() != null) {
+ edge.getPredicates().remove(lastpredicateedge);
+ }
+ edge.addPredicate(criticalPath.elementAt(index));
+ break;
+ }
+ }
+ }
+ edgevec = null;
+ }
+ it_cores = null;
+ computeBestStartPoint(criticalPath);
+ Vector<Vector<ScheduleNode>> ops = optimizeCriticalPath(scheduleGraph,
+ criticalPath,
+ lgid,
+ left);
+ if(ops != null) {
+ if(optimizeschedulegraphs == null) {
+ optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
+ }
+ optimizeschedulegraphs.addAll(ops);
+ lgid += ops.size();
+ left -= ops.size();
+ }
+ ops = null;
+ }
+ } else {
+ // there are spare cores, try to reorganize the tasks to the spare
+ // cores
+ Vector<Vector<ScheduleNode>> ops =
+ innerOptimizeCriticalPath(scheduleGraph,
+ tooptimize,
+ sparecores,
+ lgid,
+ left);
+ if(ops != null) {
+ if(optimizeschedulegraphs == null) {
+ optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
+ }
+ optimizeschedulegraphs.addAll(ops);
+ lgid += ops.size();
+ left -= ops.size();
+ }
+ ops = null;
+ }
}
sparecores = null;
tooptimize.clear();
tooptimize = null;
- }while(left > 0);
+ } while(left > 0);
toselects.clear();
toselects = null;
return optimizeschedulegraphs;
}
- private Vector<Vector<ScheduleNode>>
+ private Vector<Vector<ScheduleNode>>
innerOptimizeCriticalPath(Vector<ScheduleNode> scheduleGraph,
Hashtable<Integer, Vector<SimExecutionEdge>> tooptimize,
Vector<Integer> sparecores,
Vector<Vector<ScheduleNode>> optimizeschedulegraphs = null;
// first clone the whole graph
- Vector<ScheduleNode> newscheduleGraph =
+ Vector<ScheduleNode> newscheduleGraph =
cloneScheduleGraph(scheduleGraph, lgid);
-
+
if(newscheduleGraph.size() == 0) {
//System.err.println("empty schedule graph!");
return optimizeschedulegraphs;
Vector<ScheduleNode> roots = new Vector<ScheduleNode>();
for(int i = 0; i < newscheduleGraph.size(); i++) {
if((sparecores == null) || (sparecores.contains(i))) {
- roots.add(newscheduleGraph.elementAt(i));
+ roots.add(newscheduleGraph.elementAt(i));
}
}
- // map the tasks associated to SimExecutionedges to original
- // ClassNode in the ScheduleGraph and split them from previous
+ // map the tasks associated to SimExecutionedges to original
+ // ClassNode in the ScheduleGraph and split them from previous
// ScheduleGraph
Vector<ScheduleNode> tocombines = new Vector<ScheduleNode>();
Iterator<Integer> it_cores = tooptimize.keySet().iterator();
while(it_cores.hasNext()) {
int corenum = it_cores.next();
- Vector<SimExecutionEdge> candidatetasks =
+ Vector<SimExecutionEdge> candidatetasks =
tooptimize.get(corenum);
for(int i = 0; i < candidatetasks.size(); i++) {
- TaskDescriptor td = candidatetasks.elementAt(i).getTd();
- // TODO: currently do not consider multi-param tasks
- if(td.numParameters() == 1) {
- ClassDescriptor cd = td.getParamType(0).getClassDesc();
- ScheduleNode snode = newscheduleGraph.elementAt(corenum); // corresponding ScheduleNode
- Iterator<ClassNode> it_cnodes = snode.getClassNodesIterator();
- ClassNode tosplit = null;
- while(it_cnodes.hasNext()) {
- ClassNode cnode = it_cnodes.next();
- if(cnode.getClassDescriptor().equals(cd)) {
- tosplit= cnode;
- break;
- }
- }
- it_cnodes = null;
-
- // split the node
- ScheduleNode splitnode = snode.spliteClassNode(tosplit);
- newscheduleGraph.add(splitnode);
- tocombines.add(splitnode);
- tosplit = null;
- }
+ TaskDescriptor td = candidatetasks.elementAt(i).getTd();
+ // TODO: currently do not consider multi-param tasks
+ if(td.numParameters() == 1) {
+ ClassDescriptor cd = td.getParamType(0).getClassDesc();
+ ScheduleNode snode = newscheduleGraph.elementAt(corenum); // corresponding ScheduleNode
+ Iterator<ClassNode> it_cnodes = snode.getClassNodesIterator();
+ ClassNode tosplit = null;
+ while(it_cnodes.hasNext()) {
+ ClassNode cnode = it_cnodes.next();
+ if(cnode.getClassDescriptor().equals(cd)) {
+ tosplit= cnode;
+ break;
+ }
+ }
+ it_cnodes = null;
+
+ // split the node
+ ScheduleNode splitnode = snode.spliteClassNode(tosplit);
+ newscheduleGraph.add(splitnode);
+ tocombines.add(splitnode);
+ tosplit = null;
+ }
}
candidatetasks = null;
}
Vector<ScheduleEdge> scheduleEdges = new Vector<ScheduleEdge>();
for(int i= 0; i < newscheduleGraph.size(); i++) {
scheduleEdges.addAll(
- (Vector<ScheduleEdge>)newscheduleGraph.elementAt(i).getEdgeVector());
+ (Vector<ScheduleEdge>)newscheduleGraph.elementAt(i).getEdgeVector());
}
- Vector<Vector<ScheduleNode>> rootNodes =
+ Vector<Vector<ScheduleNode>> rootNodes =
SchedulingUtil.rangeScheduleNodes(roots);
if(rootNodes == null) {
return optimizeschedulegraphs;
}
- Vector<Vector<ScheduleNode>> nodes2combine =
+ Vector<Vector<ScheduleNode>> nodes2combine =
SchedulingUtil.rangeScheduleNodes(tocombines);
if(nodes2combine == null) {
return optimizeschedulegraphs;
}
- CombinationUtil.CombineGenerator cGen =
+ CombinationUtil.CombineGenerator cGen =
CombinationUtil.allocateCombineGenerator(rootNodes, nodes2combine);
Random rand = new Random();
while ((left > 0) && (cGen.nextGen())) {
- //while ((left > 0) && (cGen.randomGenE())) {
+ //while ((left > 0) && (cGen.randomGenE())) {
if(Math.abs(rand.nextInt()) % 100 > this.generateThreshold) {
- Vector<Vector<CombinationUtil.Combine>> combine = cGen.getCombine();
- Vector<ScheduleNode> sNodes =
- SchedulingUtil.generateScheduleGraph(this.state,
- newscheduleGraph,
- scheduleEdges,
- rootNodes,
- combine,
- lgid++);
- if(optimizeschedulegraphs == null) {
- optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
- }
- optimizeschedulegraphs.add(sNodes);
- combine = null;
- sNodes = null;
- left--;
+ Vector<Vector<CombinationUtil.Combine>> combine = cGen.getCombine();
+ Vector<ScheduleNode> sNodes =
+ SchedulingUtil.generateScheduleGraph(this.state,
+ newscheduleGraph,
+ scheduleEdges,
+ rootNodes,
+ combine,
+ lgid++);
+ if(optimizeschedulegraphs == null) {
+ optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
+ }
+ optimizeschedulegraphs.add(sNodes);
+ combine = null;
+ sNodes = null;
+ left--;
}
}
cGen.clear();
for(int i = 0; i < rootNodes.size(); i++) {
if(rootNodes.elementAt(i) != null) {
- rootNodes.elementAt(i).clear();
+ rootNodes.elementAt(i).clear();
}
}
rootNodes = null;
for(int i = 0; i < nodes2combine.size(); i++) {
if(nodes2combine.elementAt(i) != null) {
- nodes2combine.elementAt(i).clear();
+ nodes2combine.elementAt(i).clear();
}
}
nodes2combine = null;
return optimizeschedulegraphs;
}
- private Vector<ScheduleNode>
+ private Vector<ScheduleNode>
cloneScheduleGraph(Vector<ScheduleNode> scheduleGraph,
int gid) {
Vector<ScheduleNode> result = new Vector<ScheduleNode>();
Vector<ScheduleEdge> scheduleEdges = new Vector<ScheduleEdge>();
for(int i= 0; i < scheduleGraph.size(); i++) {
scheduleEdges.addAll(
- (Vector<ScheduleEdge>)scheduleGraph.elementAt(i).getEdgeVector());
+ (Vector<ScheduleEdge>)scheduleGraph.elementAt(i).getEdgeVector());
}
- Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>> sn2hash =
+ Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>> sn2hash =
new Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>>();
- Hashtable<ScheduleNode, ScheduleNode> sn2sn =
+ Hashtable<ScheduleNode, ScheduleNode> sn2sn =
new Hashtable<ScheduleNode, ScheduleNode>();
SchedulingUtil.cloneScheduleGraph(scheduleGraph,
- scheduleEdges,
- sn2hash,
- sn2sn,
- result,
- gid);
+ scheduleEdges,
+ sn2hash,
+ sn2sn,
+ result,
+ gid);
SchedulingUtil.assignCids(result);
scheduleEdges.clear();
return result;
}
- private Vector<Schedule>
+ private Vector<Schedule>
generateScheduling(Vector<ScheduleNode> scheduleGraph,
Hashtable<TaskDescriptor, ClassDescriptor> td2maincd) {
- Hashtable<TaskDescriptor, Vector<Schedule>> td2cores =
+ Hashtable<TaskDescriptor, Vector<Schedule>> td2cores =
new Hashtable<TaskDescriptor, Vector<Schedule>>(); // tasks reside on which cores
Vector<Schedule> scheduling = new Vector<Schedule>(scheduleGraph.size());
// for each ScheduleNode create a schedule node representing a core
- Hashtable<ScheduleNode, Integer> sn2coreNum =
+ Hashtable<ScheduleNode, Integer> sn2coreNum =
new Hashtable<ScheduleNode, Integer>();
- Hashtable<TaskDescriptor, Integer> td2maincore =
+ Hashtable<TaskDescriptor, Integer> td2maincore =
new Hashtable<TaskDescriptor, Integer>();
- Hashtable<TaskDescriptor, Vector<Schedule>> td2allycores =
- new Hashtable<TaskDescriptor, Vector<Schedule>>(); // multiparam tasks --
- // ally cores which might have parameters
- // for the task
-
+ Hashtable<TaskDescriptor, Vector<Schedule>> td2allycores =
+ new Hashtable<TaskDescriptor, Vector<Schedule>>(); // multiparam tasks --
+ // ally cores which might have parameters
+ // for the task
+
int j = 0;
for(j = 0; j < scheduleGraph.size(); j++) {
sn2coreNum.put(scheduleGraph.elementAt(j), j);
Vector<ClassNode> cNodes = sn.getClassNodes();
for(int k = 0; k < cNodes.size(); k++) {
- ClassNode cNode = cNodes.elementAt(k);
- ClassDescriptor cd = cNode.getClassDescriptor();
- Iterator it_flags = cNode.getFlags();
- while(it_flags.hasNext()) {
- FlagState fs = (FlagState)it_flags.next();
- Iterator it_edges = fs.edges();
- while(it_edges.hasNext()) {
- FEdge tmpfe = (FEdge)it_edges.next();
- TaskDescriptor td = (tmpfe).getTask();
- boolean contain = true;
- if(td.numParameters() > 1) {
- // td is a multi-param task, check if this core contains the
- // main cd of it
- ClassDescriptor cd1 = td2maincd.get(td);
- if(td2maincd.get(td).equals(cd)) {
- contain = true;
- td2maincore.put(td, tmpSchedule.getCoreNum());
- } else {
- contain = false;
- if(!td2allycores.containsKey(td)) {
- td2allycores.put(td, new Vector<Schedule>());
- }
- Vector<Schedule> allycores = td2allycores.get(td);
- if(!allycores.contains(tmpSchedule)) {
- allycores.addElement(tmpSchedule);
- }
- allycores = null;
- }
- // If the FlagState can be fed to some multi-param tasks,
- // need to record corresponding ally cores later.
- tmpSchedule.addFState4TD(td, fs);
- }
- if(contain) {
- tmpSchedule.addTask(td);
- if(!td2cores.containsKey(td)) {
- td2cores.put(td, new Vector<Schedule>());
- }
- Vector<Schedule> tmpcores = td2cores.get(td);
- if(!tmpcores.contains(tmpSchedule)) {
- tmpcores.add(tmpSchedule);
- }
- tmpcores = null;
- }
- if(td.getParamType(0).getClassDesc().getSymbol().equals(
- TypeUtil.StartupClass)) {
- assert(!setstartupcore);
- startupcore = j;
- startup = tmpSchedule;
- setstartupcore = true;
- }
- }
- it_edges = null;
- }
- it_flags = null;
+ ClassNode cNode = cNodes.elementAt(k);
+ ClassDescriptor cd = cNode.getClassDescriptor();
+ Iterator it_flags = cNode.getFlags();
+ while(it_flags.hasNext()) {
+ FlagState fs = (FlagState)it_flags.next();
+ Iterator it_edges = fs.edges();
+ while(it_edges.hasNext()) {
+ FEdge tmpfe = (FEdge)it_edges.next();
+ TaskDescriptor td = (tmpfe).getTask();
+ boolean contain = true;
+ if(td.numParameters() > 1) {
+ // td is a multi-param task, check if this core contains the
+ // main cd of it
+ ClassDescriptor cd1 = td2maincd.get(td);
+ if(td2maincd.get(td).equals(cd)) {
+ contain = true;
+ td2maincore.put(td, tmpSchedule.getCoreNum());
+ } else {
+ contain = false;
+ if(!td2allycores.containsKey(td)) {
+ td2allycores.put(td, new Vector<Schedule>());
+ }
+ Vector<Schedule> allycores = td2allycores.get(td);
+ if(!allycores.contains(tmpSchedule)) {
+ allycores.addElement(tmpSchedule);
+ }
+ allycores = null;
+ }
+ // If the FlagState can be fed to some multi-param tasks,
+ // need to record corresponding ally cores later.
+ tmpSchedule.addFState4TD(td, fs);
+ }
+ if(contain) {
+ tmpSchedule.addTask(td);
+ if(!td2cores.containsKey(td)) {
+ td2cores.put(td, new Vector<Schedule>());
+ }
+ Vector<Schedule> tmpcores = td2cores.get(td);
+ if(!tmpcores.contains(tmpSchedule)) {
+ tmpcores.add(tmpSchedule);
+ }
+ tmpcores = null;
+ }
+ if(td.getParamType(0).getClassDesc().getSymbol().equals(
+ TypeUtil.StartupClass)) {
+ assert(!setstartupcore);
+ startupcore = j;
+ startup = tmpSchedule;
+ setstartupcore = true;
+ }
+ }
+ it_edges = null;
+ }
+ it_flags = null;
}
cNodes = null;
- // For each of the ScheduleEdge out of this ScheduleNode, add the
+ // For each of the ScheduleEdge out of this ScheduleNode, add the
// target ScheduleNode into the queue inside sn
Iterator it_edges = sn.edges();
while(it_edges.hasNext()) {
- ScheduleEdge se = (ScheduleEdge)it_edges.next();
- ScheduleNode target = (ScheduleNode)se.getTarget();
- Integer targetcore = sn2coreNum.get(target);
- switch(se.getType()) {
- case ScheduleEdge.NEWEDGE: {
- FlagState fs = se.getFstate();
- // Check if the new obj could be fed to some
- // multi-parameter task, if so, add for ally cores
- // checking
- Iterator it = fs.edges();
- boolean canTriggerSTask = false; // Flag indicates if fs can trigger
- // some single-param task
- while(it.hasNext()) {
- TaskDescriptor td = ((FEdge)it.next()).getTask();
- if(td.numParameters() > 1) {
- tmpSchedule.addFState4TD(td, fs); // TODO
+ // add this core as an allycore of td
- if(!td2allycores.containsKey(td)) {
- td2allycores.put(td, new Vector<Schedule>());
- }
- Vector<Schedule> allycores = td2allycores.get(td);
- if(!allycores.contains(tmpSchedule)) {
- allycores.addElement(tmpSchedule);
- }
- } else {
- canTriggerSTask = true;
- }
- }
- if(canTriggerSTask) {
- // Only transfer the obj when it can trigger some single-parm task
- // TODO: ensure that multi-param tasks have these objects
- for(int k = 0; k < se.getNewRate(); k++) {
- tmpSchedule.addTargetCore(fs, targetcore);
- }
- }
- break;
- }
-
- case ScheduleEdge.TRANSEDGE: {
- // 'transmit' edge
- tmpSchedule.addTargetCore(se.getFstate(),
- targetcore,
- se.getTargetFState());
- // check if missed some FlagState associated with some
- // multi-parameter task, which has been cloned when
- // splitting a ClassNode
- FlagState fs = se.getSourceFState();
- FlagState tfs = se.getTargetFState();
- Iterator it = tfs.edges();
- while(it.hasNext()) {
- TaskDescriptor td = ((FEdge)it.next()).getTask();
- if(td.numParameters() > 1) {
- tmpSchedule.addFState4TD(td, fs);
- // add this core as a allycore of td
- if(!td2allycores.containsKey(td)) {
- td2allycores.put(td, new Vector<Schedule>());
- }
- Vector<Schedule> allycores = td2allycores.get(td);
- if(!allycores.contains(tmpSchedule)) {
- allycores.addElement(tmpSchedule);
- }
- }
- }
- break;
- }
- }
+ ScheduleEdge se = (ScheduleEdge)it_edges.next();
+ ScheduleNode target = (ScheduleNode)se.getTarget();
+ Integer targetcore = sn2coreNum.get(target);
+ switch(se.getType()) {
+ case ScheduleEdge.NEWEDGE: {
+ FlagState fs = se.getFstate();
+ // Check if the new obj could be fed to some
+ // multi-parameter task, if so, add for ally cores
+ // checking
+ Iterator it = fs.edges();
+ boolean canTriggerSTask = false; // Flag indicates if fs can trigger
+ // some single-param task
+ while(it.hasNext()) {
+ TaskDescriptor td = ((FEdge)it.next()).getTask();
+ if(td.numParameters() > 1) {
+ tmpSchedule.addFState4TD(td, fs); // TODO
+ // add this core as an allycore of td
+ if(!td2allycores.containsKey(td)) {
+ td2allycores.put(td, new Vector<Schedule>());
+ }
+ Vector<Schedule> allycores = td2allycores.get(td);
+ if(!allycores.contains(tmpSchedule)) {
+ allycores.addElement(tmpSchedule);
+ }
+ } else {
+ canTriggerSTask = true;
+ }
+ }
+ if(canTriggerSTask) {
+ // Only transfer the obj when it can trigger some single-param task
+ // TODO: ensure that multi-param tasks have these objects
+ for(int k = 0; k < se.getNewRate(); k++) {
+ tmpSchedule.addTargetCore(fs, targetcore);
+ }
+ }
+ break;
+ }
+
+ case ScheduleEdge.TRANSEDGE: {
+ // 'transmit' edge
+ tmpSchedule.addTargetCore(se.getFstate(),
+ targetcore,
+ se.getTargetFState());
+ // check if missed some FlagState associated with some
+ // multi-parameter task, which has been cloned when
+ // splitting a ClassNode
+ FlagState fs = se.getSourceFState();
+ FlagState tfs = se.getTargetFState();
+ Iterator it = tfs.edges();
+ while(it.hasNext()) {
+ TaskDescriptor td = ((FEdge)it.next()).getTask();
+ if(td.numParameters() > 1) {
+ tmpSchedule.addFState4TD(td, fs);
+ // add this core as an allycore of td
+ if(!td2allycores.containsKey(td)) {
+ td2allycores.put(td, new Vector<Schedule>());
+ }
+ Vector<Schedule> allycores = td2allycores.get(td);
+ if(!allycores.contains(tmpSchedule)) {
+ allycores.addElement(tmpSchedule);
+ }
+ }
+ }
+ break;
+ }
+ }
}
it_edges = sn.getScheduleEdgesIterator();
while(it_edges.hasNext()) {
- ScheduleEdge se = (ScheduleEdge)it_edges.next();
- switch(se.getType()) {
- case ScheduleEdge.NEWEDGE: {
- // TODO, added 09/07/06
- FlagState fs = se.getFstate();
- // Check if the new obj could be fed to some
- // multi-parameter task, if so, add for ally cores
- // checking
- Iterator it = fs.edges();
- boolean canTriggerSTask = false; // Flag indicates if fs can trigger
- // some single-param task
- while(it.hasNext()) {
- TaskDescriptor td = ((FEdge)it.next()).getTask();
- if(td.numParameters() > 1) {
- tmpSchedule.addFState4TD(td, fs); // TODO
- // add this core as a allycore of td
- if(!td2allycores.containsKey(td)) {
- td2allycores.put(td, new Vector<Schedule>());
- }
- Vector<Schedule> allycores = td2allycores.get(td);
- if(!allycores.contains(tmpSchedule)) {
- allycores.addElement(tmpSchedule);
- }
- } else {
- canTriggerSTask = true;
- }
- }
- if(canTriggerSTask) {
- for(int k = 0; k < se.getNewRate(); k++) {
- tmpSchedule.addTargetCore(se.getFstate(), j);
- }
- }
- break;
- }
-
- case ScheduleEdge.TRANSEDGE: {
- // 'transmit' edge
- tmpSchedule.addTargetCore(se.getFstate(),
- j,
- se.getTargetFState());
- // check if missed some FlagState associated with some
- // multi-parameter task, which has been cloned when
- // splitting a ClassNode
- FlagState fs = se.getSourceFState();
- FlagState tfs = se.getTargetFState();
- Iterator it = tfs.edges();
- while(it.hasNext()) {
- TaskDescriptor td = ((FEdge)it.next()).getTask();
- if(td.numParameters() > 1) {
- tmpSchedule.addFState4TD(td, fs);
- // add this core as a allycore of td
- if(!td2allycores.containsKey(td)) {
- td2allycores.put(td, new Vector<Schedule>());
- }
- Vector<Schedule> allycores = td2allycores.get(td);
- if(!allycores.contains(tmpSchedule)) {
- allycores.addElement(tmpSchedule);
- }
- }
- }
- break;
- }
- }
+ ScheduleEdge se = (ScheduleEdge)it_edges.next();
+ switch(se.getType()) {
+ case ScheduleEdge.NEWEDGE: {
+ // TODO, added 09/07/06
+ FlagState fs = se.getFstate();
+ // Check if the new obj could be fed to some
+ // multi-parameter task, if so, add for ally cores
+ // checking
+ Iterator it = fs.edges();
+ boolean canTriggerSTask = false; // Flag indicates if fs can trigger
+ // some single-param task
+ while(it.hasNext()) {
+ TaskDescriptor td = ((FEdge)it.next()).getTask();
+ if(td.numParameters() > 1) {
+ tmpSchedule.addFState4TD(td, fs); // TODO
+ // add this core as an allycore of td
+ if(!td2allycores.containsKey(td)) {
+ td2allycores.put(td, new Vector<Schedule>());
+ }
+ Vector<Schedule> allycores = td2allycores.get(td);
+ if(!allycores.contains(tmpSchedule)) {
+ allycores.addElement(tmpSchedule);
+ }
+ } else {
+ canTriggerSTask = true;
+ }
+ }
+ if(canTriggerSTask) {
+ for(int k = 0; k < se.getNewRate(); k++) {
+ tmpSchedule.addTargetCore(se.getFstate(), j);
+ }
+ }
+ break;
+ }
+
+ case ScheduleEdge.TRANSEDGE: {
+ // 'transmit' edge
+ tmpSchedule.addTargetCore(se.getFstate(),
+ j,
+ se.getTargetFState());
+ // check if missed some FlagState associated with some
+ // multi-parameter task, which has been cloned when
+ // splitting a ClassNode
+ FlagState fs = se.getSourceFState();
+ FlagState tfs = se.getTargetFState();
+ Iterator it = tfs.edges();
+ while(it.hasNext()) {
+ TaskDescriptor td = ((FEdge)it.next()).getTask();
+ if(td.numParameters() > 1) {
+ tmpSchedule.addFState4TD(td, fs);
+ // add this core as an allycore of td
+ if(!td2allycores.containsKey(td)) {
+ td2allycores.put(td, new Vector<Schedule>());
+ }
+ Vector<Schedule> allycores = td2allycores.get(td);
+ if(!allycores.contains(tmpSchedule)) {
+ allycores.addElement(tmpSchedule);
+ }
+ }
+ }
+ break;
+ }
+ }
}
it_edges = null;
scheduling.add(tmpSchedule);
while(it_mptds.hasNext()) {
TaskDescriptor td = it_mptds.next();
Vector<FEdge> fes = (Vector<FEdge>) this.taskAnalysis.getFEdgesFromTD(td);
- Vector<Schedule> cores = td2cores.get(td);
+ Vector<Schedule> cores = td2cores.get(td);
assert(cores.size() == 1); // should have only one core
for(int k = 0; k < cores.size(); ++k) {
- Schedule tmpSchedule = cores.elementAt(k);
-
- // Make sure all the parameter objs of a multi-parameter
- // task would be send to right place after the task finished
- for(int h = 0; h < fes.size(); ++h) {
- FEdge tmpfe = fes.elementAt(h);
- FlagState tmpfs = (FlagState)tmpfe.getTarget();
- Vector<TaskDescriptor> tmptds = new Vector<TaskDescriptor>();
- if((tmpSchedule.getTargetCoreTable() == null)
- || (!tmpSchedule.getTargetCoreTable().containsKey(tmpfs))) {
- // add up all possible cores' info
- Iterator it_edges = tmpfs.edges();
- while(it_edges.hasNext()) {
- TaskDescriptor tmptd = ((FEdge)it_edges.next()).getTask();
- if(!tmptds.contains(tmptd)) {
- tmptds.add(tmptd);
- // only multiparam task will be processed here!!! TODO
- Vector<Schedule> tmpcores = td2cores.get(tmptd);
- for(int m = 0; m < tmpcores.size(); ++m) {
- Schedule target = tmpcores.elementAt(m);
- int targetcore = target.getCoreNum();
- int num = target.getTaskNum(tmptd);
- for(int n = 0; n < num; n++) {
- tmpSchedule.addTargetCore(tmpfs, targetcore);
- }
- }
- tmpcores = null;
- }
- }
- it_edges = null;
- }
- tmptds = null;
- }
+ Schedule tmpSchedule = cores.elementAt(k);
+
+ // Make sure all the parameter objs of a multi-parameter
+ // task would be sent to the right place after the task finished
+ for(int h = 0; h < fes.size(); ++h) {
+ FEdge tmpfe = fes.elementAt(h);
+ FlagState tmpfs = (FlagState)tmpfe.getTarget();
+ Vector<TaskDescriptor> tmptds = new Vector<TaskDescriptor>();
+ if((tmpSchedule.getTargetCoreTable() == null)
+ || (!tmpSchedule.getTargetCoreTable().containsKey(tmpfs))) {
+ // add up all possible cores' info
+ Iterator it_edges = tmpfs.edges();
+ while(it_edges.hasNext()) {
+ TaskDescriptor tmptd = ((FEdge)it_edges.next()).getTask();
+ if(!tmptds.contains(tmptd)) {
+ tmptds.add(tmptd);
+ // only multiparam task will be processed here!!! TODO
+ Vector<Schedule> tmpcores = td2cores.get(tmptd);
+ for(int m = 0; m < tmpcores.size(); ++m) {
+ Schedule target = tmpcores.elementAt(m);
+ int targetcore = target.getCoreNum();
+ int num = target.getTaskNum(tmptd);
+ for(int n = 0; n < num; n++) {
+ tmpSchedule.addTargetCore(tmpfs, targetcore);
+ }
+ }
+ tmpcores = null;
+ }
+ }
+ it_edges = null;
+ }
+ tmptds = null;
+ }
}
fes = null;
cores = null;
TaskDescriptor td = it_mptds.next();
Vector<Schedule> allycores = td2allycores.get(td);
for(int i = 0; i < allycores.size(); i++) {
- Schedule tSchedule = allycores.elementAt(i);
- Vector<FlagState> tmpfss = tSchedule.getFStates4TD(td);
- int targetcore = td2maincore.get(td).intValue();
- for(int h = 0; h < tmpfss.size(); ++h) {
- tSchedule.addAllyCore(tmpfss.elementAt(h), targetcore);
- }
- tmpfss = null;
+ Schedule tSchedule = allycores.elementAt(i);
+ Vector<FlagState> tmpfss = tSchedule.getFStates4TD(td);
+ int targetcore = td2maincore.get(td).intValue();
+ for(int h = 0; h < tmpfss.size(); ++h) {
+ tSchedule.addAllyCore(tmpfss.elementAt(h), targetcore);
+ }
+ tmpfss = null;
}
}
it_mptds = null;
public class ObjectSimulator {
static int objid = 0;
-
+
int oid;
ClassDescriptor cd;
FlagState currentFS;
boolean shared;
boolean hold;
int version;
-
+
// TODO, crack for KMeans
int counter;
- public ObjectSimulator(ClassDescriptor cd,
- FlagState currentFS) {
+ public ObjectSimulator(ClassDescriptor cd,
+ FlagState currentFS) {
super();
this.oid = ObjectSimulator.objid++;
this.cd = cd;
this.changed = true;
currentFS = (FlagState)fedge.getTarget();
if(this.counter > 0) {
- //System.err.println(this.counter);
- this.counter--;
+ //System.err.println(this.counter);
+ this.counter--;
}
if((this.cd.getSymbol().equals("Cluster")) && (this.counter == 0)) {
- // go to end state
- this.currentFS = new FlagState(this.cd);
+ // go to end state
+ this.currentFS = new FlagState(this.cd);
}
} else {
this.changed = false;
private Hashtable<TaskDescriptor, Vector<FlagState>> td2fs;
public Schedule(int coreNum,
- int gid) {
+ int gid) {
this.gid = gid;
this.coreNum = coreNum;
this.tasks = null;
}
public int getGid() {
- return gid;
+ return gid;
}
public int getCoreNum() {
return this.td2fs.get(td);
}
- public void addTargetCore(FlagState fstate,
- Integer targetCore) {
+ public void addTargetCore(FlagState fstate,
+ Integer targetCore) {
if(this.targetCores == null) {
this.targetCores = new Hashtable<FlagState, Queue<Integer>>();
}
// which reflects probabilities.
}
- public void addTargetCore(FlagState fstate,
- Integer targetCore,
- FlagState tfstate) {
+ public void addTargetCore(FlagState fstate,
+ Integer targetCore,
+ FlagState tfstate) {
if(this.targetCores == null) {
this.targetCores = new Hashtable<FlagState, Queue<Integer>>();
}
this.targetFState.put(fstate, tfstate);
}
- public void addAllyCore(FlagState fstate,
- Integer targetCore) {
+ public void addAllyCore(FlagState fstate,
+ Integer targetCore) {
if(this.allyCores == null) {
this.allyCores = new Hashtable<FlagState, Vector<Integer>>();
}
this.allyCores.put(fstate, new Vector<Integer>());
}
if((this.coreNum != targetCore.intValue()) && (!this.allyCores.get(fstate).contains(targetCore))) {
- this.allyCores.get(fstate).add(targetCore);
+ this.allyCores.get(fstate).add(targetCore);
}
}
- public void addFState4TD(TaskDescriptor td,
- FlagState fstate) {
+ public void addFState4TD(TaskDescriptor td,
+ FlagState fstate) {
if(this.td2fs == null) {
this.td2fs = new Hashtable<TaskDescriptor, Vector<FlagState>>();
}
this.td2num.put(task, this.td2num.get(task).intValue()+1);
}
}
-
+
public int getTaskNum(TaskDescriptor task) {
return this.td2num.get(task);
}
int scheduleThreshold;
int coreNum;
Vector<Vector<ScheduleNode>> scheduleGraphs;
-
+
// Main CD table for multi-param tasks
Hashtable<TaskDescriptor, ClassDescriptor> td2maincd;
- public ScheduleAnalysis(State state,
+ public ScheduleAnalysis(State state,
TaskAnalysis taskanalysis) {
this.state = state;
this.taskanalysis = taskanalysis;
this.scheduleGraphs = null;
this.td2maincd = null;
}
-
+
public void setTransThreshold(int tt) {
this.transThreshold = tt;
}
public Hashtable<TaskDescriptor, ClassDescriptor> getTd2maincd() {
// TODO, for test
/*Iterator<TaskDescriptor> key = td2maincd.keySet().iterator();
- while(key.hasNext()) {
- TaskDescriptor td = key.next();
- System.err.println(td.getSymbol() + ", maincd: "
- + this.td2maincd.get(td).getSymbol());
- }*/
-
+ while(key.hasNext()) {
+ TaskDescriptor td = key.next();
+ System.err.println(td.getSymbol() + ", maincd: "
+ + this.td2maincd.get(td).getSymbol());
+ }*/
+
return td2maincd;
}
try {
Vector<ScheduleEdge> toBreakDown = new Vector<ScheduleEdge>();
ScheduleNode startupNode = null;
-
+
if((multiparamtds != null) || (multiparamtds.size() > 0)) {
- this.td2maincd = new Hashtable<TaskDescriptor, ClassDescriptor>();
+ this.td2maincd = new Hashtable<TaskDescriptor, ClassDescriptor>();
}
// necessary preparation such as read profile info etc.
// set up profiling data
if(state.USEPROFILE) {
- java.util.Hashtable<String, TaskInfo> taskinfos =
+ java.util.Hashtable<String, TaskInfo> taskinfos =
new java.util.Hashtable<String, TaskInfo>();
this.readProfileInfo(taskinfos);
long tint = 0;
Iterator it_classes = state.getClassSymbolTable().getDescriptorsIterator();
while(it_classes.hasNext()) {
- ClassDescriptor cd = (ClassDescriptor) it_classes.next();
- if(cd.hasFlags()) {
- Vector rootnodes = this.taskanalysis.getRootNodes(cd);
- if(rootnodes!=null) {
- Iterator it_rootnodes = rootnodes.iterator();
- while(it_rootnodes.hasNext()) {
- FlagState root = (FlagState)it_rootnodes.next();
- Vector allocatingTasks = root.getAllocatingTasks();
- if(allocatingTasks != null) {
- for(int k = 0; k < allocatingTasks.size(); k++) {
- TaskDescriptor td =
- (TaskDescriptor)allocatingTasks.elementAt(k);
- Vector<FEdge> fev = this.taskanalysis.getFEdgesFromTD(td);
- int numEdges = fev.size();
- for(int j = 0; j < numEdges; j++) {
- FEdge pfe = fev.elementAt(j);
- TaskInfo taskinfo = taskinfos.get(td.getSymbol());
- tint = taskinfo.m_exetime[pfe.getTaskExitIndex()];
- pfe.setExeTime(tint);
- double idouble =
- taskinfo.m_probability[pfe.getTaskExitIndex()];
- pfe.setProbability(idouble);
- int newRate = 0;
- int tindex = pfe.getTaskExitIndex();
- if((taskinfo.m_newobjinfo.elementAt(tindex) != null)
- && (taskinfo.m_newobjinfo.elementAt(tindex).containsKey(
- cd.getSymbol()))) {
- newRate = taskinfo.m_newobjinfo.elementAt(tindex).get(
- cd.getSymbol());
- }
- pfe.addNewObjInfo(cd, newRate, idouble);
- if(taskinfo.m_byObj != -1) {
- ((FlagState)pfe.getSource()).setByObj(taskinfo.m_byObj);
- }
- // TODO for test
- /*System.err.println("task " + td.getSymbol() + " exit# " +
- pfe.getTaskExitIndex() + " exetime: " + pfe.getExeTime()
- + " prob: " + pfe.getProbability() + "% newobj: "
- + pfe.getNewObjInfoHashtable().size());*/
- }
- fev = null;
- }
- }
- }
- it_rootnodes = null;
- }
- Iterator it_flags = this.taskanalysis.getFlagStates(cd).iterator();
- while(it_flags.hasNext()) {
- FlagState fs = (FlagState)it_flags.next();
- Iterator it_edges = fs.edges();
- while(it_edges.hasNext()) {
- FEdge edge = (FEdge)it_edges.next();
- TaskInfo taskinfo = taskinfos.get(edge.getTask().getSymbol());
- double idouble = 0.0;
- if(edge.getTaskExitIndex() >= taskinfo.m_exetime.length) {
- tint = 0;
- } else {
- tint = taskinfo.m_exetime[edge.getTaskExitIndex()];
- idouble = taskinfo.m_probability[edge.getTaskExitIndex()];
- }
- edge.setExeTime(tint);
- edge.setProbability(idouble);
- if(taskinfo.m_byObj != -1) {
- ((FlagState)edge.getSource()).setByObj(taskinfo.m_byObj);
- }
- // TODO for test
- /*System.err.println("task " + edge.getTask().getSymbol() + " exit# " +
- edge.getTaskExitIndex() + " exetime: " + edge.getExeTime()
- + " prob: " + edge.getProbability());*/
- }
- it_edges = null;
- }
- it_flags = null;
- }
+ ClassDescriptor cd = (ClassDescriptor) it_classes.next();
+ if(cd.hasFlags()) {
+ Vector rootnodes = this.taskanalysis.getRootNodes(cd);
+ if(rootnodes!=null) {
+ Iterator it_rootnodes = rootnodes.iterator();
+ while(it_rootnodes.hasNext()) {
+ FlagState root = (FlagState)it_rootnodes.next();
+ Vector allocatingTasks = root.getAllocatingTasks();
+ if(allocatingTasks != null) {
+ for(int k = 0; k < allocatingTasks.size(); k++) {
+ TaskDescriptor td =
+ (TaskDescriptor)allocatingTasks.elementAt(k);
+ Vector<FEdge> fev = this.taskanalysis.getFEdgesFromTD(td);
+ int numEdges = fev.size();
+ for(int j = 0; j < numEdges; j++) {
+ FEdge pfe = fev.elementAt(j);
+ TaskInfo taskinfo = taskinfos.get(td.getSymbol());
+ tint = taskinfo.m_exetime[pfe.getTaskExitIndex()];
+ pfe.setExeTime(tint);
+ double idouble =
+ taskinfo.m_probability[pfe.getTaskExitIndex()];
+ pfe.setProbability(idouble);
+ int newRate = 0;
+ int tindex = pfe.getTaskExitIndex();
+ if((taskinfo.m_newobjinfo.elementAt(tindex) != null)
+ && (taskinfo.m_newobjinfo.elementAt(tindex).containsKey(
+ cd.getSymbol()))) {
+ newRate = taskinfo.m_newobjinfo.elementAt(tindex).get(
+ cd.getSymbol());
+ }
+ pfe.addNewObjInfo(cd, newRate, idouble);
+ if(taskinfo.m_byObj != -1) {
+ ((FlagState)pfe.getSource()).setByObj(taskinfo.m_byObj);
+ }
+ // TODO for test
+ /*System.err.println("task " + td.getSymbol() + " exit# " +
+ pfe.getTaskExitIndex() + " exetime: " + pfe.getExeTime()
+ + " prob: " + pfe.getProbability() + "% newobj: "
+ + pfe.getNewObjInfoHashtable().size());*/
+ }
+ fev = null;
+ }
+ }
+ }
+ it_rootnodes = null;
+ }
+ Iterator it_flags = this.taskanalysis.getFlagStates(cd).iterator();
+ while(it_flags.hasNext()) {
+ FlagState fs = (FlagState)it_flags.next();
+ Iterator it_edges = fs.edges();
+ while(it_edges.hasNext()) {
+ FEdge edge = (FEdge)it_edges.next();
+ TaskInfo taskinfo = taskinfos.get(edge.getTask().getSymbol());
+ double idouble = 0.0;
+ if(edge.getTaskExitIndex() >= taskinfo.m_exetime.length) {
+ tint = 0;
+ } else {
+ tint = taskinfo.m_exetime[edge.getTaskExitIndex()];
+ idouble = taskinfo.m_probability[edge.getTaskExitIndex()];
+ }
+ edge.setExeTime(tint);
+ edge.setProbability(idouble);
+ if(taskinfo.m_byObj != -1) {
+ ((FlagState)edge.getSource()).setByObj(taskinfo.m_byObj);
+ }
+ // TODO for test
+ /*System.err.println("task " + edge.getTask().getSymbol() + " exit# " +
+ edge.getTaskExitIndex() + " exetime: " + edge.getExeTime()
+ + " prob: " + edge.getProbability());*/
+ }
+ it_edges = null;
+ }
+ it_flags = null;
+ }
}
taskinfos = null;
it_classes = null;
while(it_classes.hasNext()) {
ClassDescriptor cd=(ClassDescriptor) it_classes.next();
if(cd.hasFlags()) {
- Set<FlagState> fss = this.taskanalysis.getFlagStates(cd);
- SCC scc=GraphNode.DFS.computeSCC(fss);
- if (scc.hasCycles()) {
- for(int i=0; i<scc.numSCC(); i++) {
- if (scc.hasCycle(i)) {
- Set cycleset = scc.getSCC(i);
- Iterator it_fs = cycleset.iterator();
- while(it_fs.hasNext()) {
- FlagState fs = (FlagState)it_fs.next();
- Iterator it_edges = fs.edges();
- while(it_edges.hasNext()) {
- FEdge edge = (FEdge)it_edges.next();
- if(cycleset.contains(edge.getTarget())) {
- // a backedge
- edge.setisbackedge(true);
- }
- }
- it_edges = null;
- }
- it_fs = null;
- }
- }
- }
- fss = null;
+ Set<FlagState> fss = this.taskanalysis.getFlagStates(cd);
+ SCC scc=GraphNode.DFS.computeSCC(fss);
+ if (scc.hasCycles()) {
+ for(int i=0; i<scc.numSCC(); i++) {
+ if (scc.hasCycle(i)) {
+ Set cycleset = scc.getSCC(i);
+ Iterator it_fs = cycleset.iterator();
+ while(it_fs.hasNext()) {
+ FlagState fs = (FlagState)it_fs.next();
+ Iterator it_edges = fs.edges();
+ while(it_edges.hasNext()) {
+ FEdge edge = (FEdge)it_edges.next();
+ if(cycleset.contains(edge.getTarget())) {
+ // a backedge
+ edge.setisbackedge(true);
+ }
+ }
+ it_edges = null;
+ }
+ it_fs = null;
+ }
+ }
+ }
+ fss = null;
}
}
it_classes = null;
}
- private void readProfileInfo(java.util.Hashtable<String, TaskInfo> taskinfos){
+ private void readProfileInfo(java.util.Hashtable<String, TaskInfo> taskinfos) {
try {
// read in profile data and set
//FileInputStream inStream = new FileInputStream("/scratch/profile.rst");
- FileInputStream inStream =
- new FileInputStream(/*"/scratch/" + */this.state.profilename);
+ FileInputStream inStream =
+ new FileInputStream(/*"/scratch/" + */ this.state.profilename);
byte[] b = new byte[1024 * 100];
int length = inStream.read(b);
if(length < 0) {
- System.out.print("No content in input file: /scratch/"
- + this.state.profilename + "\n");
- System.exit(-1);
+ System.out.print("No content in input file: /scratch/"
+ + this.state.profilename + "\n");
+ System.exit(-1);
}
String profiledata = new String(b, 0, length);
// profile data format:
- // taskname, numoftaskexits(; exetime, probability, numofnewobjtypes(,
+ // taskname, numoftaskexits(; exetime, probability, numofnewobjtypes(,
// newobj type, num of objs)+)+
int inindex = profiledata.indexOf('\n');
while((inindex != -1) ) {
- String inline = profiledata.substring(0, inindex);
- profiledata = profiledata.substring(inindex + 1);
- //System.printString(inline + "\n");
- int tmpinindex = inline.indexOf(',');
- if(tmpinindex == -1) {
- break;
- }
- String inname = inline.substring(0, tmpinindex);
- String inint = inline.substring(tmpinindex + 1);
- while(inint.startsWith(" ")) {
- inint = inint.substring(1);
- }
- tmpinindex = inint.indexOf(',');
- if(tmpinindex == -1) {
- break;
- }
- int numofexits = Integer.parseInt(inint.substring(0, tmpinindex));
- TaskInfo tinfo = new TaskInfo(numofexits);
- inint = inint.substring(tmpinindex + 1);
- while(inint.startsWith(" ")) {
- inint = inint.substring(1);
- }
- tmpinindex = inint.indexOf(';');
- int byObj = Integer.parseInt(inint.substring(0, tmpinindex));
- if(byObj != -1) {
- tinfo.m_byObj = byObj;
- }
- inint = inint.substring(tmpinindex + 1);
- while(inint.startsWith(" ")) {
- inint = inint.substring(1);
- }
- for(int i = 0; i < numofexits; i++) {
- String tmpinfo = null;
- if(i < numofexits - 1) {
- tmpinindex = inint.indexOf(';');
- tmpinfo = inint.substring(0, tmpinindex);
- inint = inint.substring(tmpinindex + 1);
- while(inint.startsWith(" ")) {
- inint = inint.substring(1);
- }
- } else {
- tmpinfo = inint;
- }
-
- tmpinindex = tmpinfo.indexOf(',');
- tinfo.m_exetime[i] = Long.parseLong(tmpinfo.substring(0, tmpinindex));
- tmpinfo = tmpinfo.substring(tmpinindex + 1);
- while(tmpinfo.startsWith(" ")) {
- tmpinfo = tmpinfo.substring(1);
- }
- tmpinindex = tmpinfo.indexOf(',');
- tinfo.m_probability[i] = Double.parseDouble(
- tmpinfo.substring(0,tmpinindex));
- tmpinfo = tmpinfo.substring(tmpinindex + 1);
- while(tmpinfo.startsWith(" ")) {
- tmpinfo = tmpinfo.substring(1);
- }
- tmpinindex = tmpinfo.indexOf(',');
- int numofnobjs = 0;
- if(tmpinindex == -1) {
- numofnobjs = Integer.parseInt(tmpinfo);
- if(numofnobjs != 0) {
- System.err.println("Error profile data format!");
- System.exit(-1);
- }
- } else {
- tinfo.m_newobjinfo.setElementAt(new Hashtable<String,Integer>(), i);
- numofnobjs = Integer.parseInt(tmpinfo.substring(0, tmpinindex));
- tmpinfo = tmpinfo.substring(tmpinindex + 1);
- while(tmpinfo.startsWith(" ")) {
- tmpinfo = tmpinfo.substring(1);
- }
- for(int j = 0; j < numofnobjs; j++) {
- tmpinindex = tmpinfo.indexOf(',');
- String nobjtype = tmpinfo.substring(0, tmpinindex);
- tmpinfo = tmpinfo.substring(tmpinindex + 1);
- while(tmpinfo.startsWith(" ")) {
- tmpinfo = tmpinfo.substring(1);
- }
- int objnum = 0;
- if(j < numofnobjs - 1) {
- tmpinindex = tmpinfo.indexOf(',');
- objnum = Integer.parseInt(tmpinfo.substring(0, tmpinindex));
- tmpinfo = tmpinfo.substring(tmpinindex + 1);
- while(tmpinfo.startsWith(" ")) {
- tmpinfo = tmpinfo.substring(1);
- }
- } else {
- objnum = Integer.parseInt(tmpinfo);
- }
- tinfo.m_newobjinfo.elementAt(i).put(nobjtype, objnum);
- }
- }
- }
- taskinfos.put(inname, tinfo);
- inindex = profiledata.indexOf('\n');
+ String inline = profiledata.substring(0, inindex);
+ profiledata = profiledata.substring(inindex + 1);
+ //System.printString(inline + "\n");
+ int tmpinindex = inline.indexOf(',');
+ if(tmpinindex == -1) {
+ break;
+ }
+ String inname = inline.substring(0, tmpinindex);
+ String inint = inline.substring(tmpinindex + 1);
+ while(inint.startsWith(" ")) {
+ inint = inint.substring(1);
+ }
+ tmpinindex = inint.indexOf(',');
+ if(tmpinindex == -1) {
+ break;
+ }
+ int numofexits = Integer.parseInt(inint.substring(0, tmpinindex));
+ TaskInfo tinfo = new TaskInfo(numofexits);
+ inint = inint.substring(tmpinindex + 1);
+ while(inint.startsWith(" ")) {
+ inint = inint.substring(1);
+ }
+ tmpinindex = inint.indexOf(';');
+ int byObj = Integer.parseInt(inint.substring(0, tmpinindex));
+ if(byObj != -1) {
+ tinfo.m_byObj = byObj;
+ }
+ inint = inint.substring(tmpinindex + 1);
+ while(inint.startsWith(" ")) {
+ inint = inint.substring(1);
+ }
+ for(int i = 0; i < numofexits; i++) {
+ String tmpinfo = null;
+ if(i < numofexits - 1) {
+ tmpinindex = inint.indexOf(';');
+ tmpinfo = inint.substring(0, tmpinindex);
+ inint = inint.substring(tmpinindex + 1);
+ while(inint.startsWith(" ")) {
+ inint = inint.substring(1);
+ }
+ } else {
+ tmpinfo = inint;
+ }
+
+ tmpinindex = tmpinfo.indexOf(',');
+ tinfo.m_exetime[i] = Long.parseLong(tmpinfo.substring(0, tmpinindex));
+ tmpinfo = tmpinfo.substring(tmpinindex + 1);
+ while(tmpinfo.startsWith(" ")) {
+ tmpinfo = tmpinfo.substring(1);
+ }
+ tmpinindex = tmpinfo.indexOf(',');
+ tinfo.m_probability[i] = Double.parseDouble(
+ tmpinfo.substring(0,tmpinindex));
+ tmpinfo = tmpinfo.substring(tmpinindex + 1);
+ while(tmpinfo.startsWith(" ")) {
+ tmpinfo = tmpinfo.substring(1);
+ }
+ tmpinindex = tmpinfo.indexOf(',');
+ int numofnobjs = 0;
+ if(tmpinindex == -1) {
+ numofnobjs = Integer.parseInt(tmpinfo);
+ if(numofnobjs != 0) {
+ System.err.println("Error profile data format!");
+ System.exit(-1);
+ }
+ } else {
+ tinfo.m_newobjinfo.setElementAt(new Hashtable<String,Integer>(), i);
+ numofnobjs = Integer.parseInt(tmpinfo.substring(0, tmpinindex));
+ tmpinfo = tmpinfo.substring(tmpinindex + 1);
+ while(tmpinfo.startsWith(" ")) {
+ tmpinfo = tmpinfo.substring(1);
+ }
+ for(int j = 0; j < numofnobjs; j++) {
+ tmpinindex = tmpinfo.indexOf(',');
+ String nobjtype = tmpinfo.substring(0, tmpinindex);
+ tmpinfo = tmpinfo.substring(tmpinindex + 1);
+ while(tmpinfo.startsWith(" ")) {
+ tmpinfo = tmpinfo.substring(1);
+ }
+ int objnum = 0;
+ if(j < numofnobjs - 1) {
+ tmpinindex = tmpinfo.indexOf(',');
+ objnum = Integer.parseInt(tmpinfo.substring(0, tmpinindex));
+ tmpinfo = tmpinfo.substring(tmpinindex + 1);
+ while(tmpinfo.startsWith(" ")) {
+ tmpinfo = tmpinfo.substring(1);
+ }
+ } else {
+ objnum = Integer.parseInt(tmpinfo);
+ }
+ tinfo.m_newobjinfo.elementAt(i).put(nobjtype, objnum);
+ }
+ }
+ }
+ taskinfos.put(inname, tinfo);
+ inindex = profiledata.indexOf('\n');
}
inStream.close();
inStream = null;
java.util.Random r=new java.util.Random();
int tint = 0;
Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator();
- for(; it_classes.hasNext();) {
+ for(; it_classes.hasNext(); ) {
ClassDescriptor cd=(ClassDescriptor) it_classes.next();
if(cd.hasFlags()) {
- Vector rootnodes=this.taskanalysis.getRootNodes(cd);
- if(rootnodes!=null) {
- Iterator it_rootnodes=rootnodes.iterator();
- for(; it_rootnodes.hasNext();) {
- FlagState root=(FlagState)it_rootnodes.next();
- Vector allocatingTasks = root.getAllocatingTasks();
- if(allocatingTasks != null) {
- for(int k = 0; k < allocatingTasks.size(); k++) {
- TaskDescriptor td = (TaskDescriptor)allocatingTasks.elementAt(k);
- Vector<FEdge> fev =
- (Vector<FEdge>)this.taskanalysis.getFEdgesFromTD(td);
- int numEdges = fev.size();
- int total = 100;
- for(int j = 0; j < numEdges; j++) {
- FEdge pfe = fev.elementAt(j);
- if(numEdges - j == 1) {
- pfe.setProbability(total);
- } else {
- if((total != 0) && (total != 1)) {
- do {
- tint = r.nextInt()%total;
- } while(tint <= 0);
- }
- pfe.setProbability(tint);
- total -= tint;
- }
- //do {
- // tint = r.nextInt()%10;
- // } while(tint <= 0);
- //int newRate = tint;
- //int newRate = (j+1)%2+1;
- int newRate = 1;
- String cdname = cd.getSymbol();
- if((cdname.equals("SeriesRunner")) ||
- (cdname.equals("MDRunner")) ||
- (cdname.equals("Stage")) ||
- (cdname.equals("AppDemoRunner")) ||
- (cdname.equals("FilterBankAtom")) ||
- (cdname.equals("Grid")) ||
- (cdname.equals("Fractal")) ||
- (cdname.equals("KMeans")) ||
- (cdname.equals("ZTransform")) ||
- (cdname.equals("TestRunner")) ||
- (cdname.equals("TestRunner2")) ||
- (cdname.equals("LinkList")) ||
- (cdname.equals("BHRunner"))) {
- newRate = this.coreNum;
- } else if(cdname.equals("SentenceParser")) {
- newRate = 4;
- } else if(cdname.equals("BlurPiece")){
- newRate = 4;
- } else if(cdname.equals("ImageX")){
- newRate = 2 * 2;
- } else if(cdname.equals("ImageY")){
- newRate = 1 * 4;
- }
- //do {
- // tint = r.nextInt()%100;
- // } while(tint <= 0);
- // int probability = tint;
- int probability = 100;
- pfe.addNewObjInfo(cd, newRate, probability);
- }
- fev = null;
- }
- }
- }
- it_rootnodes = null;
- }
-
- Iterator it_flags = this.taskanalysis.getFlagStates(cd).iterator();
- while(it_flags.hasNext()) {
- FlagState fs = (FlagState)it_flags.next();
- Iterator it_edges = fs.edges();
- int total = 100;
- while(it_edges.hasNext()) {
- //do {
- // tint = r.nextInt()%10;
- // } while(tint <= 0);
- tint = 3;
- FEdge edge = (FEdge)it_edges.next();
- edge.setExeTime(tint);
- if((fs.getClassDescriptor().getSymbol().equals("MD"))
- && (edge.getTask().getSymbol().equals("t6"))) {
- if(edge.isbackedge()) {
- if(edge.getTarget().equals(edge.getSource())) {
- edge.setProbability(93.75);
- } else {
- edge.setProbability(3.125);
- }
- } else {
- edge.setProbability(3.125);
- }
- continue;
- }
- if(!it_edges.hasNext()) {
- edge.setProbability(total);
- } else {
- if((total != 0) && (total != 1)) {
- do {
- tint = r.nextInt()%total;
- } while(tint <= 0);
- }
- edge.setProbability(tint);
- total -= tint;
- }
- }
- it_edges = null;
- }
- it_flags = null;
+ Vector rootnodes=this.taskanalysis.getRootNodes(cd);
+ if(rootnodes!=null) {
+ Iterator it_rootnodes=rootnodes.iterator();
+ for(; it_rootnodes.hasNext(); ) {
+ FlagState root=(FlagState)it_rootnodes.next();
+ Vector allocatingTasks = root.getAllocatingTasks();
+ if(allocatingTasks != null) {
+ for(int k = 0; k < allocatingTasks.size(); k++) {
+ TaskDescriptor td = (TaskDescriptor)allocatingTasks.elementAt(k);
+ Vector<FEdge> fev =
+ (Vector<FEdge>) this.taskanalysis.getFEdgesFromTD(td);
+ int numEdges = fev.size();
+ int total = 100;
+ for(int j = 0; j < numEdges; j++) {
+ FEdge pfe = fev.elementAt(j);
+ if(numEdges - j == 1) {
+ pfe.setProbability(total);
+ } else {
+ if((total != 0) && (total != 1)) {
+ do {
+ tint = r.nextInt()%total;
+ } while(tint <= 0);
+ }
+ pfe.setProbability(tint);
+ total -= tint;
+ }
+ //do {
+ // tint = r.nextInt()%10;
+ // } while(tint <= 0);
+ //int newRate = tint;
+ //int newRate = (j+1)%2+1;
+ int newRate = 1;
+ String cdname = cd.getSymbol();
+ if((cdname.equals("SeriesRunner")) ||
+ (cdname.equals("MDRunner")) ||
+ (cdname.equals("Stage")) ||
+ (cdname.equals("AppDemoRunner")) ||
+ (cdname.equals("FilterBankAtom")) ||
+ (cdname.equals("Grid")) ||
+ (cdname.equals("Fractal")) ||
+ (cdname.equals("KMeans")) ||
+ (cdname.equals("ZTransform")) ||
+ (cdname.equals("TestRunner")) ||
+ (cdname.equals("TestRunner2")) ||
+ (cdname.equals("LinkList")) ||
+ (cdname.equals("BHRunner"))) {
+ newRate = this.coreNum;
+ } else if(cdname.equals("SentenceParser")) {
+ newRate = 4;
+ } else if(cdname.equals("BlurPiece")) {
+ newRate = 4;
+ } else if(cdname.equals("ImageX")) {
+ newRate = 2 * 2;
+ } else if(cdname.equals("ImageY")) {
+ newRate = 1 * 4;
+ }
+ //do {
+ // tint = r.nextInt()%100;
+ // } while(tint <= 0);
+ // int probability = tint;
+ int probability = 100;
+ pfe.addNewObjInfo(cd, newRate, probability);
+ }
+ fev = null;
+ }
+ }
+ }
+ it_rootnodes = null;
+ }
+
+ Iterator it_flags = this.taskanalysis.getFlagStates(cd).iterator();
+ while(it_flags.hasNext()) {
+ FlagState fs = (FlagState)it_flags.next();
+ Iterator it_edges = fs.edges();
+ int total = 100;
+ while(it_edges.hasNext()) {
+ //do {
+ // tint = r.nextInt()%10;
+ // } while(tint <= 0);
+ tint = 3;
+ FEdge edge = (FEdge)it_edges.next();
+ edge.setExeTime(tint);
+ if((fs.getClassDescriptor().getSymbol().equals("MD"))
+ && (edge.getTask().getSymbol().equals("t6"))) {
+ if(edge.isbackedge()) {
+ if(edge.getTarget().equals(edge.getSource())) {
+ edge.setProbability(93.75);
+ } else {
+ edge.setProbability(3.125);
+ }
+ } else {
+ edge.setProbability(3.125);
+ }
+ continue;
+ }
+ if(!it_edges.hasNext()) {
+ edge.setProbability(total);
+ } else {
+ if((total != 0) && (total != 1)) {
+ do {
+ tint = r.nextInt()%total;
+ } while(tint <= 0);
+ }
+ edge.setProbability(tint);
+ total -= tint;
+ }
+ }
+ it_edges = null;
+ }
+ it_flags = null;
}
}
it_classes = null;
private ScheduleNode buildCFSTG(Vector<ScheduleEdge> toBreakDown,
Vector<TaskDescriptor> multiparamtds) {
- Hashtable<ClassDescriptor, ClassNode> cdToCNodes =
+ Hashtable<ClassDescriptor, ClassNode> cdToCNodes =
new Hashtable<ClassDescriptor, ClassNode>();
// Build the combined flag transition diagram
// First, for each class create a ClassNode
Vector<FlagState> sFStates = FlagState.DFS.topology(fStates, null);
Vector rootnodes = taskanalysis.getRootNodes(cd);
- if(((rootnodes != null) && (rootnodes.size() > 0))
- || (cd.getSymbol().equals(TypeUtil.StartupClass))) {
- ClassNode cNode = new ClassNode(cd, sFStates);
- cNode.setSorted(true);
- classNodes.add(cNode);
- cd2ClassNode.put(cd, cNode);
- cdToCNodes.put(cd, cNode);
- cNode.calExeTime();
+ if(((rootnodes != null) && (rootnodes.size() > 0))
+ || (cd.getSymbol().equals(TypeUtil.StartupClass))) {
+ ClassNode cNode = new ClassNode(cd, sFStates);
+ cNode.setSorted(true);
+ classNodes.add(cNode);
+ cd2ClassNode.put(cd, cNode);
+ cdToCNodes.put(cd, cNode);
+ cNode.calExeTime();
}
rootnodes = null;
fStates = null;
ClassNode cn = classNodes.elementAt(i);
ScheduleNode sn = new ScheduleNode(cn, 0);
if(cn.getClassDescriptor().getSymbol().equals(TypeUtil.StartupClass)) {
- startupNode = sn;
+ startupNode = sn;
}
cn.setScheduleNode(sn);
scheduleNodes.add(sn);
try {
- sn.calExeTime();
+ sn.calExeTime();
} catch (Exception e) {
- e.printStackTrace();
+ e.printStackTrace();
}
}
ClassDescriptor cd = cNode.getClassDescriptor();
Vector rootnodes = taskanalysis.getRootNodes(cd);
if(rootnodes != null) {
- for(int h = 0; h < rootnodes.size(); h++) {
- FlagState root=(FlagState)rootnodes.elementAt(h);
- Vector allocatingTasks = root.getAllocatingTasks();
- if(allocatingTasks != null) {
- for(int k = 0; k < allocatingTasks.size(); k++) {
- TaskDescriptor td = (TaskDescriptor)allocatingTasks.elementAt(k);
- Vector<FEdge> fev =
- (Vector<FEdge>)taskanalysis.getFEdgesFromTD(td);
- int numEdges = fev.size();
- ScheduleNode sNode = cNode.getScheduleNode();
- for(int j = 0; j < numEdges; j++) {
- FEdge pfe = fev.elementAt(j);
- FEdge.NewObjInfo noi = pfe.getNewObjInfo(cd);
- if ((noi == null) || (noi.getNewRate() == 0)
- || (noi.getProbability() == 0)) {
- // fake creating edge, do not need to create corresponding
- // 'new' edge
- continue;
- }
- if(noi.getRoot() == null) {
- // set root FlagState
- noi.setRoot(root);
- }
- FlagState pfs = (FlagState)pfe.getTarget();
- ClassDescriptor pcd = pfs.getClassDescriptor();
- ClassNode pcNode = cdToCNodes.get(pcd);
-
- ScheduleEdge sEdge = new ScheduleEdge(sNode,
- "new",
- root,
- ScheduleEdge.NEWEDGE,
- 0);
- sEdge.setFEdge(pfe);
- sEdge.setSourceCNode(pcNode);
- sEdge.setTargetCNode(cNode);
- sEdge.setTargetFState(root);
- sEdge.setNewRate(noi.getNewRate());
- sEdge.setProbability(noi.getProbability());
- pcNode.getScheduleNode().addEdge(sEdge);
- scheduleEdges.add(sEdge);
- if((j !=0 ) || (k != 0) || (h != 0)) {
- toBreakDown.add(sEdge);
- }
- }
- fev = null;
- }
- allocatingTasks = null;
- }
- }
- rootnodes = null;
+ for(int h = 0; h < rootnodes.size(); h++) {
+ FlagState root=(FlagState)rootnodes.elementAt(h);
+ Vector allocatingTasks = root.getAllocatingTasks();
+ if(allocatingTasks != null) {
+ for(int k = 0; k < allocatingTasks.size(); k++) {
+ TaskDescriptor td = (TaskDescriptor)allocatingTasks.elementAt(k);
+ Vector<FEdge> fev =
+ (Vector<FEdge>)taskanalysis.getFEdgesFromTD(td);
+ int numEdges = fev.size();
+ ScheduleNode sNode = cNode.getScheduleNode();
+ for(int j = 0; j < numEdges; j++) {
+ FEdge pfe = fev.elementAt(j);
+ FEdge.NewObjInfo noi = pfe.getNewObjInfo(cd);
+ if ((noi == null) || (noi.getNewRate() == 0)
+ || (noi.getProbability() == 0)) {
+ // fake creating edge, do not need to create corresponding
+ // 'new' edge
+ continue;
+ }
+ if(noi.getRoot() == null) {
+ // set root FlagState
+ noi.setRoot(root);
+ }
+ FlagState pfs = (FlagState)pfe.getTarget();
+ ClassDescriptor pcd = pfs.getClassDescriptor();
+ ClassNode pcNode = cdToCNodes.get(pcd);
+
+ ScheduleEdge sEdge = new ScheduleEdge(sNode,
+ "new",
+ root,
+ ScheduleEdge.NEWEDGE,
+ 0);
+ sEdge.setFEdge(pfe);
+ sEdge.setSourceCNode(pcNode);
+ sEdge.setTargetCNode(cNode);
+ sEdge.setTargetFState(root);
+ sEdge.setNewRate(noi.getNewRate());
+ sEdge.setProbability(noi.getProbability());
+ pcNode.getScheduleNode().addEdge(sEdge);
+ scheduleEdges.add(sEdge);
+ if((j !=0 ) || (k != 0) || (h != 0)) {
+ toBreakDown.add(sEdge);
+ }
+ }
+ fev = null;
+ }
+ allocatingTasks = null;
+ }
+ }
+ rootnodes = null;
}
}
cdToCNodes = null;
-
+
for(i = 0; i < multiparamtds.size(); i++) {
TaskDescriptor td = multiparamtds.elementAt(i);
ClassDescriptor cd = td.getParamType(0).getClassDesc();
// set the first parameter as main cd
- // NOTE: programmer should write in such a style that
+ // NOTE: programmer should write in such a style that
// for all multi-param tasks, the main class should be
// the first parameter
// TODO: may have bug when cd has multiple new flag states
return startupNode;
}
- private void treeTransform(Vector<ScheduleEdge> toBreakDown,
+ private void treeTransform(Vector<ScheduleEdge> toBreakDown,
ScheduleNode startupNode) {
int i = 0;
// Break down the 'cycle's
try {
for(i = 0; i < toBreakDown.size(); i++ ) {
- cloneSNodeList(toBreakDown.elementAt(i), false);
+ cloneSNodeList(toBreakDown.elementAt(i), false);
}
} catch (Exception e) {
e.printStackTrace();
for(i = 0; i < scheduleEdges.size(); i++) {
ScheduleEdge se = (ScheduleEdge)scheduleEdges.elementAt(i);
if((0 == se.getNewRate()) || (0 == se.getProbability())) {
- scheduleEdges.removeElement(se);
- scheduleNodes.removeElement(se.getTarget());
+ scheduleEdges.removeElement(se);
+ scheduleNodes.removeElement(se.getTarget());
}
}
// Do topology sort of the ClassNodes and ScheduleEdges.
Vector<ScheduleEdge> ssev = new Vector<ScheduleEdge>();
- Vector<ScheduleNode> tempSNodes =
+ Vector<ScheduleNode> tempSNodes =
ClassNode.DFS.topology(scheduleNodes, ssev);
scheduleNodes.removeAllElements();
scheduleNodes = tempSNodes;
while(!toVisit.isEmpty()) {
ScheduleNode sn = toVisit.poll();
if(sn.getCid() == -1) {
- // not visited before
- sn.setCid(ScheduleNode.colorID++);
- Iterator it_edge = sn.edges();
- while(it_edge.hasNext()) {
- toVisit.add((ScheduleNode)((ScheduleEdge)it_edge.next()).getTarget());
- }
- it_edge = null;
+ // not visited before
+ sn.setCid(ScheduleNode.colorID++);
+ Iterator it_edge = sn.edges();
+ while(it_edge.hasNext()) {
+ toVisit.add((ScheduleNode)((ScheduleEdge)it_edge.next()).getTarget());
+ }
+ it_edge = null;
}
}
toVisit = null;
if(this.state.PRINTSCHEDULING) {
SchedulingUtil.printScheduleGraph(
- this.state.outputdir + "scheduling_ori.dot", this.scheduleNodes);
+ this.state.outputdir + "scheduling_ori.dot", this.scheduleNodes);
}
}
-
+
private void handleDescenSEs(Vector<ScheduleEdge> ses,
boolean isflag) {
if(isflag) {
long temptime = tempse.getListExeTime();
// find out the ScheduleEdge with least exeTime
for(int k = 1; k < ses.size(); k++) {
- long ttemp = ses.elementAt(k).getListExeTime();
- if(ttemp < temptime) {
- tempse = ses.elementAt(k);
- temptime = ttemp;
- } // if(ttemp < temptime)
+ long ttemp = ses.elementAt(k).getListExeTime();
+ if(ttemp < temptime) {
+ tempse = ses.elementAt(k);
+ temptime = ttemp;
+ } // if(ttemp < temptime)
} // for(int k = 1; k < ses.size(); k++)
- // handle the tempse
+ // handle the tempse
handleScheduleEdge(tempse, true);
ses.removeElement(tempse);
}
private void CFSTGTransform() {
// First iteration
int i = 0;
-
+
// table of all schedule edges associated to one fedge
- Hashtable<FEdge, Vector<ScheduleEdge>> fe2ses =
+ Hashtable<FEdge, Vector<ScheduleEdge>> fe2ses =
new Hashtable<FEdge, Vector<ScheduleEdge>>();
// table of all fedges associated to one schedule node
- Hashtable<ScheduleNode, Vector<FEdge>> sn2fes =
+ Hashtable<ScheduleNode, Vector<FEdge>> sn2fes =
new Hashtable<ScheduleNode, Vector<FEdge>>();
ScheduleNode preSNode = null;
// Access the ScheduleEdges in reverse topology order
for(i = scheduleEdges.size(); i > 0; i--) {
ScheduleEdge se = (ScheduleEdge)scheduleEdges.elementAt(i-1);
if(ScheduleEdge.NEWEDGE == se.getType()) {
- if(preSNode == null) {
- preSNode = (ScheduleNode)se.getSource();
- }
-
- boolean split = false;
- FEdge fe = se.getFEdge();
- if(fe.getSource() == fe.getTarget()) {
- // the associated start fe is a back edge
- try {
- // check the number of newly created objs
- int repeat = (int)Math.ceil(se.getNewRate()*se.getProbability()/100);
- int rate = 0;
- /*if(repeat > 1) {
- // more than one new objs, expand the new edge
- for(int j = 1; j< repeat; j++ ) {
- cloneSNodeList(se, true);
- } // for(int j = 1; j< repeat; j++ )
- se.setNewRate(1);
- se.setProbability(100);
- } // if(repeat > 1)*/
- try {
- // match the rates of obj creation and new obj consumption
- rate = (int)Math.ceil(
- se.getListExeTime()/calInExeTime(se.getSourceFState()));
- } catch (Exception e) {
- e.printStackTrace();
- } // try-catch {}
- repeat = (rate > repeat)? rate : repeat;
- // expand the new edge
- for(int j = 1; j< repeat; j++ ) {
- cloneSNodeList(se, true);
- } // for(int j = 1; j< repeat; j++ )
- se.setNewRate(1);
- se.setProbability(100);
- /*for(int j = rate - 1; j > 0; j--) {
- for(int k = repeat; k > 0; k--) {
- cloneSNodeList(se, true);
- } // for(int k = repeat; k > 0; k--)
- } // for(int j = rate - 1; j > 0; j--)*/
- } catch (Exception e) {
- e.printStackTrace();
- System.exit(-1);
- } // try-catch{}
- } else { // if(fe.getSource() == fe.getTarget())
- // the associated start fe is not a back edge
- // Note: if preSNode is not the same as se's source ScheduleNode
- // handle any ScheduleEdges previously put into fe2ses whose source
- // ScheduleNode is preSNode
- boolean same = (preSNode == se.getSource());
- if(!same) {
- // check the topology sort, only process those after se.getSource()
- if(preSNode.getFinishingTime() < se.getSource().getFinishingTime()){
- if(sn2fes.containsKey(preSNode)) {
- Vector<FEdge> fes = sn2fes.remove(preSNode);
- for(int j = 0; j < fes.size(); j++) {
- FEdge tempfe = fes.elementAt(j);
- Vector<ScheduleEdge> ses = fe2ses.get(tempfe);
- boolean isflag = !(preSNode.edges().hasNext());
- this.handleDescenSEs(ses, isflag);
- ses = null;
- fe2ses.remove(tempfe);
- } // for(int j = 0; j < fes.size(); j++)
- fes = null;
- }
- }
- preSNode = (ScheduleNode)se.getSource();
- } // if(!same)
-
- if(fe.getTarget().edges().hasNext()) {
- // not associated with the last task, check if to split the snode
- if((!(se.getTransTime() < this.transThreshold))
- && (se.getSourceCNode().getTransTime() < se.getTransTime())) {
- // it's better to transfer the other obj with preSnode
- split = true;
- splitSNode(se, true);
- }
- } // if(!fe.getTarget().edges().hasNext())
-
- if(!split) {
- // delay the expanding and merging until we find all such 'new'
- // edges associated with a last task inside this ClassNode
- if(fe2ses.get(fe) == null) {
- fe2ses.put(fe, new Vector<ScheduleEdge>());
- }
- if(sn2fes.get((ScheduleNode)se.getSource()) == null) {
- sn2fes.put((ScheduleNode)se.getSource(), new Vector<FEdge>());
- }
- if(!fe2ses.get(fe).contains(se)) {
- fe2ses.get(fe).add(se);
- }
- if(!sn2fes.get((ScheduleNode)se.getSource()).contains(fe)) {
- sn2fes.get((ScheduleNode)se.getSource()).add(fe);
- }
- } // if(!split)
- } // if(fe.getSource() == fe.getTarget())
+ if(preSNode == null) {
+ preSNode = (ScheduleNode)se.getSource();
+ }
+
+ boolean split = false;
+ FEdge fe = se.getFEdge();
+ if(fe.getSource() == fe.getTarget()) {
+ // the associated start fe is a back edge
+ try {
+ // check the number of newly created objs
+ int repeat = (int)Math.ceil(se.getNewRate()*se.getProbability()/100);
+ int rate = 0;
+ /*if(repeat > 1) {
+ // more than one new objs, expand the new edge
+ for(int j = 1; j< repeat; j++ ) {
+ cloneSNodeList(se, true);
+ } // for(int j = 1; j< repeat; j++ )
+ se.setNewRate(1);
+ se.setProbability(100);
+ } // if(repeat > 1)*/
+ try {
+ // match the rates of obj creation and new obj consumption
+ rate = (int)Math.ceil(
+ se.getListExeTime()/calInExeTime(se.getSourceFState()));
+ } catch (Exception e) {
+ e.printStackTrace();
+ } // try-catch {}
+ repeat = (rate > repeat)?rate:repeat;
+ // expand the new edge
+ for(int j = 1; j< repeat; j++ ) {
+ cloneSNodeList(se, true);
+ } // for(int j = 1; j< repeat; j++ )
+ se.setNewRate(1);
+ se.setProbability(100);
+ /*for(int j = rate - 1; j > 0; j--) {
+ for(int k = repeat; k > 0; k--) {
+ cloneSNodeList(se, true);
+ } // for(int k = repeat; k > 0; k--)
+ } // for(int j = rate - 1; j > 0; j--)*/
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.exit(-1);
+ } // try-catch{}
+ } else { // if(fe.getSource() == fe.getTarget())
+ // the associated start fe is not a back edge
+ // Note: if preSNode is not the same as se's source ScheduleNode
+ // handle any ScheduleEdges previously put into fe2ses whose source
+ // ScheduleNode is preSNode
+ boolean same = (preSNode == se.getSource());
+ if(!same) {
+ // check the topology sort, only process those after se.getSource()
+ if(preSNode.getFinishingTime() < se.getSource().getFinishingTime()) {
+ if(sn2fes.containsKey(preSNode)) {
+ Vector<FEdge> fes = sn2fes.remove(preSNode);
+ for(int j = 0; j < fes.size(); j++) {
+ FEdge tempfe = fes.elementAt(j);
+ Vector<ScheduleEdge> ses = fe2ses.get(tempfe);
+ boolean isflag = !(preSNode.edges().hasNext());
+ this.handleDescenSEs(ses, isflag);
+ ses = null;
+ fe2ses.remove(tempfe);
+ } // for(int j = 0; j < fes.size(); j++)
+ fes = null;
+ }
+ }
+ preSNode = (ScheduleNode)se.getSource();
+ } // if(!same)
+
+ if(fe.getTarget().edges().hasNext()) {
+ // not associated with the last task, check if to split the snode
+ if((!(se.getTransTime() < this.transThreshold))
+ && (se.getSourceCNode().getTransTime() < se.getTransTime())) {
+ // it's better to transfer the other obj with preSnode
+ split = true;
+ splitSNode(se, true);
+ }
+ } // if(!fe.getTarget().edges().hasNext())
+
+ if(!split) {
+ // delay the expanding and merging until we find all such 'new'
+ // edges associated with a last task inside this ClassNode
+ if(fe2ses.get(fe) == null) {
+ fe2ses.put(fe, new Vector<ScheduleEdge>());
+ }
+ if(sn2fes.get((ScheduleNode)se.getSource()) == null) {
+ sn2fes.put((ScheduleNode)se.getSource(), new Vector<FEdge>());
+ }
+ if(!fe2ses.get(fe).contains(se)) {
+ fe2ses.get(fe).add(se);
+ }
+ if(!sn2fes.get((ScheduleNode)se.getSource()).contains(fe)) {
+ sn2fes.get((ScheduleNode)se.getSource()).add(fe);
+ }
+ } // if(!split)
+ } // if(fe.getSource() == fe.getTarget())
} // if(ScheduleEdge.NEWEDGE == se.getType())
} // for(i = scheduleEdges.size(); i > 0; i--)
if(!fe2ses.isEmpty()) {
Set<FEdge> keys = fe2ses.keySet();
Iterator it_keys = keys.iterator();
while(it_keys.hasNext()) {
- FEdge tempfe = (FEdge)it_keys.next();
- Vector<ScheduleEdge> ses = fe2ses.get(tempfe);
- boolean isflag = !(tempfe.getTarget().edges().hasNext());
- this.handleDescenSEs(ses, isflag);
- ses = null;
+ FEdge tempfe = (FEdge)it_keys.next();
+ Vector<ScheduleEdge> ses = fe2ses.get(tempfe);
+ boolean isflag = !(tempfe.getTarget().edges().hasNext());
+ this.handleDescenSEs(ses, isflag);
+ ses = null;
}
keys = null;
it_keys = null;
if(this.state.PRINTSCHEDULING) {
SchedulingUtil.printScheduleGraph(
- this.state.outputdir + "scheduling_extend.dot", this.scheduleNodes);
+ this.state.outputdir + "scheduling_extend.dot", this.scheduleNodes);
}
}
- private void handleScheduleEdge(ScheduleEdge se,
+ private void handleScheduleEdge(ScheduleEdge se,
boolean merge) {
try {
int rate = 0;
int repeat = (int)Math.ceil(se.getNewRate() * se.getProbability() / 100);
if(merge) {
- try {
- if(se.getListExeTime() == 0) {
- rate = repeat;
- } else {
- rate = (int)Math.ceil(
- (se.getTransTime()-calInExeTime(se.getSourceFState()))
- /se.getListExeTime());
- }
- if(rate < 0 ) {
- rate = 0;
- }
- } catch (Exception e) {
- e.printStackTrace();
- }
- if(0 == rate) {
- // clone the whole ScheduleNode lists starting with se's target
- for(int j = 1; j < repeat; j++ ) {
- cloneSNodeList(se, true);
- }
- se.setNewRate(1);
- se.setProbability(100);
- } else {
- repeat -= rate;
- if(repeat > 0) {
- // clone the whole ScheduleNode lists starting with se's target
- for(int j = 0; j < repeat; j++ ) {
- cloneSNodeList(se, true);
- }
- se.setNewRate(rate);
- se.setProbability(100);
- }
- }
- // merge the original ScheduleNode to the source ScheduleNode
- ((ScheduleNode)se.getSource()).mergeSEdge(se);
- scheduleNodes.remove(se.getTarget());
- scheduleEdges.remove(se);
- // As se has been changed into an internal edge inside a ScheduleNode,
- // change the source and target of se from original ScheduleNodes
- // into ClassNodes.
- if(se.getType() == ScheduleEdge.NEWEDGE) {
- se.setTarget(se.getTargetCNode());
- //se.setSource(se.getSourceCNode());
- //se.getTargetCNode().addEdge(se);
- se.getSourceCNode().addEdge(se);
- }
+ try {
+ if(se.getListExeTime() == 0) {
+ rate = repeat;
+ } else {
+ rate = (int)Math.ceil(
+ (se.getTransTime()-calInExeTime(se.getSourceFState()))
+ /se.getListExeTime());
+ }
+ if(rate < 0 ) {
+ rate = 0;
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ if(0 == rate) {
+ // clone the whole ScheduleNode lists starting with se's target
+ for(int j = 1; j < repeat; j++ ) {
+ cloneSNodeList(se, true);
+ }
+ se.setNewRate(1);
+ se.setProbability(100);
+ } else {
+ repeat -= rate;
+ if(repeat > 0) {
+ // clone the whole ScheduleNode lists starting with se's target
+ for(int j = 0; j < repeat; j++ ) {
+ cloneSNodeList(se, true);
+ }
+ se.setNewRate(rate);
+ se.setProbability(100);
+ }
+ }
+ // merge the original ScheduleNode to the source ScheduleNode
+ ((ScheduleNode)se.getSource()).mergeSEdge(se);
+ scheduleNodes.remove(se.getTarget());
+ scheduleEdges.remove(se);
+ // As se has been changed into an internal edge inside a ScheduleNode,
+ // change the source and target of se from original ScheduleNodes
+ // into ClassNodes.
+ if(se.getType() == ScheduleEdge.NEWEDGE) {
+ se.setTarget(se.getTargetCNode());
+ //se.setSource(se.getSourceCNode());
+ //se.getTargetCNode().addEdge(se);
+ se.getSourceCNode().addEdge(se);
+ }
} else {
- // clone the whole ScheduleNode lists starting with se's target
- for(int j = 1; j < repeat; j++ ) {
- cloneSNodeList(se, true);
- }
- se.setNewRate(1);
- se.setProbability(100);
+ // clone the whole ScheduleNode lists starting with se's target
+ for(int j = 1; j < repeat; j++ ) {
+ cloneSNodeList(se, true);
+ }
+ se.setNewRate(1);
+ se.setProbability(100);
}
} catch (Exception e) {
e.printStackTrace();
}
}
- private void cloneSNodeList(ScheduleEdge sEdge,
- boolean copyIE) throws Exception {
- Hashtable<ClassNode, ClassNode> cn2cn =
- new Hashtable<ClassNode, ClassNode>(); // hashtable from classnode in
+ private void cloneSNodeList(ScheduleEdge sEdge,
+ boolean copyIE) throws Exception {
+ Hashtable<ClassNode, ClassNode> cn2cn =
+ new Hashtable<ClassNode, ClassNode>(); // hashtable from classnode in
// orignal se's targe to cloned one
- ScheduleNode csNode =
+ ScheduleNode csNode =
(ScheduleNode)((ScheduleNode)sEdge.getTarget()).clone(cn2cn, 0);
scheduleNodes.add(csNode);
if(copyIE) {
Vector inedges = sEdge.getTarget().getInedgeVector();
for(i = 0; i < inedges.size(); i++) {
- ScheduleEdge tse = (ScheduleEdge)inedges.elementAt(i);
- ScheduleEdge se;
- switch(tse.getType()) {
- case ScheduleEdge.NEWEDGE: {
- se = new ScheduleEdge(csNode,"new",tse.getFstate(),tse.getType(),0);
- se.setProbability(100);
- se.setNewRate(1);
- break;
- }
-
- case ScheduleEdge.TRANSEDGE: {
- se = new ScheduleEdge(csNode,"transmit",tse.getFstate(),tse.getType(),0);
- se.setProbability(tse.getProbability());
- se.setNewRate(tse.getNewRate());
- break;
- }
-
- default: {
- throw new Exception("Error: not valid ScheduleEdge here");
- }
- }
- se.setSourceCNode(tse.getSourceCNode());
- se.setTargetCNode(cn2cn.get(tse.getTargetCNode()));
- se.setFEdge(tse.getFEdge());
- se.setTargetFState(tse.getTargetFState());
- se.setIsclone(true);
- tse.getSource().addEdge(se);
- scheduleEdges.add(se);
+ ScheduleEdge tse = (ScheduleEdge)inedges.elementAt(i);
+ ScheduleEdge se;
+ switch(tse.getType()) {
+ case ScheduleEdge.NEWEDGE: {
+ se = new ScheduleEdge(csNode,"new",tse.getFstate(),tse.getType(),0);
+ se.setProbability(100);
+ se.setNewRate(1);
+ break;
+ }
+
+ case ScheduleEdge.TRANSEDGE: {
+ se = new ScheduleEdge(csNode,"transmit",tse.getFstate(),tse.getType(),0);
+ se.setProbability(tse.getProbability());
+ se.setNewRate(tse.getNewRate());
+ break;
+ }
+
+ default: {
+ throw new Exception("Error: not valid ScheduleEdge here");
+ }
+ }
+ se.setSourceCNode(tse.getSourceCNode());
+ se.setTargetCNode(cn2cn.get(tse.getTargetCNode()));
+ se.setFEdge(tse.getFEdge());
+ se.setTargetFState(tse.getTargetFState());
+ se.setIsclone(true);
+ tse.getSource().addEdge(se);
+ scheduleEdges.add(se);
}
inedges = null;
} else {
Queue<ScheduleNode> clone = new LinkedList<ScheduleNode>(); //clone nodes
Queue<Hashtable> qcn2cn = new LinkedList<Hashtable>(); // queue of the mappings of classnodes inside cloned ScheduleNode
Vector<ScheduleNode> origins = new Vector<ScheduleNode>(); // queue of source ScheduleNode cloned
- Hashtable<ScheduleNode, ScheduleNode> sn2sn =
+ Hashtable<ScheduleNode, ScheduleNode> sn2sn =
new Hashtable<ScheduleNode, ScheduleNode>(); // mapping from cloned ScheduleNode to clone ScheduleNode
clone.add(csNode);
toClone.add((ScheduleNode)sEdge.getTarget());
sn2sn.put((ScheduleNode)sEdge.getTarget(), csNode);
qcn2cn.add(cn2cn);
while(!toClone.isEmpty()) {
- Hashtable<ClassNode, ClassNode> tocn2cn =
+ Hashtable<ClassNode, ClassNode> tocn2cn =
new Hashtable<ClassNode, ClassNode>();
csNode = clone.poll();
ScheduleNode osNode = toClone.poll();
// Clone all the external ScheduleEdges and the following ScheduleNodes
Vector edges = osNode.getEdgeVector();
for(i = 0; i < edges.size(); i++) {
- ScheduleEdge tse = (ScheduleEdge)edges.elementAt(i);
- ScheduleNode tSNode =
- (ScheduleNode)((ScheduleNode)tse.getTarget()).clone(tocn2cn, 0);
- scheduleNodes.add(tSNode);
- clone.add(tSNode);
- toClone.add((ScheduleNode)tse.getTarget());
- origins.addElement((ScheduleNode)tse.getTarget());
- sn2sn.put((ScheduleNode)tse.getTarget(), tSNode);
- qcn2cn.add(tocn2cn);
- ScheduleEdge se = null;
- switch(tse.getType()) {
- case ScheduleEdge.NEWEDGE: {
- se = new ScheduleEdge(tSNode,"new",tse.getFstate(),tse.getType(),0);
- break;
- }
-
- case ScheduleEdge.TRANSEDGE: {
- se = new ScheduleEdge(tSNode,"transmit",tse.getFstate(),tse.getType(),0);
- break;
- }
-
- default: {
- throw new Exception("Error: not valid ScheduleEdge here");
- }
- }
- se.setSourceCNode(cn2cn.get(tse.getSourceCNode()));
- se.setTargetCNode(tocn2cn.get(tse.getTargetCNode()));
- se.setFEdge(tse.getFEdge());
- se.setTargetFState(tse.getTargetFState());
- se.setProbability(tse.getProbability());
- se.setNewRate(tse.getNewRate());
- se.setIsclone(true);
- csNode.addEdge(se);
- scheduleEdges.add(se);
+ ScheduleEdge tse = (ScheduleEdge)edges.elementAt(i);
+ ScheduleNode tSNode =
+ (ScheduleNode)((ScheduleNode)tse.getTarget()).clone(tocn2cn, 0);
+ scheduleNodes.add(tSNode);
+ clone.add(tSNode);
+ toClone.add((ScheduleNode)tse.getTarget());
+ origins.addElement((ScheduleNode)tse.getTarget());
+ sn2sn.put((ScheduleNode)tse.getTarget(), tSNode);
+ qcn2cn.add(tocn2cn);
+ ScheduleEdge se = null;
+ switch(tse.getType()) {
+ case ScheduleEdge.NEWEDGE: {
+ se = new ScheduleEdge(tSNode,"new",tse.getFstate(),tse.getType(),0);
+ break;
+ }
+
+ case ScheduleEdge.TRANSEDGE: {
+ se = new ScheduleEdge(tSNode,"transmit",tse.getFstate(),tse.getType(),0);
+ break;
+ }
+
+ default: {
+ throw new Exception("Error: not valid ScheduleEdge here");
+ }
+ }
+ se.setSourceCNode(cn2cn.get(tse.getSourceCNode()));
+ se.setTargetCNode(tocn2cn.get(tse.getTargetCNode()));
+ se.setFEdge(tse.getFEdge());
+ se.setTargetFState(tse.getTargetFState());
+ se.setProbability(tse.getProbability());
+ se.setNewRate(tse.getNewRate());
+ se.setIsclone(true);
+ csNode.addEdge(se);
+ scheduleEdges.add(se);
}
tocn2cn = null;
edges = null;
exeTime = cNode.getFlagStates().elementAt(0).getExeTime() - fs.getExeTime();
while(true) {
Vector inedges = cNode.getInedgeVector();
- // Now that there are associate ScheduleEdges, there may be
+ // Now that there are associate ScheduleEdges, there may be
// multiple inedges of a ClassNode
if(inedges.size() > 1) {
- throw new Exception("Error: ClassNode's inedges more than one!");
+ throw new Exception("Error: ClassNode's inedges more than one!");
}
if(inedges.size() > 0) {
- ScheduleEdge sEdge = (ScheduleEdge)inedges.elementAt(0);
- cNode = (ClassNode)sEdge.getSource();
- exeTime += cNode.getFlagStates().elementAt(0).getExeTime();
+ ScheduleEdge sEdge = (ScheduleEdge)inedges.elementAt(0);
+ cNode = (ClassNode)sEdge.getSource();
+ exeTime += cNode.getFlagStates().elementAt(0).getExeTime();
} else {
- break;
+ break;
}
inedges = null;
}
return exeTime;
}
- private ScheduleNode splitSNode(ScheduleEdge se,
+ private ScheduleNode splitSNode(ScheduleEdge se,
boolean copy) {
assert(ScheduleEdge.NEWEDGE == se.getType());
FlagState tfs = toiterate.poll();
Iterator it_edges = tfs.edges();
while(it_edges.hasNext()) {
- FlagState temp = (FlagState)((FEdge)it_edges.next()).getTarget();
- if(!fStates.contains(temp)) {
- fStates.add(temp);
- toiterate.add(temp);
- sfss.removeElement(temp);
- }
+ FlagState temp = (FlagState)((FEdge)it_edges.next()).getTarget();
+ if(!fStates.contains(temp)) {
+ fStates.add(temp);
+ toiterate.add(temp);
+ sfss.removeElement(temp);
+ }
}
it_edges = null;
}
long ttime = tfs.getExeTime();
Iterator it_inedges = tfs.inedges();
while(it_inedges.hasNext()) {
- FEdge fEdge = (FEdge)it_inedges.next();
- FlagState temp = (FlagState)fEdge.getSource();
- long time = fEdge.getExeTime() + ttime;
- if(temp.getExeTime() > time) {
- temp.setExeTime(time);
- toiterate.add(temp);
- }
+ FEdge fEdge = (FEdge)it_inedges.next();
+ FlagState temp = (FlagState)fEdge.getSource();
+ long time = fEdge.getExeTime() + ttime;
+ if(temp.getExeTime() > time) {
+ temp.setExeTime(time);
+ toiterate.add(temp);
+ }
}
it_inedges = null;
}
toiterate = null;
- // create a 'trans' ScheudleEdge between this new ScheduleNode and se's
+ // create a 'trans' ScheduleEdge between this new ScheduleNode and se's
// source ScheduleNode
- ScheduleEdge sEdge =
+ ScheduleEdge sEdge =
new ScheduleEdge(sNode, "transmit", fs, ScheduleEdge.TRANSEDGE, 0);
sEdge.setFEdge(fe);
sEdge.setSourceCNode(sCNode);
sEdge.setTransTime(cNode.getTransTime());
se.getSource().addEdge(sEdge);
scheduleEdges.add(sEdge);
- // remove the ClassNodes and internal ScheduleEdges out of this subtree
+ // remove the ClassNodes and internal ScheduleEdges out of this subtree
// to the new ScheduleNode
ScheduleNode oldSNode = (ScheduleNode)se.getSource();
Iterator it_isEdges = oldSNode.getScheduleEdgesIterator();
rCNodes.addElement(sCNode);
if(it_isEdges != null) {
while(it_isEdges.hasNext()) {
- ScheduleEdge tse = (ScheduleEdge)it_isEdges.next();
- if(rCNodes.contains(tse.getSourceCNode())) {
- if(sCNode.equals(tse.getSourceCNode())) {
- if (!(tse.getSourceFState().equals(fs))
- && (sFStates.contains(tse.getSourceFState()))) {
- tse.setSource(cNode);
- tse.setSourceCNode(cNode);
- } else {
- continue;
- }
- }
- sNode.getScheduleEdges().addElement(tse);
- sNode.getClassNodes().addElement(tse.getTargetCNode());
- rCNodes.addElement(tse.getTargetCNode());
- oldSNode.getClassNodes().removeElement(tse.getTargetCNode());
- toremove.addElement(tse);
- }
+ ScheduleEdge tse = (ScheduleEdge)it_isEdges.next();
+ if(rCNodes.contains(tse.getSourceCNode())) {
+ if(sCNode.equals(tse.getSourceCNode())) {
+ if (!(tse.getSourceFState().equals(fs))
+ && (sFStates.contains(tse.getSourceFState()))) {
+ tse.setSource(cNode);
+ tse.setSourceCNode(cNode);
+ } else {
+ continue;
+ }
+ }
+ sNode.getScheduleEdges().addElement(tse);
+ sNode.getClassNodes().addElement(tse.getTargetCNode());
+ rCNodes.addElement(tse.getTargetCNode());
+ oldSNode.getClassNodes().removeElement(tse.getTargetCNode());
+ toremove.addElement(tse);
+ }
}
}
it_isEdges = null;
Iterator it_sEdges = se.getSource().edges();
while(it_sEdges.hasNext()) {
ScheduleEdge tse = (ScheduleEdge)it_sEdges.next();
- if(!(tse.equals(se)) && !(tse.equals(sEdge))
- && (tse.getSourceCNode().equals(sCNode))) {
- if(!(tse.getSourceFState().equals(fs))
- && (sFStates.contains(tse.getSourceFState()))) {
- tse.setSource(sNode);
- tse.setSourceCNode(cNode);
- sNode.getEdgeVector().addElement(tse);
- toremove.add(tse);
- }
+ if(!(tse.equals(se)) && !(tse.equals(sEdge))
+ && (tse.getSourceCNode().equals(sCNode))) {
+ if(!(tse.getSourceFState().equals(fs))
+ && (sFStates.contains(tse.getSourceFState()))) {
+ tse.setSource(sNode);
+ tse.setSourceCNode(cNode);
+ sNode.getEdgeVector().addElement(tse);
+ toremove.add(tse);
+ }
}
}
it_sEdges = null;
try {
if(!copy) {
- //merge se into its source ScheduleNode
- sNode.setCid(((ScheduleNode)se.getSource()).getCid());
- ((ScheduleNode)se.getSource()).mergeSEdge(se);
- scheduleNodes.remove(se.getTarget());
- scheduleEdges.removeElement(se);
- // As se has been changed into an internal edge inside a ScheduleNode,
- // change the source and target of se from original ScheduleNodes
- // into ClassNodes.
- if(se.getType() == ScheduleEdge.NEWEDGE) {
- se.setTarget(se.getTargetCNode());
- //se.setSource(se.getSourceCNode());
- //se.getTargetCNode().addEdge(se);
- se.getSourceCNode().addEdge(se);
- }
+ //merge se into its source ScheduleNode
+ sNode.setCid(((ScheduleNode)se.getSource()).getCid());
+ ((ScheduleNode)se.getSource()).mergeSEdge(se);
+ scheduleNodes.remove(se.getTarget());
+ scheduleEdges.removeElement(se);
+ // As se has been changed into an internal edge inside a ScheduleNode,
+ // change the source and target of se from original ScheduleNodes
+ // into ClassNodes.
+ if(se.getType() == ScheduleEdge.NEWEDGE) {
+ se.setTarget(se.getTargetCNode());
+ //se.setSource(se.getSourceCNode());
+ //se.getTargetCNode().addEdge(se);
+ se.getSourceCNode().addEdge(se);
+ }
} else {
- sNode.setCid(ScheduleNode.colorID++);
- handleScheduleEdge(se, true);
+ sNode.setCid(ScheduleNode.colorID++);
+ handleScheduleEdge(se, true);
}
} catch (Exception e) {
e.printStackTrace();
this.scheduleGraphs.addElement(this.scheduleNodes);
int gid = 1;
if(this.state.PRINTSCHEDULING) {
- String path = this.state.outputdir + "scheduling_" + gid + ".dot";
- SchedulingUtil.printScheduleGraph(path, this.scheduleNodes);
+ String path = this.state.outputdir + "scheduling_" + gid + ".dot";
+ SchedulingUtil.printScheduleGraph(path, this.scheduleNodes);
}
return false;
} else {
SchedulingUtil.assignCids(this.scheduleNodes);
// Go through all the Schedule Nodes, organize them in order of their cid
- Vector<Vector<ScheduleNode>> sNodeVecs =
+ Vector<Vector<ScheduleNode>> sNodeVecs =
SchedulingUtil.rangeScheduleNodes(this.scheduleNodes);
int gid = 1;
boolean isBig = Math.pow(this.coreNum, reduceNum) > 1000;
Random rand = new Random();
if(isBig && state.BAMBOOCOMPILETIME) {
- CombinationUtil.RootsGenerator rGen =
- CombinationUtil.allocateRootsGenerator(sNodeVecs,
- this.coreNum);
- while((gid <= this.scheduleThreshold) && (rGen.nextGen())) {
- // first get the chosen rootNodes
- Vector<Vector<ScheduleNode>> rootNodes = rGen.getRootNodes();
- Vector<Vector<ScheduleNode>> nodes2combine = rGen.getNode2Combine();
-
- CombinationUtil.CombineGenerator cGen =
- CombinationUtil.allocateCombineGenerator(rootNodes,
- nodes2combine);
- while((gid <= this.scheduleThreshold) && (cGen.randomGenE())) {
- boolean implement = true;
- /*if(isBig) {
- implement = Math.abs(rand.nextInt()) % 100 > generateThreshold;
- }*/
- if(implement) {
- Vector<Vector<CombinationUtil.Combine>> combine = cGen.getCombine();
- Vector<ScheduleNode> sNodes =
- SchedulingUtil.generateScheduleGraph(this.state,
- this.scheduleNodes,
- this.scheduleEdges,
- rootNodes,
- combine,
- gid++);
- this.scheduleGraphs.add(sNodes);
- sNodes = null;
- combine = null;
- } else if(Math.abs(rand.nextInt()) % 100 > skipThreshold){
- break;
- }
- }
- cGen.clear();
- rootNodes = null;
- nodes2combine = null;
+ CombinationUtil.RootsGenerator rGen =
+ CombinationUtil.allocateRootsGenerator(sNodeVecs,
+ this.coreNum);
+ while((gid <= this.scheduleThreshold) && (rGen.nextGen())) {
+ // first get the chosen rootNodes
+ Vector<Vector<ScheduleNode>> rootNodes = rGen.getRootNodes();
+ Vector<Vector<ScheduleNode>> nodes2combine = rGen.getNode2Combine();
+
+ CombinationUtil.CombineGenerator cGen =
+ CombinationUtil.allocateCombineGenerator(rootNodes,
+ nodes2combine);
+ while((gid <= this.scheduleThreshold) && (cGen.randomGenE())) {
+ boolean implement = true;
+ /*if(isBig) {
+ implement = Math.abs(rand.nextInt()) % 100 > generateThreshold;
+ }*/
+ if(implement) {
+ Vector<Vector<CombinationUtil.Combine>> combine = cGen.getCombine();
+ Vector<ScheduleNode> sNodes =
+ SchedulingUtil.generateScheduleGraph(this.state,
+ this.scheduleNodes,
+ this.scheduleEdges,
+ rootNodes,
+ combine,
+ gid++);
+ this.scheduleGraphs.add(sNodes);
+ sNodes = null;
+ combine = null;
+ } else if(Math.abs(rand.nextInt()) % 100 > skipThreshold) {
+ break;
+ }
}
- rGen.clear();
- sNodeVecs = null;
+ cGen.clear();
+ rootNodes = null;
+ nodes2combine = null;
+ }
+ rGen.clear();
+ sNodeVecs = null;
} else if (false) {
- CombinationUtil.RandomGenerator rGen =
- CombinationUtil.allocateRandomGenerator(sNodeVecs,
- this.coreNum);
- // random genenration
- while((gid <= this.scheduleThreshold) && (rGen.nextGen())) {
- Vector<Vector<ScheduleNode>> mapping = rGen.getMapping();
- boolean implement = true;
- if(isBig) {
- implement = Math.abs(rand.nextInt()) % 100 > generateThreshold;
- }
- if(implement) {
- Vector<ScheduleNode> sNodes =
- SchedulingUtil.generateScheduleGraph(this.state,
- this.scheduleNodes,
- this.scheduleEdges,
- mapping,
- gid++);
- this.scheduleGraphs.add(sNodes);
- sNodes = null;
- }
- mapping = null;
+ CombinationUtil.RandomGenerator rGen =
+ CombinationUtil.allocateRandomGenerator(sNodeVecs,
+ this.coreNum);
+ // random generation
+ while((gid <= this.scheduleThreshold) && (rGen.nextGen())) {
+ Vector<Vector<ScheduleNode>> mapping = rGen.getMapping();
+ boolean implement = true;
+ if(isBig) {
+ implement = Math.abs(rand.nextInt()) % 100 > generateThreshold;
+ }
+ if(implement) {
+ Vector<ScheduleNode> sNodes =
+ SchedulingUtil.generateScheduleGraph(this.state,
+ this.scheduleNodes,
+ this.scheduleEdges,
+ mapping,
+ gid++);
+ this.scheduleGraphs.add(sNodes);
+ sNodes = null;
}
- rGen.clear();
- sNodeVecs = null;
+ mapping = null;
+ }
+ rGen.clear();
+ sNodeVecs = null;
} else {
- CombinationUtil.RootsGenerator rGen =
- CombinationUtil.allocateRootsGenerator(sNodeVecs,
- this.coreNum);
- while((!isBig || (gid <= this.scheduleThreshold)) && (rGen.nextGen())) {
- // first get the chosen rootNodes
- Vector<Vector<ScheduleNode>> rootNodes = rGen.getRootNodes();
- Vector<Vector<ScheduleNode>> nodes2combine = rGen.getNode2Combine();
-
- CombinationUtil.CombineGenerator cGen =
- CombinationUtil.allocateCombineGenerator(rootNodes,
- nodes2combine);
- while((!isBig || (gid <= this.scheduleThreshold)) && (cGen.nextGen())) {
- boolean implement = true;
- if(isBig) {
- implement = Math.abs(rand.nextInt()) % 100 > generateThreshold;
- }
- if(implement) {
- Vector<Vector<CombinationUtil.Combine>> combine = cGen.getCombine();
- Vector<ScheduleNode> sNodes =
- SchedulingUtil.generateScheduleGraph(this.state,
- this.scheduleNodes,
- this.scheduleEdges,
- rootNodes,
- combine,
- gid++);
- this.scheduleGraphs.add(sNodes);
- sNodes = null;
- combine = null;
- } else if(Math.abs(rand.nextInt()) % 100 > skipThreshold){
- break;
- }
- }
- cGen.clear();
- rootNodes = null;
- nodes2combine = null;
- }
- rGen.clear();
- sNodeVecs = null;
+ CombinationUtil.RootsGenerator rGen =
+ CombinationUtil.allocateRootsGenerator(sNodeVecs,
+ this.coreNum);
+ while((!isBig || (gid <= this.scheduleThreshold)) && (rGen.nextGen())) {
+ // first get the chosen rootNodes
+ Vector<Vector<ScheduleNode>> rootNodes = rGen.getRootNodes();
+ Vector<Vector<ScheduleNode>> nodes2combine = rGen.getNode2Combine();
+
+ CombinationUtil.CombineGenerator cGen =
+ CombinationUtil.allocateCombineGenerator(rootNodes,
+ nodes2combine);
+ while((!isBig || (gid <= this.scheduleThreshold)) && (cGen.nextGen())) {
+ boolean implement = true;
+ if(isBig) {
+ implement = Math.abs(rand.nextInt()) % 100 > generateThreshold;
+ }
+ if(implement) {
+ Vector<Vector<CombinationUtil.Combine>> combine = cGen.getCombine();
+ Vector<ScheduleNode> sNodes =
+ SchedulingUtil.generateScheduleGraph(this.state,
+ this.scheduleNodes,
+ this.scheduleEdges,
+ rootNodes,
+ combine,
+ gid++);
+ this.scheduleGraphs.add(sNodes);
+ sNodes = null;
+ combine = null;
+ } else if(Math.abs(rand.nextInt()) % 100 > skipThreshold) {
+ break;
+ }
+ }
+ cGen.clear();
+ rootNodes = null;
+ nodes2combine = null;
+ }
+ rGen.clear();
+ sNodeVecs = null;
}
return isBig;
}
this.m_probability = new double[this.m_numofexits];
this.m_newobjinfo = new Vector<Hashtable<String, Integer>>();
for(int i = 0; i < this.m_numofexits; i++) {
- this.m_newobjinfo.add(null);
+ this.m_newobjinfo.add(null);
}
this.m_byObj = -1;
}
/** Class Constructor
*
*/
- public ScheduleEdge(ScheduleNode target,
- String label,
- FlagState fstate,
- int type,
- int gid) {
+ public ScheduleEdge(ScheduleNode target,
+ String label,
+ FlagState fstate,
+ int type,
+ int gid) {
super(target);
this.uid = ScheduleEdge.nodeID++;
this.gid = gid;
private Vector<ScheduleEdge> scheduleEdges;
private long executionTime;
-
+
private int hashcid;
/** Class constructor
}
public int getGid() {
- return gid;
+ return gid;
}
public int getuid() {
}
public int getHashcid() {
- return hashcid;
+ return hashcid;
}
public void computeHashcid() {
- this.hashcid = 0;
- /*if(this.mergedcids != null) {
- for(int i = 0; i < this.mergedcids.size(); i++) {
- this.hashcid = this.hashcid * 31 + this.mergedcids.elementAt(i);
- }
- }*/
- Vector<Integer> mergedcids = new Vector<Integer>();
- for(int i = 0; i < this.classNodes.size(); i++) {
- int tomerge = this.classNodes.elementAt(i).getCid();
- mergedcids.add(tomerge);
- // insert tomerge in accent order
- int j = mergedcids.size() - 1;
- for( ; j > 0; j--) {
- int tmp = mergedcids.elementAt(j-1);
- if(tmp > tomerge) {
- mergedcids.setElementAt(tmp, j);
- } else {
- break;
- }
- }
- mergedcids.setElementAt(tomerge, j);
- }
- for(int i = 0; i < mergedcids.size(); i++) {
- this.hashcid = this.hashcid * 31 + mergedcids.elementAt(i);
+ this.hashcid = 0;
+ /*if(this.mergedcids != null) {
+ for(int i = 0; i < this.mergedcids.size(); i++) {
+ this.hashcid = this.hashcid * 31 + this.mergedcids.elementAt(i);
+ }
+ }*/
+ Vector<Integer> mergedcids = new Vector<Integer>();
+ for(int i = 0; i < this.classNodes.size(); i++) {
+ int tomerge = this.classNodes.elementAt(i).getCid();
+ mergedcids.add(tomerge);
+ // insert tomerge in ascending order
+ int j = mergedcids.size() - 1;
+ for(; j > 0; j--) {
+ int tmp = mergedcids.elementAt(j-1);
+ if(tmp > tomerge) {
+ mergedcids.setElementAt(tmp, j);
+ } else {
+ break;
+ }
}
- mergedcids = null;
+ mergedcids.setElementAt(tomerge, j);
+ }
+ for(int i = 0; i < mergedcids.size(); i++) {
+ this.hashcid = this.hashcid * 31 + mergedcids.elementAt(i);
+ }
+ mergedcids = null;
}
public long getExeTime() {
return label;
}
- public Object clone(Hashtable<ClassNode, ClassNode> cn2cn,
- int gid) {
+ public Object clone(Hashtable<ClassNode, ClassNode> cn2cn,
+ int gid) {
ScheduleNode o = null;
try {
o = (ScheduleNode) super.clone();
ScheduleEdge se = null;
switch(temp.getType()) {
case ScheduleEdge.NEWEDGE: {
- se = new ScheduleEdge(o,
- "new",
- temp.getFstate(),
- ScheduleEdge.NEWEDGE,
- gid);
+ se = new ScheduleEdge(o,
+ "new",
+ temp.getFstate(),
+ ScheduleEdge.NEWEDGE,
+ gid);
se.setProbability(temp.getProbability());
se.setNewRate(temp.getNewRate());
break;
}
case ScheduleEdge.TRANSEDGE: {
- se = new ScheduleEdge(o,
- "transmit",
- temp.getFstate(),
- ScheduleEdge.TRANSEDGE,
- gid);
+ se = new ScheduleEdge(o,
+ "transmit",
+ temp.getFstate(),
+ ScheduleEdge.TRANSEDGE,
+ gid);
se.setProbability(temp.getProbability());
se.setNewRate(temp.getNewRate());
break;
}
this.executionTime += sn.getExeTime();
}
-
+
public ScheduleNode spliteClassNode(ClassNode cd) {
- ScheduleNode sNode = new ScheduleNode(cd, this.gid);
- // clean all inedges and edges
- sNode.edges.clear();
- sNode.inedges.clear();
-
- this.classNodes.remove(cd);
- cd.setScheduleNode(sNode);
- try {
- sNode.calExeTime();
- } catch (Exception e) {
- e.printStackTrace();
- }
-
- // redirect all corresponding internal ScheduleEdge to the new snode
- Iterator it_innersEdges = this.scheduleEdges.iterator();
- Vector<ScheduleEdge> toremove = new Vector<ScheduleEdge>();
- if(it_innersEdges != null) {
- while(it_innersEdges.hasNext()) {
- ScheduleEdge tse = (ScheduleEdge)it_innersEdges.next();
- if((cd.equals(tse.getSourceCNode())) || (cd.equals(tse.getTargetCNode()))) {
- // related edge
- toremove.addElement(tse);
- }
- }
+ ScheduleNode sNode = new ScheduleNode(cd, this.gid);
+ // clean all inedges and edges
+ sNode.edges.clear();
+ sNode.inedges.clear();
+
+ this.classNodes.remove(cd);
+ cd.setScheduleNode(sNode);
+ try {
+ sNode.calExeTime();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ // redirect all corresponding internal ScheduleEdge to the new snode
+ Iterator it_innersEdges = this.scheduleEdges.iterator();
+ Vector<ScheduleEdge> toremove = new Vector<ScheduleEdge>();
+ if(it_innersEdges != null) {
+ while(it_innersEdges.hasNext()) {
+ ScheduleEdge tse = (ScheduleEdge)it_innersEdges.next();
+ if((cd.equals(tse.getSourceCNode())) || (cd.equals(tse.getTargetCNode()))) {
+ // related edge
+ toremove.addElement(tse);
+ }
}
- it_innersEdges = null;
- this.scheduleEdges.removeAll(toremove);
- for(int i = 0; i < toremove.size(); i++) {
- ScheduleEdge tse = toremove.elementAt(i);
- if(cd.equals(tse.getSourceCNode())) {
- // outedge
- tse.setTarget(this);
- sNode.addEdge(tse);
- } else if(cd.equals(tse.getTargetCNode())){
- // inedge
- tse.setTarget(sNode);
- this.addEdge(tse);
- }
+ }
+ it_innersEdges = null;
+ this.scheduleEdges.removeAll(toremove);
+ for(int i = 0; i < toremove.size(); i++) {
+ ScheduleEdge tse = toremove.elementAt(i);
+ if(cd.equals(tse.getSourceCNode())) {
+ // outedge
+ tse.setTarget(this);
+ sNode.addEdge(tse);
+ } else if(cd.equals(tse.getTargetCNode())) {
+ // inedge
+ tse.setTarget(sNode);
+ this.addEdge(tse);
}
- toremove.clear();
-
- // redirect external ScheudleEdges out of this cd to the new ScheduleNode
- Iterator it_exsEdges = this.edges();
- while(it_exsEdges.hasNext()) {
- ScheduleEdge tse = (ScheduleEdge)it_exsEdges.next();
- if(tse.getSourceCNode().equals(cd)) {
- toremove.add(tse);
- //this.removeEdge(tse);
- //sNode.addEdge(tse);
- tse.setSource(sNode);
- sNode.edges.addElement(tse);
- }
+ }
+ toremove.clear();
+
+ // redirect external ScheduleEdges out of this cd to the new ScheduleNode
+ Iterator it_exsEdges = this.edges();
+ while(it_exsEdges.hasNext()) {
+ ScheduleEdge tse = (ScheduleEdge)it_exsEdges.next();
+ if(tse.getSourceCNode().equals(cd)) {
+ toremove.add(tse);
+ //this.removeEdge(tse);
+ //sNode.addEdge(tse);
+ tse.setSource(sNode);
+ sNode.edges.addElement(tse);
}
- this.edges.removeAll(toremove);
- toremove.clear();
-
- it_exsEdges = null;
- // redirect inedges whose target is this Classnode to new ScheduleNode
- Iterator it_insEdges = this.inedges();
- while(it_insEdges.hasNext()) {
- ScheduleEdge tse = (ScheduleEdge)it_insEdges.next();
- if(tse.getTargetCNode().equals(cd)) {
- toremove.add(tse);
- tse.setTarget(sNode);
- sNode.inedges.addElement(tse);
- }
+ }
+ this.edges.removeAll(toremove);
+ toremove.clear();
+
+ it_exsEdges = null;
+ // redirect inedges whose target is this ClassNode to new ScheduleNode
+ Iterator it_insEdges = this.inedges();
+ while(it_insEdges.hasNext()) {
+ ScheduleEdge tse = (ScheduleEdge)it_insEdges.next();
+ if(tse.getTargetCNode().equals(cd)) {
+ toremove.add(tse);
+ tse.setTarget(sNode);
+ sNode.inedges.addElement(tse);
}
- it_insEdges = null;
- this.inedges.removeAll(toremove);
- toremove.clear();
- toremove = null;
-
- // As all tasks inside one ScheduleNode are executed sequentially,
- // simply subtract the execution time of the ClassNode .
- assert(this.executionTime != -1);
- this.executionTime -= sNode.getExeTime();
-
- return sNode;
+ }
+ it_insEdges = null;
+ this.inedges.removeAll(toremove);
+ toremove.clear();
+ toremove = null;
+
+ // As all tasks inside one ScheduleNode are executed sequentially,
+ // simply subtract the execution time of the ClassNode.
+ assert(this.executionTime != -1);
+ this.executionTime -= sNode.getExeTime();
+
+ return sNode;
}
}
private Vector<TaskSimulator> tasks;
private long processTime;
private int invoketime;
-
+
private Vector<FlagState> fstates;
private Vector<FEdge> fedges;
State state;
TaskAnalysis taskanalysis;
- public ScheduleSimulator(int corenum,
- State state,
- TaskAnalysis taskanalysis) {
+ public ScheduleSimulator(int corenum,
+ State state,
+ TaskAnalysis taskanalysis) {
this.coreNum = corenum;
this.scheduling = null;
this.cores = null;
this.state = state;
this.taskanalysis = taskanalysis;
this.fstates = new Vector<FlagState>();
- this.fedges = new Vector<FEdge>();
+ this.fedges = new Vector<FEdge>();
}
- public ScheduleSimulator(int corenum,
- Vector<Schedule> scheduling,
- State state,
- TaskAnalysis taskanalysis) {
+ public ScheduleSimulator(int corenum,
+ Vector<Schedule> scheduling,
+ State state,
+ TaskAnalysis taskanalysis) {
super();
this.coreNum = corenum;
this.scheduling = scheduling;
this.fedges = new Vector<FEdge>();
applyScheduling();
}
-
+
public void init() {
// gather all the flag states and fedges together
Iterator it_classes = this.state.getClassSymbolTable().getDescriptorsIterator();
while(it_classes.hasNext()) {
ClassDescriptor cd = (ClassDescriptor) it_classes.next();
Iterator<FlagState> it_fStates = this.taskanalysis.getFlagStates(cd).iterator();
-
+
while(it_fStates.hasNext()) {
- FlagState fs = it_fStates.next();
- if(!this.fstates.contains(fs)) {
- this.fstates.addElement(fs);
- }
- Iterator<FEdge> it_fe = (Iterator<FEdge>)fs.edges();
- while(it_fe.hasNext()) {
- FEdge next = it_fe.next();
- if(!this.fedges.contains(next)) {
- this.fedges.addElement(next);
- }
- }
+ FlagState fs = it_fStates.next();
+ if(!this.fstates.contains(fs)) {
+ this.fstates.addElement(fs);
+ }
+ Iterator<FEdge> it_fe = (Iterator<FEdge>)fs.edges();
+ while(it_fe.hasNext()) {
+ FEdge next = it_fe.next();
+ if(!this.fedges.contains(next)) {
+ this.fedges.addElement(next);
+ }
+ }
}
}
}
-
+
public long simulate(Vector<Vector<Schedule>> schedulings,
- Vector<Integer> selectedScheduling,
- Vector<SimExecutionNode> selectedSimExeGraphs) {
- long processTime = Long.MAX_VALUE;
- /*if(schedulings.size() > 1500) {
- int index = 0;
- int upperbound = schedulings.size();
- long seed = 0;
- java.util.Random r = new java.util.Random(seed);
- for(int ii = 0; ii < 1500; ii++) {
- index = (int)((Math.abs((double)r.nextInt()
- /(double)Integer.MAX_VALUE)) * upperbound);
- System.out.println("Scheduling index:" + index);
- Vector<Schedule> scheduling = schedulings.elementAt(index);
- this.setScheduling(scheduling);
- Vector<SimExecutionEdge> simexegraph = new Vector<SimExecutionEdge>();
- Vector<CheckPoint> checkpoints = new Vector<CheckPoint>();
- int tmpTime = this.process(checkpoints, simexegraph);
- if(tmpTime < processTime) {
- selectedScheduling.clear();
- selectedScheduling.add(index);
- selectedSimExeGraphs.clear();
- selectedSimExeGraphs.add(simexegraph);
- processTime = tmpTime;
- } else if(tmpTime == processTime) {
- selectedScheduling.add(index);
- selectedSimExeGraphs.add(simexegraph);
- }
- scheduling = null;
- checkpoints = null;
- simexegraph = null;
- }
- } else {*/
- // TODO
- Iterator it_scheduling = schedulings.iterator();
- int index = 0;
- while(it_scheduling.hasNext()) {
- Vector<Schedule> scheduling =
- (Vector<Schedule>)it_scheduling.next();
- if(!state.BAMBOOCOMPILETIME) {
- System.out.println("Scheduling index:" + scheduling.elementAt(0).getGid());
- }
- this.setScheduling(scheduling);
- Vector<SimExecutionNode> simexegraph = new Vector<SimExecutionNode>();
- Vector<CheckPoint> checkpoints = new Vector<CheckPoint>();
- long tmpTime = process(checkpoints, simexegraph);
- if(tmpTime < processTime) {
- selectedScheduling.clear();
- selectedScheduling.add(index);
- selectedSimExeGraphs.clear();
- selectedSimExeGraphs.add(simexegraph.elementAt(0));
- processTime = tmpTime;
- } else if(tmpTime == processTime) {
- if(!selectedScheduling.contains(index)) {
- selectedScheduling.add(index);
- selectedSimExeGraphs.add(simexegraph.elementAt(0));
- }
- }
- scheduling = null;
- checkpoints.clear();
- checkpoints = null;
- simexegraph = null;
- index++;
- }
- it_scheduling = null;
- //}
-
+ Vector<Integer> selectedScheduling,
+ Vector<SimExecutionNode> selectedSimExeGraphs) {
+ long processTime = Long.MAX_VALUE;
+ /*if(schedulings.size() > 1500) {
+ int index = 0;
+ int upperbound = schedulings.size();
+ long seed = 0;
+ java.util.Random r = new java.util.Random(seed);
+ for(int ii = 0; ii < 1500; ii++) {
+ index = (int)((Math.abs((double)r.nextInt()
+ /(double)Integer.MAX_VALUE)) * upperbound);
+ System.out.println("Scheduling index:" + index);
+ Vector<Schedule> scheduling = schedulings.elementAt(index);
+ this.setScheduling(scheduling);
+ Vector<SimExecutionEdge> simexegraph = new Vector<SimExecutionEdge>();
+ Vector<CheckPoint> checkpoints = new Vector<CheckPoint>();
+ int tmpTime = this.process(checkpoints, simexegraph);
+ if(tmpTime < processTime) {
+ selectedScheduling.clear();
+ selectedScheduling.add(index);
+ selectedSimExeGraphs.clear();
+ selectedSimExeGraphs.add(simexegraph);
+ processTime = tmpTime;
+ } else if(tmpTime == processTime) {
+ selectedScheduling.add(index);
+ selectedSimExeGraphs.add(simexegraph);
+ }
+ scheduling = null;
+ checkpoints = null;
+ simexegraph = null;
+ }
+ } else {*/
+ // TODO
+ Iterator it_scheduling = schedulings.iterator();
+ int index = 0;
+ while(it_scheduling.hasNext()) {
+ Vector<Schedule> scheduling =
+ (Vector<Schedule>)it_scheduling.next();
if(!state.BAMBOOCOMPILETIME) {
+ System.out.println("Scheduling index:" + scheduling.elementAt(0).getGid());
+ }
+ this.setScheduling(scheduling);
+ Vector<SimExecutionNode> simexegraph = new Vector<SimExecutionNode>();
+ Vector<CheckPoint> checkpoints = new Vector<CheckPoint>();
+ long tmpTime = process(checkpoints, simexegraph);
+ if(tmpTime < processTime) {
+ selectedScheduling.clear();
+ selectedScheduling.add(index);
+ selectedSimExeGraphs.clear();
+ selectedSimExeGraphs.add(simexegraph.elementAt(0));
+ processTime = tmpTime;
+ } else if(tmpTime == processTime) {
+ if(!selectedScheduling.contains(index)) {
+ selectedScheduling.add(index);
+ selectedSimExeGraphs.add(simexegraph.elementAt(0));
+ }
+ }
+ scheduling = null;
+ checkpoints.clear();
+ checkpoints = null;
+ simexegraph = null;
+ index++;
+ }
+ it_scheduling = null;
+ //}
+
+ if(!state.BAMBOOCOMPILETIME) {
System.out.print("Selected schedulings with least exectution time " + processTime + ": \n\t");
for(int i = 0; i < selectedScheduling.size(); i++) {
- int gid = schedulings.elementAt(selectedScheduling.elementAt(i)).elementAt(0).getGid();
- System.out.print(gid + ", ");
+ int gid = schedulings.elementAt(selectedScheduling.elementAt(i)).elementAt(0).getGid();
+ System.out.print(gid + ", ");
}
System.out.println();
- }
-
- return processTime;
+ }
+
+ return processTime;
}
public int getCoreNum() {
public Vector<TaskSimulator> getTasks() {
return tasks;
}
-
+
private void init4Simulation() {
// TODO for test
- /*System.err.println("======Init for Sim # "
- + this.scheduling.elementAt(0).getGid() + "======");*/
+ /*System.err.println("======Init for Sim # "
+ + this.scheduling.elementAt(0).getGid() + "======");*/
for(int i = 0; i < this.fstates.size(); i++) {
this.fstates.elementAt(i).init4Simulate();
}
}
public long process(Vector<CheckPoint> checkpoints,
- Vector<SimExecutionNode> simexegraph) {
+ Vector<SimExecutionNode> simexegraph) {
assert(this.scheduling != null);
this.invoketime++;
this.processTime = 0;
-
+
// initialization
this.init4Simulation();
-
+
// helper structures for building SimExecutionGraph
- Hashtable<SimExecutionNode, Action> senode2action =
- new Hashtable<SimExecutionNode, Action>();
+ Hashtable<SimExecutionNode, Action> senode2action =
+ new Hashtable<SimExecutionNode, Action>();
SimExecutionNode[] lastseNodes = new SimExecutionNode[this.cores.size()];
- Hashtable<Action, Long> action2exetime =
- new Hashtable<Action, Long>();
- Hashtable<TransTaskSimulator, SimExecutionNode> tttask2senode =
- new Hashtable<TransTaskSimulator, SimExecutionNode>();
- Hashtable<Integer, Long> obj2transtime =
- new Hashtable<Integer, Long>();
- Hashtable<Integer, SimExecutionEdge> obj2lastseedge =
- new Hashtable<Integer, SimExecutionEdge>();
+ Hashtable<Action, Long> action2exetime =
+ new Hashtable<Action, Long>();
+ Hashtable<TransTaskSimulator, SimExecutionNode> tttask2senode =
+ new Hashtable<TransTaskSimulator, SimExecutionNode>();
+ Hashtable<Integer, Long> obj2transtime =
+ new Hashtable<Integer, Long>();
+ Hashtable<Integer, SimExecutionEdge> obj2lastseedge =
+ new Hashtable<Integer, SimExecutionEdge>();
// first decide next task to execute on each core
int i = 0;
// add STARTTASK checkpoint for all the initial tasks
CheckPoint cp = new CheckPoint(this.processTime,
- this.coreNum);
+ this.coreNum);
for(i = 0; i < this.tasks.size(); i++) {
TaskSimulator task = this.tasks.elementAt(i);
int coreid = task.getCs().getCoreNum();
- Action action = new Action(coreid,
- Action.TASKSTART,
- task);
+ Action action = new Action(coreid,
+ Action.TASKSTART,
+ task);
cp.addAction(action);
if(!(task instanceof TransTaskSimulator)) {
- cp.removeSpareCore(coreid);
- SimExecutionNode seNode = new SimExecutionNode(coreid, this.processTime);
- seNode.setSpareCores(cp.getSpareCores());
- senode2action.put(seNode, action);
- action2exetime.put(action, (long)-1);
- lastseNodes[coreid] = seNode;
+ cp.removeSpareCore(coreid);
+ SimExecutionNode seNode = new SimExecutionNode(coreid, this.processTime);
+ seNode.setSpareCores(cp.getSpareCores());
+ senode2action.put(seNode, action);
+ action2exetime.put(action, (long)-1);
+ lastseNodes[coreid] = seNode;
}
}
checkpoints.add(cp);
finishTasks.add(task);
}
}
-
+
// advance to next finish point
this.processTime += finishTime;
cp = new CheckPoint(this.processTime,
- this.coreNum);
+ this.coreNum);
for(i = 0; i < this.tasks.size(); i++) {
TaskSimulator task = this.tasks.elementAt(i);
if(!finishTasks.contains(task)) {
task.getCs().updateTask(finishTime);
if(!(task instanceof TransTaskSimulator)) {
- cp.removeSpareCore(task.getCs().getCoreNum());
+ cp.removeSpareCore(task.getCs().getCoreNum());
}
}
}
-
+
Action action = null;
for(i = 0; i < finishTasks.size(); i++) {
TaskSimulator task = finishTasks.elementAt(i);
this.tasks.removeElement(task);
if(task instanceof TransTaskSimulator) {
- // handle TransTaskSimulator task's completion
- finishTransTaskSimulator(task,
- cp,
- senode2action,
- lastseNodes,
- action2exetime,
- tttask2senode,
- obj2transtime);
+ // handle TransTaskSimulator task's completion
+ finishTransTaskSimulator(task,
+ cp,
+ senode2action,
+ lastseNodes,
+ action2exetime,
+ tttask2senode,
+ obj2transtime);
} else {
CoreSimulator cs = task.getCs();
Vector<TransTaskSimulator> tttasks = new Vector<TransTaskSimulator>();
-
+
Vector<ObjectSimulator> transObjs = null;
if(task.getCurrentRun().getExetype() == 0) {
- // normal execution of a task
- transObjs = finishTaskNormal(task,
- cp,
- tttasks,
- senode2action,
- lastseNodes,
- action2exetime);
+ // normal execution of a task
+ transObjs = finishTaskNormal(task,
+ cp,
+ tttasks,
+ senode2action,
+ lastseNodes,
+ action2exetime);
} else if (task.getCurrentRun().getExetype() == 1) {
- // task abort
- finishTaskAbnormal(cs,
- cp,
- senode2action,
- lastseNodes,
- action2exetime,
- Action.TASKABORT);
+ // task abort
+ finishTaskAbnormal(cs,
+ cp,
+ senode2action,
+ lastseNodes,
+ action2exetime,
+ Action.TASKABORT);
} else if (task.getCurrentRun().getExetype() == 2) {
- // task remove
- finishTaskAbnormal(cs,
- cp,
- senode2action,
- lastseNodes,
- action2exetime,
- Action.TASKREMOVE);
+ // task remove
+ finishTaskAbnormal(cs,
+ cp,
+ senode2action,
+ lastseNodes,
+ action2exetime,
+ Action.TASKREMOVE);
}
-
+
// Choose a new task for this core
generateNewTask(cs,
- cp,
- transObjs,
- tttasks,
- senode2action,
- lastseNodes,
- action2exetime,
- tttask2senode,
- obj2transtime,
- obj2lastseedge);
+ cp,
+ transObjs,
+ tttasks,
+ senode2action,
+ lastseNodes,
+ action2exetime,
+ tttask2senode,
+ obj2transtime,
+ obj2lastseedge);
tttasks.clear();
tttasks = null;
transObjs = null;
- }// end of if(task instanceof TransTaskSimulator) else
+ } // end of if(task instanceof TransTaskSimulator) else
}
checkpoints.add(cp);
finishTasks = null;
} // end of while(true)
-
+
// add the end node into the SimExecutionGraph
SimExecutionNode seNode = new SimExecutionNode(this.coreNum, this.processTime);
simexegraph.addElement(seNode);
for(int j = 0; j < lastseNodes.length; j++) {
- SimExecutionNode lastsenode = lastseNodes[j];
- // create edges between previous senode on this core to this node
- if(lastsenode != null) {
- Action tmpaction = senode2action.get(lastsenode);
- long weight = tmpaction != null? action2exetime.get(tmpaction) : 0; // TODO ????
- SimExecutionEdge seEdge = new SimExecutionEdge(seNode,
- lastsenode.getCoreNum(),
- tmpaction != null? tmpaction.getTd():null,
- weight,
- tmpaction != null? tmpaction.getTaskParams():null);
- lastsenode.addEdge(seEdge);
-
- // setup data dependencies for the task
- Vector<Integer> taskparams = seEdge.getTaskparams();
- if(taskparams != null) {
- for(int k = 0; k < taskparams.size(); k++) {
- Integer tparam = taskparams.elementAt(k);
- SimExecutionEdge lastedge = obj2lastseedge.get(tparam);
- if(lastedge != null) {
- if(lastedge.getCoreNum() != seEdge.getCoreNum()) {
- // the obj is transferred from another core
- // create an seEdge for this transfer
- long transweight = obj2transtime.get(tparam);
- SimExecutionEdge transseEdge = new SimExecutionEdge((SimExecutionNode)seEdge.getSource(),
- lastedge.getCoreNum(),
- null, // TODO: not sure if this is enough
- transweight,
- null);
- if(((SimExecutionNode)seEdge.getSource()).getTimepoint() <
- ((SimExecutionNode)lastedge.getTarget()).getTimepoint()) {
- System.err.println("ScheduleSimulator:393");
- System.exit(-1);
- }
- lastedge.getTarget().addEdge(transseEdge);
- transseEdge.addPredicate(lastedge);
- seEdge.addPredicate(transseEdge);
- } else {
- seEdge.addPredicate(lastedge);
- }
- }
- // update the last edge associated to the parameter obj
- obj2lastseedge.put(tparam, seEdge);
+ SimExecutionNode lastsenode = lastseNodes[j];
+ // create edges between previous senode on this core to this node
+ if(lastsenode != null) {
+ Action tmpaction = senode2action.get(lastsenode);
+ long weight = tmpaction != null?action2exetime.get(tmpaction):0; // TODO ????
+ SimExecutionEdge seEdge = new SimExecutionEdge(seNode,
+ lastsenode.getCoreNum(),
+ tmpaction != null?tmpaction.getTd():null,
+ weight,
+ tmpaction != null?tmpaction.getTaskParams():null);
+ lastsenode.addEdge(seEdge);
+
+ // setup data dependencies for the task
+ Vector<Integer> taskparams = seEdge.getTaskparams();
+ if(taskparams != null) {
+ for(int k = 0; k < taskparams.size(); k++) {
+ Integer tparam = taskparams.elementAt(k);
+ SimExecutionEdge lastedge = obj2lastseedge.get(tparam);
+ if(lastedge != null) {
+ if(lastedge.getCoreNum() != seEdge.getCoreNum()) {
+ // the obj is transferred from another core
+ // create an seEdge for this transfer
+ long transweight = obj2transtime.get(tparam);
+ SimExecutionEdge transseEdge = new SimExecutionEdge((SimExecutionNode)seEdge.getSource(),
+ lastedge.getCoreNum(),
+ null, // TODO: not sure if this is enough
+ transweight,
+ null);
+ if(((SimExecutionNode)seEdge.getSource()).getTimepoint() <
+ ((SimExecutionNode)lastedge.getTarget()).getTimepoint()) {
+ System.err.println("ScheduleSimulator:393");
+ System.exit(-1);
}
+ lastedge.getTarget().addEdge(transseEdge);
+ transseEdge.addPredicate(lastedge);
+ seEdge.addPredicate(transseEdge);
+ } else {
+ seEdge.addPredicate(lastedge);
+ }
}
- taskparams = null;
- }
- lastseNodes[j] = null;
+ // update the last edge associated to the parameter obj
+ obj2lastseedge.put(tparam, seEdge);
+ }
+ }
+ taskparams = null;
+ }
+ lastseNodes[j] = null;
}
senode2action.clear();
int gid = this.scheduling.elementAt(0).getGid();
if(this.state.PRINTSCHEDULESIM) {
- SchedulingUtil.printSimulationResult(this.state.outputdir + "SimGraph/" + "SimulatorResult_" + gid + ".dot",
- this.processTime,
- this.coreNum,
- checkpoints);
+ SchedulingUtil.printSimulationResult(this.state.outputdir + "SimGraph/" + "SimulatorResult_" + gid + ".dot",
+ this.processTime,
+ this.coreNum,
+ checkpoints);
}
if(!state.BAMBOOCOMPILETIME) {
- System.out.println("Simulate scheduling #" + gid + ": ");
- System.out.println("\tTotal execution time is: " + this.processTime);
- System.out.println("\tUtility of cores: ");
- for(int j = 0; j < this.cores.size(); j++) {
- System.out.println("\t\tcore" + j + ": " + getUtility(j) + "%");
- }
+ System.out.println("Simulate scheduling #" + gid + ": ");
+ System.out.println("\tTotal execution time is: " + this.processTime);
+ System.out.println("\tUtility of cores: ");
+ for(int j = 0; j < this.cores.size(); j++) {
+ System.out.println("\t\tcore" + j + ": " + getUtility(j) + "%");
+ }
}
-
+
return this.processTime;
}
-
- private void finishTransTaskSimulator(TaskSimulator task,
- CheckPoint cp,
- Hashtable<SimExecutionNode, Action> senode2action,
- SimExecutionNode[] lastseNodes,
- Hashtable<Action, Long> action2exetime,
- Hashtable<TransTaskSimulator, SimExecutionNode> tttask2senode,
- Hashtable<Integer, Long> obj2transtime) {
- TransTaskSimulator tmptask = (TransTaskSimulator)task;
- // add ADDOBJ task to targetCore
- int targetCoreNum = tmptask.getTargetCoreNum();
- ObjectInfo objinfo = tmptask.refreshTask();
- ObjectSimulator nobj = objinfo.obj;
- FlagState fs = objinfo.fs;
- int version = objinfo.version;
- this.cores.elementAt(targetCoreNum).addObject(nobj, fs, version);
- Action action = new Action(targetCoreNum, Action.ADDOBJ, 1, nobj.getCd());
- cp.addAction(action);
- // get the obj transfer time and associated senode
- SimExecutionNode senode = tttask2senode.get(tmptask);
- obj2transtime.put(nobj.getOid(), this.processTime - senode.getTimepoint());
+ private void finishTransTaskSimulator(TaskSimulator task,
+ CheckPoint cp,
+ Hashtable<SimExecutionNode, Action> senode2action,
+ SimExecutionNode[] lastseNodes,
+ Hashtable<Action, Long> action2exetime,
+ Hashtable<TransTaskSimulator, SimExecutionNode> tttask2senode,
+ Hashtable<Integer, Long> obj2transtime) {
+ TransTaskSimulator tmptask = (TransTaskSimulator)task;
+ // add ADDOBJ task to targetCore
+ int targetCoreNum = tmptask.getTargetCoreNum();
+ ObjectInfo objinfo = tmptask.refreshTask();
+ ObjectSimulator nobj = objinfo.obj;
+ FlagState fs = objinfo.fs;
+ int version = objinfo.version;
+ this.cores.elementAt(targetCoreNum).addObject(nobj, fs, version);
+ Action action = new Action(targetCoreNum, Action.ADDOBJ, 1, nobj.getCd());
+ cp.addAction(action);
+
+ // get the obj transfer time and associated senode
+ SimExecutionNode senode = tttask2senode.get(tmptask);
+ obj2transtime.put(nobj.getOid(), this.processTime - senode.getTimepoint());
+
+ if(!tmptask.isFinished()) {
+ // still have some objects to be transferred
+ this.tasks.add(task);
+ }
+ if(this.cores.elementAt(targetCoreNum).getRtask() == null) {
+ TaskSimulator newTask = this.cores.elementAt(targetCoreNum).process();
+ if(newTask != null) {
+ this.tasks.add(newTask);
+ // add a TASKSTART action into this checkpoint
+ action = new Action(targetCoreNum,
+ Action.TASKSTART,
+ newTask);
+ cp.addAction(action);
+ if(!(newTask instanceof TransTaskSimulator)) {
+ cp.removeSpareCore(targetCoreNum);
+ SimExecutionNode seNode = new SimExecutionNode(targetCoreNum, this.processTime);
+ seNode.setSpareCores(cp.getSpareCores());
+ senode2action.put(seNode, action);
+ action2exetime.put(action, (long)-1);
- if(!tmptask.isFinished()) {
- // still have some objects to be transferred
- this.tasks.add(task);
- }
- if(this.cores.elementAt(targetCoreNum).getRtask() == null) {
- TaskSimulator newTask = this.cores.elementAt(targetCoreNum).process();
- if(newTask != null) {
- this.tasks.add(newTask);
- // add a TASKSTART action into this checkpoint
- action = new Action(targetCoreNum,
- Action.TASKSTART,
- newTask);
- cp.addAction(action);
- if(!(newTask instanceof TransTaskSimulator)) {
- cp.removeSpareCore(targetCoreNum);
- SimExecutionNode seNode = new SimExecutionNode(targetCoreNum, this.processTime);
- seNode.setSpareCores(cp.getSpareCores());
- senode2action.put(seNode, action);
- action2exetime.put(action, (long)-1);
-
- SimExecutionNode lastsenode = lastseNodes[targetCoreNum];
- // create edges between previous senode on this core to this node
- if(lastsenode != null) {
- Action tmpaction = senode2action.get(lastsenode);
- SimExecutionEdge seEdge = null;
- if(tmpaction == null) {
- seEdge = new SimExecutionEdge(seNode,
- lastsenode.getCoreNum(),
- null,
- 0,
- null);
- } else {
- long weight = action2exetime.get(tmpaction);
- seEdge = new SimExecutionEdge(seNode,
- lastsenode.getCoreNum(),
- tmpaction.getTd(),
- weight,
- tmpaction.getTaskParams());
- }
- lastsenode.addEdge(seEdge);
- }
- lastseNodes[targetCoreNum] = seNode;
- }
+ SimExecutionNode lastsenode = lastseNodes[targetCoreNum];
+ // create edges between previous senode on this core to this node
+ if(lastsenode != null) {
+ Action tmpaction = senode2action.get(lastsenode);
+ SimExecutionEdge seEdge = null;
+ if(tmpaction == null) {
+ seEdge = new SimExecutionEdge(seNode,
+ lastsenode.getCoreNum(),
+ null,
+ 0,
+ null);
+ } else {
+ long weight = action2exetime.get(tmpaction);
+ seEdge = new SimExecutionEdge(seNode,
+ lastsenode.getCoreNum(),
+ tmpaction.getTd(),
+ weight,
+ tmpaction.getTaskParams());
+ }
+ lastsenode.addEdge(seEdge);
}
+ lastseNodes[targetCoreNum] = seNode;
+ }
}
+ }
}
-
+
private Vector<ObjectSimulator> finishTaskNormal(TaskSimulator task,
CheckPoint cp,
Vector<TransTaskSimulator> tttasks,
Hashtable<SimExecutionNode, Action> senode2action,
SimExecutionNode[] lastseNodes,
Hashtable<Action, Long> action2exetime) {
- Vector<ObjectSimulator> totransObjs = new Vector<ObjectSimulator>();
- CoreSimulator cs = task.getCs();
- int corenum = cs.getCoreNum();
- Hashtable<Integer, Queue<ObjectInfo>> transObjQueues =
- new Hashtable<Integer, Queue<ObjectInfo>>();
- Action action = null;
- if(task.getCurrentRun().getNewObjs() == null) {
- // task finish without new objects
- action = new Action(corenum,
- Action.TASKFINISH,
- cs.getRtask());
- // get the execution time of this task
- SimExecutionNode lastsenode = lastseNodes[corenum];
- Action startaction = senode2action.get(lastsenode);
- action2exetime.put(startaction, cp.getTimepoint() - lastsenode.getTimepoint());
-
- } else {
- // task finish with new objects
- action = new Action(corenum,
- Action.TFWITHOBJ,
- cs.getRtask());
- // get the execution time of this task
- SimExecutionNode lastsenode = lastseNodes[corenum];
- Action startaction = senode2action.get(lastsenode);
- action2exetime.put(startaction, cp.getTimepoint() - lastsenode.getTimepoint());
-
- // get the infomation of how to send new objects
- Vector<ObjectSimulator> nobjs = task.getCurrentRun().getNewObjs();
- for(int j = 0; j < nobjs.size(); j++) {
- ObjectSimulator nobj = nobjs.elementAt(j);
- totransObjs.add(nobj);
-
- action.addNewObj(nobj.getCd(), Integer.valueOf(1));
- // send the new object to target core according to pre-decide scheduling
- Queue<Integer> cores = cs.getTargetCores(nobj.getCurrentFS());
- if(cores == null) {
- // this obj will reside on this core
- cs.addObject(nobj);
- } else {
- Integer targetCore = cores.poll();
- if(targetCore == corenum) {
- // this obj will reside on this core
- cs.addObject(nobj);
- } else {
- if(!transObjQueues.containsKey(targetCore)) {
- transObjQueues.put(targetCore, new LinkedList<ObjectInfo>());
- }
- Queue<ObjectInfo> tmpqueue = transObjQueues.get(targetCore);
- tmpqueue.add(new ObjectInfo(nobj));
- tmpqueue = null;
- }
- // enqueue this core again
- cores.add(targetCore);
+ Vector<ObjectSimulator> totransObjs = new Vector<ObjectSimulator>();
+ CoreSimulator cs = task.getCs();
+ int corenum = cs.getCoreNum();
+ Hashtable<Integer, Queue<ObjectInfo>> transObjQueues =
+ new Hashtable<Integer, Queue<ObjectInfo>>();
+ Action action = null;
+ if(task.getCurrentRun().getNewObjs() == null) {
+ // task finish without new objects
+ action = new Action(corenum,
+ Action.TASKFINISH,
+ cs.getRtask());
+ // get the execution time of this task
+ SimExecutionNode lastsenode = lastseNodes[corenum];
+ Action startaction = senode2action.get(lastsenode);
+ action2exetime.put(startaction, cp.getTimepoint() - lastsenode.getTimepoint());
+
+ } else {
+ // task finish with new objects
+ action = new Action(corenum,
+ Action.TFWITHOBJ,
+ cs.getRtask());
+ // get the execution time of this task
+ SimExecutionNode lastsenode = lastseNodes[corenum];
+ Action startaction = senode2action.get(lastsenode);
+ action2exetime.put(startaction, cp.getTimepoint() - lastsenode.getTimepoint());
+
+ // get the information of how to send new objects
+ Vector<ObjectSimulator> nobjs = task.getCurrentRun().getNewObjs();
+ for(int j = 0; j < nobjs.size(); j++) {
+ ObjectSimulator nobj = nobjs.elementAt(j);
+ totransObjs.add(nobj);
+
+ action.addNewObj(nobj.getCd(), Integer.valueOf(1));
+ // send the new object to target core according to pre-decide scheduling
+ Queue<Integer> cores = cs.getTargetCores(nobj.getCurrentFS());
+ if(cores == null) {
+ // this obj will reside on this core
+ cs.addObject(nobj);
+ } else {
+ Integer targetCore = cores.poll();
+ if(targetCore == corenum) {
+ // this obj will reside on this core
+ cs.addObject(nobj);
+ } else {
+ if(!transObjQueues.containsKey(targetCore)) {
+ transObjQueues.put(targetCore, new LinkedList<ObjectInfo>());
+ }
+ Queue<ObjectInfo> tmpqueue = transObjQueues.get(targetCore);
+ tmpqueue.add(new ObjectInfo(nobj));
+ tmpqueue = null;
+ }
+ // enqueue this core again
+ cores.add(targetCore);
+ }
+ cores = null;
+ // check if this object becoming shared or not
+ Vector<Integer> allycores = cs.getAllyCores(nobj.getCurrentFS());
+ if(allycores != null) {
+ nobj.setShared(true);
+ // TODO, temporarily send to at most 2 cores
+ int numtosend = allycores.size() > 2?2:allycores.size();
+ for(int k = 0; k < numtosend; ++k) {
+ Integer allyCore = allycores.elementAt(k);
+ if(allyCore == corenum) {
+ cs.addObject(nobj);
+ } else {
+ if(!transObjQueues.containsKey(allyCore)) {
+ transObjQueues.put(allyCore, new LinkedList<ObjectInfo>());
}
- cores = null;
- // check if this object becoming shared or not
- Vector<Integer> allycores = cs.getAllyCores(nobj.getCurrentFS());
- if(allycores != null) {
- nobj.setShared(true);
- // TODO, temporarily send to at most 2 cores
- int numtosend = allycores.size() > 2 ? 2 : allycores.size();
- for(int k = 0; k < numtosend; ++k) {
- Integer allyCore = allycores.elementAt(k);
- if(allyCore == corenum) {
- cs.addObject(nobj);
- } else {
- if(!transObjQueues.containsKey(allyCore)) {
- transObjQueues.put(allyCore, new LinkedList<ObjectInfo>());
- }
- Queue<ObjectInfo> tmpqueue = transObjQueues.get(allyCore);
- ObjectInfo nobjinfo = new ObjectInfo(nobj);
- if(!tmpqueue.contains(nobjinfo)) {
- tmpqueue.add(nobjinfo);
- }
- tmpqueue = null;
- }
- }
- allycores = null;
+ Queue<ObjectInfo> tmpqueue = transObjQueues.get(allyCore);
+ ObjectInfo nobjinfo = new ObjectInfo(nobj);
+ if(!tmpqueue.contains(nobjinfo)) {
+ tmpqueue.add(nobjinfo);
}
+ tmpqueue = null;
+ }
}
- nobjs = null;
+ allycores = null;
+ }
}
- cp.addAction(action);
-
- // group the new objects need to transfer
- Vector<ObjectSimulator> transObjs = cs.finishTask();
- if(transObjs != null) {
- totransObjs.addAll(transObjs);
- for(int j = 0; j < transObjs.size(); j++) {
- ObjectSimulator tobj = transObjs.elementAt(j);
- // send the object to target core according to pre-decide scheduling
- Queue<Integer> cores = cs.getTargetCores(tobj.getCurrentFS());
- tobj.setCurrentFS(cs.getTargetFState(tobj.getCurrentFS()));
- if(cores == null) {
- // this obj will reside on this core
- cs.addObject(tobj);
- } else {
- Integer targetCore = cores.poll();
- if(targetCore == corenum) {
- // this obj will reside on this core
- cs.addObject(tobj);
- } else {
- if(!transObjQueues.containsKey(targetCore)) {
- transObjQueues.put(targetCore, new LinkedList<ObjectInfo>());
- }
- Queue<ObjectInfo> tmpqueue = transObjQueues.get(targetCore);
- tmpqueue.add(new ObjectInfo(tobj));
- tmpqueue = null;
- }
- cores.add(targetCore);
+ nobjs = null;
+ }
+ cp.addAction(action);
+
+ // group the new objects need to transfer
+ Vector<ObjectSimulator> transObjs = cs.finishTask();
+ if(transObjs != null) {
+ totransObjs.addAll(transObjs);
+ for(int j = 0; j < transObjs.size(); j++) {
+ ObjectSimulator tobj = transObjs.elementAt(j);
+ // send the object to target core according to pre-decide scheduling
+ Queue<Integer> cores = cs.getTargetCores(tobj.getCurrentFS());
+ tobj.setCurrentFS(cs.getTargetFState(tobj.getCurrentFS()));
+ if(cores == null) {
+ // this obj will reside on this core
+ cs.addObject(tobj);
+ } else {
+ Integer targetCore = cores.poll();
+ if(targetCore == corenum) {
+ // this obj will reside on this core
+ cs.addObject(tobj);
+ } else {
+ if(!transObjQueues.containsKey(targetCore)) {
+ transObjQueues.put(targetCore, new LinkedList<ObjectInfo>());
+ }
+ Queue<ObjectInfo> tmpqueue = transObjQueues.get(targetCore);
+ tmpqueue.add(new ObjectInfo(tobj));
+ tmpqueue = null;
+ }
+ cores.add(targetCore);
+ }
+ cores = null;
+ // check if this object becoming shared or not
+ Vector<Integer> allycores = cs.getAllyCores(tobj.getCurrentFS());
+ if(allycores != null) {
+ tobj.setShared(true);
+ // TODO, temporarily send to at most 2 cores
+ int numtosend = allycores.size() > 2?2:allycores.size();
+ for(int k = 0; k < numtosend; ++k) {
+ Integer allyCore = allycores.elementAt(k);
+ if(allyCore == corenum) {
+ cs.addObject(tobj);
+ } else {
+ if(!transObjQueues.containsKey(allyCore)) {
+ transObjQueues.put(allyCore, new LinkedList<ObjectInfo>());
}
- cores = null;
- // check if this object becoming shared or not
- Vector<Integer> allycores = cs.getAllyCores(tobj.getCurrentFS());
- if(allycores != null) {
- tobj.setShared(true);
- // TODO, temporarily send to at most 2 cores
- int numtosend = allycores.size() > 2 ? 2 : allycores.size();
- for(int k = 0; k < numtosend; ++k) {
- Integer allyCore = allycores.elementAt(k);
- if(allyCore == corenum) {
- cs.addObject(tobj);
- } else {
- if(!transObjQueues.containsKey(allyCore)) {
- transObjQueues.put(allyCore, new LinkedList<ObjectInfo>());
- }
- Queue<ObjectInfo> tmpqueue = transObjQueues.get(allyCore);
- ObjectInfo nobjinfo = new ObjectInfo(tobj);
- if(!tmpqueue.contains(nobjinfo)) {
- tmpqueue.add(nobjinfo);
- }
- tmpqueue = null;
- }
- }
- allycores = null;
+ Queue<ObjectInfo> tmpqueue = transObjQueues.get(allyCore);
+ ObjectInfo nobjinfo = new ObjectInfo(tobj);
+ if(!tmpqueue.contains(nobjinfo)) {
+ tmpqueue.add(nobjinfo);
}
+ tmpqueue = null;
+ }
}
+ allycores = null;
+ }
}
- transObjs = null;
-
- // add 'transport' tasks
- Iterator it_entries = transObjQueues.entrySet().iterator();
- while(it_entries.hasNext()) {
- Entry<Integer, Queue<ObjectInfo>> tmpentry = (Entry<Integer, Queue<ObjectInfo>>)it_entries.next();
- Integer tmpCoreNum = tmpentry.getKey();
- Queue<ObjectInfo> nobjs = tmpentry.getValue();
- TransTaskSimulator tmptask = new TransTaskSimulator(cs, tmpCoreNum, nobjs);
- this.tasks.add(tmptask);
- tttasks.add(tmptask);
- tmpentry = null;
- nobjs = null;
- }
- it_entries = null;
- transObjQueues = null;
-
- return totransObjs;
+ }
+ transObjs = null;
+
+ // add 'transport' tasks
+ Iterator it_entries = transObjQueues.entrySet().iterator();
+ while(it_entries.hasNext()) {
+ Entry<Integer, Queue<ObjectInfo>> tmpentry = (Entry<Integer, Queue<ObjectInfo>>)it_entries.next();
+ Integer tmpCoreNum = tmpentry.getKey();
+ Queue<ObjectInfo> nobjs = tmpentry.getValue();
+ TransTaskSimulator tmptask = new TransTaskSimulator(cs, tmpCoreNum, nobjs);
+ this.tasks.add(tmptask);
+ tttasks.add(tmptask);
+ tmpentry = null;
+ nobjs = null;
+ }
+ it_entries = null;
+ transObjQueues = null;
+
+ return totransObjs;
}
private void generateNewTask(CoreSimulator cs,
- CheckPoint cp,
- Vector<ObjectSimulator> nobjs,
- Vector<TransTaskSimulator> tttasks,
- Hashtable<SimExecutionNode, Action> senode2action,
- SimExecutionNode[] lastseNodes,
- Hashtable<Action, Long> action2exetime,
- Hashtable<TransTaskSimulator, SimExecutionNode> tttask2senode,
- Hashtable<Integer, Long> obj2transtime,
- Hashtable<Integer, SimExecutionEdge> obj2lastseedge) {
- TaskSimulator newTask = cs.process();
- int corenum = cs.getCoreNum();
- SimExecutionEdge seEdge = null;
- if(newTask != null) {
- this.tasks.add(newTask);
- // add a TASKSTART action into this checkpoint
- Action action = new Action(corenum,
- Action.TASKSTART,
- newTask);
- cp.addAction(action);
- if(!(newTask instanceof TransTaskSimulator)) {
- cp.removeSpareCore(cs.getCoreNum());
- SimExecutionNode seNode = new SimExecutionNode(corenum, this.processTime);
- seNode.setSpareCores(cp.getSpareCores());
- senode2action.put(seNode, action);
- action2exetime.put(action, (long)-1);
- SimExecutionNode lastsenode = lastseNodes[corenum];
- // create edges between previous senode on this core to this node
- if(lastsenode != null) {
- Action tmpaction = senode2action.get(lastsenode);
- long weight = tmpaction != null? action2exetime.get(tmpaction):0;
- seEdge = new SimExecutionEdge(seNode,
- lastsenode.getCoreNum(),
- tmpaction!= null?tmpaction.getTd():null,
- weight,
- tmpaction!=null?tmpaction.getTaskParams():null);
- lastsenode.addEdge(seEdge);
- }
- lastseNodes[corenum] = seNode;
- for(int tmpindex = 0; tmpindex < tttasks.size(); tmpindex++) {
- tttask2senode.put(tttasks.elementAt(tmpindex), seNode);
- }
- }
- } else if(tttasks.size() > 0) {
- SimExecutionNode seNode = new SimExecutionNode(corenum, this.processTime);
- //seNode.setSpareCores(cp.getSpareCores());
- // no action associated here
- SimExecutionNode lastsenode = lastseNodes[corenum];
- // create edges between previous senode on this core to this node
- if(lastsenode != null) {
- Action tmpaction = senode2action.get(lastsenode);
- long weight = action2exetime.get(tmpaction);
- seEdge = new SimExecutionEdge(seNode,
- lastsenode.getCoreNum(),
- tmpaction.getTd(),
- weight,
- tmpaction.getTaskParams());
- lastsenode.addEdge(seEdge);
- }
- lastseNodes[corenum] = seNode;
- for(int tmpindex = 0; tmpindex < tttasks.size(); tmpindex++) {
- tttask2senode.put(tttasks.elementAt(tmpindex), seNode);
- }
+ CheckPoint cp,
+ Vector<ObjectSimulator> nobjs,
+ Vector<TransTaskSimulator> tttasks,
+ Hashtable<SimExecutionNode, Action> senode2action,
+ SimExecutionNode[] lastseNodes,
+ Hashtable<Action, Long> action2exetime,
+ Hashtable<TransTaskSimulator, SimExecutionNode> tttask2senode,
+ Hashtable<Integer, Long> obj2transtime,
+ Hashtable<Integer, SimExecutionEdge> obj2lastseedge) {
+ TaskSimulator newTask = cs.process();
+ int corenum = cs.getCoreNum();
+ SimExecutionEdge seEdge = null;
+ if(newTask != null) {
+ this.tasks.add(newTask);
+ // add a TASKSTART action into this checkpoint
+ Action action = new Action(corenum,
+ Action.TASKSTART,
+ newTask);
+ cp.addAction(action);
+ if(!(newTask instanceof TransTaskSimulator)) {
+ cp.removeSpareCore(cs.getCoreNum());
+ SimExecutionNode seNode = new SimExecutionNode(corenum, this.processTime);
+ seNode.setSpareCores(cp.getSpareCores());
+ senode2action.put(seNode, action);
+ action2exetime.put(action, (long)-1);
+ SimExecutionNode lastsenode = lastseNodes[corenum];
+ // create edges between previous senode on this core to this node
+ if(lastsenode != null) {
+ Action tmpaction = senode2action.get(lastsenode);
+ long weight = tmpaction != null?action2exetime.get(tmpaction):0;
+ seEdge = new SimExecutionEdge(seNode,
+ lastsenode.getCoreNum(),
+ tmpaction!= null?tmpaction.getTd():null,
+ weight,
+ tmpaction!=null?tmpaction.getTaskParams():null);
+ lastsenode.addEdge(seEdge);
+ }
+ lastseNodes[corenum] = seNode;
+ for(int tmpindex = 0; tmpindex < tttasks.size(); tmpindex++) {
+ tttask2senode.put(tttasks.elementAt(tmpindex), seNode);
+ }
}
- if(seEdge != null) {
- // setup data dependencies for the task
- Vector<Integer> taskparams = seEdge.getTaskparams();
- if(taskparams != null) {
- for(int i = 0; i < taskparams.size(); i++) {
- Integer tparam = taskparams.elementAt(i);
- SimExecutionEdge lastedge = obj2lastseedge.get(tparam);
- if(lastedge != null) {
- if(lastedge.getCoreNum() != seEdge.getCoreNum()) {
- // the obj is transferred from another core
- // create an seEdge for this transfer
- long weight = obj2transtime.get(tparam);
- SimExecutionEdge transseEdge = new SimExecutionEdge((SimExecutionNode)seEdge.getSource(),
- lastedge.getCoreNum(),
- null, // TODO: not sure if this is enough
- weight,
- null);
- if(((SimExecutionNode)seEdge.getSource()).getTimepoint() <
- ((SimExecutionNode)lastedge.getTarget()).getTimepoint()) {
- System.err.println("ScheduleSimulator:757");
- System.exit(-1);
- }
- lastedge.getTarget().addEdge(transseEdge);
- transseEdge.addPredicate(lastedge);
- seEdge.addPredicate(transseEdge);
- } else {
- seEdge.addPredicate(lastedge);
- }
- }
- // update the last edge associated to the parameter obj
- obj2lastseedge.put(tparam, seEdge);
- }
- }
- taskparams = null;
-
- // set seEdge as the last execution edge for all newly created objs
- if(nobjs != null) {
- for(int i = 0; i < nobjs.size(); i++) {
- ObjectSimulator nobj = nobjs.elementAt(i);
- obj2lastseedge.put(nobj.getOid(), seEdge);
+ } else if(tttasks.size() > 0) {
+ SimExecutionNode seNode = new SimExecutionNode(corenum, this.processTime);
+ //seNode.setSpareCores(cp.getSpareCores());
+ // no action associated here
+ SimExecutionNode lastsenode = lastseNodes[corenum];
+ // create edges between previous senode on this core to this node
+ if(lastsenode != null) {
+ Action tmpaction = senode2action.get(lastsenode);
+ long weight = action2exetime.get(tmpaction);
+ seEdge = new SimExecutionEdge(seNode,
+ lastsenode.getCoreNum(),
+ tmpaction.getTd(),
+ weight,
+ tmpaction.getTaskParams());
+ lastsenode.addEdge(seEdge);
+ }
+ lastseNodes[corenum] = seNode;
+ for(int tmpindex = 0; tmpindex < tttasks.size(); tmpindex++) {
+ tttask2senode.put(tttasks.elementAt(tmpindex), seNode);
+ }
+ }
+ if(seEdge != null) {
+ // setup data dependencies for the task
+ Vector<Integer> taskparams = seEdge.getTaskparams();
+ if(taskparams != null) {
+ for(int i = 0; i < taskparams.size(); i++) {
+ Integer tparam = taskparams.elementAt(i);
+ SimExecutionEdge lastedge = obj2lastseedge.get(tparam);
+ if(lastedge != null) {
+ if(lastedge.getCoreNum() != seEdge.getCoreNum()) {
+ // the obj is transferred from another core
+ // create an seEdge for this transfer
+ long weight = obj2transtime.get(tparam);
+ SimExecutionEdge transseEdge = new SimExecutionEdge((SimExecutionNode)seEdge.getSource(),
+ lastedge.getCoreNum(),
+ null, // TODO: not sure if this is enough
+ weight,
+ null);
+ if(((SimExecutionNode)seEdge.getSource()).getTimepoint() <
+ ((SimExecutionNode)lastedge.getTarget()).getTimepoint()) {
+ System.err.println("ScheduleSimulator:757");
+ System.exit(-1);
}
+ lastedge.getTarget().addEdge(transseEdge);
+ transseEdge.addPredicate(lastedge);
+ seEdge.addPredicate(transseEdge);
+ } else {
+ seEdge.addPredicate(lastedge);
+ }
}
+ // update the last edge associated to the parameter obj
+ obj2lastseedge.put(tparam, seEdge);
+ }
}
+ taskparams = null;
+
+ // set seEdge as the last execution edge for all newly created objs
+ if(nobjs != null) {
+ for(int i = 0; i < nobjs.size(); i++) {
+ ObjectSimulator nobj = nobjs.elementAt(i);
+ obj2lastseedge.put(nobj.getOid(), seEdge);
+ }
+ }
+ }
}
-
+
private void finishTaskAbnormal(CoreSimulator cs,
- CheckPoint cp,
- Hashtable<SimExecutionNode, Action> senode2action,
- SimExecutionNode[] lastseNodes,
- Hashtable<Action, Long> action2exetime,
- int type) {
- Action action = new Action(cs.getCoreNum(),
- type,
- cs.getRtask());
- cp.addAction(action);
- cs.finishTask();
-
- // remove the corresponding action on the starting SimExecutionNode
- SimExecutionNode lastsenode = lastseNodes[cs.getCoreNum()];
- /*if(lastsenode.getInedgeVector().size() > 0) {
- //SimExecutionEdge inseedge = (SimExecutionEdge)lastsenode.getinedge(0);
- //lastseNodes[cs.getCoreNum()] = (SimExecutionNode)inseedge.getSource();
- } /*else {
- lastseNodes[cs.getCoreNum()] = null;
- }*/
- Action tmpaction = senode2action.remove(lastsenode);
- action2exetime.remove(tmpaction);
+ CheckPoint cp,
+ Hashtable<SimExecutionNode, Action> senode2action,
+ SimExecutionNode[] lastseNodes,
+ Hashtable<Action, Long> action2exetime,
+ int type) {
+ Action action = new Action(cs.getCoreNum(),
+ type,
+ cs.getRtask());
+ cp.addAction(action);
+ cs.finishTask();
+
+ // remove the corresponding action on the starting SimExecutionNode
+ SimExecutionNode lastsenode = lastseNodes[cs.getCoreNum()];
+ /*if(lastsenode.getInedgeVector().size() > 0) {
+ //SimExecutionEdge inseedge = (SimExecutionEdge)lastsenode.getinedge(0);
+ //lastseNodes[cs.getCoreNum()] = (SimExecutionNode)inseedge.getSource();
+ } /*else {
+ lastseNodes[cs.getCoreNum()] = null;
+ }*/
+ Action tmpaction = senode2action.remove(lastsenode);
+ action2exetime.remove(tmpaction);
}
-
+
public class CheckPoint {
private long timepoint;
private Vector<Action> actions;
private Vector<Integer> spareCores;
- public CheckPoint(long timepoint,
- int corenum) {
+ public CheckPoint(long timepoint,
+ int corenum) {
super();
this.timepoint = timepoint;
this.actions = new Vector<Action>();
this.spareCores = new Vector<Integer>();
for(int i = 0; i < corenum; i++) {
- this.spareCores.add(i);
+ this.spareCores.add(i);
}
}
public void addAction(Action action) {
this.actions.add(action);
}
-
+
public void removeSpareCore(int core) {
- for(int i = 0 ; i < this.spareCores.size(); i++) {
- if(this.spareCores.elementAt(i) == core) {
- for(int j = i; j < this.spareCores.size() - 1; j++) {
- this.spareCores.setElementAt(this.spareCores.elementAt(j + 1), j);
- }
- this.spareCores.remove(this.spareCores.size() - 1);
- return;
- }
+ for(int i = 0; i < this.spareCores.size(); i++) {
+ if(this.spareCores.elementAt(i) == core) {
+ for(int j = i; j < this.spareCores.size() - 1; j++) {
+ this.spareCores.setElementAt(this.spareCores.elementAt(j + 1), j);
+ }
+ this.spareCores.remove(this.spareCores.size() - 1);
+ return;
}
+ }
}
public long getTimepoint() {
}
public Vector<Integer> getSpareCores() {
- return spareCores;
+ return spareCores;
}
}
private int nObjNum;
private ClassDescriptor transObj;
- public Action(int corenum,
- int type) {
+ public Action(int corenum,
+ int type) {
this.coreNum = corenum;
this.type = type;
this.td = null;
this.nObjNum = -1;
this.transObj = null;
}
-
- public Action(int corenum,
- int type,
- TaskSimulator ts) {
- assert(this.type != ADDOBJ);
-
- this.coreNum = corenum;
- this.type = type;
- this.td = ts.getTd();
- Vector<Queue<ObjectSimulator>> paraQueues = ts.getParaQueues();
- if(this.type == TASKSTART) {
- this.taskparams = new Vector<Integer>();
- for(int i = 0; i < paraQueues.size(); i++) {
- ObjectSimulator tpara = paraQueues.elementAt(i).peek();
- this.taskparams.add(tpara.getOid());
- }
- } else {
- this.taskparams = null;
- }
- paraQueues = null;
- if(this.type == TFWITHOBJ) {
- this.nObjs = new Hashtable<ClassDescriptor, Integer>();
- } else {
- this.nObjs = null;
+
+ public Action(int corenum,
+ int type,
+ TaskSimulator ts) {
+ assert(this.type != ADDOBJ);
+
+ this.coreNum = corenum;
+ this.type = type;
+ this.td = ts.getTd();
+ Vector<Queue<ObjectSimulator>> paraQueues = ts.getParaQueues();
+ if(this.type == TASKSTART) {
+ this.taskparams = new Vector<Integer>();
+ for(int i = 0; i < paraQueues.size(); i++) {
+ ObjectSimulator tpara = paraQueues.elementAt(i).peek();
+ this.taskparams.add(tpara.getOid());
}
- this.nObjNum = -1;
- this.transObj = null;
+ } else {
+ this.taskparams = null;
+ }
+ paraQueues = null;
+ if(this.type == TFWITHOBJ) {
+ this.nObjs = new Hashtable<ClassDescriptor, Integer>();
+ } else {
+ this.nObjs = null;
+ }
+ this.nObjNum = -1;
+ this.transObj = null;
}
- public Action(int corenum,
- int type,
- int objNum,
- ClassDescriptor transObj) {
+ public Action(int corenum,
+ int type,
+ int objNum,
+ ClassDescriptor transObj) {
assert(type == ADDOBJ);
this.coreNum = corenum;
this.type = type;
this.transObj = transObj;
}
- public void addNewObj(ClassDescriptor cd,
- Integer num) {
+ public void addNewObj(ClassDescriptor cd,
+ Integer num) {
assert(this.type == TFWITHOBJ);
if(this.nObjs.containsKey(cd)) {
public TaskDescriptor getTd() {
return td;
}
-
+
public Vector<Integer> getTaskParams() {
- return this.taskparams;
+ return this.taskparams;
}
public Hashtable<ClassDescriptor, Integer> getNObjs() {
import Util.Namer;
public class SchedulingUtil {
-
- public static Vector<ScheduleNode> generateScheduleGraph(State state,
- Vector<ScheduleNode> scheduleNodes,
- Vector<ScheduleEdge> scheduleEdges,
- Vector<Vector<ScheduleNode>> mapping,
- int gid) {
- Vector<ScheduleNode> result = new Vector<ScheduleNode>();
-
- // clone the ScheduleNodes
- Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>> sn2hash =
- new Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>>();
- Hashtable<ScheduleNode, ScheduleNode> sn2sn =
- new Hashtable<ScheduleNode, ScheduleNode>();
- cloneScheduleGraph(scheduleNodes,
- scheduleEdges,
- sn2hash,
- sn2sn,
- result,
- gid);
-
- // combine those nodes in combine with corresponding rootnodes
- for(int i = 0; i < mapping.size(); i++) {
- Vector<ScheduleNode> sNodes = mapping.elementAt(i);
- if(sNodes != null) {
- ScheduleNode rootnode = sNodes.elementAt(0);
- for(int j = 1; j < sNodes.size(); j++) {
- ScheduleNode tocombine = sn2sn.get(sNodes.elementAt(j));
- ScheduleNode root = sn2sn.get(rootnode);
- ScheduleEdge se = (ScheduleEdge)tocombine.inedges().next();
- try {
- if(root.equals(((ScheduleNode)se.getSource()))) {
- root.mergeSEdge(se);
- if(ScheduleEdge.NEWEDGE == se.getType()) {
- // As se has been changed into an internal edge inside a ScheduleNode,
- // change the source and target of se from original ScheduleNodes into ClassNodes.
- se.setTarget(se.getTargetCNode());
- //se.setSource(se.getSourceCNode());
- //se.getTargetCNode().addEdge(se);
- se.getSourceCNode().addEdge(se);
- }
- } else {
- root.mergeSNode(tocombine);
- }
- } catch(Exception e) {
- e.printStackTrace();
- System.exit(-1);
- }
- result.removeElement(tocombine);
- }
+
+ public static Vector<ScheduleNode> generateScheduleGraph(State state,
+ Vector<ScheduleNode> scheduleNodes,
+ Vector<ScheduleEdge> scheduleEdges,
+ Vector<Vector<ScheduleNode>> mapping,
+ int gid) {
+ Vector<ScheduleNode> result = new Vector<ScheduleNode>();
+
+ // clone the ScheduleNodes
+ Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>> sn2hash =
+ new Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>>();
+ Hashtable<ScheduleNode, ScheduleNode> sn2sn =
+ new Hashtable<ScheduleNode, ScheduleNode>();
+ cloneScheduleGraph(scheduleNodes,
+ scheduleEdges,
+ sn2hash,
+ sn2sn,
+ result,
+ gid);
+
+ // combine those nodes in combine with corresponding rootnodes
+ for(int i = 0; i < mapping.size(); i++) {
+ Vector<ScheduleNode> sNodes = mapping.elementAt(i);
+ if(sNodes != null) {
+ ScheduleNode rootnode = sNodes.elementAt(0);
+ for(int j = 1; j < sNodes.size(); j++) {
+ ScheduleNode tocombine = sn2sn.get(sNodes.elementAt(j));
+ ScheduleNode root = sn2sn.get(rootnode);
+ ScheduleEdge se = (ScheduleEdge)tocombine.inedges().next();
+ try {
+ if(root.equals(((ScheduleNode)se.getSource()))) {
+ root.mergeSEdge(se);
+ if(ScheduleEdge.NEWEDGE == se.getType()) {
+ // As se has been changed into an internal edge inside a ScheduleNode,
+ // change the source and target of se from original ScheduleNodes into ClassNodes.
+ se.setTarget(se.getTargetCNode());
+ //se.setSource(se.getSourceCNode());
+ //se.getTargetCNode().addEdge(se);
+ se.getSourceCNode().addEdge(se);
+ }
+ } else {
+ root.mergeSNode(tocombine);
}
+ } catch(Exception e) {
+ e.printStackTrace();
+ System.exit(-1);
+ }
+ result.removeElement(tocombine);
}
+ }
+ }
- assignCids(result);
-
- sn2hash.clear();
- sn2hash = null;
- sn2sn.clear();
- sn2sn = null;
+ assignCids(result);
- if(state.PRINTSCHEDULING) {
- String path = state.outputdir + "scheduling_" + gid + ".dot";
- SchedulingUtil.printScheduleGraph(path, result);
- }
+ sn2hash.clear();
+ sn2hash = null;
+ sn2sn.clear();
+ sn2sn = null;
- return result;
+ if(state.PRINTSCHEDULING) {
+ String path = state.outputdir + "scheduling_" + gid + ".dot";
+ SchedulingUtil.printScheduleGraph(path, result);
}
- public static Vector<ScheduleNode> generateScheduleGraph(State state,
- Vector<ScheduleNode> scheduleNodes,
- Vector<ScheduleEdge> scheduleEdges,
- Vector<Vector<ScheduleNode>> rootnodes,
- Vector<Vector<CombinationUtil.Combine>> combine,
- int gid) {
- Vector<ScheduleNode> result = new Vector<ScheduleNode>();
-
- // clone the ScheduleNodes
- Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>> sn2hash =
- new Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>>();
- Hashtable<ScheduleNode, ScheduleNode> sn2sn =
- new Hashtable<ScheduleNode, ScheduleNode>();
- cloneScheduleGraph(scheduleNodes,
- scheduleEdges,
- sn2hash,
- sn2sn,
- result,
- gid);
-
- // combine those nodes in combine with corresponding rootnodes
- for(int i = 0; i < combine.size(); i++) {
- if(combine.elementAt(i) != null) {
- for(int j = 0; j < combine.elementAt(i).size(); j++) {
- CombinationUtil.Combine tmpcombine = combine.elementAt(i).elementAt(j);
- ScheduleNode tocombine = sn2sn.get(tmpcombine.node);
- ScheduleNode root = sn2sn.get(rootnodes.elementAt(tmpcombine.root).elementAt(tmpcombine.index));
- ScheduleEdge se = (ScheduleEdge)tocombine.inedges().next();
- try {
- if(root.equals(((ScheduleNode)se.getSource()))) {
- root.mergeSEdge(se);
- if(ScheduleEdge.NEWEDGE == se.getType()) {
- // As se has been changed into an internal edge inside a ScheduleNode,
- // change the source and target of se from original ScheduleNodes into ClassNodes.
- se.setTarget(se.getTargetCNode());
- //se.setSource(se.getSourceCNode());
- //se.getTargetCNode().addEdge(se);
- se.getSourceCNode().addEdge(se);
- }
- } else {
- root.mergeSNode(tocombine);
- }
- } catch(Exception e) {
- e.printStackTrace();
- System.exit(-1);
- }
- result.removeElement(tocombine);
- }
+ return result;
+ }
+
+ public static Vector<ScheduleNode> generateScheduleGraph(State state,
+ Vector<ScheduleNode> scheduleNodes,
+ Vector<ScheduleEdge> scheduleEdges,
+ Vector<Vector<ScheduleNode>> rootnodes,
+ Vector<Vector<CombinationUtil.Combine>> combine,
+ int gid) {
+ Vector<ScheduleNode> result = new Vector<ScheduleNode>();
+
+ // clone the ScheduleNodes
+ Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>> sn2hash =
+ new Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>>();
+ Hashtable<ScheduleNode, ScheduleNode> sn2sn =
+ new Hashtable<ScheduleNode, ScheduleNode>();
+ cloneScheduleGraph(scheduleNodes,
+ scheduleEdges,
+ sn2hash,
+ sn2sn,
+ result,
+ gid);
+
+ // combine those nodes in combine with corresponding rootnodes
+ for(int i = 0; i < combine.size(); i++) {
+ if(combine.elementAt(i) != null) {
+ for(int j = 0; j < combine.elementAt(i).size(); j++) {
+ CombinationUtil.Combine tmpcombine = combine.elementAt(i).elementAt(j);
+ ScheduleNode tocombine = sn2sn.get(tmpcombine.node);
+ ScheduleNode root = sn2sn.get(rootnodes.elementAt(tmpcombine.root).elementAt(tmpcombine.index));
+ ScheduleEdge se = (ScheduleEdge)tocombine.inedges().next();
+ try {
+ if(root.equals(((ScheduleNode)se.getSource()))) {
+ root.mergeSEdge(se);
+ if(ScheduleEdge.NEWEDGE == se.getType()) {
+ // As se has been changed into an internal edge inside a ScheduleNode,
+ // change the source and target of se from original ScheduleNodes into ClassNodes.
+ se.setTarget(se.getTargetCNode());
+ //se.setSource(se.getSourceCNode());
+ //se.getTargetCNode().addEdge(se);
+ se.getSourceCNode().addEdge(se);
+ }
+ } else {
+ root.mergeSNode(tocombine);
}
+ } catch(Exception e) {
+ e.printStackTrace();
+ System.exit(-1);
+ }
+ result.removeElement(tocombine);
}
+ }
+ }
- assignCids(result);
+ assignCids(result);
- sn2hash.clear();
- sn2hash = null;
- sn2sn.clear();
- sn2sn = null;
+ sn2hash.clear();
+ sn2hash = null;
+ sn2sn.clear();
+ sn2sn = null;
- if(state.PRINTSCHEDULING) {
- String path = state.outputdir + "scheduling_" + gid + ".dot";
- SchedulingUtil.printScheduleGraph(path, result);
- }
+ if(state.PRINTSCHEDULING) {
+ String path = state.outputdir + "scheduling_" + gid + ".dot";
+ SchedulingUtil.printScheduleGraph(path, result);
+ }
- return result;
+ return result;
+ }
+
+ public static void cloneScheduleGraph(Vector<ScheduleNode> scheduleNodes,
+ Vector<ScheduleEdge> scheduleEdges,
+ Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>> sn2hash,
+ Hashtable<ScheduleNode, ScheduleNode> sn2sn,
+ Vector<ScheduleNode> result,
+ int gid) {
+ for(int i = 0; i < scheduleNodes.size(); i++) {
+ Hashtable<ClassNode, ClassNode> cn2cn = new Hashtable<ClassNode, ClassNode>();
+ ScheduleNode tocopy = scheduleNodes.elementAt(i);
+ ScheduleNode temp = (ScheduleNode)tocopy.clone(cn2cn, gid);
+ result.add(i, temp);
+ sn2hash.put(temp, cn2cn);
+ sn2sn.put(tocopy, temp);
+ cn2cn = null;
}
+ // clone the ScheduleEdges
+ for(int i = 0; i < scheduleEdges.size(); i++) {
+ ScheduleEdge sse = scheduleEdges.elementAt(i);
+ ScheduleNode csource = sn2sn.get(sse.getSource());
+ ScheduleNode ctarget = sn2sn.get(sse.getTarget());
+ Hashtable<ClassNode, ClassNode> sourcecn2cn = sn2hash.get(csource);
+ Hashtable<ClassNode, ClassNode> targetcn2cn = sn2hash.get(ctarget);
+ ScheduleEdge se = null;
+ switch(sse.getType()) {
+ case ScheduleEdge.NEWEDGE: {
+ se = new ScheduleEdge(ctarget, "new", sse.getFstate(), sse.getType(), gid); //new ScheduleEdge(ctarget, "new", sse.getClassDescriptor(), sse.getIsNew(), gid);
+ se.setProbability(sse.getProbability());
+ se.setNewRate(sse.getNewRate());
+ break;
+ }
- public static void cloneScheduleGraph(Vector<ScheduleNode> scheduleNodes,
- Vector<ScheduleEdge> scheduleEdges,
- Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>> sn2hash,
- Hashtable<ScheduleNode, ScheduleNode> sn2sn,
- Vector<ScheduleNode> result,
- int gid) {
- for(int i = 0; i < scheduleNodes.size(); i++) {
- Hashtable<ClassNode, ClassNode> cn2cn = new Hashtable<ClassNode, ClassNode>();
- ScheduleNode tocopy = scheduleNodes.elementAt(i);
- ScheduleNode temp = (ScheduleNode)tocopy.clone(cn2cn, gid);
- result.add(i, temp);
- sn2hash.put(temp, cn2cn);
- sn2sn.put(tocopy, temp);
- cn2cn = null;
+ case ScheduleEdge.TRANSEDGE: {
+ se = new ScheduleEdge(ctarget, "transmit", sse.getFstate(), sse.getType(), gid); //new ScheduleEdge(ctarget, "transmit", sse.getClassDescriptor(), false, gid);
+ break;
+ }
+ }
+ se.setSourceCNode(sourcecn2cn.get(sse.getSourceCNode()));
+ se.setTargetCNode(targetcn2cn.get(sse.getTargetCNode()));
+ se.setFEdge(sse.getFEdge());
+ se.setTargetFState(sse.getTargetFState());
+ se.setIsclone(true);
+ csource.addEdge(se);
+ sourcecn2cn = null;
+ targetcn2cn = null;
+ }
+ }
+
+ public static void assignCids(Vector<ScheduleNode> result) {
+ Hashtable<Integer, Integer> hcid2cid = new Hashtable<Integer, Integer>();
+ int ncid = 0;
+ for(int i = 0; i < result.size(); i++) {
+ ScheduleNode tmpnode = result.elementAt(i);
+ tmpnode.computeHashcid();
+ int hcid = tmpnode.getHashcid();
+ if(hcid2cid.containsKey(hcid)) {
+ // already have a cid for this node
+ tmpnode.setCid(hcid2cid.get(hcid));
+ } else {
+ // generate a new cid for such node
+ tmpnode.setCid(ncid);
+ hcid2cid.put(hcid, ncid);
+ ncid++;
+ }
+ }
+ hcid2cid.clear();
+ hcid2cid = null;
+ }
+
+ // Organize the scheduleNodes in order of their cid
+ public static Vector<Vector<ScheduleNode>>
+ rangeScheduleNodes(Vector<ScheduleNode> scheduleNodes) {
+ try {
+ Vector<Vector<ScheduleNode>> sNodeVecs = new Vector<Vector<ScheduleNode>>();
+
+ for(int i = 0; i < scheduleNodes.size(); i++) {
+ ScheduleNode tmpn = scheduleNodes.elementAt(i);
+ int tmpcid = tmpn.getCid();
+ int index = 0;
+ for(index = 0; index < sNodeVecs.size(); index++) {
+ if(sNodeVecs.elementAt(index).elementAt(0).getCid() > tmpcid) {
+ // find the place to insert
+ sNodeVecs.insertElementAt(new Vector<ScheduleNode>(), index);
+ /*sNodeVecs.add(sNodeVecs.lastElement());
+ for(int j = sNodeVecs.size() - 2; j > index; j--) {
+ sNodeVecs.setElementAt(sNodeVecs.elementAt(j - 1), j);
+ }
+ sNodeVecs.setElementAt(new Vector<ScheduleNode>(), index);*/
+ break;
+ } else if(sNodeVecs.elementAt(index).elementAt(0).getCid() == tmpcid) {
+ break;
+ }
}
- // clone the ScheduleEdges
- for(int i = 0; i < scheduleEdges.size(); i++) {
- ScheduleEdge sse = scheduleEdges.elementAt(i);
- ScheduleNode csource = sn2sn.get(sse.getSource());
- ScheduleNode ctarget = sn2sn.get(sse.getTarget());
- Hashtable<ClassNode, ClassNode> sourcecn2cn = sn2hash.get(csource);
- Hashtable<ClassNode, ClassNode> targetcn2cn = sn2hash.get(ctarget);
- ScheduleEdge se = null;
- switch(sse.getType()) {
- case ScheduleEdge.NEWEDGE: {
- se = new ScheduleEdge(ctarget, "new", sse.getFstate(), sse.getType(), gid); //new ScheduleEdge(ctarget, "new", sse.getClassDescriptor(), sse.getIsNew(), gid);
- se.setProbability(sse.getProbability());
- se.setNewRate(sse.getNewRate());
- break;
- }
-
- case ScheduleEdge.TRANSEDGE: {
- se = new ScheduleEdge(ctarget, "transmit", sse.getFstate(), sse.getType(), gid); //new ScheduleEdge(ctarget, "transmit", sse.getClassDescriptor(), false, gid);
- break;
- }
- }
- se.setSourceCNode(sourcecn2cn.get(sse.getSourceCNode()));
- se.setTargetCNode(targetcn2cn.get(sse.getTargetCNode()));
- se.setFEdge(sse.getFEdge());
- se.setTargetFState(sse.getTargetFState());
- se.setIsclone(true);
- csource.addEdge(se);
- sourcecn2cn = null;
- targetcn2cn = null;
+ if(index == sNodeVecs.size()) {
+ sNodeVecs.add(new Vector<ScheduleNode>());
}
- }
- public static void assignCids(Vector<ScheduleNode> result) {
- Hashtable<Integer, Integer> hcid2cid = new Hashtable<Integer, Integer>();
- int ncid = 0;
- for(int i = 0; i < result.size(); i++) {
- ScheduleNode tmpnode = result.elementAt(i);
- tmpnode.computeHashcid();
- int hcid = tmpnode.getHashcid();
- if(hcid2cid.containsKey(hcid)) {
- // already have a cid for this node
- tmpnode.setCid(hcid2cid.get(hcid));
- } else {
- // generate a new cid for such node
- tmpnode.setCid(ncid);
- hcid2cid.put(hcid, ncid);
- ncid++;
- }
+ /*int index = tmpcid;
+ while(sNodeVecs.size() <= index) {
+ sNodeVecs.add(null);
+ }
+ if(sNodeVecs.elementAt(index) == null) {
+ sNodeVecs.setElementAt(new Vector<ScheduleNode>(), index);
+ }*/
+ if(!sNodeVecs.elementAt(index).contains(tmpn)) {
+ sNodeVecs.elementAt(index).addElement(tmpn);
}
- hcid2cid.clear();
- hcid2cid = null;
- }
-
- // Organize the scheduleNodes in order of their cid
- public static Vector<Vector<ScheduleNode>>
- rangeScheduleNodes(Vector<ScheduleNode> scheduleNodes) {
- try{
- Vector<Vector<ScheduleNode>> sNodeVecs = new Vector<Vector<ScheduleNode>>();
-
- for(int i = 0; i < scheduleNodes.size(); i++) {
- ScheduleNode tmpn = scheduleNodes.elementAt(i);
- int tmpcid = tmpn.getCid();
- int index = 0;
- for(index = 0; index < sNodeVecs.size(); index++) {
- if(sNodeVecs.elementAt(index).elementAt(0).getCid() > tmpcid) {
- // find the place to insert
- sNodeVecs.insertElementAt(new Vector<ScheduleNode>(), index);
- /*sNodeVecs.add(sNodeVecs.lastElement());
- for(int j = sNodeVecs.size() - 2; j > index; j--) {
- sNodeVecs.setElementAt(sNodeVecs.elementAt(j - 1), j);
- }
- sNodeVecs.setElementAt(new Vector<ScheduleNode>(), index);*/
- break;
- } else if(sNodeVecs.elementAt(index).elementAt(0).getCid() == tmpcid) {
- break;
- }
- }
- if(index == sNodeVecs.size()) {
- sNodeVecs.add(new Vector<ScheduleNode>());
- }
-
- /*int index = tmpcid;
- while(sNodeVecs.size() <= index) {
- sNodeVecs.add(null);
- }
- if(sNodeVecs.elementAt(index) == null) {
- sNodeVecs.setElementAt(new Vector<ScheduleNode>(), index);
- }*/
- if(!sNodeVecs.elementAt(index).contains(tmpn)) {
- sNodeVecs.elementAt(index).addElement(tmpn);
- }
- }
+ }
- return sNodeVecs;
- } catch(Error e) {
- System.err.println("Error in rangeScheduleNodes: " + scheduleNodes.size());
- e.printStackTrace();
- //System.exit(-1);
- return null;
- }
+ return sNodeVecs;
+ } catch(Error e) {
+ System.err.println("Error in rangeScheduleNodes: " + scheduleNodes.size());
+ e.printStackTrace();
+ //System.exit(-1);
+ return null;
}
-
- /*public static int maxDivisor(int l, int r) {
- int a = l;
- int b = r;
- int c = 0;
-
- while(true) {
- if(a == 0) {
- return b << c;
- } else if(b == 0) {
- return a << c;
- }
-
- if(((a&1)==0) && ((b&1)==0)) {
- // a and b are both even
- a >>= 1;
- b >>= 1;
+ }
+
+ /*public static int maxDivisor(int l, int r) {
+ int a = l;
+ int b = r;
+ int c = 0;
+
+ while(true) {
+ if(a == 0) {
+ return b << c;
+ } else if(b == 0) {
+ return a << c;
+ }
+
+ if(((a&1)==0) && ((b&1)==0)) {
+ // a and b are both even
+ a >>= 1;
+ b >>= 1;
++c;
- } else if(((a&1)==0) && ((b&1)!=0)) {
- // a is even, b is odd
- a >>= 1;
- } else if (((a&1)!=0) && ((b&1)==0)) {
- // a is odd, b is even
- b >>= 1;
- } else if (((a&1)!=0) && ((b&1)!=0)) {
- // a and b are both odd
- int tmp = a>b? b:a;
- a = a>b ? (a-b):(b-a);
- b = tmp;
- }
- }
+ } else if(((a&1)==0) && ((b&1)!=0)) {
+ // a is even, b is odd
+ a >>= 1;
+ } else if (((a&1)!=0) && ((b&1)==0)) {
+ // a is odd, b is even
+ b >>= 1;
+ } else if (((a&1)!=0) && ((b&1)!=0)) {
+ // a and b are both odd
+ int tmp = a>b? b:a;
+ a = a>b ? (a-b):(b-a);
+ b = tmp;
+ }
+ }
}*/
- public static boolean isTaskTrigger_flag(FlagExpressionNode fen,
- FlagState fs) {
- if (fen==null)
- return true;
- else if (fen instanceof FlagNode)
- return fs.get(((FlagNode)fen).getFlag());
- else
- switch (((FlagOpNode)fen).getOp().getOp()) {
- case Operation.LOGIC_AND:
- return ((isTaskTrigger_flag(((FlagOpNode)fen).getLeft(),fs)) && (isTaskTrigger_flag(((FlagOpNode)fen).getRight(),fs)));
-
- case Operation.LOGIC_OR:
- return ((isTaskTrigger_flag(((FlagOpNode)fen).getLeft(),fs)) || (isTaskTrigger_flag(((FlagOpNode)fen).getRight(),fs)));
-
- case Operation.LOGIC_NOT:
- return !(isTaskTrigger_flag(((FlagOpNode)fen).getLeft(),fs));
-
- default:
- return false;
- }
+ public static boolean isTaskTrigger_flag(FlagExpressionNode fen,
+ FlagState fs) {
+ if (fen==null)
+ return true;
+ else if (fen instanceof FlagNode)
+ return fs.get(((FlagNode)fen).getFlag());
+ else
+ switch (((FlagOpNode)fen).getOp().getOp()) {
+ case Operation.LOGIC_AND:
+ return ((isTaskTrigger_flag(((FlagOpNode)fen).getLeft(),fs)) && (isTaskTrigger_flag(((FlagOpNode)fen).getRight(),fs)));
+
+ case Operation.LOGIC_OR:
+ return ((isTaskTrigger_flag(((FlagOpNode)fen).getLeft(),fs)) || (isTaskTrigger_flag(((FlagOpNode)fen).getRight(),fs)));
+
+ case Operation.LOGIC_NOT:
+ return !(isTaskTrigger_flag(((FlagOpNode)fen).getLeft(),fs));
+
+ default:
+ return false;
+ }
+ }
+
+ public static void printScheduleGraph(String path,
+ Vector<ScheduleNode> sNodes) {
+ try {
+ File file=new File(path);
+ FileOutputStream dotstream=new FileOutputStream(file,false);
+ PrintWriter output = new java.io.PrintWriter(dotstream, true);
+ output.println("digraph G {");
+ output.println("\tcompound=true;\n");
+ traverseSNodes(output, sNodes);
+ output.println("}\n");
+ output.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.exit(-1);
}
-
- public static void printScheduleGraph(String path,
- Vector<ScheduleNode> sNodes) {
- try {
- File file=new File(path);
- FileOutputStream dotstream=new FileOutputStream(file,false);
- PrintWriter output = new java.io.PrintWriter(dotstream, true);
- output.println("digraph G {");
- output.println("\tcompound=true;\n");
- traverseSNodes(output, sNodes);
- output.println("}\n");
- output.close();
- } catch (Exception e) {
- e.printStackTrace();
- System.exit(-1);
+ }
+
+ private static void traverseSNodes(PrintWriter output,
+ Vector<ScheduleNode> sNodes) {
+ //Draw clusters representing ScheduleNodes
+ Iterator it = sNodes.iterator();
+ while (it.hasNext()) {
+ ScheduleNode gn = (ScheduleNode) it.next();
+ Iterator edges = gn.edges();
+ output.println("\tsubgraph " + gn.getLabel() + "{");
+ output.println("\t\tlabel=\"" + gn.getTextLabel() + "\";");
+ Iterator it_cnodes = gn.getClassNodesIterator();
+ traverseCNodes(output, it_cnodes);
+ it_cnodes = null;
+ //Draw the internal 'new' edges
+ Iterator it_edges =gn.getScheduleEdgesIterator();
+ while(it_edges.hasNext()) {
+ ScheduleEdge se = (ScheduleEdge)it_edges.next();
+ output.print("\t");
+ if(se.getSourceCNode().isclone()) {
+ output.print(se.getSourceCNode().getLabel());
+ } else {
+ if(se.getSourceFState() == null) {
+ output.print(se.getSourceCNode().getClusterLabel());
+ } else {
+ output.print(se.getSourceFState().getLabel());
+ }
}
- }
- private static void traverseSNodes(PrintWriter output,
- Vector<ScheduleNode> sNodes) {
- //Draw clusters representing ScheduleNodes
- Iterator it = sNodes.iterator();
- while (it.hasNext()) {
- ScheduleNode gn = (ScheduleNode) it.next();
- Iterator edges = gn.edges();
- output.println("\tsubgraph " + gn.getLabel() + "{");
- output.println("\t\tlabel=\"" + gn.getTextLabel() + "\";");
- Iterator it_cnodes = gn.getClassNodesIterator();
- traverseCNodes(output, it_cnodes);
- it_cnodes = null;
- //Draw the internal 'new' edges
- Iterator it_edges =gn.getScheduleEdgesIterator();
- while(it_edges.hasNext()) {
- ScheduleEdge se = (ScheduleEdge)it_edges.next();
- output.print("\t");
- if(se.getSourceCNode().isclone()) {
- output.print(se.getSourceCNode().getLabel());
- } else {
- if(se.getSourceFState() == null) {
- output.print(se.getSourceCNode().getClusterLabel());
- } else {
- output.print(se.getSourceFState().getLabel());
- }
- }
-
- output.print(" -> ");
- if(se.isclone()) {
- if(se.getTargetCNode().isclone()) {
- output.print(se.getTargetCNode().getLabel());
- } else {
- output.print(se.getTargetCNode().getClusterLabel());
- }
- output.println(" [label=\"" + se.getLabel() + "\", color=red];");
- } else {
- output.print(se.getTargetFState().getLabel() + " [label=\"" + se.getLabel() + "\", color=red, ltail=");
- if(se.getSourceCNode().isclone()) {
- output.println(se.getSourceCNode().getLabel() + "];");
- } else {
- output.println(se.getSourceCNode().getClusterLabel() + "];");
- }
- }
- }
- output.println("\t}\n");
- it_edges = null;
- //Draw 'new' edges of this ScheduleNode
- while(edges.hasNext()) {
- ScheduleEdge se = (ScheduleEdge)edges.next();
- output.print("\t");
- if(se.getSourceCNode().isclone()) {
- output.print(se.getSourceCNode().getLabel());
- } else {
- if(se.getSourceFState() == null) {
- output.print(se.getSourceCNode().getClusterLabel());
- } else {
- output.print(se.getSourceFState().getLabel());
- }
- }
-
- output.print(" -> ");
- if(se.isclone()) {
- if(se.getTargetCNode().isclone()) {
- output.print(se.getTargetCNode().getLabel());
- } else {
- output.print(se.getTargetCNode().getClusterLabel());
- }
- output.println(" [label=\"" + se.getLabel() + "\", color=red, style=dashed];");
- } else {
- output.println(se.getTargetFState().getLabel() + " [label=\"" + se.getLabel() + "\", color=red, style=dashed];");
- }
- }
- edges = null;
+ output.print(" -> ");
+ if(se.isclone()) {
+ if(se.getTargetCNode().isclone()) {
+ output.print(se.getTargetCNode().getLabel());
+ } else {
+ output.print(se.getTargetCNode().getClusterLabel());
+ }
+ output.println(" [label=\"" + se.getLabel() + "\", color=red];");
+ } else {
+ output.print(se.getTargetFState().getLabel() + " [label=\"" + se.getLabel() + "\", color=red, ltail=");
+ if(se.getSourceCNode().isclone()) {
+ output.println(se.getSourceCNode().getLabel() + "];");
+ } else {
+ output.println(se.getSourceCNode().getClusterLabel() + "];");
+ }
+ }
+ }
+ output.println("\t}\n");
+ it_edges = null;
+ //Draw 'new' edges of this ScheduleNode
+ while(edges.hasNext()) {
+ ScheduleEdge se = (ScheduleEdge)edges.next();
+ output.print("\t");
+ if(se.getSourceCNode().isclone()) {
+ output.print(se.getSourceCNode().getLabel());
+ } else {
+ if(se.getSourceFState() == null) {
+ output.print(se.getSourceCNode().getClusterLabel());
+ } else {
+ output.print(se.getSourceFState().getLabel());
+ }
}
- it = null;
- }
- private static void traverseCNodes(PrintWriter output,
- Iterator it) {
- //Draw clusters representing ClassNodes
- while (it.hasNext()) {
- ClassNode gn = (ClassNode) it.next();
- if(gn.isclone()) {
- output.println("\t\t" + gn.getLabel() + " [style=dashed, label=\"" + gn.getTextLabel() + "\", shape=box];");
- } else {
- output.println("\tsubgraph " + gn.getClusterLabel() + "{");
- output.println("\t\tstyle=dashed;");
- output.println("\t\tlabel=\"" + gn.getTextLabel() + "\";");
- traverseFlagStates(output, gn.getFlagStates());
- output.println("\t}\n");
- }
+ output.print(" -> ");
+ if(se.isclone()) {
+ if(se.getTargetCNode().isclone()) {
+ output.print(se.getTargetCNode().getLabel());
+ } else {
+ output.print(se.getTargetCNode().getClusterLabel());
+ }
+ output.println(" [label=\"" + se.getLabel() + "\", color=red, style=dashed];");
+ } else {
+ output.println(se.getTargetFState().getLabel() + " [label=\"" + se.getLabel() + "\", color=red, style=dashed];");
}
+ }
+ edges = null;
}
-
- private static void traverseFlagStates(PrintWriter output,
- Collection nodes) {
- Set cycleset=GraphNode.findcycles(nodes);
- Vector namers=new Vector();
- namers.add(new Namer());
- namers.add(new Allocations());
-
- Iterator it = nodes.iterator();
- while (it.hasNext()) {
- GraphNode gn = (GraphNode) it.next();
- Iterator edges = gn.edges();
- String label = "";
- String dotnodeparams="";
-
- for(int i=0; i<namers.size(); i++) {
+ it = null;
+ }
+
+ private static void traverseCNodes(PrintWriter output,
+ Iterator it) {
+ //Draw clusters representing ClassNodes
+ while (it.hasNext()) {
+ ClassNode gn = (ClassNode) it.next();
+ if(gn.isclone()) {
+ output.println("\t\t" + gn.getLabel() + " [style=dashed, label=\"" + gn.getTextLabel() + "\", shape=box];");
+ } else {
+ output.println("\tsubgraph " + gn.getClusterLabel() + "{");
+ output.println("\t\tstyle=dashed;");
+ output.println("\t\tlabel=\"" + gn.getTextLabel() + "\";");
+ traverseFlagStates(output, gn.getFlagStates());
+ output.println("\t}\n");
+ }
+ }
+ }
+
+ private static void traverseFlagStates(PrintWriter output,
+ Collection nodes) {
+ Set cycleset=GraphNode.findcycles(nodes);
+ Vector namers=new Vector();
+ namers.add(new Namer());
+ namers.add(new Allocations());
+
+ Iterator it = nodes.iterator();
+ while (it.hasNext()) {
+ GraphNode gn = (GraphNode) it.next();
+ Iterator edges = gn.edges();
+ String label = "";
+ String dotnodeparams="";
+
+ for(int i=0; i<namers.size(); i++) {
+ Namer name=(Namer) namers.get(i);
+ String newlabel=name.nodeLabel(gn);
+ String newparams=name.nodeOption(gn);
+
+ if (!newlabel.equals("") && !label.equals("")) {
+ label+=", ";
+ }
+ if (!newparams.equals("")) {
+ dotnodeparams+=", " + name.nodeOption(gn);
+ }
+ label+=name.nodeLabel(gn);
+ }
+ label += ":[" + ((FlagState)gn).getExeTime() + "]";
+
+ if (!gn.merge)
+ output.println("\t" + gn.getLabel() + " [label=\"" + label + "\"" + dotnodeparams + "];");
+
+ if (!gn.merge)
+ while (edges.hasNext()) {
+ Edge edge = (Edge) edges.next();
+ GraphNode node = edge.getTarget();
+ if (nodes.contains(node)) {
+ Iterator nodeit=nonmerge(node, nodes).iterator();
+ for(; nodeit.hasNext(); ) {
+ GraphNode node2=(GraphNode)nodeit.next();
+ String edgelabel = "";
+ String edgedotnodeparams="";
+
+ for(int i=0; i<namers.size(); i++) {
Namer name=(Namer) namers.get(i);
- String newlabel=name.nodeLabel(gn);
- String newparams=name.nodeOption(gn);
-
- if (!newlabel.equals("") && !label.equals("")) {
- label+=", ";
+ String newlabel=name.edgeLabel(edge);
+ String newoption=name.edgeOption(edge);
+ if (!newlabel.equals("")&& !edgelabel.equals(""))
+ edgelabel+=", ";
+ edgelabel+=newlabel;
+ if (!newoption.equals(""))
+ edgedotnodeparams+=", "+newoption;
+ }
+ edgelabel+=":[" + ((FEdge)edge).getExeTime() + "]";
+ edgelabel+=":(" + ((FEdge)edge).getProbability() + "%)";
+ Hashtable<ClassDescriptor, NewObjInfo> hashtable = ((FEdge)edge).getNewObjInfoHashtable();
+ if(hashtable != null) {
+ Set<ClassDescriptor> keys = hashtable.keySet();
+ Iterator it_keys = keys.iterator();
+ while(it_keys.hasNext()) {
+ ClassDescriptor cd = (ClassDescriptor)it_keys.next();
+ NewObjInfo noi = hashtable.get(cd);
+ edgelabel += ":{ class " + cd.getSymbol() + " | " + noi.getNewRate() + " | (" + noi.getProbability() + "%) }";
}
- if (!newparams.equals("")) {
- dotnodeparams+=", " + name.nodeOption(gn);
- }
- label+=name.nodeLabel(gn);
+ keys = null;
+ it_keys = null;
+ }
+ output.println("\t" + gn.getLabel() + " -> " + node2.getLabel() + " [" + "label=\"" + edgelabel + "\"" + edgedotnodeparams + "];");
}
- label += ":[" + ((FlagState)gn).getExeTime() + "]";
-
- if (!gn.merge)
- output.println("\t" + gn.getLabel() + " [label=\"" + label + "\"" + dotnodeparams + "];");
-
- if (!gn.merge)
- while (edges.hasNext()) {
- Edge edge = (Edge) edges.next();
- GraphNode node = edge.getTarget();
- if (nodes.contains(node)) {
- Iterator nodeit=nonmerge(node, nodes).iterator();
- for(; nodeit.hasNext();) {
- GraphNode node2=(GraphNode)nodeit.next();
- String edgelabel = "";
- String edgedotnodeparams="";
-
- for(int i=0; i<namers.size(); i++) {
- Namer name=(Namer) namers.get(i);
- String newlabel=name.edgeLabel(edge);
- String newoption=name.edgeOption(edge);
- if (!newlabel.equals("")&& !edgelabel.equals(""))
- edgelabel+=", ";
- edgelabel+=newlabel;
- if (!newoption.equals(""))
- edgedotnodeparams+=", "+newoption;
- }
- edgelabel+=":[" + ((FEdge)edge).getExeTime() + "]";
- edgelabel+=":(" + ((FEdge)edge).getProbability() + "%)";
- Hashtable<ClassDescriptor, NewObjInfo> hashtable = ((FEdge)edge).getNewObjInfoHashtable();
- if(hashtable != null) {
- Set<ClassDescriptor> keys = hashtable.keySet();
- Iterator it_keys = keys.iterator();
- while(it_keys.hasNext()) {
- ClassDescriptor cd = (ClassDescriptor)it_keys.next();
- NewObjInfo noi = hashtable.get(cd);
- edgelabel += ":{ class " + cd.getSymbol() + " | " + noi.getNewRate() + " | (" + noi.getProbability() + "%) }";
- }
- keys = null;
- it_keys = null;
- }
- output.println("\t" + gn.getLabel() + " -> " + node2.getLabel() + " [" + "label=\"" + edgelabel + "\"" + edgedotnodeparams + "];");
- }
- nodeit = null;
- }
- }
- edges = null;
+ nodeit = null;
+ }
}
- cycleset = null;
- namers = null;
- it = null;
+ edges = null;
}
-
- private static Set nonmerge(GraphNode gn,
- Collection nodes) {
- HashSet newset=new HashSet();
- HashSet toprocess=new HashSet();
- toprocess.add(gn);
- while(!toprocess.isEmpty()) {
- GraphNode gn2=(GraphNode)toprocess.iterator().next();
- toprocess.remove(gn2);
- if (!gn2.merge)
- newset.add(gn2);
- else {
- Iterator edges = gn2.edges();
- while (edges.hasNext()) {
- Edge edge = (Edge) edges.next();
- GraphNode node = edge.getTarget();
- if (!newset.contains(node)&&nodes.contains(node))
- toprocess.add(node);
- }
- edges = null;
- }
+ cycleset = null;
+ namers = null;
+ it = null;
+ }
+
+ private static Set nonmerge(GraphNode gn,
+ Collection nodes) {
+ HashSet newset=new HashSet();
+ HashSet toprocess=new HashSet();
+ toprocess.add(gn);
+ while(!toprocess.isEmpty()) {
+ GraphNode gn2=(GraphNode)toprocess.iterator().next();
+ toprocess.remove(gn2);
+ if (!gn2.merge)
+ newset.add(gn2);
+ else {
+ Iterator edges = gn2.edges();
+ while (edges.hasNext()) {
+ Edge edge = (Edge) edges.next();
+ GraphNode node = edge.getTarget();
+ if (!newset.contains(node)&&nodes.contains(node))
+ toprocess.add(node);
}
- toprocess = null;
- return newset;
+ edges = null;
+ }
}
-
- public static void printSimulationResult(String path,
- long time,
- int coreNum,
- Vector<CheckPoint> checkpoints) {
- try {
- File file=new File(path);
- FileOutputStream dotstream=new FileOutputStream(file,false);
- PrintWriter output = new java.io.PrintWriter(dotstream, true);
- output.println("digraph simulation{");
- output.print("\t");
- output.println("node [shape=plaintext];");
- output.print("\t");
- output.println("edge [dir=none];");
- output.print("\t");
- output.println("ranksep=.05;");
- output.println();
- output.print("\t");
- int j = 0;
-
- // the capital line
- output.print("{rank=source; \"Time\"; ");
- for(j = 0; j < coreNum; j++) {
- output.print("\"core " + j + "\"; ");
+ toprocess = null;
+ return newset;
+ }
+
+ public static void printSimulationResult(String path,
+ long time,
+ int coreNum,
+ Vector<CheckPoint> checkpoints) {
+ try {
+ File file=new File(path);
+ FileOutputStream dotstream=new FileOutputStream(file,false);
+ PrintWriter output = new java.io.PrintWriter(dotstream, true);
+ output.println("digraph simulation{");
+ output.print("\t");
+ output.println("node [shape=plaintext];");
+ output.print("\t");
+ output.println("edge [dir=none];");
+ output.print("\t");
+ output.println("ranksep=.05;");
+ output.println();
+ output.print("\t");
+ int j = 0;
+
+ // the capital line
+ output.print("{rank=source; \"Time\"; ");
+ for(j = 0; j < coreNum; j++) {
+ output.print("\"core " + j + "\"; ");
+ }
+ output.println("}");
+ // time coordinate nodes
+ Vector<String> timeNodes = new Vector<String>();
+ String[] lastTaskNodes = new String[coreNum];
+ String[] lastTasks = new String[coreNum];
+ boolean[] isTaskFinish = new boolean[coreNum];
+ for(j = 0; j < coreNum; j++) {
+ lastTaskNodes[j] = "first";
+ isTaskFinish[j] = true;
+ lastTasks[j] = "";
+ }
+ timeNodes.add("0");
+ for(j = 0; j < checkpoints.size(); j++) {
+ CheckPoint tcp = checkpoints.elementAt(j);
+ Hashtable<Integer, String> tmplastTasks = new Hashtable<Integer, String>();
+ Vector<Integer> tmpisTaskFinish = new Vector<Integer>();
+ Vector<Integer> tmpisset = new Vector<Integer>();
+ String tnode = String.valueOf(tcp.getTimepoint());
+ if(!timeNodes.contains(tnode)) {
+ timeNodes.add(tnode);
+ }
+ Vector<Action> actions = tcp.getActions();
+ Hashtable<String, StringBuffer> tmpTaskNodes = new Hashtable<String, StringBuffer>();
+ for(int i = 0; i < actions.size(); i++) {
+ Action taction = actions.elementAt(i);
+ int cNum = taction.getCoreNum();
+ if(!tmplastTasks.containsKey(cNum)) {
+ tmplastTasks.put(cNum, lastTasks[cNum]);
+ }
+ if(!(tmpisset.contains(cNum))
+ && (isTaskFinish[cNum])
+ && !(tmpisTaskFinish.contains(cNum))) {
+ tmpisTaskFinish.add(cNum); // on first visit, record cores whose previous task had already finished
+ }
+ String tmpTaskNode = "\"" + tnode + "core" + cNum + "\"";
+ StringBuffer tmpLabel = null;
+ boolean isfirst = false;
+ if(!tmpTaskNodes.containsKey(tmpTaskNode)) {
+ tmpTaskNodes.put(tmpTaskNode, new StringBuffer(tnode + ":"));
+ isfirst = true;
+ }
+ tmpLabel = tmpTaskNodes.get(tmpTaskNode);
+ switch(taction.getType()) {
+ case Action.ADDOBJ: {
+ if(!isfirst) {
+ tmpLabel.append("\\n");
}
- output.println("}");
- // time coordinate nodes
- Vector<String> timeNodes = new Vector<String>();
- String[] lastTaskNodes = new String[coreNum];
- String[] lastTasks = new String[coreNum];
- boolean[] isTaskFinish = new boolean[coreNum];
- for(j = 0; j < coreNum; j++) {
- lastTaskNodes[j] = "first";
- isTaskFinish[j] = true;
- lastTasks[j] = "";
+ tmpLabel.append("(" + taction.getTransObj().getSymbol() + ")arrives;");
+ if(!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
+ output.print("\t");
+ if(lastTaskNodes[cNum].equals("first")) {
+ output.print("\"core " + cNum + "\"->" + tmpTaskNode);
+ } else {
+ output.print(lastTaskNodes[cNum] + "->" + tmpTaskNode);
+ }
+ if(tmpisTaskFinish.contains(cNum)) {
+ output.print(" [style=invis]");
+ }
+ output.println(";");
+ lastTaskNodes[cNum] = tmpTaskNode;
}
- timeNodes.add("0");
- for(j = 0; j < checkpoints.size(); j++) {
- CheckPoint tcp = checkpoints.elementAt(j);
- Hashtable<Integer, String> tmplastTasks = new Hashtable<Integer, String>();
- Vector<Integer> tmpisTaskFinish = new Vector<Integer>();
- Vector<Integer> tmpisset = new Vector<Integer>();
- String tnode = String.valueOf(tcp.getTimepoint());
- if(!timeNodes.contains(tnode)) {
- timeNodes.add(tnode);
- }
- Vector<Action> actions = tcp.getActions();
- Hashtable<String, StringBuffer> tmpTaskNodes = new Hashtable<String, StringBuffer>();
- for(int i = 0; i < actions.size(); i++) {
- Action taction = actions.elementAt(i);
- int cNum = taction.getCoreNum();
- if(!tmplastTasks.containsKey(cNum)) {
- tmplastTasks.put(cNum, lastTasks[cNum]);
- }
- if(!(tmpisset.contains(cNum))
- && (isTaskFinish[cNum])
- && !(tmpisTaskFinish.contains(cNum))) {
- tmpisTaskFinish.add(cNum); // records those with task finished the first time visit it
- }
- String tmpTaskNode = "\"" + tnode + "core" + cNum + "\"";
- StringBuffer tmpLabel = null;
- boolean isfirst = false;
- if(!tmpTaskNodes.containsKey(tmpTaskNode)) {
- tmpTaskNodes.put(tmpTaskNode, new StringBuffer(tnode + ":"));
- isfirst = true;
- }
- tmpLabel = tmpTaskNodes.get(tmpTaskNode);
- switch(taction.getType()) {
- case Action.ADDOBJ: {
- if(!isfirst) {
- tmpLabel.append("\\n");
- }
- tmpLabel.append("(" + taction.getTransObj().getSymbol() + ")arrives;");
- if(!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
- output.print("\t");
- if(lastTaskNodes[cNum].equals("first")) {
- output.print("\"core " + cNum + "\"->" + tmpTaskNode);
- } else {
- output.print(lastTaskNodes[cNum] + "->" + tmpTaskNode);
- }
- if(tmpisTaskFinish.contains(cNum)) {
- output.print(" [style=invis]");
- }
- output.println(";");
- lastTaskNodes[cNum] = tmpTaskNode;
- }
- break;
- }
-
- case Action.TASKFINISH: {
- if(!isfirst) {
- tmpLabel.append("\\n");
- }
- tmpLabel.append("<" + taction.getTd().getSymbol() + "(");
- /*Vector<Integer> taskparams = taction.getTaskParams();
- for(int ii = 0; ii < taskparams.size(); ii++) {
- tmpLabel.append(taskparams.elementAt(ii));
- if(ii < taskparams.size() - 1) {
- tmpLabel.append(",");
- }
- }*/
- tmpLabel.append(")>finishes;");
- if(!(lastTaskNodes[cNum].equals("first"))) {
- if(!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
- output.print("\t");
- output.println(lastTaskNodes[cNum] + "->" + tmpTaskNode + ";");
- lastTaskNodes[cNum] = tmpTaskNode;
- }
- if(tmpisset.contains(cNum)) {
- isTaskFinish[cNum] &= true;
- } else {
- isTaskFinish[cNum] = true;
- tmpisset.add(cNum);
- }
- lastTasks[cNum] = "";
- } else {
- throw new Exception("Error: unexpected task finish");
- }
- break;
- }
-
- case Action.TFWITHOBJ: {
- if(!isfirst) {
- tmpLabel.append("\\n");
- }
- tmpLabel.append("<" + taction.getTd().getSymbol() + "(");
- /*Vector<Integer> taskparams = taction.getTaskParams();
- for(int ii = 0; ii < taskparams.size(); ii++) {
- tmpLabel.append(taskparams.elementAt(ii));
- if(ii < taskparams.size() - 1) {
- tmpLabel.append(",");
- }
- }*/
- tmpLabel.append(")>finishes;");
- Iterator<Entry<ClassDescriptor, Integer>> it_entry = (Iterator<Entry<ClassDescriptor, Integer>>)taction.getNObjs().entrySet().iterator();
- while(it_entry.hasNext()) {
- Entry<ClassDescriptor, Integer> entry = it_entry.next();
- tmpLabel.append(entry.getValue() + "(" + entry.getKey().getSymbol() + ")");
- if(it_entry.hasNext()) {
- tmpLabel.append(",");
- } else {
- tmpLabel.append(";");
- }
- entry = null;
- }
- it_entry = null;
- if(!(lastTaskNodes[cNum].equals("first"))) {
- if (!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
- output.print("\t");
- output.println(lastTaskNodes[cNum] + "->" + tmpTaskNode + ";");
- lastTaskNodes[cNum] = tmpTaskNode;
- }
- if(tmpisset.contains(cNum)) {
- isTaskFinish[cNum] &= true;
- } else {
- isTaskFinish[cNum] = true;
- tmpisset.add(cNum);
- }
- lastTasks[cNum] = "";
- } else {
- throw new Exception("Error: unexpected task finish");
- }
- break;
- }
-
- case Action.TASKSTART: {
- if(!isfirst) {
- tmpLabel.append("\\n");
- }
- tmpLabel.append("<" + taction.getTd().getSymbol() + "(");
- /*Vector<Integer> taskparams = taction.getTaskParams();
- for(int ii = 0; ii < taskparams.size(); ii++) {
- tmpLabel.append(taskparams.elementAt(ii));
- if(ii < taskparams.size() - 1) {
- tmpLabel.append(",");
- }
- }*/
- tmpLabel.append(")>starts;");
- lastTasks[cNum] = taction.getTd().getSymbol();
-
- if (!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
- output.print("\t");
- if(lastTaskNodes[cNum].equals("first")) {
- output.print("\"core " + cNum + "\"->" + tmpTaskNode);
- } else {
- output.print(lastTaskNodes[cNum] + "->" + tmpTaskNode);
- }
- if(tmpisTaskFinish.contains(cNum)) {
- output.print(" [style=invis]");
- }
- output.println(";");
- lastTaskNodes[cNum] = tmpTaskNode;
- }
- isTaskFinish[cNum] &= false;
- break;
- }
-
- case Action.TASKABORT: {
- if(!isfirst) {
- tmpLabel.append("\\n");
- }
- tmpLabel.append("<" + taction.getTd().getSymbol() + "(");
- /*Vector<Integer> taskparams = taction.getTaskParams();
- for(int ii = 0; ii < taskparams.size(); ii++) {
- tmpLabel.append(taskparams.elementAt(ii));
- if(ii < taskparams.size() - 1) {
- tmpLabel.append(",");
- }
- }*/
- tmpLabel.append(")>aborts;");
- if(!(lastTaskNodes[cNum].equals("first")) &&
- (tmplastTasks.get(cNum).equals(taction.getTd().getSymbol()))) {
- if(!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
- output.print("\t");
- output.println(lastTaskNodes[cNum] + "->" + tmpTaskNode + ";");
- lastTaskNodes[cNum] = tmpTaskNode;
- }
- if(tmpisset.contains(cNum)) {
- isTaskFinish[cNum] &= true;
- } else {
- isTaskFinish[cNum] = true;
- tmpisset.add(cNum);
- }
- lastTasks[cNum] = "";
- } else {
- throw new Exception("Error: unexpected task aborts");
- }
- break;
- }
-
- case Action.TASKREMOVE: {
- if(!isfirst) {
- tmpLabel.append("\\n");
- }
- tmpLabel.append("<" + taction.getTd().getSymbol() + "(");
- /*Vector<Integer> taskparams = taction.getTaskParams();
- for(int ii = 0; ii < taskparams.size(); ii++) {
- tmpLabel.append(taskparams.elementAt(ii));
- if(ii < taskparams.size() - 1) {
- tmpLabel.append(",");
- }
- }*/
- tmpLabel.append(")>removes;");
- if(!(lastTaskNodes[cNum].equals("first")) &&
- (tmplastTasks.get(cNum).equals(taction.getTd().getSymbol()))) {
- if(!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
- output.print("\t");
- output.println(lastTaskNodes[cNum] + "->" + tmpTaskNode + ";");
- lastTaskNodes[cNum] = tmpTaskNode;
- }
- if(tmpisset.contains(cNum)) {
- isTaskFinish[cNum] &= true;
- } else {
- isTaskFinish[cNum] = true;
- tmpisset.add(cNum);
- }
- lastTasks[cNum] = "";
- } else {
- throw new Exception("Error: unexpected task remove");
- }
- break;
- }
- }
- }
- Enumeration<String> keys = tmpTaskNodes.keys();
- while(keys.hasMoreElements()) {
- String tmpTaskNode = keys.nextElement();
- output.print("\t");
- output.println(tmpTaskNode + "[label=\"" + tmpTaskNodes.get(tmpTaskNode).toString() + "\"]");
- }
+ break;
+ }
+
+ case Action.TASKFINISH: {
+ if(!isfirst) {
+ tmpLabel.append("\\n");
+ }
+ tmpLabel.append("<" + taction.getTd().getSymbol() + "(");
+ /*Vector<Integer> taskparams = taction.getTaskParams();
+ for(int ii = 0; ii < taskparams.size(); ii++) {
+ tmpLabel.append(taskparams.elementAt(ii));
+ if(ii < taskparams.size() - 1) {
+ tmpLabel.append(",");
+ }
+ }*/
+ tmpLabel.append(")>finishes;");
+ if(!(lastTaskNodes[cNum].equals("first"))) {
+ if(!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
output.print("\t");
- output.print("{rank=same; rankdir=LR; " + tnode + "; ");
- keys = tmpTaskNodes.keys();
- while(keys.hasMoreElements()) {
- String tmpTaskNode = keys.nextElement();
- output.print(tmpTaskNode);
- output.print("; ");
- }
- keys = null;
- output.println("}");
+ output.println(lastTaskNodes[cNum] + "->" + tmpTaskNode + ";");
+ lastTaskNodes[cNum] = tmpTaskNode;
+ }
+ if(tmpisset.contains(cNum)) {
+ isTaskFinish[cNum] &= true;
+ } else {
+ isTaskFinish[cNum] = true;
+ tmpisset.add(cNum);
+ }
+ lastTasks[cNum] = "";
+ } else {
+ throw new Exception("Error: unexpected task finish");
+ }
+ break;
+ }
+
+ case Action.TFWITHOBJ: {
+ if(!isfirst) {
+ tmpLabel.append("\\n");
+ }
+ tmpLabel.append("<" + taction.getTd().getSymbol() + "(");
+ /*Vector<Integer> taskparams = taction.getTaskParams();
+ for(int ii = 0; ii < taskparams.size(); ii++) {
+ tmpLabel.append(taskparams.elementAt(ii));
+ if(ii < taskparams.size() - 1) {
+ tmpLabel.append(",");
+ }
+ }*/
+ tmpLabel.append(")>finishes;");
+ Iterator<Entry<ClassDescriptor, Integer>> it_entry = (Iterator<Entry<ClassDescriptor, Integer>>)taction.getNObjs().entrySet().iterator();
+ while(it_entry.hasNext()) {
+ Entry<ClassDescriptor, Integer> entry = it_entry.next();
+ tmpLabel.append(entry.getValue() + "(" + entry.getKey().getSymbol() + ")");
+ if(it_entry.hasNext()) {
+ tmpLabel.append(",");
+ } else {
+ tmpLabel.append(";");
+ }
+ entry = null;
+ }
+ it_entry = null;
+ if(!(lastTaskNodes[cNum].equals("first"))) {
+ if (!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
output.print("\t");
- tmplastTasks = null;
- tmpisTaskFinish = null;
- tmpisset = null;
- actions = null;
- tmpTaskNodes = null;
+ output.println(lastTaskNodes[cNum] + "->" + tmpTaskNode + ";");
+ lastTaskNodes[cNum] = tmpTaskNode;
+ }
+ if(tmpisset.contains(cNum)) {
+ isTaskFinish[cNum] &= true;
+ } else {
+ isTaskFinish[cNum] = true;
+ tmpisset.add(cNum);
+ }
+ lastTasks[cNum] = "";
+ } else {
+ throw new Exception("Error: unexpected task finish");
}
- output.print("\t");
- output.print("\t");
- long prev = Long.parseLong(timeNodes.elementAt(0));
- long next = 0;
- long max = 0;
- long max2 = 0;
- for(j = 1; j < timeNodes.size(); j++) {
- next = Long.parseLong(timeNodes.elementAt(j));
- long delta = next - prev;
- if(max < delta) {
- max2 = max;
- max = delta;
- } else if((max != delta) && (max2 < delta)) {
- max2 = delta;
- }
- prev = next;
+ break;
+ }
+
+ case Action.TASKSTART: {
+ if(!isfirst) {
+ tmpLabel.append("\\n");
}
- if(max2 == 0) {
- max2 = 1;
- } else if(max/max2 > 100) {
- max2 = max/100;
+ tmpLabel.append("<" + taction.getTd().getSymbol() + "(");
+ /*Vector<Integer> taskparams = taction.getTaskParams();
+ for(int ii = 0; ii < taskparams.size(); ii++) {
+ tmpLabel.append(taskparams.elementAt(ii));
+ if(ii < taskparams.size() - 1) {
+ tmpLabel.append(",");
+ }
+ }*/
+ tmpLabel.append(")>starts;");
+ lastTasks[cNum] = taction.getTd().getSymbol();
+
+ if (!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
+ output.print("\t");
+ if(lastTaskNodes[cNum].equals("first")) {
+ output.print("\"core " + cNum + "\"->" + tmpTaskNode);
+ } else {
+ output.print(lastTaskNodes[cNum] + "->" + tmpTaskNode);
+ }
+ if(tmpisTaskFinish.contains(cNum)) {
+ output.print(" [style=invis]");
+ }
+ output.println(";");
+ lastTaskNodes[cNum] = tmpTaskNode;
}
- output.println("\"Time\"->" + timeNodes.elementAt(0) + "[style=invis];");
- prev = Long.parseLong(timeNodes.elementAt(0));
- next = 0;
- for(j = 1; j < timeNodes.size(); j++) {
- next = Long.parseLong(timeNodes.elementAt(j));
- if(next - prev > max2) {
- do {
- output.print(prev + "->");
- prev += max2;
- } while(next - prev > max2);
- output.println(next + ";");
- } else {
- output.println("{rank=same; rankdir=LR; " + prev + "; " + next + "}");
- output.println(prev + "->" + next + "[style=invis];");
- }
- prev = next;
+ isTaskFinish[cNum] &= false;
+ break;
+ }
+
+ case Action.TASKABORT: {
+ if(!isfirst) {
+ tmpLabel.append("\\n");
}
+ tmpLabel.append("<" + taction.getTd().getSymbol() + "(");
+ /*Vector<Integer> taskparams = taction.getTaskParams();
+ for(int ii = 0; ii < taskparams.size(); ii++) {
+ tmpLabel.append(taskparams.elementAt(ii));
+ if(ii < taskparams.size() - 1) {
+ tmpLabel.append(",");
+ }
+ }*/
+ tmpLabel.append(")>aborts;");
+ if(!(lastTaskNodes[cNum].equals("first")) &&
+ (tmplastTasks.get(cNum).equals(taction.getTd().getSymbol()))) {
+ if(!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
+ output.print("\t");
+ output.println(lastTaskNodes[cNum] + "->" + tmpTaskNode + ";");
+ lastTaskNodes[cNum] = tmpTaskNode;
+ }
+ if(tmpisset.contains(cNum)) {
+ isTaskFinish[cNum] &= true;
+ } else {
+ isTaskFinish[cNum] = true;
+ tmpisset.add(cNum);
+ }
+ lastTasks[cNum] = "";
+ } else {
+ throw new Exception("Error: unexpected task aborts");
+ }
+ break;
+ }
- /*for(j = 0; j < time; j++) {
+ case Action.TASKREMOVE: {
+ if(!isfirst) {
+ tmpLabel.append("\\n");
+ }
+ tmpLabel.append("<" + taction.getTd().getSymbol() + "(");
+ /*Vector<Integer> taskparams = taction.getTaskParams();
+ for(int ii = 0; ii < taskparams.size(); ii++) {
+ tmpLabel.append(taskparams.elementAt(ii));
+ if(ii < taskparams.size() - 1) {
+ tmpLabel.append(",");
+ }
+ }*/
+ tmpLabel.append(")>removes;");
+ if(!(lastTaskNodes[cNum].equals("first")) &&
+ (tmplastTasks.get(cNum).equals(taction.getTd().getSymbol()))) {
+ if(!(lastTaskNodes[cNum].equals(tmpTaskNode))) {
+ output.print("\t");
+ output.println(lastTaskNodes[cNum] + "->" + tmpTaskNode + ";");
+ lastTaskNodes[cNum] = tmpTaskNode;
+ }
+ if(tmpisset.contains(cNum)) {
+ isTaskFinish[cNum] &= true;
+ } else {
+ isTaskFinish[cNum] = true;
+ tmpisset.add(cNum);
+ }
+ lastTasks[cNum] = "";
+ } else {
+ throw new Exception("Error: unexpected task remove");
+ }
+ break;
+ }
+ }
+ }
+ Enumeration<String> keys = tmpTaskNodes.keys();
+ while(keys.hasMoreElements()) {
+ String tmpTaskNode = keys.nextElement();
+ output.print("\t");
+ output.println(tmpTaskNode + "[label=\"" + tmpTaskNodes.get(tmpTaskNode).toString() + "\"]");
+ }
+ output.print("\t");
+ output.print("{rank=same; rankdir=LR; " + tnode + "; ");
+ keys = tmpTaskNodes.keys();
+ while(keys.hasMoreElements()) {
+ String tmpTaskNode = keys.nextElement();
+ output.print(tmpTaskNode);
+ output.print("; ");
+ }
+ keys = null;
+ output.println("}");
+ output.print("\t");
+ tmplastTasks = null;
+ tmpisTaskFinish = null;
+ tmpisset = null;
+ actions = null;
+ tmpTaskNodes = null;
+ }
+ output.print("\t");
+ output.print("\t");
+ long prev = Long.parseLong(timeNodes.elementAt(0));
+ long next = 0;
+ long max = 0;
+ long max2 = 0;
+ for(j = 1; j < timeNodes.size(); j++) {
+ next = Long.parseLong(timeNodes.elementAt(j));
+ long delta = next - prev;
+ if(max < delta) {
+ max2 = max;
+ max = delta;
+ } else if((max != delta) && (max2 < delta)) {
+ max2 = delta;
+ }
+ prev = next;
+ }
+ if(max2 == 0) {
+ max2 = 1;
+ } else if(max/max2 > 100) {
+ max2 = max/100;
+ }
+ output.println("\"Time\"->" + timeNodes.elementAt(0) + "[style=invis];");
+ prev = Long.parseLong(timeNodes.elementAt(0));
+ next = 0;
+ for(j = 1; j < timeNodes.size(); j++) {
+ next = Long.parseLong(timeNodes.elementAt(j));
+ if(next - prev > max2) {
+ do {
+ output.print(prev + "->");
+ prev += max2;
+ } while(next - prev > max2);
+ output.println(next + ";");
+ } else {
+ output.println("{rank=same; rankdir=LR; " + prev + "; " + next + "}");
+ output.println(prev + "->" + next + "[style=invis];");
+ }
+ prev = next;
+ }
+
+ /*for(j = 0; j < time; j++) {
output.print(j + "->");
}
output.println(timeNodes.lastElement() + ";");*/
- output.println("}");
- output.close();
- timeNodes = null;
- lastTaskNodes = null;
- lastTasks = null;
- isTaskFinish = null;
- } catch (Exception e) {
- e.printStackTrace();
- System.exit(-1);
- }
+ output.println("}");
+ output.close();
+ timeNodes = null;
+ lastTaskNodes = null;
+ lastTasks = null;
+ isTaskFinish = null;
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.exit(-1);
}
-
- public static void printCriticalPath(String path,
- Vector<SimExecutionEdge> criticalPath) {
- try {
- File file=new File(path);
- FileOutputStream dotstream=new FileOutputStream(file,false);
- PrintWriter output = new java.io.PrintWriter(dotstream, true);
- output.println("digraph simulation{");
- output.print("\t");
- output.println("node [shape=plaintext];");
- output.print("\t");
- output.println("edge [dir=none];");
- output.print("\t");
- output.println("ranksep=.05;");
- output.println();
- output.print("\t");
- Vector<SimExecutionNode> nodes = new Vector<SimExecutionNode>();
- String label = "";
- String dotnodeparams="";
-
- for(int i = 0; i < criticalPath.size(); i++) {
- SimExecutionEdge seedge = criticalPath.elementAt(i);
- SimExecutionNode startnode = (SimExecutionNode)seedge.getSource();
- SimExecutionNode endnode = (SimExecutionNode)seedge.getTarget();
- if(!nodes.contains(startnode)) {
- label = startnode.getCoreNum() + ":" + startnode.getTimepoint();
- output.println("\t" + startnode.getLabel() + " [label=\""
- + label + "\" ];");
- nodes.addElement(startnode);
- }
- if(!nodes.contains(endnode)) {
- label = endnode.getCoreNum() + ":" + endnode.getTimepoint();
- output.println("\t" + endnode.getLabel() + " [label=\""
- + label + "\" ];");
- nodes.addElement(endnode);
- }
- output.println("\t" + startnode.getLabel() + " -> " + endnode.getLabel()
- + " [" + "label=\"" + seedge.getLabel() + "\"];");
- }
- output.println("}");
- output.close();
- nodes.clear();
- nodes = null;
- } catch (Exception e) {
- e.printStackTrace();
- System.exit(-1);
+ }
+
+ public static void printCriticalPath(String path,
+ Vector<SimExecutionEdge> criticalPath) {
+ try {
+ File file=new File(path);
+ FileOutputStream dotstream=new FileOutputStream(file,false);
+ PrintWriter output = new java.io.PrintWriter(dotstream, true);
+ output.println("digraph simulation{");
+ output.print("\t");
+ output.println("node [shape=plaintext];");
+ output.print("\t");
+ output.println("edge [dir=none];");
+ output.print("\t");
+ output.println("ranksep=.05;");
+ output.println();
+ output.print("\t");
+ Vector<SimExecutionNode> nodes = new Vector<SimExecutionNode>();
+ String label = "";
+ String dotnodeparams="";
+
+ for(int i = 0; i < criticalPath.size(); i++) {
+ SimExecutionEdge seedge = criticalPath.elementAt(i);
+ SimExecutionNode startnode = (SimExecutionNode)seedge.getSource();
+ SimExecutionNode endnode = (SimExecutionNode)seedge.getTarget();
+ if(!nodes.contains(startnode)) {
+ label = startnode.getCoreNum() + ":" + startnode.getTimepoint();
+ output.println("\t" + startnode.getLabel() + " [label=\""
+ + label + "\" ];");
+ nodes.addElement(startnode);
}
+ if(!nodes.contains(endnode)) {
+ label = endnode.getCoreNum() + ":" + endnode.getTimepoint();
+ output.println("\t" + endnode.getLabel() + " [label=\""
+ + label + "\" ];");
+ nodes.addElement(endnode);
+ }
+ output.println("\t" + startnode.getLabel() + " -> " + endnode.getLabel()
+ + " [" + "label=\"" + seedge.getLabel() + "\"];");
+ }
+ output.println("}");
+ output.close();
+ nodes.clear();
+ nodes = null;
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.exit(-1);
}
+ }
}
\ No newline at end of file
import Util.Edge;
public class SimExecutionEdge extends Edge {
-
- private int eid;
- private static int nodeID=0;
-
- private int coreNum;
- private TaskDescriptor td;
- private Vector<Integer> taskparams;
- private long weight;
-
- private long bestStartPoint;
- private SimExecutionNode lastpredicatenode;
- private SimExecutionEdge lastpredicateedge;
- private Vector<SimExecutionEdge> predicates;
- private boolean isFixedTime;
-
- public SimExecutionEdge(SimExecutionNode target,
- int corenum,
- TaskDescriptor td,
- long weight,
- Vector<Integer> taskparams) {
- super(target);
- this.eid = SimExecutionEdge.nodeID++;
- this.coreNum = corenum;
- this.td = td;
- this.taskparams = taskparams;
- this.weight = weight;
- this.bestStartPoint = -1;
- this.lastpredicatenode = null;
- this.lastpredicateedge = null;
- this.predicates = null;
- this.isFixedTime = true;
- }
-
- public long getBestStartPoint() {
- if(this.bestStartPoint == -1) {
- if((this.predicates != null) && (this.predicates.size() > 0)) {
- // have predicates
- long starttime = 0;
- // check the latest finish time of all the predicates
- for(int j = 0; j < this.predicates.size(); j++) {
- SimExecutionEdge predicate = this.predicates.elementAt(j);
- long tmptime = predicate.getBestStartPoint() + predicate.getWeight();
- if(tmptime > starttime) {
- starttime = tmptime;
- this.lastpredicateedge = predicate;
- if(predicate.getTd() != null) {
- this.lastpredicatenode = (SimExecutionNode)predicate.getTarget();
- } else {
- // transfer edge
- this.lastpredicatenode = (SimExecutionNode)predicate.getSource();
- }
- }
- }
- this.bestStartPoint = starttime;
+
+ private int eid;
+ private static int nodeID=0;
+
+ private int coreNum;
+ private TaskDescriptor td;
+ private Vector<Integer> taskparams;
+ private long weight;
+
+ private long bestStartPoint;
+ private SimExecutionNode lastpredicatenode;
+ private SimExecutionEdge lastpredicateedge;
+ private Vector<SimExecutionEdge> predicates;
+ private boolean isFixedTime;
+
+ public SimExecutionEdge(SimExecutionNode target,
+ int corenum,
+ TaskDescriptor td,
+ long weight,
+ Vector<Integer> taskparams) {
+ super(target);
+ this.eid = SimExecutionEdge.nodeID++;
+ this.coreNum = corenum;
+ this.td = td;
+ this.taskparams = taskparams;
+ this.weight = weight;
+ this.bestStartPoint = -1;
+ this.lastpredicatenode = null;
+ this.lastpredicateedge = null;
+ this.predicates = null;
+ this.isFixedTime = true;
+ }
+
+ public long getBestStartPoint() {
+ if(this.bestStartPoint == -1) {
+ if((this.predicates != null) && (this.predicates.size() > 0)) {
+ // have predicates
+ long starttime = 0;
+ // check the latest finish time of all the predicates
+ for(int j = 0; j < this.predicates.size(); j++) {
+ SimExecutionEdge predicate = this.predicates.elementAt(j);
+ long tmptime = predicate.getBestStartPoint() + predicate.getWeight();
+ if(tmptime > starttime) {
+ starttime = tmptime;
+ this.lastpredicateedge = predicate;
+ if(predicate.getTd() != null) {
+ this.lastpredicatenode = (SimExecutionNode)predicate.getTarget();
} else {
- // no predicates
- this.bestStartPoint = 0;
+ // transfer edge
+ this.lastpredicatenode = (SimExecutionNode)predicate.getSource();
}
+ }
}
- return bestStartPoint;
- }
-
- public void setBestStartPoint(long bestStartPoint) {
- this.bestStartPoint = bestStartPoint;
- }
-
- public Vector<SimExecutionEdge> getPredicates() {
- return predicates;
- }
-
- public void addPredicate(SimExecutionEdge predicate) {
- if(this.predicates == null) {
- this.predicates = new Vector<SimExecutionEdge>();
- }
- if(!this.predicates.contains(predicate)) {
- this.predicates.add(predicate);
- }
- }
-
- public Vector<Integer> getTaskparams() {
- return taskparams;
- }
-
- public TaskDescriptor getTd() {
- return td;
- }
-
- public long getWeight() {
- return weight;
- }
-
- public void setWeight(int weight) {
- this.weight = weight;
- }
-
- public int getCoreNum() {
- return coreNum;
- }
-
- public SimExecutionNode getLastpredicateNode() {
- return lastpredicatenode;
- }
-
- public void setLastpredicateNode(SimExecutionNode lastpredicatenode) {
- this.lastpredicatenode = lastpredicatenode;
- }
-
- public SimExecutionEdge getLastpredicateEdge() {
- return lastpredicateedge;
- }
-
- public void setLastpredicateEdge(SimExecutionEdge lastpredicateedge) {
- this.lastpredicateedge = lastpredicateedge;
- }
-
- public boolean isFixedTime() {
- return isFixedTime;
- }
-
- public void setFixedTime(boolean isFixedTime) {
- this.isFixedTime = isFixedTime;
- }
-
- public String getLabel() {
- String completeLabel = (this.td != null? this.td.getSymbol():"")
- + "(" + this.weight + " | " + this.bestStartPoint + ")";
- return completeLabel;
- }
-
- public void destroy() {
- this.td = null;
- if(this.taskparams != null) {
- this.taskparams.clear();
- this.taskparams = null;
- }
- this.lastpredicatenode = null;
- this.lastpredicateedge = null;
- if(this.predicates != null) {
- this.predicates.clear();
- this.predicates = null;
- }
- this.source.getEdgeVector().clear();
- this.source.getInedgeVector().clear();
- this.source = null;
- this.target.getEdgeVector().clear();
- this.target.getInedgeVector().clear();
- this.target = null;
- }
+ this.bestStartPoint = starttime;
+ } else {
+ // no predicates
+ this.bestStartPoint = 0;
+ }
+ }
+ return bestStartPoint;
+ }
+
+ public void setBestStartPoint(long bestStartPoint) {
+ this.bestStartPoint = bestStartPoint;
+ }
+
+ public Vector<SimExecutionEdge> getPredicates() {
+ return predicates;
+ }
+
+ public void addPredicate(SimExecutionEdge predicate) {
+ if(this.predicates == null) {
+ this.predicates = new Vector<SimExecutionEdge>();
+ }
+ if(!this.predicates.contains(predicate)) {
+ this.predicates.add(predicate);
+ }
+ }
+
+ public Vector<Integer> getTaskparams() {
+ return taskparams;
+ }
+
+ public TaskDescriptor getTd() {
+ return td;
+ }
+
+ public long getWeight() {
+ return weight;
+ }
+
+ public void setWeight(int weight) {
+ this.weight = weight;
+ }
+
+ public int getCoreNum() {
+ return coreNum;
+ }
+
+ public SimExecutionNode getLastpredicateNode() {
+ return lastpredicatenode;
+ }
+
+ public void setLastpredicateNode(SimExecutionNode lastpredicatenode) {
+ this.lastpredicatenode = lastpredicatenode;
+ }
+
+ public SimExecutionEdge getLastpredicateEdge() {
+ return lastpredicateedge;
+ }
+
+ public void setLastpredicateEdge(SimExecutionEdge lastpredicateedge) {
+ this.lastpredicateedge = lastpredicateedge;
+ }
+
+ public boolean isFixedTime() {
+ return isFixedTime;
+ }
+
+ public void setFixedTime(boolean isFixedTime) {
+ this.isFixedTime = isFixedTime;
+ }
+
+ public String getLabel() {
+ String completeLabel = (this.td != null?this.td.getSymbol():"")
+ + "(" + this.weight + " | " + this.bestStartPoint + ")";
+ return completeLabel;
+ }
+
+ public void destroy() {
+ this.td = null;
+ if(this.taskparams != null) {
+ this.taskparams.clear();
+ this.taskparams = null;
+ }
+ this.lastpredicatenode = null;
+ this.lastpredicateedge = null;
+ if(this.predicates != null) {
+ this.predicates.clear();
+ this.predicates = null;
+ }
+ this.source.getEdgeVector().clear();
+ this.source.getInedgeVector().clear();
+ this.source = null;
+ this.target.getEdgeVector().clear();
+ this.target.getInedgeVector().clear();
+ this.target = null;
+ }
}
import Util.GraphNode;
public class SimExecutionNode extends GraphNode {
-
- private int nid;
- private static int nodeID=0;
-
- private int coreNum;
- private long timepoint;
- public Vector<Integer> spareCores;
-
- public SimExecutionNode(int corenum,
- long timepoint) {
- this.nid = SimExecutionNode.nodeID++;
- this.coreNum = corenum;
- this.timepoint = timepoint;
- this.spareCores = null;
- }
-
- public int getNid() {
- return nid;
- }
-
- public long getTimepoint() {
- return timepoint;
- }
-
- public int getCoreNum() {
- return coreNum;
- }
-
- public Vector<Integer> getSpareCores() {
- return spareCores;
- }
-
- public void setSpareCores(Vector<Integer> spareCores) {
- this.spareCores = spareCores;
- }
-
- public String getLabel() {
- return "N" + this.nid;
- }
+
+ private int nid;
+ private static int nodeID=0;
+
+ private int coreNum;
+ private long timepoint;
+ public Vector<Integer> spareCores;
+
+ public SimExecutionNode(int corenum,
+ long timepoint) {
+ this.nid = SimExecutionNode.nodeID++;
+ this.coreNum = corenum;
+ this.timepoint = timepoint;
+ this.spareCores = null;
+ }
+
+ public int getNid() {
+ return nid;
+ }
+
+ public long getTimepoint() {
+ return timepoint;
+ }
+
+ public int getCoreNum() {
+ return coreNum;
+ }
+
+ public Vector<Integer> getSpareCores() {
+ return spareCores;
+ }
+
+ public void setSpareCores(Vector<Integer> spareCores) {
+ this.spareCores = spareCores;
+ }
+
+ public String getLabel() {
+ return "N" + this.nid;
+ }
}
}
}
- public TaskSimulator(TaskDescriptor td,
- CoreSimulator cs) {
+ public TaskSimulator(TaskDescriptor td,
+ CoreSimulator cs) {
super();
this.td = td;
this.paraQueues = null;
return this.objVersionTbl.get(os).intValue();
}
- public void enquePara(ObjectSimulator obj,
- FlagState fs,
- int version,
- boolean inherent) {
+ public void enquePara(ObjectSimulator obj,
+ FlagState fs,
+ int version,
+ boolean inherent) {
ClassDescriptor cd = obj.getCd();
int paraNum = td.numParameters();
for(int i = 0; i < paraNum; i++) {
}
}
- public void refreshPara(ObjectSimulator obj,
- boolean remove) {
+ public void refreshPara(ObjectSimulator obj,
+ boolean remove) {
ClassDescriptor cd = obj.getCd();
int paraNum = td.numParameters();
for(int i = 0; i < paraNum; i++) {
for(int i = 0; i < paraQueues.size(); i++) {
ObjectSimulator tpara = paraQueues.elementAt(i).peek();
if(tpara == null) {
- // the parameter is already removed, delete this task too
- finishTime = 800;
- this.currentRun.setFinishTime(finishTime);
- this.currentRun.setExetype(2);
- for(int j = 0; j < i; ++j) {
- tpara = this.paraQueues.elementAt(j).poll();
- if(tpara.isShared() && tpara.isHold()) {
- tpara.setHold(false);
- }
- this.paraQueues.elementAt(j).add(tpara);
- }
- return;
+ // the parameter is already removed, delete this task too
+ finishTime = 800;
+ this.currentRun.setFinishTime(finishTime);
+ this.currentRun.setExetype(2);
+ for(int j = 0; j < i; ++j) {
+ tpara = this.paraQueues.elementAt(j).poll();
+ if(tpara.isShared() && tpara.isHold()) {
+ tpara.setHold(false);
+ }
+ this.paraQueues.elementAt(j).add(tpara);
+ }
+ return;
}
if(tpara.isShared()) {
if(tpara.isHold()) {
// shared object held by other tasks
finishTime = 800; // TODO currenly assume the effort on requesting locks are only 800
/*this.currentRun.setFinishTime(finishTime);
- this.currentRun.setExetype(1);
- paraQueues.elementAt(i).poll();
- paraQueues.elementAt(i).add(tpara);
- for(int j = 0; j < i; ++j) {
- tpara = this.paraQueues.elementAt(j).poll();
- if(tpara.isShared() && tpara.isHold()) {
+ this.currentRun.setExetype(1);
+ paraQueues.elementAt(i).poll();
+ paraQueues.elementAt(i).add(tpara);
+ for(int j = 0; j < i; ++j) {
+ tpara = this.paraQueues.elementAt(j).poll();
+ if(tpara.isShared() && tpara.isHold()) {
tpara.setHold(false);
- }
- this.paraQueues.elementAt(j).add(tpara);
- }*/
+ }
+ this.paraQueues.elementAt(j).add(tpara);
+ }*/
// remove it instead
this.currentRun.setFinishTime(finishTime);
this.currentRun.setExetype(2);
finishTime += toexecute.getExeTime();
// TODO for test
if(ftime == 0) {
- ftime = toexecute.getExeTime();
+ ftime = toexecute.getExeTime();
} else if(ftime != toexecute.getExeTime()) {
- //System.err.println("error for simulation: " + td.getSymbol());
+ //System.err.println("error for simulation: " + td.getSymbol());
}
// TODO for test
/*if(td.getSymbol().equals("addIYLM")) {
- System.err.println("# " + i + " time: " + toexecute.getExeTime());
- }*/
- if((toexecute.getNewObjInfoHashtable() != null)
- && (toexecute.getNewObjInfoHashtable().size() > 0)) {
+ System.err.println("# " + i + " time: " + toexecute.getExeTime());
+ }*/
+ if((toexecute.getNewObjInfoHashtable() != null)
+ && (toexecute.getNewObjInfoHashtable().size() > 0)) {
// have new objects
Iterator it = toexecute.getNewObjInfoHashtable().keySet().iterator();
int invokeNum = toexecute.getInvokeNum();
finishTime /= paraQueues.size();
// TODO for test
/*if(td.getSymbol().equals("addIYLM")) {
- System.err.println("total time: " + finishTime);
- System.err.println("=====");
- }*/
+ System.err.println("total time: " + finishTime);
+ System.err.println("=====");
+ }*/
this.currentRun.setFinishTime(finishTime);
this.currentRun.setExetype(0);
}
private int targetCoreNum;
private Queue<ObjectInfo> newObjs;
- public TransTaskSimulator(CoreSimulator cs,
- int targetCoreNum,
- Queue<ObjectInfo> nobjs) {
+ public TransTaskSimulator(CoreSimulator cs,
+ int targetCoreNum,
+ Queue<ObjectInfo> nobjs) {
super(null, cs);
this.targetCoreNum = targetCoreNum;
this.newObjs = nobjs;
}
public Queue<ObjectInfo> getNewObjs() {
- return newObjs;
- }
-
+ return newObjs;
+ }
+
}
\ No newline at end of file
while (it.hasNext()) {
FlagState fs = it.next();
if(fs.isSourceNode()) {
- for (Iterator allocit = ((Vector)fs.getAllocatingTasks()).iterator(); allocit.hasNext();) {
+ for (Iterator allocit = ((Vector)fs.getAllocatingTasks()).iterator(); allocit.hasNext(); ) {
TaskDescriptor alloctask=(TaskDescriptor)allocit.next();
EGTaskNode srcnode=new EGTaskNode(alloctask.getSymbol(),alloctask, fs);
nodes.add(srcnode);
srcnode.setSource();
- for (Iterator edges = fs.edges(); edges.hasNext();) {
+ for (Iterator edges = fs.edges(); edges.hasNext(); ) {
FEdge edge = (FEdge)edges.next();
EGTaskNode targetnode=getNode(edge, map, nodes);
EGEdge newedge=new EGEdge(fs, targetnode);
}
}
}
- for(Iterator init=fs.inedges(); init.hasNext();) {
+ for(Iterator init=fs.inedges(); init.hasNext(); ) {
FEdge inedge=(FEdge)init.next();
EGTaskNode srcnode=getNode(inedge, map, nodes);
- for(Iterator outit=fs.edges(); outit.hasNext();) {
+ for(Iterator outit=fs.edges(); outit.hasNext(); ) {
FEdge outedge=(FEdge)outit.next();
EGTaskNode dstnode=getNode(outedge, map, nodes);
EGEdge newedge=new EGEdge(fs,dstnode);
private void test(Hashtable graph) {
System.out.println("\nGraph contains :");
Collection c = graph.values();
- for ( Iterator it = c.iterator(); it.hasNext();) {
+ for ( Iterator it = c.iterator(); it.hasNext(); ) {
EGTaskNode tn = (EGTaskNode)it.next();
System.out.println(tn.getTextLabel()+" ID "+tn.getLabel()+" FS "+tn.getFSName());
}
private void traverse(java.io.PrintWriter output, Set v) {
EGTaskNode tn;
- for(Iterator it1 = v.iterator(); it1.hasNext();) {
+ for(Iterator it1 = v.iterator(); it1.hasNext(); ) {
tn = (EGTaskNode)it1.next();
output.println("\t"+tn.getLabel()+" [label=\""+tn.getTextLabel()+"\"");
if (tn.isMultipleParams()) output.println(", color=blue");
output.println("];");
- for(Iterator it2 = tn.edges(); it2.hasNext();) {
+ for(Iterator it2 = tn.edges(); it2.hasNext(); ) {
output.println("\t"+tn.getLabel()+" -> "+((EGTaskNode)((EGEdge)it2.next()).getTarget()).getLabel()+";");
}
}
}
public int getTaskExitIndex() {
- return m_taskexitindex;
+ return m_taskexitindex;
}
public void setTaskExitIndex(int taskexitindex) {
- this.m_taskexitindex = taskexitindex;
+ this.m_taskexitindex = taskexitindex;
}
public double getProbability() {
}
this.newObjInfos.put(cd, new NewObjInfo(newRate, probability));
}
-
+
public void init4Simulate() {
this.invokeNum = 0;
this.expInvokeNum = 0;
if(this.newObjInfos != null) {
Iterator<NewObjInfo> it_nobjs = this.newObjInfos.values().iterator();
while(it_nobjs.hasNext()) {
- it_nobjs.next().invokeNum = 0;
+ it_nobjs.next().invokeNum = 0;
}
}
}
public int getInvokeNumGap() {
return this.expInvokeNum - this.invokeNum;
}
-
+
public int getInvokeNumGapByObj(int byObj) {
- return this.expInvokeNum - (this.invokeNum/byObj);
+ return this.expInvokeNum - (this.invokeNum/byObj);
}
public void setExpInvokeNum(int expInvokeNum) {
public int getFlagInt(FlagState fs) {
int flagid=0;
- for(Iterator flags = fs.getFlags(); flags.hasNext();) {
+ for(Iterator flags = fs.getFlags(); flags.hasNext(); ) {
FlagDescriptor flagd = (FlagDescriptor)flags.next();
int id=1<<((Integer)flaginfo.get(flagd)).intValue();
flagid|=id;
* pairs. */
private void getFlagsfromClasses() {
- for(Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator(); it_classes.hasNext();) {
+ for(Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator(); it_classes.hasNext(); ) {
ClassDescriptor cd = (ClassDescriptor)it_classes.next();
Vector vFlags=new Vector();
FlagDescriptor flag[];
/* Adding the flags of the super class */
ClassDescriptor tmp=cd;
while(tmp!=null) {
- for(Iterator it_cflags=tmp.getFlags(); it_cflags.hasNext();) {
+ for(Iterator it_cflags=tmp.getFlags(); it_cflags.hasNext(); ) {
FlagDescriptor fd = (FlagDescriptor)it_cflags.next();
vFlags.add(fd);
}
if (fs.cd!=cd)
return false;
if(fs.byObj != this.byObj) {
- return false;
+ return false;
}
return (fs.flagstate.equals(flagstate) & fs.tags.equals(tags));
}
public String getTextLabel() {
String label=null;
- for(Iterator it=getFlags(); it.hasNext();) {
+ for(Iterator it=getFlags(); it.hasNext(); ) {
FlagDescriptor fd=(FlagDescriptor) it.next();
if (label==null)
label=fd.toString();
else
label+=", "+fd.toString();
}
- for (Enumeration en_tags=getTags(); en_tags.hasMoreElements();) {
+ for (Enumeration en_tags=getTags(); en_tags.hasMoreElements(); ) {
TagDescriptor td=(TagDescriptor)en_tags.nextElement();
switch (tags.get(td).intValue()) {
case ONETAG:
for(int i = 0; i < this.edges.size(); i++) {
next = (FEdge) this.edges.elementAt(i);
if(this.byObj == 0) {
- next.setExpInvokeNum((int)(Math.ceil(this.invokeNum * next.getProbability() / 100)));
+ next.setExpInvokeNum((int)(Math.ceil(this.invokeNum * next.getProbability() / 100)));
} else {
- next.setExpInvokeNum((int)(Math.ceil(((this.invokeNum - 1) / this.byObj + 1) * next.getProbability() / 100)));
+ next.setExpInvokeNum((int)(Math.ceil(((this.invokeNum - 1) / this.byObj + 1) * next.getProbability() / 100)));
}
}
- // find the one with the biggest gap between its actual invoke time and
+ // find the one with the biggest gap between its actual invoke time and
// the expected invoke time and associated with task td
int index = 0;
int gap = 0;
boolean isbackedge = true;
for(int i = 0; i < this.edges.size(); i++) {
next = ((FEdge) this.edges.elementAt(i));
- int temp = (this.byObj == 0) ? next.getInvokeNumGap() : next.getInvokeNumGapByObj(this.byObj);
+ int temp = (this.byObj == 0)?next.getInvokeNumGap():next.getInvokeNumGapByObj(this.byObj);
boolean exchange = false;
if((temp > gap) && (next.getTask().equals(td))) {
exchange = true;
}
public int getByObj() {
- return byObj;
+ return byObj;
}
public void setByObj(int byObj) {
- this.byObj = byObj;
+ this.byObj = byObj;
}
/*public Vector<ScheduleEdge> getAllys() {
}
public void doAnalysis() {
- for(Iterator it=state.getClassSymbolTable().getDescriptorsIterator(); it.hasNext();) {
+ for(Iterator it=state.getClassSymbolTable().getDescriptorsIterator(); it.hasNext(); ) {
ClassDescriptor cd=(ClassDescriptor) it.next();
if (taskanalysis.getFlagStates(cd)==null)
continue;
HashSet garbage=new HashSet();
HashSet possiblegarbage=new HashSet();
- for(Iterator fsit=flagstatenodes.iterator(); fsit.hasNext();) {
+ for(Iterator fsit=flagstatenodes.iterator(); fsit.hasNext(); ) {
FlagState fs=(FlagState)fsit.next();
if (fs.numedges()==0)
garbage.add(fs);
private Hashtable<FlagState, Set<EGTaskNode>> buildMap(ClassDescriptor cd) {
Hashtable<FlagState, Set<EGTaskNode>> table=new Hashtable<FlagState, Set<EGTaskNode>>();
- for(Iterator it=((Set)executiongraph.get(cd)).iterator(); it.hasNext();) {
+ for(Iterator it=((Set)executiongraph.get(cd)).iterator(); it.hasNext(); ) {
EGTaskNode node=(EGTaskNode)it.next();
if (node.getFS()!=null) {
if (!table.containsKey(node.getFS()))
private Hashtable<FlagState, Set<FlagState>> buildUseMap(ClassDescriptor cd) {
Hashtable<FlagState, Set<FlagState>> table=new Hashtable<FlagState, Set<FlagState>>();
- for(Iterator it=((Set)executiongraph.get(cd)).iterator(); it.hasNext();) {
+ for(Iterator it=((Set)executiongraph.get(cd)).iterator(); it.hasNext(); ) {
EGTaskNode node=(EGTaskNode)it.next();
if (node.getFS()!=null) {
if (!table.containsKey(node.getPostFS()))
Hashtable<TaskIndex, Set<OptionalTaskDescriptor>> timap=new Hashtable<TaskIndex, Set<OptionalTaskDescriptor>>();
Set<TaskIndex> tiselfloops=new HashSet<TaskIndex>();
- for(Iterator<EGTaskNode> egit=egset.iterator(); egit.hasNext();) {
+ for(Iterator<EGTaskNode> egit=egset.iterator(); egit.hasNext(); ) {
EGTaskNode egnode=egit.next();
Set<OptionalTaskDescriptor> setotd;
if (egnode.isOptional()) {
else
oldsetotd=new HashSet<OptionalTaskDescriptor>();
setotd=new HashSet<OptionalTaskDescriptor>();
- for(Iterator<OptionalTaskDescriptor> otdit=oldsetotd.iterator(); otdit.hasNext();) {
+ for(Iterator<OptionalTaskDescriptor> otdit=oldsetotd.iterator(); otdit.hasNext(); ) {
OptionalTaskDescriptor oldotd=otdit.next();
Predicate newp=combinePredicates(oldotd.predicate, p);
OptionalTaskDescriptor newotd=new OptionalTaskDescriptor(oldotd.td, oldotd.getIndex(), oldotd.enterflagstates, newp);
else
setotd=new HashSet<OptionalTaskDescriptor>();
}
- TaskIndex ti=egnode.isRuntime() ? new TaskIndex() : new TaskIndex(egnode.getTD(), egnode.getIndex());
+ TaskIndex ti=egnode.isRuntime()?new TaskIndex():new TaskIndex(egnode.getTD(), egnode.getIndex());
if (!ti.runtime) {
//runtime edges don't do anything...don't have to take
//them, can't predict when we can.
//Combine all options
HashSet<OptionalTaskDescriptor> set=new HashSet<OptionalTaskDescriptor>();
- for(Iterator<Set<OptionalTaskDescriptor>> it=timap.values().iterator(); it.hasNext();) {
+ for(Iterator<Set<OptionalTaskDescriptor>> it=timap.values().iterator(); it.hasNext(); ) {
Set<OptionalTaskDescriptor> otdset=it.next();
set.addAll(otdset);
}
private HashSet createIntersection(Set A, Set B, ClassDescriptor cd) {
HashSet result = new HashSet();
- for(Iterator b_it = B.iterator(); b_it.hasNext();) {
+ for(Iterator b_it = B.iterator(); b_it.hasNext(); ) {
OptionalTaskDescriptor otd_b = (OptionalTaskDescriptor)b_it.next();
- for(Iterator a_it = A.iterator(); a_it.hasNext();) {
+ for(Iterator a_it = A.iterator(); a_it.hasNext(); ) {
OptionalTaskDescriptor otd_a = (OptionalTaskDescriptor)a_it.next();
if(otd_a.td==otd_b.td&&
otd_a.getIndex()==otd_b.getIndex()) {
Set<FlatNode> nodeset=fm.getNodeSet();
- for(Iterator<FlatNode> nodeit=nodeset.iterator(); nodeit.hasNext();) {
+ for(Iterator<FlatNode> nodeit=nodeset.iterator(); nodeit.hasNext(); ) {
FlatNode fn=nodeit.next();
if (fn.kind()==FKind.FlatFlagActionNode) {
FlatFlagActionNode ffan=(FlatFlagActionNode)fn;
if (ffan.getTaskType() == FlatFlagActionNode.TASKEXIT) {
- for(Iterator it_tfp=ffan.getTempFlagPairs(); it_tfp.hasNext();) {
+ for(Iterator it_tfp=ffan.getTempFlagPairs(); it_tfp.hasNext(); ) {
TempFlagPair tfp=(TempFlagPair)it_tfp.next();
TempDescriptor tempd = tfp.getTemp();
if(tempd!=tmp)
result.flags.putAll(A.flags);
result.tags.putAll(A.tags);
Collection c = B.vardescriptors;
- for(Iterator varit = c.iterator(); varit.hasNext();) { //maybe change that
+ for(Iterator varit = c.iterator(); varit.hasNext(); ) { //maybe change that
VarDescriptor vd = (VarDescriptor)varit.next();
if(result.vardescriptors.contains(vd))
System.out.println("Already in ");
}
}
Collection vardesc = result.vardescriptors;
- for(Iterator varit = vardesc.iterator(); varit.hasNext();) {
+ for(Iterator varit = vardesc.iterator(); varit.hasNext(); ) {
VarDescriptor vd = (VarDescriptor)varit.next();
HashSet bflags = B.flags.get(vd);
if( bflags == null ) {
FlatFlagActionNode ffan=(FlatFlagActionNode)fn1;
if (ffan.getTaskType() == FlatFlagActionNode.TASKEXIT) {
HashSet tempset = new HashSet();
- for(Iterator it_fs = otd.enterflagstates.iterator(); it_fs.hasNext();) {
+ for(Iterator it_fs = otd.enterflagstates.iterator(); it_fs.hasNext(); ) {
FlagState fstemp = (FlagState)it_fs.next();
Vector<FlagState> processed=new Vector<FlagState>();
- for(Iterator it_tfp=ffan.getTempFlagPairs(); it_tfp.hasNext();) {
+ for(Iterator it_tfp=ffan.getTempFlagPairs(); it_tfp.hasNext(); ) {
TempFlagPair tfp=(TempFlagPair)it_tfp.next();
if (tfp.getTemp()==temp)
fstemp=fstemp.setFlag(tfp.getFlag(),ffan.getFlagChange(tfp));
processed.add(fstemp);
//Process clears first
- for(Iterator it_ttp=ffan.getTempTagPairs(); it_ttp.hasNext();) {
+ for(Iterator it_ttp=ffan.getTempTagPairs(); it_ttp.hasNext(); ) {
TempTagPair ttp=(TempTagPair)it_ttp.next();
if (temp==ttp.getTemp()) {
Vector<FlagState> oldprocess=processed;
processed=new Vector<FlagState>();
- for (Enumeration en=oldprocess.elements(); en.hasMoreElements();) {
+ for (Enumeration en=oldprocess.elements(); en.hasMoreElements(); ) {
FlagState fsworking=(FlagState)en.nextElement();
if (!ffan.getTagChange(ttp)) {
processed.addAll(Arrays.asList(fsworking.clearTag(ttp.getTag())));
}
}
//Process sets next
- for(Iterator it_ttp=ffan.getTempTagPairs(); it_ttp.hasNext();) {
+ for(Iterator it_ttp=ffan.getTempTagPairs(); it_ttp.hasNext(); ) {
TempTagPair ttp=(TempTagPair)it_ttp.next();
if (temp==ttp.getTemp()) {
Vector<FlagState> oldprocess=processed;
processed=new Vector<FlagState>();
- for (Enumeration en=oldprocess.elements(); en.hasMoreElements();) {
+ for (Enumeration en=oldprocess.elements(); en.hasMoreElements(); ) {
FlagState fsworking=(FlagState)en.nextElement();
if (ffan.getTagChange(ttp)) {
processed.addAll(Arrays.asList(fsworking.setTag(ttp.getTag())));
FlagState fs = (FlagState)fses.nextElement();
System.out.println("\t"+fs.getTextLabel()+"\n\tSafe tasks to execute :\n");
HashSet availabletasks = (HashSet)hashtbtemp.get(fs);
- for(Iterator otd_it = availabletasks.iterator(); otd_it.hasNext();) {
+ for(Iterator otd_it = availabletasks.iterator(); otd_it.hasNext(); ) {
OptionalTaskDescriptor otd = (OptionalTaskDescriptor)otd_it.next();
System.out.println("\t\tTASK "+otd.td.getSymbol()+" UID : "+otd.getuid()+"\n");
System.out.println("\t\twith flags :");
- for(Iterator myfses = otd.enterflagstates.iterator(); myfses.hasNext();) {
+ for(Iterator myfses = otd.enterflagstates.iterator(); myfses.hasNext(); ) {
System.out.println("\t\t\t"+((FlagState)myfses.next()).getTextLabel());
}
System.out.println("\t\tand exitflags :");
- for(Iterator fseshash = otd.exitfses.iterator(); fseshash.hasNext();) {
+ for(Iterator fseshash = otd.exitfses.iterator(); fseshash.hasNext(); ) {
HashSet temphs = (HashSet)fseshash.next();
System.out.println("");
- for(Iterator exfses = temphs.iterator(); exfses.hasNext();) {
+ for(Iterator exfses = temphs.iterator(); exfses.hasNext(); ) {
System.out.println("\t\t\t"+((FlagState)exfses.next()).getTextLabel());
}
}
Predicate predicate = otd.predicate;
System.out.println("\t\tPredicate constraints :");
Collection c = predicate.vardescriptors;
- for(Iterator varit = c.iterator(); varit.hasNext();) {
+ for(Iterator varit = c.iterator(); varit.hasNext(); ) {
VarDescriptor vard = (VarDescriptor)varit.next();
System.out.println("\t\t\tClass "+vard.getType().getClassDesc().getSymbol());
}
System.out.println("\n\n\n\tOptionaltaskdescriptors contains : ");
Collection c_otd = optionaltaskdescriptors.get(cdtemp).values();
- for(Iterator otd_it = c_otd.iterator(); otd_it.hasNext();) {
+ for(Iterator otd_it = c_otd.iterator(); otd_it.hasNext(); ) {
OptionalTaskDescriptor otd = (OptionalTaskDescriptor)otd_it.next();
System.out.println("\t\tTASK "+otd.td.getSymbol()+" UID : "+otd.getuid()+"\n");
System.out.println("\t\twith flags :");
- for(Iterator myfses = otd.enterflagstates.iterator(); myfses.hasNext();) {
+ for(Iterator myfses = otd.enterflagstates.iterator(); myfses.hasNext(); ) {
System.out.println("\t\t\t"+((FlagState)myfses.next()).getTextLabel());
}
System.out.println("\t\tand exitflags :");
- for(Iterator fseshash = otd.exitfses.iterator(); fseshash.hasNext();) {
+ for(Iterator fseshash = otd.exitfses.iterator(); fseshash.hasNext(); ) {
HashSet temphs = (HashSet)fseshash.next();
System.out.println("");
- for(Iterator exfses = temphs.iterator(); exfses.hasNext();) {
+ for(Iterator exfses = temphs.iterator(); exfses.hasNext(); ) {
System.out.println("\t\t\t"+((FlagState)exfses.next()).getTextLabel());
}
}
Predicate predicate = otd.predicate;
System.out.println("\t\tPredicate contains :");
Collection c = predicate.vardescriptors;
- for(Iterator varit = c.iterator(); varit.hasNext();) {
+ for(Iterator varit = c.iterator(); varit.hasNext(); ) {
VarDescriptor vard = (VarDescriptor)varit.next();
System.out.println("\t\t\tClass "+vard.getType().getClassDesc().getSymbol());
HashSet temphash = predicate.flags.get(vard.getName());
private void traverse(java.io.PrintWriter output, Collection v) {
EGTaskNode tn;
- for(Iterator it1 = v.iterator(); it1.hasNext();) {
+ for(Iterator it1 = v.iterator(); it1.hasNext(); ) {
tn = (EGTaskNode)it1.next();
output.println("\t"+tn.getLabel()+" [label=\""+tn.getTextLabel()+"\"");
if (tn.isOptional()) {
output.println(", shape=octagon");
output.println("];");
- for(Iterator it2 = tn.edges(); it2.hasNext();) {
+ for(Iterator it2 = tn.edges(); it2.hasNext(); ) {
EGTaskNode tn2 = (EGTaskNode)((Edge)it2.next()).getTarget();
output.println("\t"+tn.getLabel()+" -> "+tn2.getLabel()+";");
}
for(int i=0; i<scc.numSCC(); i++) {
Set component=scc.getSCC(i);
HashSet flagset=new HashSet();
- for(Iterator compit=component.iterator(); compit.hasNext();) {
+ for(Iterator compit=component.iterator(); compit.hasNext(); ) {
TagBinding tb=(TagBinding)compit.next();
flagset.addAll(tb.getAllocations());
- for(Iterator edgeit=tb.edges(); edgeit.hasNext();) {
+ for(Iterator edgeit=tb.edges(); edgeit.hasNext(); ) {
Edge e=(Edge)edgeit.next();
TagBinding tb2=(TagBinding)e.getTarget();
flagset.addAll(tb2.getAllocations());
}
}
- for(Iterator compit=component.iterator(); compit.hasNext();) {
+ for(Iterator compit=component.iterator(); compit.hasNext(); ) {
TagBinding tb=(TagBinding)compit.next();
tb.getAllocations().addAll(flagset);
}
}
SymbolTable tasktable=state.getTaskSymbolTable();
- for(Iterator taskit=tasktable.getDescriptorsIterator(); taskit.hasNext();) {
+ for(Iterator taskit=tasktable.getDescriptorsIterator(); taskit.hasNext(); ) {
TaskDescriptor task=(TaskDescriptor)taskit.next();
HashSet roottags=(HashSet)tasktotagbindings.get(task);
HashSet taskflags=(HashSet)tasktoflagstates.get(task);
- for(Iterator tagit=roottags.iterator(); tagit.hasNext();) {
+ for(Iterator tagit=roottags.iterator(); tagit.hasNext(); ) {
TagBinding tb=(TagBinding)tagit.next();
taskflags.addAll(tb.getAllocations());
}
private Set computeRootSet() {
HashSet rootset=new HashSet();
SymbolTable tasktable=state.getTaskSymbolTable();
- for(Iterator taskit=tasktable.getDescriptorsIterator(); taskit.hasNext();) {
+ for(Iterator taskit=tasktable.getDescriptorsIterator(); taskit.hasNext(); ) {
TaskDescriptor task=(TaskDescriptor)taskit.next();
HashSet roottags=new HashSet();
HashSet taskflags=new HashSet();
private void computeCallsFlags(FlatMethod fm, Hashtable parammap, Set tagbindings, Set newflags) {
Set nodeset=fm.getNodeSet();
- for(Iterator nodeit=nodeset.iterator(); nodeit.hasNext();) {
+ for(Iterator nodeit=nodeset.iterator(); nodeit.hasNext(); ) {
FlatNode fn=(FlatNode)nodeit.next();
if(fn.kind()==FKind.FlatCall) {
FlatCall fc=(FlatCall)fn;
MethodDescriptor nodemd=fc.getMethod();
- Set methodset=fc.getThis()==null ? callgraph.getMethods(nodemd) :
+ Set methodset=fc.getThis()==null?callgraph.getMethods(nodemd):
callgraph.getMethods(nodemd, fc.getThis().getType());
- for(Iterator methodit=methodset.iterator(); methodit.hasNext();) {
+ for(Iterator methodit=methodset.iterator(); methodit.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor) methodit.next();
TagBinding nodetb=new TagBinding(md);
for(int i=0; i<md.numParameters(); i++) {
}
Vector<FlagState> targetFStates = ffan.getTargetFStates4NewObj(ffantemp.getType().getClassDesc());
FlagState fs=new FlagState(ffantemp.getType().getClassDesc());
- for(Iterator it=ffan.getTempFlagPairs(); it.hasNext();) {
+ for(Iterator it=ffan.getTempFlagPairs(); it.hasNext(); ) {
TempFlagPair tfp=(TempFlagPair)it.next();
if (ffan.getFlagChange(tfp))
fs=fs.setFlag(tfp.getFlag(), true);
HashSet fsset=new HashSet();
fsset.add(fs);
- for(Iterator it=ffan.getTempTagPairs(); it.hasNext();) {
+ for(Iterator it=ffan.getTempTagPairs(); it.hasNext(); ) {
HashSet oldfsset=fsset;
fsset=new HashSet();
if (tag==null&¶mmap!=null&¶mmap.containsKey(ttp.getTagTemp())) {
tag=(TagDescriptor)parammap.get(ttp.getTagTemp());
}
- for(Iterator setit=oldfsset.iterator(); setit.hasNext();) {
+ for(Iterator setit=oldfsset.iterator(); setit.hasNext(); ) {
FlagState fs2=(FlagState)setit.next();
fsset.addAll(Arrays.asList(fs2.setTag(tag)));
}
throw new Error("Don't clear tag in new object allocation");
}
- for(Iterator setit=fsset.iterator(); setit.hasNext();) {
+ for(Iterator setit=fsset.iterator(); setit.hasNext(); ) {
FlagState fs2=(FlagState)setit.next();
if (!flagmap.containsKey(fs2))
flagmap.put(fs2,fs2);
private void computeTagBindings(Set roots) {
tovisit.addAll(roots);
- for(Iterator it=roots.iterator(); it.hasNext();) {
+ for(Iterator it=roots.iterator(); it.hasNext(); ) {
TagBinding tb=(TagBinding)it.next();
discovered.put(tb,tb);
}
FlatMethod fm=state.getMethodFlat(md);
/* Build map from temps -> tagdescriptors */
Hashtable parammap=new Hashtable();
- int offset=md.isStatic() ? 0 : 1;
+ int offset=md.isStatic()?0:1;
for(int i=0; i<fm.numParameters(); i++) {
computeCallsFlags(fm, parammap, newtags, tb.getAllocations());
- for(Iterator tagit=newtags.iterator(); tagit.hasNext();) {
+ for(Iterator tagit=newtags.iterator(); tagit.hasNext(); ) {
TagBinding newtag=(TagBinding)tagit.next();
Edge e=new Edge(newtag);
tb.addEdge(e);
/** Iterate through the classes used in the program to build
* the table of flags
*/
- for(Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator(); it_classes.hasNext();) {
+ for(Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator(); it_classes.hasNext(); ) {
ClassDescriptor cd = (ClassDescriptor)it_classes.next();
Vector vFlags=new Vector();
/* Adding the flags of the super class */
ClassDescriptor tmp=cd;
while(tmp!=null) {
- for(Iterator it_cflags=tmp.getFlags(); it_cflags.hasNext();) {
+ for(Iterator it_cflags=tmp.getFlags(); it_cflags.hasNext(); ) {
FlagDescriptor fd = (FlagDescriptor)it_cflags.next();
vFlags.add(fd);
}
if (vFlags.size()!=0) {
flag=new FlagDescriptor[vFlags.size()];
- for(int i=0; i < vFlags.size() ; i++) {
+ for(int i=0; i < vFlags.size(); i++) {
if (((FlagDescriptor)vFlags.get(i)).getExternal()) {
flag[ctr]=(FlagDescriptor)vFlags.get(i);
vFlags.remove(flag[ctr]);
ctr++;
}
}
- for(int i=0; i < vFlags.size() ; i++) {
+ for(int i=0; i < vFlags.size(); i++) {
flag[i+ctr]=(FlagDescriptor)vFlags.get(i);
}
extern_flags.put(cd,new Integer(ctr));
int externs;
toprocess=new LinkedList<FlagState>();
- for(Iterator it_classes=(Iterator)flags.keys(); it_classes.hasNext();) {
+ for(Iterator it_classes=(Iterator)flags.keys(); it_classes.hasNext(); ) {
ClassDescriptor cd=(ClassDescriptor)it_classes.next();
externs=((Integer)extern_flags.get(cd)).intValue();
FlagDescriptor[] fd=(FlagDescriptor[])flags.get(cd);
ClassDescriptor cd=fs.getClassDescriptor();
Hashtable<FlagState,FlagState> sourcenodes=(Hashtable<FlagState,FlagState>)flagstates.get(cd);
- for(Iterator it_tasks=state.getTaskSymbolTable().getDescriptorsIterator(); it_tasks.hasNext();) {
+ for(Iterator it_tasks=state.getTaskSymbolTable().getDescriptorsIterator(); it_tasks.hasNext(); ) {
TaskDescriptor td = (TaskDescriptor)it_tasks.next();
String taskname=td.getSymbol();
Set newstates=taganalysis.getFlagStates(td);
- for(Iterator fsit=newstates.iterator(); fsit.hasNext();) {
+ for(Iterator fsit=newstates.iterator(); fsit.hasNext(); ) {
FlagState fsnew=(FlagState) fsit.next();
System.out.println("SOURCE:"+fsnew);
initFStates.addElement(fs);
}
Vector<FlagState> targetFStates = ffan.getTargetFStates(fs);
- for(Enumeration en=fsv_taskexit.elements(); en.hasMoreElements();) {
+ for(Enumeration en=fsv_taskexit.elements(); en.hasMoreElements(); ) {
FlagState fs_taskexit=(FlagState)en.nextElement();
if (!sourcenodes.containsKey(fs_taskexit)) {
toprocess.add(fs_taskexit);
private boolean isTaskTrigger_tag(TagExpressionList tel, FlagState fs) {
if (tel!=null) {
- for (int i=0; i<tel.numTags() ; i++) {
+ for (int i=0; i<tel.numTags(); i++) {
switch (fs.getTagCount(tel.getType(i))) {
case FlagState.ONETAG:
case FlagState.MULTITAGS:
//Process the flag changes
- for(Iterator it_tfp=ffan.getTempFlagPairs(); it_tfp.hasNext();) {
+ for(Iterator it_tfp=ffan.getTempFlagPairs(); it_tfp.hasNext(); ) {
TempFlagPair tfp=(TempFlagPair)it_tfp.next();
if (temp==tfp.getTemp())
fstemp=fstemp.setFlag(tfp.getFlag(),ffan.getFlagChange(tfp));
processed.add(fstemp);
//Process clears first
- for(Iterator it_ttp=ffan.getTempTagPairs(); it_ttp.hasNext();) {
+ for(Iterator it_ttp=ffan.getTempTagPairs(); it_ttp.hasNext(); ) {
TempTagPair ttp=(TempTagPair)it_ttp.next();
if (temp==ttp.getTemp()) {
Vector<FlagState> oldprocess=processed;
processed=new Vector<FlagState>();
- for (Enumeration en=oldprocess.elements(); en.hasMoreElements();) {
+ for (Enumeration en=oldprocess.elements(); en.hasMoreElements(); ) {
FlagState fsworking=(FlagState)en.nextElement();
if (!ffan.getTagChange(ttp)) {
processed.addAll(Arrays.asList(fsworking.clearTag(ttp.getTag())));
}
}
//Process sets next
- for(Iterator it_ttp=ffan.getTempTagPairs(); it_ttp.hasNext();) {
+ for(Iterator it_ttp=ffan.getTempTagPairs(); it_ttp.hasNext(); ) {
TempTagPair ttp=(TempTagPair)it_ttp.next();
if (temp==ttp.getTemp()) {
Vector<FlagState> oldprocess=processed;
processed=new Vector<FlagState>();
- for (Enumeration en=oldprocess.elements(); en.hasMoreElements();) {
+ for (Enumeration en=oldprocess.elements(); en.hasMoreElements(); ) {
FlagState fsworking=(FlagState)en.nextElement();
if (ffan.getTagChange(ttp)) {
processed.addAll(Arrays.asList(fsworking.setTag(ttp.getTag())));
boolean BoolValTable[]=new boolean[externs];
- for(int i=0; i < externs ; i++) {
+ for(int i=0; i < externs; i++) {
BoolValTable[i]=fs.get(fd[i]);
}
for(int k=0; k<noOfIterations; k++) {
- for(int j=0; j < externs ; j++) {
+ for(int j=0; j < externs; j++) {
if ((k% (1<<j)) == 0)
BoolValTable[j]=(!BoolValTable[j]);
}
this.cdtonodes=new Hashtable();
this.alltasknodes=new Hashtable<TaskNode,TaskNode>();
- for(Iterator classit=state.getClassSymbolTable().getDescriptorsIterator(); classit.hasNext();) {
+ for(Iterator classit=state.getClassSymbolTable().getDescriptorsIterator(); classit.hasNext(); ) {
ClassDescriptor cd=(ClassDescriptor) classit.next();
if (cd.hasFlags())
produceTaskNodes(cd);
public void createDOTfiles() {
- for(Iterator it_classes=(Iterator)cdtonodes.keys(); it_classes.hasNext();) {
+ for(Iterator it_classes=(Iterator)cdtonodes.keys(); it_classes.hasNext(); ) {
ClassDescriptor cd=(ClassDescriptor) it_classes.next();
Set tasknodes=getTaskNodes(cd);
if (tasknodes!=null) {
Hashtable<TaskNode,TaskNode> tasknodes=new Hashtable<TaskNode,TaskNode>();
cdtonodes.put(cd, tasknodes);
- for(Iterator it=fsnodes.iterator(); it.hasNext();) {
+ for(Iterator it=fsnodes.iterator(); it.hasNext(); ) {
FlagState fs=(FlagState)it.next();
Iterator it_inedges=fs.inedges();
TaskNode tn,sn;
if (fs.isSourceNode()) {
Vector src=fs.getAllocatingTasks();
- for(Iterator it2=src.iterator(); it2.hasNext();) {
+ for(Iterator it2=src.iterator(); it2.hasNext(); ) {
TaskDescriptor td=(TaskDescriptor)it2.next();
sn=new TaskNode(td.getSymbol());
if(fs.edges().hasNext()) {
private void produceAllTaskNodes() {
alltasknodes=new Hashtable<TaskNode,TaskNode>();
- for(Iterator it_tasks=state.getTaskSymbolTable().getDescriptorsIterator(); it_tasks.hasNext();) {
+ for(Iterator it_tasks=state.getTaskSymbolTable().getDescriptorsIterator(); it_tasks.hasNext(); ) {
TaskDescriptor td=(TaskDescriptor)it_tasks.next();
TaskNode tn=new TaskNode(td.getSymbol());
alltasknodes.put(tn,tn);
alltasknodes.put(tn_runtime,tn_runtime);
int ColorID=0;
- for(Iterator classit=state.getClassSymbolTable().getDescriptorsIterator(); classit.hasNext()&&ColorID<10;) {
+ for(Iterator classit=state.getClassSymbolTable().getDescriptorsIterator(); classit.hasNext()&&ColorID<10; ) {
ClassDescriptor cd=(ClassDescriptor) classit.next();
Set fsnodes;
//
System.out.println("\nWorking on fses of Class: "+cd.getSymbol());
//
- for(Iterator it=fsnodes.iterator(); it.hasNext();) {
+ for(Iterator it=fsnodes.iterator(); it.hasNext(); ) {
FlagState fs=(FlagState)it.next();
//
System.out.println("Evaluating fs: "+fs.getTextLabel());
if (allocatingtasks.iterator().hasNext())
System.out.println("has been allocated by "+allocatingtasks.size()+" tasks");
//
- for(Iterator it_at=allocatingtasks.iterator(); it_at.hasNext();) {
+ for(Iterator it_at=allocatingtasks.iterator(); it_at.hasNext(); ) {
TaskDescriptor allocatingtd=(TaskDescriptor)it_at.next();
//
System.out.println(allocatingtd.getSymbol());
// Hashtable<TaskNode,TaskNode> tasknodes=(Hashtable<TaskNode,TaskNode>)cdtonodes.get(fs.getClassDescriptor());
tn=(TaskNode)canonicalizeTaskNode(tasknodes, tn);
- for (Iterator it_edges=fs.edges(); it_edges.hasNext();) {
+ for (Iterator it_edges=fs.edges(); it_edges.hasNext(); ) {
TaskNode target=new TaskNode(((FEdge)it_edges.next()).getLabel());
target=(TaskNode)canonicalizeTaskNode(tasknodes,target);
private void addEdges(FlagState fs, TaskNode tn,Hashtable<TaskNode,TaskNode> tasknodes,int ColorID) {
tn=(TaskNode)canonicalizeTaskNode(tasknodes, tn);
- for (Iterator it_edges=fs.edges(); it_edges.hasNext();) {
+ for (Iterator it_edges=fs.edges(); it_edges.hasNext(); ) {
TaskNode target=new TaskNode(((FEdge)it_edges.next()).getLabel());
target=(TaskNode)canonicalizeTaskNode(tasknodes,target);
TagExpressionList tel=td.getTag(vd);
int j;
if (needinit) {
- j=(tel!=null)&&tel.numTags()>0 ? tel.numTags()-1 : 0;
+ j=(tel!=null)&&tel.numTags()>0?tel.numTags()-1:0;
needinit=false;
} else
j=0;
this.fsresults=new Hashtable<ClassDescriptor, Set<TagState>>();
- for(Iterator taskit=state.getTaskSymbolTable().getDescriptorsIterator(); taskit.hasNext();) {
+ for(Iterator taskit=state.getTaskSymbolTable().getDescriptorsIterator(); taskit.hasNext(); ) {
TaskDescriptor td=(TaskDescriptor)taskit.next();
tasktable.put(td, new TaskQueue(td));
}
private void doOutput() {
try {
- for(Iterator<TagDescriptor> tagit=tsresults.keySet().iterator(); tagit.hasNext();) {
+ for(Iterator<TagDescriptor> tagit=tsresults.keySet().iterator(); tagit.hasNext(); ) {
TagDescriptor tag=tagit.next();
Set<TagState> set=tsresults.get(tag);
File dotfile_flagstates= new File("tag"+tag.getSymbol()+".dot");
FileOutputStream dotstream=new FileOutputStream(dotfile_flagstates,false);
TagState.DOTVisitor.visit(dotstream,set);
}
- for(Iterator<ClassDescriptor> cdit=fsresults.keySet().iterator(); cdit.hasNext();) {
+ for(Iterator<ClassDescriptor> cdit=fsresults.keySet().iterator(); cdit.hasNext(); ) {
ClassDescriptor cd=cdit.next();
Set<TagState> set=fsresults.get(cd);
File dotfile_flagstates= new File("class"+cd.getSymbol()+".dot");
TagState ts=toprocess.iterator().next();
toprocess.remove(ts);
//Loop through each task
- for(Iterator taskit=state.getTaskSymbolTable().getDescriptorsIterator(); taskit.hasNext();) {
+ for(Iterator taskit=state.getTaskSymbolTable().getDescriptorsIterator(); taskit.hasNext(); ) {
TaskDescriptor td=(TaskDescriptor)taskit.next();
TaskQueue tq=tasktable.get(td);
processTask(td, tq, ts);
private void processTask(TaskDescriptor td, TaskQueue tq, TagState ts) {
Set<FlagState> flagset=ts.getFS();
- for(Iterator<FlagState> fsit=flagset.iterator(); fsit.hasNext();) {
+ for(Iterator<FlagState> fsit=flagset.iterator(); fsit.hasNext(); ) {
FlagState fs=fsit.next();
FlagTagState fts=new FlagTagState(ts, fs);
for(int i=0; i<td.numParameters(); i++) {
Hashtable<TempDescriptor, Wrapper> prevtable=maintable.get(fn);
//Iterator through the Tags
- for(Iterator<TempDescriptor> tmpit=prevtable.keySet().iterator(); tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=prevtable.keySet().iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
Wrapper prevtag=prevtable.get(tmp);
if (prevtag instanceof ObjWrapper)
TagWrapper currtag=(TagWrapper) table.get(tmp);
tagtable.put((TagWrapper)prevtag, currtag);
assert(currtag.initts.equals(((TagWrapper)prevtag).initts));
- for(Iterator<TagState> tagit=((TagWrapper)prevtag).ts.iterator(); tagit.hasNext();) {
+ for(Iterator<TagState> tagit=((TagWrapper)prevtag).ts.iterator(); tagit.hasNext(); ) {
TagState tag=tagit.next();
if (!currtag.ts.contains(tag)) {
currtag.ts.add(tag);
}
//Iterator through the Objects
- for(Iterator<TempDescriptor> tmpit=prevtable.keySet().iterator(); tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=prevtable.keySet().iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
Wrapper obj=prevtable.get(tmp);
if (obj instanceof TagWrapper)
}
ObjWrapper currobj=(ObjWrapper) table.get(tmp);
assert(currobj.initfs.equals(prevobj.initfs));
- for(Iterator<TagWrapper> tagit=prevobj.tags.iterator(); tagit.hasNext();) {
+ for(Iterator<TagWrapper> tagit=prevobj.tags.iterator(); tagit.hasNext(); ) {
TagWrapper tprev=tagit.next();
TagWrapper t=tagtable.get(tprev);
currobj.tags.add(t);
}
- for(Iterator<FlagState> flagit=prevobj.fs.iterator(); flagit.hasNext();) {
+ for(Iterator<FlagState> flagit=prevobj.fs.iterator(); flagit.hasNext(); ) {
FlagState fs=flagit.next();
currobj.fs.add(fs);
}
private void setFlag(ObjWrapper ow, FlagDescriptor fd, boolean value) {
HashSet<FlagState> newstate=new HashSet<FlagState>();
Hashtable<FlagState, FlagState> flagmap=new Hashtable<FlagState, FlagState>();
- for(Iterator<FlagState> flagit=ow.fs.iterator(); flagit.hasNext();) {
+ for(Iterator<FlagState> flagit=ow.fs.iterator(); flagit.hasNext(); ) {
FlagState fs=flagit.next();
FlagState fsnew=canonical(fs.setFlag(fd, value));
newstate.add(fsnew);
flagmap.put(fs, fsnew);
}
- for(Iterator<TagWrapper> tagit=ow.tags.iterator(); tagit.hasNext();) {
+ for(Iterator<TagWrapper> tagit=ow.tags.iterator(); tagit.hasNext(); ) {
TagWrapper tw=tagit.next();
HashSet<TagState> newstates=new HashSet<TagState>();
- for(Iterator<TagState> tgit=tw.ts.iterator(); tgit.hasNext();) {
+ for(Iterator<TagState> tgit=tw.ts.iterator(); tgit.hasNext(); ) {
TagState ts=tgit.next();
- for(Iterator<FlagState> flagit=ts.getFS().iterator(); flagit.hasNext();) {
+ for(Iterator<FlagState> flagit=ts.getFS().iterator(); flagit.hasNext(); ) {
FlagState fs=flagit.next();
if (flagmap.containsKey(fs)) {
if (flagmap.get(fs).equals(fs)) {
HashSet<FlagState> newfsstates=new HashSet<FlagState>();
Hashtable<FlagState, FlagState[]> flagmap=new Hashtable<FlagState, FlagState[]>();
//Change the flag states
- for(Iterator<FlagState> fsit=ow.fs.iterator(); fsit.hasNext();) {
+ for(Iterator<FlagState> fsit=ow.fs.iterator(); fsit.hasNext(); ) {
FlagState fs=fsit.next();
FlagState[] fsnew=canonical(fs.setTag(tag, value));
flagmap.put(fs, fsnew);
newfsstates.addAll(Arrays.asList(fsnew));
}
- for(Iterator<TagWrapper> tagit=ow.tags.iterator(); tagit.hasNext();) {
+ for(Iterator<TagWrapper> tagit=ow.tags.iterator(); tagit.hasNext(); ) {
TagWrapper tw=tagit.next();
HashSet<TagState> newstates=new HashSet<TagState>();
- for(Iterator<TagState> tgit=tw.ts.iterator(); tgit.hasNext();) {
+ for(Iterator<TagState> tgit=tw.ts.iterator(); tgit.hasNext(); ) {
TagState ts=tgit.next();
- for(Iterator<FlagState> flagit=ts.getFS().iterator(); flagit.hasNext();) {
+ for(Iterator<FlagState> flagit=ts.getFS().iterator(); flagit.hasNext(); ) {
FlagState fs=flagit.next();
if (flagmap.containsKey(fs)) {
FlagState[] fmap=flagmap.get(fs);
{
HashSet<TagState> newstates=new HashSet<TagState>();
- for(Iterator<TagState> tgit=twnew.ts.iterator(); tgit.hasNext();) {
+ for(Iterator<TagState> tgit=twnew.ts.iterator(); tgit.hasNext(); ) {
TagState ts=tgit.next();
- for(Iterator<FlagState> flagit=newfsstates.iterator(); flagit.hasNext();) {
+ for(Iterator<FlagState> flagit=newfsstates.iterator(); flagit.hasNext(); ) {
FlagState fsnew=flagit.next();
//Can do strong update here because these must
//be parameter objects...therefore all
private void evalTaskExitNode(FlatFlagActionNode fn, Hashtable<TempDescriptor, Wrapper> table) {
//Process clears first
- for(Iterator<TempTagPair> it_ttp=fn.getTempTagPairs(); it_ttp.hasNext();) {
+ for(Iterator<TempTagPair> it_ttp=fn.getTempTagPairs(); it_ttp.hasNext(); ) {
TempTagPair ttp=it_ttp.next();
TempDescriptor tmp=ttp.getTemp();
TagDescriptor tag=ttp.getTag();
}
//Do the flags next
- for(Iterator<TempFlagPair> it_tfp=fn.getTempFlagPairs(); it_tfp.hasNext();) {
+ for(Iterator<TempFlagPair> it_tfp=fn.getTempFlagPairs(); it_tfp.hasNext(); ) {
TempFlagPair tfp=it_tfp.next();
TempDescriptor tmp=tfp.getTemp();
FlagDescriptor fd=tfp.getFlag();
}
//Process sets last
- for(Iterator it_ttp=fn.getTempTagPairs(); it_ttp.hasNext();) {
+ for(Iterator it_ttp=fn.getTempTagPairs(); it_ttp.hasNext(); ) {
TempTagPair ttp=(TempTagPair)it_ttp.next();
TempDescriptor tmp=ttp.getTemp();
TagDescriptor tag=ttp.getTag();
ow.fs.add(fs);
table.put(fntemp, ow);
//Do the flags first
- for(Iterator<TempFlagPair> it_tfp=fn.getTempFlagPairs(); it_tfp.hasNext();) {
+ for(Iterator<TempFlagPair> it_tfp=fn.getTempFlagPairs(); it_tfp.hasNext(); ) {
TempFlagPair tfp=it_tfp.next();
TempDescriptor tmp=tfp.getTemp();
FlagDescriptor fd=tfp.getFlag();
setFlag(ow, fd, newflagstate);
}
//Process sets next
- for(Iterator it_ttp=fn.getTempTagPairs(); it_ttp.hasNext();) {
+ for(Iterator it_ttp=fn.getTempTagPairs(); it_ttp.hasNext(); ) {
TempTagPair ttp=(TempTagPair)it_ttp.next();
TempDescriptor tmp=ttp.getTemp();
TagDescriptor tag=ttp.getTag();
else
throw new Error("Can't clear tag in newly allocated object");
}
- for(Iterator<FlagState> fsit=ow.fs.iterator(); fsit.hasNext();) {
+ for(Iterator<FlagState> fsit=ow.fs.iterator(); fsit.hasNext(); ) {
FlagState fs2=fsit.next();
fs2.addAllocatingTask(td);
TagState ts2=new TagState(fs2.getClassDescriptor());
public void recordtagchange(TagWrapper tw, TaskDescriptor td) {
TagState init=tw.initts;
- for(Iterator<TagState> tsit=tw.ts.iterator(); tsit.hasNext();) {
+ for(Iterator<TagState> tsit=tw.ts.iterator(); tsit.hasNext(); ) {
TagState ts=tsit.next();
if (init==null) {
ts.addSource(td);
}
private void recordobj(ObjWrapper ow, TaskDescriptor td) {
- for(Iterator<TagWrapper> twit=ow.tags.iterator(); twit.hasNext();) {
+ for(Iterator<TagWrapper> twit=ow.tags.iterator(); twit.hasNext(); ) {
TagWrapper tw=twit.next();
recordtagchange(tw, td);
}
}
private void processFlatReturnNode(FlatReturnNode fr, Hashtable<TempDescriptor, Wrapper> table, TaskDescriptor td) {
- for(Iterator<TempDescriptor> tmpit=table.keySet().iterator(); tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=table.keySet().iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
Wrapper w=table.get(tmp);
if (w instanceof TagWrapper) {
if (table1.keySet().size()!=table2.keySet().size())
return false;
- for(Iterator<TempDescriptor> tmpit=table1.keySet().iterator(); tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=table1.keySet().iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
if (table2.containsKey(tmp)) {
emap.put(table1.get(tmp), table2.get(tmp));
} else return false;
}
- for(Iterator<TempDescriptor> tmpit=table1.keySet().iterator(); tmpit.hasNext();) {
+ for(Iterator<TempDescriptor> tmpit=table1.keySet().iterator(); tmpit.hasNext(); ) {
TempDescriptor tmp=tmpit.next();
Wrapper w1=table1.get(tmp);
Wrapper w2=table2.get(tmp);
return false;
if (t1.tags.size()!=t2.tags.size())
return false;
- for(Iterator<TagWrapper> twit=t1.tags.iterator(); twit.hasNext();) {
+ for(Iterator<TagWrapper> twit=t1.tags.iterator(); twit.hasNext(); ) {
TagWrapper tw1=twit.next();
if (!t2.tags.contains(emap.get(tw1)))
return false;
private static boolean isTaskTrigger_tag(TagExpressionList tel, FlagState fs) {
if (tel!=null) {
- for (int i=0; i<tel.numTags() ; i++) {
+ for (int i=0; i<tel.numTags(); i++) {
switch (fs.getTagCount(tel.getType(i))) {
case FlagState.ONETAG:
case FlagState.MULTITAGS:
MAINCLASS=LookUpService
SRC1=${MAINCLASS}.java \
DistributedHashMap.java
-FLAGS2=-dsm -transstats -dsmcaching -prefetch -optimize -excprefetch String.hashCode -excprefetch DistributedHashMap.resize -excprefetch String.equals -excprefetch LookUpService.main -mainclass ${MAINCLASS} -trueprob 0.95
-FLAGS3=-dsm -transstats -optimize -mainclass ${MAINCLASS}
-FLAGS4=-dsm -dsmcaching -transstats -debug -optimize -mainclass ${MAINCLASS}
+FLAGS2=-dsm -transstats -dsmcaching -prefetch -optimize -excprefetch String.hashCode -excprefetch DistributedHashMap.resize -excprefetch String.equals -excprefetch LookUpService.main -mainclass ${MAINCLASS} -trueprob 0.95 -32bit
+FLAGS3=-dsm -transstats -optimize -mainclass ${MAINCLASS} -32bit
+FLAGS4=-dsm -dsmcaching -transstats -debug -optimize -mainclass ${MAINCLASS} -32bit
default:
../../../../buildscript ${FLAGS3} -o ${MAINCLASS}NPNC ${SRC1}
../../../../buildscript ${FLAGS4} -o ${MAINCLASS}NPC ${SRC1}
Node.java \
AStarPathFinder.java
-FLAGS1=-dsm -transstats -optimize -mainclass ${MAINCLASS}
-FLAGS2=-dsm -transstats -dsmcaching -optimize -mainclass ${MAINCLASS}
-FLAGS3=-dsm -dsmcaching -transstats -prefetch -optimize -mainclass ${MAINCLASS} -excprefetch BarrierServer.updateAge -excprefetch RainForest.main -excprefetch GameMap.hasRock -excprefetch GameMap.hasTree -trueprob 0.90
+FLAGS1=-dsm -transstats -optimize -mainclass ${MAINCLASS} -32bit
+FLAGS2=-dsm -transstats -dsmcaching -optimize -mainclass ${MAINCLASS} -32bit
+FLAGS3=-dsm -dsmcaching -transstats -prefetch -optimize -mainclass ${MAINCLASS} -excprefetch BarrierServer.updateAge -excprefetch RainForest.main -excprefetch GameMap.hasRock -excprefetch GameMap.hasTree -trueprob 0.90 -32bit
-FLAGSNPNC=-dsm -optimize -mainclass ${MAINCLASS}
-FLAGSNPC=-dsm -dsmcaching -optimize -mainclass ${MAINCLASS}
-FLAGSP=-dsm -dsmcaching -prefetch -optimize -mainclass ${MAINCLASS} -excprefetch BarrierServer.updateAge -excprefetch RainForest.main -excprefetch GameMap.hasRock -excprefetch GameMap.hasTree -trueprob 0.90
+FLAGSNPNC=-dsm -optimize -mainclass ${MAINCLASS} -32bit
+FLAGSNPC=-dsm -dsmcaching -optimize -mainclass ${MAINCLASS} -32bit
+FLAGSP=-dsm -dsmcaching -prefetch -optimize -mainclass ${MAINCLASS} -excprefetch BarrierServer.updateAge -excprefetch RainForest.main -excprefetch GameMap.hasRock -excprefetch GameMap.hasTree -trueprob 0.90 -32bit
default:
public class Character {
-
+
public static int digit(char ch, int radix) {
if (ch>='0'&&ch<='9')
return ch-'0';
}
return -1;
}
-
+
public static boolean isDigit(char ch) {
// TODO This is a temparory implementation, there are other groups of digits
// besides '0' ~ '9'
if (ch>='0'&&ch<='9')
return true;
- else
+ else
return false;
}
}
return returnValue;
}
-
+
public static final int MIN_RADIX = 2;
public static final int MAX_RADIX = 36;
-
+
public static char forDigit(int digit, int radix) {
if ((digit >= radix) || (digit < 0)) {
- return '\0';
+ return '\0';
}
if ((radix < Character.MIN_RADIX) || (radix > Character.MAX_RADIX)) {
- return '\0';
+ return '\0';
}
if (digit < 10) {
- return (char)('0' + digit);
+ return (char)('0' + digit);
}
return (char)('a' - 10 + digit);
-}
+ }
}
public class Enumeration {
-
- public Enumeration(){}
+
+ public Enumeration() {
+ }
public boolean hasMoreElements() {
return false;
}
-
+
public Object nextElement() {
return null;
}
public FileOutputStreamOpen(String pathname) {
fd = nativeOpen(pathname.getBytes());
}
-
+
public FileOutputStream(FileDescriptor fdObj) {
fd = nativeOpen(fdObj.channel.getBytes());
}
-public class HashMap{
+public class HashMap {
HashEntry[] table;
float loadFactor;
int numItems;
this.numItems=0;
this.threshold=(int)(loadFactor*table.length);
}
-
+
private static int computeCapacity(int capacity) {
- int x=16;
- while(x<capacity)
- x=x<<1;
- return x;
+ int x=16;
+ while(x<capacity)
+ x=x<<1;
+ return x;
}
private static int hash(Object o, int length) {
- int orig=o.hashCode();
- orig=orig^(orig>>>22)^(orig>>>10);
- orig=orig^(orig>>>8)^(orig>>4);
- return orig&(length-1);
+ int orig=o.hashCode();
+ orig=orig^(orig>>>22)^(orig>>>10);
+ orig=orig^(orig>>>8)^(orig>>4);
+ return orig&(length-1);
}
void resize() {
}
}
- public void clear() {
- for(int i=0;i<table.length;i++)
- table[i]=null;
- numItems=0;
- }
+ public void clear() {
+ for(int i=0; i<table.length; i++)
+ table[i]=null;
+ numItems=0;
+ }
public boolean isEmpty() {
return numItems==0;
public static InetAddress getByName(String hostname) {
InetAddress[] addresses=getAllByName(hostname);
- if (addresses != null)
- return addresses[0];
- else
- return null;
+ if (addresses != null)
+ return addresses[0];
+ else
+ return null;
}
public byte[] getAddress() {
byte[][] iplist = InetAddress.getHostByName(hostname.getBytes());
- if (iplist != null) {
- addresses = new InetAddress[iplist.length];
+ if (iplist != null) {
+ addresses = new InetAddress[iplist.length];
- for (int i = 0; i < iplist.length; i++) {
- addresses[i] = new InetAddress(iplist[i], hostname);
- }
- return addresses;
- }
- else
- return null;
+ for (int i = 0; i < iplist.length; i++) {
+ addresses[i] = new InetAddress(iplist[i], hostname);
+ }
+ return addresses;
+ } else
+ return null;
}
public static native byte[][] getHostByName(byte[] hostname);
}
/*
- public int byteArrayToInt(byte [] b) {
- int val;
- val = b[0] << 24 + ((b[1] & 0xFF) << 16) + ((b[2] & 0xFF) << 8) + (b[3] & 0xFF);
- return val;
- }
- */
+ public int byteArrayToInt(byte [] b) {
+ int val;
+ val = b[0] << 24 + ((b[1] & 0xFF) << 16) + ((b[2] & 0xFF) << 8) + (b[3] & 0xFF);
+ return val;
+ }
+ */
public static int parseInt(String str) {
return Integer.parseInt(str, 10);
return false;
return true;
}
-
+
public int compareTo(Integer i) {
if (value == i.value)
return 0;
// Returns just -1 or 1 on inequality; doing math might overflow.
- return value > i.value ? 1 : -1;
+ return value > i.value?1:-1;
}
}
LinkedListElement e = head;
while (e != null) {
if (e.element == o) {
- if (e.prev != null) {
- e.prev.next = e.next;
- }
- if (e.next != null) {
- e.next.prev = e.prev;
- }
- size--;
- return true;
+ if (e.prev != null) {
+ e.prev.next = e.next;
+ }
+ if (e.next != null) {
+ e.next.prev = e.prev;
+ }
+ size--;
+ return true;
}
e = e.next;
}
public static long max(long a, long b) {
return (a>b)?a:b;
}
-
+
public static double min(double a, double b) {
return (a<b)?a:b;
}
public class Objectwrapper {
- public Objectwrapper() {}
- Object value;
+ public Objectwrapper() {
+ }
+ Object value;
}
\ No newline at end of file
* University of California, Irvine
*
* =============================================================================
- *
+ *
* Unless otherwise noted, the following license applies to STAMP files:
- *
+ *
* Copyright (c) 2007, Stanford University
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- *
+ *
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- *
+ *
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
Object[] elements;
public Queue() {
- Queue(10);
+ Queue(10);
}
/* =============================================================================
* queue_alloc
* =============================================================================
*/
- public Queue (int initCapacity)
- {
+ public Queue (int initCapacity) {
QUEUE_GROWTH_FACTOR = 2;
- capacity = ((initCapacity < 2) ? 2 : initCapacity);
+ capacity = ((initCapacity < 2)?2:initCapacity);
elements = new Object[capacity];
size = 0;
* =============================================================================
*/
public boolean
- isEmpty ()
- {
- return (((pop + 1) % capacity == push) ? true : false);
- }
+ isEmpty() {
+ return (((pop + 1) % capacity == push)?true:false);
+ }
/* =============================================================================
* =============================================================================
*/
public void
- queue_clear ()
- {
- pop = capacity - 1;
- push = 0;
- }
+ queue_clear() {
+ pop = capacity - 1;
+ push = 0;
+ }
/* =============================================================================
* queue_push
* =============================================================================
*/
public boolean
- push (Object dataPtr)
- {
- if(pop == push) {
+ push(Object dataPtr) {
+ if(pop == push) {
// System.out.println("push == pop in Queue.java");
- return false;
+ return false;
+ }
+
+ /* Need to resize */
+ int newPush = (push + 1) % capacity;
+ if (newPush == pop) {
+
+ int newCapacity = capacity * QUEUE_GROWTH_FACTOR;
+ Object[] newElements = new Object[newCapacity];
+
+ if (newElements == null) {
+ return false;
}
- /* Need to resize */
- int newPush = (push + 1) % capacity;
- if (newPush == pop) {
-
- int newCapacity = capacity * QUEUE_GROWTH_FACTOR;
- Object[] newElements = new Object[newCapacity];
-
- if (newElements == null) {
- return false;
- }
-
- int dst = 0;
- Object[] tmpelements = elements;
- if (pop < push) {
- int src;
- for (src = (pop + 1); src < push; src++, dst++) {
- newElements[dst] = elements[src];
- }
- } else {
- int src;
- for (src = (pop + 1); src < capacity; src++, dst++) {
- newElements[dst] = elements[src];
- }
- for (src = 0; src < push; src++, dst++) {
- newElements[dst] = elements[src];
- }
- }
-
- //elements = null;
- elements = newElements;
- pop = newCapacity - 1;
- capacity = newCapacity;
- push = dst;
- newPush = push + 1; /* no need modulo */
+ int dst = 0;
+ Object[] tmpelements = elements;
+ if (pop < push) {
+ int src;
+ for (src = (pop + 1); src < push; src++, dst++) {
+ newElements[dst] = elements[src];
+ }
+ } else {
+ int src;
+ for (src = (pop + 1); src < capacity; src++, dst++) {
+ newElements[dst] = elements[src];
+ }
+ for (src = 0; src < push; src++, dst++) {
+ newElements[dst] = elements[src];
+ }
}
- size++;
- elements[push] = dataPtr;
- push = newPush;
- return true;
+ //elements = null;
+ elements = newElements;
+ pop = newCapacity - 1;
+ capacity = newCapacity;
+ push = dst;
+ newPush = push + 1; /* no need modulo */
}
+ size++;
+ elements[push] = dataPtr;
+ push = newPush;
+
+ return true;
+ }
/* =============================================================================
* =============================================================================
*/
public Object
- pop ()
- {
- int newPop = (pop + 1) % capacity;
- if (newPop == push) {
- return null;
- }
-
- //Object dataPtr = queuePtr.elements[newPop];
- //queuePtr.pop = newPop;
- Object dataPtr = elements[newPop];
- pop = newPop;
- size--;
- return dataPtr;
+ pop() {
+ int newPop = (pop + 1) % capacity;
+ if (newPop == push) {
+ return null;
}
- public int size()
- {
+
+ //Object dataPtr = queuePtr.elements[newPop];
+ //queuePtr.pop = newPop;
+ Object dataPtr = elements[newPop];
+ pop = newPop;
+ size--;
+ return dataPtr;
+ }
+ public int size() {
return size;
}
this.count=length;
this.offset=0;
}
-
+
public String(byte str[], String encoding) {
int length = this.count;
if (length>(str.length))
this.count=length;
this.offset=0;
}
-
+
public String(char str[], int offset, int length) {
if (length>(str.length-offset))
length=str.length-offset;
public int lastIndexOf(char ch) {
return this.lastindexOf((int)ch, count - 1);
}
-
+
public static String concat2(String s1, String s2) {
if (s1==null)
return "null".concat(s2);
return -1;
}
- public int indexOfIgnoreCase(String str, int fromIndex) {
- if (fromIndex < 0)
- fromIndex = 0;
- }
+ public int indexOfIgnoreCase(String str, int fromIndex) {
+ if (fromIndex < 0)
+ fromIndex = 0;
+ }
public int lastIndexOf(String str, int fromIndex) {
int k=count-str.count;
str[i]=(byte)value[i+offset];
return str;
}
-
+
public void getChars(char dst[], int dstBegin) {
getChars(0, count, dst, dstBegin);
}
-
+
public void getChars(int srcBegin, int srcEnd, char dst[], int dstBegin) {
if((srcBegin < 0) || (srcEnd > count) || (srcBegin > srcEnd)) {
// FIXME
s.value=chararray;
return s;
}
-
+
public static native int convertdoubletochar(double val, char [] chararray);
public static String valueOf(long x) {
int cnt =0;
// skip first spaces
- for(i = 0; i< count;i++) {
- if(value[i+offset] != '\n' && value[i+offset] != '\t' && value[i+offset] != ' ')
- break;
+ for(i = 0; i< count; i++) {
+ if(value[i+offset] != '\n' && value[i+offset] != '\t' && value[i+offset] != ' ')
+ break;
}
int oldi=i;
while(i<count) {
if(value[i+offset] == '\n' || value[i+offset] == '\t' || value[i+offset] == ' ') {
- String t=new String();
- t.value=value;
- t.offset=oldi;
- t.count=i-oldi;
- splitted.addElement(t);
-
- // skip extra spaces
- while( i < count && ( value[i+offset] == '\n' || value[i+offset] == '\t' || value[i+offset] == ' ')) {
- i++;
- }
- oldi=i;
- } else {
- i++;
- }
- }
-
- if(i!=oldi) {
String t=new String();
t.value=value;
t.offset=oldi;
t.count=i-oldi;
splitted.addElement(t);
+
+ // skip extra spaces
+ while( i < count && ( value[i+offset] == '\n' || value[i+offset] == '\t' || value[i+offset] == ' ')) {
+ i++;
+ }
+ oldi=i;
+ } else {
+ i++;
+ }
}
-
+
+ if(i!=oldi) {
+ String t=new String();
+ t.value=value;
+ t.offset=oldi;
+ t.count=i-oldi;
+ splitted.addElement(t);
+ }
+
return splitted;
}
- public boolean contains(String str)
- {
+ public boolean contains(String str) {
int i,j;
char[] strChar = str.toCharArray();
int cnt;
for(i = 0; i < count; i++) {
if(value[i] == strChar[0]) {
- cnt=0;
- for(j=0; j < str.length() && i+j < count;j++) {
- if(value[i+j] == strChar[j])
- cnt++;
- }
- if(cnt == str.length())
- return true;
+ cnt=0;
+ for(j=0; j < str.length() && i+j < count; j++) {
+ if(value[i+j] == strChar[j])
+ cnt++;
+ }
+ if(cnt == str.length())
+ return true;
}
}
return false;
}
-
+
public String trim() {
int len = count;
int st = 0;
while ((st < len) && (val[off + len - 1] <= ' ')) {
len--;
}
- return ((st > 0) || (len < count)) ? substring(st, len) : this;
+ return ((st > 0) || (len < count))?substring(st, len):this;
}
-
+
public boolean matches(String regex) {
System.println("String.matches() is not fully supported");
return this.equals(regex);
count=0;
}
- public StringBuffer(int i) {
- value=new char[i];
- count=0;
- }
+ public StringBuffer(int i) {
+ value=new char[i];
+ count=0;
+ }
public int length() {
return count;
return this;
}
- public void ensureCapacity(int i) {
- int size=2*count;
- if (i>size)
- size=i;
- if (i>value.length) {
- char newvalue[]=new char[i];
- for(int ii=0;ii<count;ii++)
- newvalue[ii]=value[ii];
- value=newvalue;
- }
+ public void ensureCapacity(int i) {
+ int size=2*count;
+ if (i>size)
+ size=i;
+ if (i>value.length) {
+ char newvalue[]=new char[i];
+ for(int ii=0; ii<count; ii++)
+ newvalue[ii]=value[ii];
+ value=newvalue;
}
+ }
public StringBuffer append(StringBuffer s) {
if ((s.count+count)>value.length) {
}
return this;
}
-
+
public int indexOf(String str) {
return indexOf(str, 0);
}
-
+
public synchronized int indexOf(String str, int fromIndex) {
String vstr = new String(value, 0, count);
return vstr.indexOf(str, fromIndex);
public String toString() {
return new String(this);
}
-
+
public synchronized StringBuffer replace(int start, int end, String str) {
if (start < 0) {
// FIXME
count = newCount;
return this;
}
-
+
void expandCapacity(int minimumCapacity) {
int newCapacity = (value.length + 1) * 2;
if (newCapacity < 0) {
- newCapacity = 0x7fffffff/*Integer.MAX_VALUE*/;
+ newCapacity = 0x7fffffff /*Integer.MAX_VALUE*/;
} else if (minimumCapacity > newCapacity) {
newCapacity = minimumCapacity;
- }
+ }
char newValue[] = new char[newCapacity];
System.arraycopy(value, 0, newValue, 0, count);
value = newValue;
-public class System {
+public class System {
public static void printInt(int x) {
String s=String.valueOf(x);
printString(s);
}
- public static native void gc();
+ public static native void gc();
public static native long currentTimeMillis();
-
+
public static native long microTimes();
public static native long getticks();
public static void println(long o) {
System.printString(""+o+"\n");
}
-
+
public static void println() {
System.printString("\n");
}
this.size=0;
array=new Object[size];
}
-
+
//used for internal cloning
private Vector(int size, int capacityIncrement, Object[] array) {
this.size = size;
this.array = new Object[array.length];
System.arraycopy(array, 0, this.array, 0, size);
}
-
+
public Vector clone() {
return new Vector(size,capacityIncrement, array);
}
removeElementAt(in);
return true;
}
-
+
return false;
}
* @return True if the end of the list has not yet been
* reached.
*/
- public boolean hasNext()
- {
+ public boolean hasNext() {
return pos < size;
}
*
* @return The next object.
*/
- public Object next()
- {
+ public Object next() {
if (pos == size) {
return null; //since we can't throw anything...
}
public class bytewrapper {
- public bytewrapper() {}
- byte value;
+ public bytewrapper() {
+ }
+ byte value;
}
\ No newline at end of file
public class charwrapper {
- public charwrapper() {}
- char value;
+ public charwrapper() {
+ }
+ char value;
}
\ No newline at end of file
public class doublewrapper {
- public doublewrapper() {}
- double value;
+ public doublewrapper() {
+ }
+ double value;
}
\ No newline at end of file
public class floatwrapper {
- public floatwrapper() {}
- float value;
+ public floatwrapper() {
+ }
+ float value;
}
\ No newline at end of file
Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
Free Software Foundation, Inc.
-This file is part of GNU Classpath.
-
-GNU Classpath is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU Classpath is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU Classpath; see the file COPYING. If not, write to the
-Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301 USA.
-
-Linking this library statically or dynamically with other modules is
-making a combined work based on this library. Thus, the terms and
-conditions of the GNU General Public License cover the whole
-combination.
-
-As a special exception, the copyright holders of this library give you
-permission to link this library with independent modules to produce an
-executable, regardless of the license terms of these independent
-modules, and to copy and distribute the resulting executable under
-terms of your choice, provided that you also meet, for each linked
-independent module, the terms and conditions of the license of that
-module. An independent module is a module which is not derived from
-or based on this library. If you modify this library, you may extend
-this exception to your version of the library, but you are not
-obligated to do so. If you do not wish to do so, delete this
-exception statement from your version. */
+ This file is part of GNU Classpath.
+
+ GNU Classpath is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Classpath is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU Classpath; see the file COPYING. If not, write to the
+ Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301 USA.
+
+ Linking this library statically or dynamically with other modules is
+ making a combined work based on this library. Thus, the terms and
+ conditions of the GNU General Public License cover the whole
+ combination.
+
+ As a special exception, the copyright holders of this library give you
+ permission to link this library with independent modules to produce an
+ executable, regardless of the license terms of these independent
+ modules, and to copy and distribute the resulting executable under
+ terms of your choice, provided that you also meet, for each linked
+ independent module, the terms and conditions of the license of that
+ module. An independent module is a module which is not derived from
+ or based on this library. If you modify this library, you may extend
+ this exception to your version of the library, but you are not
+ obligated to do so. If you do not wish to do so, delete this
+ exception statement from your version. */
//package java.lang;
* @param d the <code>double</code> to convert
* @return the <code>String</code> representing the <code>double</code>
*/
- public static String toString(double d) {
+ public static String toString(double d) {
return String.valueOf(d);
}
*/
public static String toHexString(double d) {
/*
- if (isNaN(d))
- return "NaN";
- if (isInfinite(d))
- return d < 0 ? "-Infinity" : "Infinity";
-
- long bits = doubleToLongBits(d);
- StringBuilder result = new StringBuilder();
-
- if (bits < 0)
- result.append('-');
- result.append("0x");
-
- final int mantissaBits = 52;
- final int exponentBits = 11;
- long mantMask = (1L << mantissaBits) - 1;
- long mantissa = bits & mantMask;
- long expMask = (1L << exponentBits) - 1;
- long exponent = (bits >>> mantissaBits) & expMask;
-
- result.append(exponent == 0 ? '0' : '1');
- result.append('.');
- result.append(Long.toHexString(mantissa));
- if (exponent == 0 && mantissa != 0)
- {
+ if (isNaN(d))
+ return "NaN";
+ if (isInfinite(d))
+ return d < 0 ? "-Infinity" : "Infinity";
+
+ long bits = doubleToLongBits(d);
+ StringBuilder result = new StringBuilder();
+
+ if (bits < 0)
+ result.append('-');
+ result.append("0x");
+
+ final int mantissaBits = 52;
+ final int exponentBits = 11;
+ long mantMask = (1L << mantissaBits) - 1;
+ long mantissa = bits & mantMask;
+ long expMask = (1L << exponentBits) - 1;
+ long exponent = (bits >>> mantissaBits) & expMask;
+
+ result.append(exponent == 0 ? '0' : '1');
+ result.append('.');
+ result.append(Long.toHexString(mantissa));
+ if (exponent == 0 && mantissa != 0)
+ {
// Treat denormal specially by inserting '0's to make
// the length come out right. The constants here are
// to account for things like the '0x'.
int desiredLength = offset + (mantissaBits + 3) / 4;
while (result.length() < desiredLength)
result.insert(offset, '0');
- }
- result.append('p');
- if (exponent == 0 && mantissa == 0)
- {
+ }
+ result.append('p');
+ if (exponent == 0 && mantissa == 0)
+ {
// Zero, so do nothing special.
- }
- else
- {
+ }
+ else
+ {
// Apply bias.
boolean denormal = exponent == 0;
exponent -= (1 << (exponentBits - 1)) - 1;
// Handle denormal.
if (denormal)
- ++exponent;
- }
+ ++exponent;
+ }
- result.append(Long.toString(exponent));
- return result.toString();
- */
+ result.append(Long.toString(exponent));
+ return result.toString();
+ */
return "0x0";
}
return new Double(val);
}
- /**
+ /**
* Create a new <code>Double</code> object using the <code>String</code>.
*
* @param s the <code>String</code> to convert
}
public static native double nativeparsedouble(String str);
- public static native double nativeparsedouble(int start, int length, byte[] str);
+ public static native double nativeparsedouble(int start, int length, byte[] str);
/**
* Return <code>true</code> if the <code>double</code> has the same
*
* @return whether this <code>Double</code> is <code>NaN</code>
*/
- public boolean isNaN()
- {
+ public boolean isNaN() {
return isNaN(value);
}
*
* @return whether this <code>Double</code> is (-/+) infinity
*/
- public boolean isInfinite()
- {
+ public boolean isInfinite() {
return isInfinite(value);
}
* @return the <code>String</code> representation
* @see #toString(double)
*/
- public String toString()
- {
+ public String toString() {
return toString(value);
}
* @return the byte value
* @since 1.1
*/
- public byte byteValue()
- {
+ public byte byteValue() {
return (byte) value;
}
* @return the short value
* @since 1.1
*/
- public short shortValue()
- {
+ public short shortValue() {
return (short) value;
}
*
* @return the int value
*/
- public int intValue()
- {
+ public int intValue() {
return (int) value;
}
*
* @return the long value
*/
- public long longValue()
- {
+ public long longValue() {
return (long) value;
}
*
* @return the float value
*/
- public float floatValue()
- {
+ public float floatValue() {
return (float) value;
}
*
* @return the double value
*/
- public double doubleValue()
- {
+ public double doubleValue() {
return value;
}
* @return this Object's hash code
* @see #doubleToLongBits(double)
*/
- public int hashCode()
- {
+ public int hashCode() {
long v = doubleToLongBits(value);
return (int) (v ^ (v >>> 32));
}
* @param obj the object to compare
* @return whether the objects are semantically equal
*/
- public boolean equals(Object obj)
- {
- if (! (obj instanceof Double))
+ public boolean equals(Object obj) {
+ if (!(obj instanceof Double))
return false;
double d = ((Double) obj).value;
* @return the bits of the <code>double</code>
* @see #longBitsToDouble(long)
*/
- public static long doubleToLongBits(double value)
- {
+ public static long doubleToLongBits(double value) {
if (isNaN(value))
return 0x7ff8000000000000L;
else
- return /*VMDouble.*/doubleToRawLongBits(value);
+ return /*VMDouble.*/ doubleToRawLongBits(value);
}
/**
* @see #longBitsToDouble(long)
*/
/*public static long doubleToRawLongBits(double value)
- {
- return VMDouble.doubleToRawLongBits(value);
- }*/
+ {
+ return VMDouble.doubleToRawLongBits(value);
+ }*/
public static native long doubleToRawLongBits(double value);
/**
* @see #doubleToRawLongBits(double)
*/
/*public static double longBitsToDouble(long bits)
- {
- return VMDouble.longBitsToDouble(bits);
- }*/
+ {
+ return VMDouble.longBitsToDouble(bits);
+ }*/
public static native double longBitsToDouble(long bits);
/**
* @return the comparison
* @since 1.2
*/
- public int compareTo(Double d)
- {
+ public int compareTo(Double d) {
return compare(value, d.value);
}
* @return the comparison
* @since 1.4
*/
- public static int compare(double x, double y)
- {
- // handle the easy cases:
- if (x < y)
- return -1;
- if (x > y)
- return 1;
-
- // handle equality respecting that 0.0 != -0.0 (hence not using x == y):
- long lx = doubleToRawLongBits(x);
- long ly = doubleToRawLongBits(y);
- if (lx == ly)
- return 0;
-
- // handle NaNs:
- if (x != x)
- return (y != y) ? 0 : 1;
- else if (y != y)
- return -1;
-
- // handle +/- 0.0
- return (lx < ly) ? -1 : 1;
+ public static int compare(double x, double y) {
+ // handle the easy cases:
+ if (x < y)
+ return -1;
+ if (x > y)
+ return 1;
+
+ // handle equality respecting that 0.0 != -0.0 (hence not using x == y):
+ long lx = doubleToRawLongBits(x);
+ long ly = doubleToRawLongBits(y);
+ if (lx == ly)
+ return 0;
+
+ // handle NaNs:
+ if (x != x)
+ return (y != y)?0:1;
+ else if (y != y)
+ return -1;
+
+ // handle +/- 0.0
+ return (lx < ly)?-1:1;
}
}
Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004
Free Software Foundation, Inc.
-This file is part of GNU Classpath.
-
-GNU Classpath is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU Classpath is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU Classpath; see the file COPYING. If not, write to the
-Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301 USA.
-
-Linking this library statically or dynamically with other modules is
-making a combined work based on this library. Thus, the terms and
-conditions of the GNU General Public License cover the whole
-combination.
-
-As a special exception, the copyright holders of this library give you
-permission to link this library with independent modules to produce an
-executable, regardless of the license terms of these independent
-modules, and to copy and distribute the resulting executable under
-terms of your choice, provided that you also meet, for each linked
-independent module, the terms and conditions of the license of that
-module. An independent module is a module which is not derived from
-or based on this library. If you modify this library, you may extend
-this exception to your version of the library, but you are not
-obligated to do so. If you do not wish to do so, delete this
-exception statement from your version. */
+ This file is part of GNU Classpath.
+
+ GNU Classpath is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Classpath is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU Classpath; see the file COPYING. If not, write to the
+ Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301 USA.
+
+ Linking this library statically or dynamically with other modules is
+ making a combined work based on this library. Thus, the terms and
+ conditions of the GNU General Public License cover the whole
+ combination.
+
+ As a special exception, the copyright holders of this library give you
+ permission to link this library with independent modules to produce an
+ executable, regardless of the license terms of these independent
+ modules, and to copy and distribute the resulting executable under
+ terms of your choice, provided that you also meet, for each linked
+ independent module, the terms and conditions of the license of that
+ module. An independent module is a module which is not derived from
+ or based on this library. If you modify this library, you may extend
+ this exception to your version of the library, but you are not
+ obligated to do so. If you do not wish to do so, delete this
+ exception statement from your version. */
//package java.io;
/*import gnu.java.nio.FileChannelImpl;
-import java.nio.channels.ByteChannel;
-import java.nio.channels.FileChannel;
-*/
+ import java.nio.channels.ByteChannel;
+ import java.nio.channels.FileChannel;
+ */
/**
* This class represents an opaque file handle as a Java class. It should
* be used only to pass to other methods that expect an object of this
*
* @author Aaron M. Renn (arenn@urbanophile.com)
* @author Tom Tromey (tromey@cygnus.com)
- * @date September 24, 1998
+ * @date September 24, 1998
*/
public final class FileDescriptor
{
* <code>System.in</code>variable.
*/
public static final FileDescriptor in
- = new FileDescriptor ("System.in"/*FileChannelImpl.in*/);
+ = new FileDescriptor ("System.in" /*FileChannelImpl.in*/);
/**
* A <code>FileDescriptor</code> representing the system standard output
* <code>System.out</code>variable.
*/
public static final FileDescriptor out
- = new FileDescriptor ("System.out"/*FileChannelImpl.out*/);
+ = new FileDescriptor ("System.out" /*FileChannelImpl.out*/);
/**
* A <code>FileDescriptor</code> representing the system standard error
* <code>System.err</code>variable.
*/
public static final FileDescriptor err
- = new FileDescriptor ("System.err"/*FileChannelImpl.err*/);
+ = new FileDescriptor ("System.err" /*FileChannelImpl.err*/);
//final ByteChannel channel;
final String channel;
/**
* This method is used to initialize an invalid FileDescriptor object.
*/
- public FileDescriptor()
- {
+ public FileDescriptor() {
channel = null;
}
* This method is used to initialize a FileDescriptor object.
*/
/*FileDescriptor(ByteChannel channel)
- {
- this.channel = channel;
- }*/
-
- FileDescriptor(String channel)
- {
+ {
+ this.channel = channel;
+ }*/
+
+ FileDescriptor(String channel) {
this.channel = channel;
}
/**
* This method forces all data that has not yet been physically written to
- * the underlying storage medium associated with this
+ * the underlying storage medium associated with this
* <code>FileDescriptor</code>
* to be written out. This method will not return until all data has
* been fully written to the underlying device. If the device does not
* will be thrown.
*/
/*public void sync () throws SyncFailedException
- {
- if (channel instanceof FileChannel)
+ {
+ if (channel instanceof FileChannel)
{
- try
- {
- ((FileChannel) channel).force(true);
- }
- catch (IOException ex)
- {
- if (ex instanceof SyncFailedException)
- throw (SyncFailedException) ex;
- else
- throw new SyncFailedException(ex.toString());
- }
+ try
+ {
+ ((FileChannel) channel).force(true);
+ }
+ catch (IOException ex)
+ {
+ if (ex instanceof SyncFailedException)
+ throw (SyncFailedException) ex;
+ else
+ throw new SyncFailedException(ex.toString());
+ }
}
- }*/
+ }*/
/**
* This methods tests whether or not this object represents a valid open
* native file handle.
*
- * @return <code>true</code> if this object represents a valid
+ * @return <code>true</code> if this object represents a valid
* native file handle, <code>false</code> otherwise
*/
/*public boolean valid ()
- {
- ByteChannel c = channel;
- return (c != null) && (c.isOpen());
- }*/
+ {
+ ByteChannel c = channel;
+ return (c != null) && (c.isOpen());
+ }*/
}
/* Number.java =- abstract superclass of numeric objects
Copyright (C) 1998, 2001, 2002, 2005 Free Software Foundation, Inc.
-This file is part of GNU Classpath.
-
-GNU Classpath is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU Classpath is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU Classpath; see the file COPYING. If not, write to the
-Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301 USA.
-
-Linking this library statically or dynamically with other modules is
-making a combined work based on this library. Thus, the terms and
-conditions of the GNU General Public License cover the whole
-combination.
-
-As a special exception, the copyright holders of this library give you
-permission to link this library with independent modules to produce an
-executable, regardless of the license terms of these independent
-modules, and to copy and distribute the resulting executable under
-terms of your choice, provided that you also meet, for each linked
-independent module, the terms and conditions of the license of that
-module. An independent module is a module which is not derived from
-or based on this library. If you modify this library, you may extend
-this exception to your version of the library, but you are not
-obligated to do so. If you do not wish to do so, delete this
-exception statement from your version. */
+ This file is part of GNU Classpath.
+
+ GNU Classpath is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Classpath is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU Classpath; see the file COPYING. If not, write to the
+ Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301 USA.
+
+ Linking this library statically or dynamically with other modules is
+ making a combined work based on this library. Thus, the terms and
+ conditions of the GNU General Public License cover the whole
+ combination.
+
+ As a special exception, the copyright holders of this library give you
+ permission to link this library with independent modules to produce an
+ executable, regardless of the license terms of these independent
+ modules, and to copy and distribute the resulting executable under
+ terms of your choice, provided that you also meet, for each linked
+ independent module, the terms and conditions of the license of that
+ module. An independent module is a module which is not derived from
+ or based on this library. If you modify this library, you may extend
+ this exception to your version of the library, but you are not
+ obligated to do so. If you do not wish to do so, delete this
+ exception statement from your version. */
//package java.lang;
/**
* The basic constructor (often called implicitly).
*/
- public Number()
- {
+ public Number() {
digits = new char[36];
- digits[0] = '0';
- digits[1] = '1';
+ digits[0] = '0';
+ digits[1] = '1';
digits[2] = '2';
- digits[3] = '3';
+ digits[3] = '3';
digits[4] = '4';
- digits[5] = '5';
+ digits[5] = '5';
digits[6] = '6';
digits[7] = '7';
digits[8] = '8';
*
* @return the int value
*/
- public /*abstract*/ int intValue(){}
+ public /*abstract*/ int intValue() {
+ }
/**
* Return the value of this <code>Number</code> as a <code>long</code>.
*
* @return the long value
*/
- public /*abstract*/ long longValue(){}
+ public /*abstract*/ long longValue() {
+ }
/**
* Return the value of this <code>Number</code> as a <code>float</code>.
*
* @return the float value
*/
- public /*abstract*/ float floatValue(){}
+ public /*abstract*/ float floatValue() {
+ }
/**
* Return the value of this <code>Number</code> as a <code>float</code>.
*
* @return the double value
*/
- public /*abstract*/ double doubleValue(){}
+ public /*abstract*/ double doubleValue() {
+ }
/**
* Return the value of this <code>Number</code> as a <code>byte</code>.
* @return the byte value
* @since 1.1
*/
- public byte byteValue()
- {
+ public byte byteValue() {
return (byte) intValue();
}
* @return the short value
* @since 1.1
*/
- public short shortValue()
- {
+ public short shortValue() {
return (short) intValue();
}
}
/* StringTokenizer -- breaks a String into tokens
Copyright (C) 1998, 1999, 2001, 2002, 2005 Free Software Foundation, Inc.
-This file is part of GNU Classpath.
+ This file is part of GNU Classpath.
-GNU Classpath is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
+ GNU Classpath is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
-GNU Classpath is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-General Public License for more details.
+ GNU Classpath is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
-You should have received a copy of the GNU General Public License
-along with GNU Classpath; see the file COPYING. If not, write to the
-Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301 USA.
+ You should have received a copy of the GNU General Public License
+ along with GNU Classpath; see the file COPYING. If not, write to the
+ Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301 USA.
-Linking this library statically or dynamically with other modules is
-making a combined work based on this library. Thus, the terms and
-conditions of the GNU General Public License cover the whole
-combination.
+ Linking this library statically or dynamically with other modules is
+ making a combined work based on this library. Thus, the terms and
+ conditions of the GNU General Public License cover the whole
+ combination.
-As a special exception, the copyright holders of this library give you
-permission to link this library with independent modules to produce an
-executable, regardless of the license terms of these independent
-modules, and to copy and distribute the resulting executable under
-terms of your choice, provided that you also meet, for each linked
-independent module, the terms and conditions of the license of that
-module. An independent module is a module which is not derived from
-or based on this library. If you modify this library, you may extend
-this exception to your version of the library, but you are not
-obligated to do so. If you do not wish to do so, delete this
-exception statement from your version. */
+ As a special exception, the copyright holders of this library give you
+ permission to link this library with independent modules to produce an
+ executable, regardless of the license terms of these independent
+ modules, and to copy and distribute the resulting executable under
+ terms of your choice, provided that you also meet, for each linked
+ independent module, the terms and conditions of the license of that
+ module. An independent module is a module which is not derived from
+ or based on this library. If you modify this library, you may extend
+ this exception to your version of the library, but you are not
+ obligated to do so. If you do not wish to do so, delete this
+ exception statement from your version. */
//package java.util;
* @param str The string to split
* @throws NullPointerException if str is null
*/
- public StringTokenizer(String str)
- {
+ public StringTokenizer(String str) {
//this(str, " \t\n\r\f", false);
StringTokenizer(str, " \t\n\r\f", false);
}
* @param delim a string containing all delimiter characters
* @throws NullPointerException if either argument is null
*/
- public StringTokenizer(String str, String delim)
- {
+ public StringTokenizer(String str, String delim) {
//this(str, delim, false);
StringTokenizer(str, delim, false);
}
* @param returnDelims tells, if you want to get the delimiters
* @throws NullPointerException if str or delim is null
*/
- public StringTokenizer(String str, String delim, boolean returnDelims)
- {
+ public StringTokenizer(String str, String delim, boolean returnDelims) {
len = str.length();
this.str = str;
this.delim = delim;
*
* @return true if the next call of nextToken() will succeed
*/
- public boolean hasMoreTokens()
- {
- if (! retDelims)
- {
- while (pos < len && delim.indexOf(str.charAt(pos)) >= 0)
- pos++;
- }
+ public boolean hasMoreTokens() {
+ if (!retDelims) {
+ while (pos < len && delim.indexOf(str.charAt(pos)) >= 0)
+ pos++;
+ }
return pos < len;
}
* @throws NoSuchElementException if there are no more tokens
* @throws NullPointerException if delim is null
*/
- public String nextToken(String delim) //throws NoSuchElementException
- {
+ public String nextToken(String delim) { //throws NoSuchElementException
this.delim = delim;
return nextToken();
}
* @return the next token with respect to the current delimiter characters
* @throws NoSuchElementException if there are no more tokens
*/
- public String nextToken() //throws NoSuchElementException
- {
- if (pos < len && delim.indexOf(str.charAt(pos)) >= 0)
- {
- if (retDelims)
- return str.substring(pos, ++pos);
- while (++pos < len && delim.indexOf(str.charAt(pos)) >= 0)
- ;
- }
- if (pos < len)
- {
- int start = pos;
- while (++pos < len && delim.indexOf(str.charAt(pos)) < 0)
- ;
-
- return str.substring(start, pos);
- }
+ public String nextToken() { //throws NoSuchElementException
+ if (pos < len && delim.indexOf(str.charAt(pos)) >= 0) {
+ if (retDelims)
+ return str.substring(pos, ++pos);
+ while (++pos < len && delim.indexOf(str.charAt(pos)) >= 0)
+ ;
+ }
+ if (pos < len) {
+ int start = pos;
+ while (++pos < len && delim.indexOf(str.charAt(pos)) < 0)
+ ;
+
+ return str.substring(start, pos);
+ }
//throw new NoSuchElementException();
System.printString("StringTokenizer: No such element");
* @return true, if the next call of nextElement() will succeed
* @see #hasMoreTokens()
*/
- public boolean hasMoreElements()
- {
+ public boolean hasMoreElements() {
return hasMoreTokens();
}
* @throws NoSuchElementException if there are no more tokens
* @see #nextToken()
*/
- public Object nextElement() //throws NoSuchElementException
- {
+ public Object nextElement() { //throws NoSuchElementException
return nextToken();
}
* @return the number of times <code>nextTokens()</code> will succeed
* @see #nextToken()
*/
- public int countTokens()
- {
+ public int countTokens() {
int count = 0;
int delimiterCount = 0;
boolean tokenFound = false; // Set when a non-delimiter is found
// Note for efficiency, we count up the delimiters rather than check
// retDelims every time we encounter one. That way, we can
// just do the conditional once at the end of the method
- while (tmpPos < len)
+ while (tmpPos < len) {
+ if (delim.indexOf(str.charAt(tmpPos++)) >= 0) {
+ if (tokenFound) {
+ // Got to the end of a token
+ count++;
+ tokenFound = false;
+ }
+ delimiterCount++; // Increment for this delimiter
+ } else
{
- if (delim.indexOf(str.charAt(tmpPos++)) >= 0)
- {
- if (tokenFound)
- {
- // Got to the end of a token
- count++;
- tokenFound = false;
- }
- delimiterCount++; // Increment for this delimiter
- }
- else
- {
- tokenFound = true;
- // Get to the end of the token
- while (tmpPos < len
- && delim.indexOf(str.charAt(tmpPos)) < 0)
- ++tmpPos;
- }
+ tokenFound = true;
+ // Get to the end of the token
+ while (tmpPos < len
+ && delim.indexOf(str.charAt(tmpPos)) < 0)
+ ++tmpPos;
}
+ }
// Make sure to count the last token
if (tokenFound)
public class intwrapper {
- public intwrapper() {}
+ public intwrapper() {
+ }
int value;
}
public String getMarker() {
return marker;
}
-
- public String getData(){
+
+ public String getData() {
return data;
}
if (o instanceof AnnotationDescriptor) {
AnnotationDescriptor a = (AnnotationDescriptor) o;
if (a.getType() != type)
- return false;
+ return false;
if (!a.getMarker().equals(getMarker()))
- return false;
+ return false;
return true;
}
Vector fieldvec;
SymbolTable flags;
SymbolTable methods;
-
+
Hashtable singleImports;
-
+
int numstaticblocks = 0;
int numstaticfields = 0;
-
+
// for interfaces
Vector<String> superinterfaces;
SymbolTable superIFdesc;
private int interfaceid;
-
+
// for inner classes
boolean isInnerClass=false;
-
+
// inner classes/enum can have these
String surroundingclass=null;
ClassDescriptor surroudingdesc=null;
SymbolTable innerdescs;
-
+
// for enum type
boolean isEnum = false;
SymbolTable enumdescs;
HashMap<String, Integer> enumConstantTbl;
int enumconstantid = 0;
-
+
String sourceFileName;
-
+
public ClassDescriptor(String classname, boolean isInterface) {
this("", classname, isInterface);
}
public Iterator getFlags() {
return flags.getDescriptorsIterator();
}
-
+
public Iterator getSuperInterfaces() {
return this.superIFdesc.getDescriptorsIterator();
}
public SymbolTable getMethodTable() {
return methods;
}
-
+
public SymbolTable getSuperInterfaceTable() {
return this.superIFdesc;
}
st += "implements ";
boolean needcomma = false;
for(int i = 0; i < this.superinterfaces.size(); i++) {
- if(needcomma) {
- st += ", ";
- }
- st += this.superinterfaces.elementAt(i);
- needcomma = true;
+ if(needcomma) {
+ st += ", ";
+ }
+ st += this.superinterfaces.elementAt(i);
+ needcomma = true;
}
}
st+=" {\n";
indent=TreeNode.INDENT;
boolean printcr=false;
- for(Iterator it=getFlags(); it.hasNext();) {
+ for(Iterator it=getFlags(); it.hasNext(); ) {
FlagDescriptor fd=(FlagDescriptor)it.next();
st+=TreeNode.printSpace(indent)+fd.toString()+"\n";
printcr=true;
printcr=false;
- for(Iterator it=getFields(); it.hasNext();) {
+ for(Iterator it=getFields(); it.hasNext(); ) {
FieldDescriptor fd=(FieldDescriptor)it.next();
st+=TreeNode.printSpace(indent)+fd.toString()+"\n";
printcr=true;
}
if (printcr)
st+="\n";
-
- for(Iterator it=this.getInnerClasses(); it.hasNext();) {
+
+ for(Iterator it=this.getInnerClasses(); it.hasNext(); ) {
ClassDescriptor icd=(ClassDescriptor)it.next();
st+=icd.printTree(state)+"\n";
printcr=true;
}
if (printcr)
st+="\n";
-
- for(Iterator it=this.getEnum(); it.hasNext();) {
+
+ for(Iterator it=this.getEnum(); it.hasNext(); ) {
ClassDescriptor icd = (ClassDescriptor)it.next();
st += icd.getModifier().toString() + " enum " + icd.getSymbol() + " {\n ";
Set keys = icd.getEnumConstantTbl().keySet();
String[] econstants = new String[keys.size()];
Iterator it_keys = keys.iterator();
while(it_keys.hasNext()) {
- String key = (String)it_keys.next();
- econstants[icd.getEnumConstant(key)] = key;
+ String key = (String)it_keys.next();
+ econstants[icd.getEnumConstant(key)] = key;
}
for(int i = 0; i < econstants.length; i++) {
- st += econstants[i];
- if(i < econstants.length-1) {
- st += ", ";
- }
+ st += econstants[i];
+ if(i < econstants.length-1) {
+ st += ", ";
+ }
}
st+="\n}\n";
printcr=true;
if (printcr)
st+="\n";
- for(Iterator it=getMethods(); it.hasNext();) {
+ for(Iterator it=getMethods(); it.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor)it.next();
st+=TreeNode.printSpace(indent)+md.toString()+" ";
BlockNode bn=state.getMethodBody(md);
ClassDescriptor cn=this;
while(true) {
if (cn==null) {
- // TODO: the original code returned "null" if no super class
- // ever defines the method. Is there a situation where this is
- // fine and the client should take other actions? If not, we should
- // change this warning to an error.
- System.out.println( "ClassDescriptor.java: WARNING "+md+
- " did not resolve to an actual method." );
+ // TODO: the original code returned "null" if no super class
+ // ever defines the method. Is there a situation where this is
+ // fine and the client should take other actions? If not, we should
+ // change this warning to an error.
+ System.out.println("ClassDescriptor.java: WARNING "+md+
+ " did not resolve to an actual method.");
return null;
}
Set possiblematches=cn.getMethodTable().getSetFromSameScope(md.getSymbol());
- for(Iterator matchit=possiblematches.iterator(); matchit.hasNext();) {
+ for(Iterator matchit=possiblematches.iterator(); matchit.hasNext(); ) {
MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
-
+
if (md.matches(matchmd)) {
return matchmd;
}
}
-
+
//Not found...walk one level up
cn=cn.getSuperDesc();
}
public String getSuper() {
return superclass;
}
-
+
public void addSuperInterface(String superif) {
this.superinterfaces.addElement(superif);
}
-
+
public Vector<String> getSuperInterface() {
return this.superinterfaces;
}
-
+
public void addSuperInterfaces(ClassDescriptor sif) {
this.superIFdesc.add(sif);
}
-
+
public void incStaticBlocks() {
this.numstaticblocks++;
}
-
+
public int getNumStaticBlocks() {
return this.numstaticblocks;
}
-
+
public void incStaticFields() {
this.numstaticfields++;
}
-
+
public int getNumStaticFields() {
return this.numstaticfields;
}
-
+
public boolean isAbstract() {
return this.modifiers.isAbstract();
}
-
+
public boolean isInterface() {
return (this.classid == -2);
}
-
+
public void setInterfaceId(int id) {
this.interfaceid = id;
}
-
+
public boolean isStatic() {
return this.modifiers.isStatic();
}
-
+
public void setAsInnerClass() {
this.isInnerClass = true;
}
-
+
public boolean isInnerClass() {
return this.isInnerClass;
}
-
+
public void setSurroundingClass(String sclass) {
this.surroundingclass=sclass;
}
public void setSurrounding(ClassDescriptor scd) {
this.surroudingdesc=scd;
}
-
+
public void addInnerClass(ClassDescriptor icd) {
this.innerdescs.add(icd);
}
-
+
public Iterator getInnerClasses() {
return this.innerdescs.getDescriptorsIterator();
}
public SymbolTable getInnerClassTable() {
return this.innerdescs;
}
-
+
public void setAsEnum() {
this.isEnum = true;
}
-
+
public boolean isEnum() {
return this.isEnum;
}
-
+
public void addEnum(ClassDescriptor icd) {
this.enumdescs.add(icd);
}
-
+
public Iterator getEnum() {
return this.enumdescs.getDescriptorsIterator();
}
public SymbolTable getEnumTable() {
return this.enumdescs;
}
-
+
public void addEnumConstant(String econstant) {
if(this.enumConstantTbl == null) {
this.enumConstantTbl = new HashMap<String, Integer>();
}
return;
}
-
+
public int getEnumConstant(String econstant) {
if(this.enumConstantTbl.containsKey(econstant)) {
return this.enumConstantTbl.get(econstant).intValue();
return -1;
}
}
-
+
public HashMap<String, Integer> getEnumConstantTbl() {
return this.enumConstantTbl;
}
-
+
public Modifiers getModifier() {
return this.modifiers;
}
-
- public void setSourceFileName(String sourceFileName){
+
+ public void setSourceFileName(String sourceFileName) {
this.sourceFileName=sourceFileName;
}
-
+
public void setImports(Hashtable singleImports) {
this.singleImports = singleImports;
}
-
- public String getSourceFileName(){
+
+ public String getSourceFileName() {
return this.sourceFileName;
}
-
+
public Hashtable getSingleImportMappings() {
return this.singleImports;
}
-
+
}
private boolean isglobal;
private boolean isenum;
private int enumvalue;
-
+
private ClassDescriptor cn;
public FieldDescriptor(Modifiers m, TypeDescriptor t, String identifier, ExpressionNode e, boolean isglobal) {
this.isenum = false;
this.enumvalue = -1;
}
-
+
public ClassDescriptor getClassDescriptor() {
return this.cn;
}
-
+
public void setClassDescriptor(ClassDescriptor cn) {
this.cn = cn;
}
} else
return safename;
}
-
+
public boolean isEnum() {
return this.isenum;
}
-
+
public int enumValue() {
return this.enumvalue;
}
-
+
public void setAsEnum() {
this.isenum = true;
}
-
+
public void setEnumValue(int value) {
this.enumvalue = value;
}
- public ExpressionNode getExpressionNode(){
- return en;
+ public ExpressionNode getExpressionNode() {
+ return en;
}
public boolean isFinal() {
return modifier.isFinal();
}
-
+
public boolean isStatic() {
return modifier.isStatic();
}
-
+
public boolean isVolatile() {
return modifier.isVolatile();
}
-
+
public boolean isGlobal() {
return isglobal;
}
return td;
}
- public void changeSafeSymbol(int id) {
- safename+=id;
- }
-
+ public void changeSafeSymbol(int id) {
+ safename+=id;
+ }
+
public String toString() {
if (en==null)
return modifier.toString()+td.toString()+" "+getSymbol()+";";
Set<TempDescriptor> reallivein;
Set<TempDescriptor> liveout;
Set<TempDescriptor> liveoutvirtualread;
-
+
}
int globaldefscount=0;
boolean mgcstaticinit = false;
JavaBuilder javabuilder;
-
+
int boundschknum = 0;
public BuildCode(State st, Hashtable temptovar, TypeUtil typeutil, CallGraph callgraph, JavaBuilder javabuilder) {
fieldorder=new Hashtable();
flagorder=new Hashtable();
this.typeutil=typeutil;
- State.logEvent("Virtual");
+ State.logEvent("Virtual");
virtualcalls=new Virtual(state, null, callgraph);
printedfieldstbl = new Hashtable<String, ClassDescriptor>();
}
MethodDescriptor current_md=null;
HashSet tovisit=new HashSet();
HashSet visited=new HashSet();
-
+
while(it_sclasses.hasNext()) {
ClassDescriptor cd = (ClassDescriptor)it_sclasses.next();
MethodDescriptor md = (MethodDescriptor)cd.getMethodTable().get("staticblocks");
tovisit.add(md);
}
}
-
+
while(!tovisit.isEmpty()) {
current_md=(MethodDescriptor)tovisit.iterator().next();
tovisit.remove(current_md);
Iterator it_sclasses = sctbl.getDescriptorsIterator();
if(it_sclasses.hasNext()) {
while(it_sclasses.hasNext()) {
- ClassDescriptor t_cd = (ClassDescriptor)it_sclasses.next();
- MethodDescriptor t_md = (MethodDescriptor)t_cd.getMethodTable().get("staticblocks");
-
- if(t_md != null&&callgraph.isInit(t_cd)) {
- outmethod.println(" {");
- if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
- outmethod.print(" struct "+t_cd.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"_params __parameterlist__={");
- outmethod.println("0, NULL};");
- outmethod.println(" "+t_cd.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"(& __parameterlist__);");
- } else {
- outmethod.println(" "+t_cd.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"();");
- }
- outmethod.println(" }");
- }
+ ClassDescriptor t_cd = (ClassDescriptor)it_sclasses.next();
+ MethodDescriptor t_md = (MethodDescriptor)t_cd.getMethodTable().get("staticblocks");
+
+ if(t_md != null&&callgraph.isInit(t_cd)) {
+ outmethod.println(" {");
+ if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
+ outmethod.print(" struct "+t_cd.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"_params __parameterlist__={");
+ outmethod.println("0, NULL};");
+ outmethod.println(" "+t_cd.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"(& __parameterlist__);");
+ } else {
+ outmethod.println(" "+t_cd.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"();");
+ }
+ outmethod.println(" }");
+ }
}
}
}
outmethod.println(" {");
outmethod.println(" int i = 0;");
if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
- outmethod.println(" struct garbagelist dummy={0,NULL};");
- outmethod.println(" global_defs_p->classobjs = allocate_newarray(&dummy, OBJECTARRAYTYPE, "
- + (state.numClasses()+state.numArrays()+state.numInterfaces()) + ");");
+ outmethod.println(" struct garbagelist dummy={0,NULL};");
+ outmethod.println(" global_defs_p->classobjs = allocate_newarray(&dummy, OBJECTARRAYTYPE, "
+ + (state.numClasses()+state.numArrays()+state.numInterfaces()) + ");");
} else {
- outmethod.println(" global_defs_p->classobjs = allocate_newarray(OBJECTARRAYTYPE, "
- + (state.numClasses()+state.numArrays()+state.numInterfaces()) + ");");
+ outmethod.println(" global_defs_p->classobjs = allocate_newarray(OBJECTARRAYTYPE, "
+ + (state.numClasses()+state.numArrays()+state.numInterfaces()) + ");");
}
outmethod.println(" for(i = 0; i < " + (state.numClasses()+state.numArrays()+state.numInterfaces()) + "; i++) {");
if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
// always include: compiler directives will leave out
// instrumentation when option is not set
- if(!state.MULTICORE) {
- outmethod.println("#include \"coreprof/coreprof.h\"");
- }
+ if(!state.MULTICORE) {
+ outmethod.println("#include \"coreprof/coreprof.h\"");
+ }
if (state.FASTCHECK) {
outmethod.println("#include \"localobjects.h\"");
while(classit.hasNext()) {
ClassDescriptor cd=(ClassDescriptor)classit.next();
if(cd.isInterface()) {
- continue;
- }
+ continue;
+ }
fillinRow(cd, virtualtable, cd.getId());
}
while(it.hasNext()) {
ClassDescriptor cd=(ClassDescriptor)it.next();
if(cd.isInterface()) {
- ifarray[cd.getId()] = cd;
+ ifarray[cd.getId()] = cd;
} else {
- cdarray[cd.getId()] = cd;
+ cdarray[cd.getId()] = cd;
}
}
TypeDescriptor arraytd=arraytable[i];
outclassdefs.println(arraytd.toPrettyString() +" "+(i+state.numClasses()));
}
-
+
for(int i=0; i<state.numInterfaces(); i++) {
ClassDescriptor ifcd = ifarray[i];
outclassdefs.println(ifcd +" "+(i+state.numClasses()+state.numArrays()));
outclassdefs.print("sizeof("+tdelement.getSafeSymbol()+")");
needcomma=true;
}
-
+
for(int i=0; i<state.numInterfaces(); i++) {
if (needcomma)
- outclassdefs.print(", ");
+ outclassdefs.print(", ");
outclassdefs.print("sizeof(struct "+ifarray[i].getSafeSymbol()+")");
needcomma=true;
}
outclassdefs.print("int typearray[]={");
for(int i=0; i<state.numClasses(); i++) {
ClassDescriptor cd=cdarray[i];
- ClassDescriptor supercd=i>0 ? cd.getSuperDesc() : null;
+ ClassDescriptor supercd=i>0?cd.getSuperDesc():null;
if(supercd != null && supercd.isInterface()) {
- throw new Error("Super class can not be interfaces");
+ throw new Error("Super class can not be interfaces");
}
if (needcomma)
outclassdefs.print(", ");
outclassdefs.print(type);
needcomma=true;
}
-
+
for(int i=0; i<state.numInterfaces(); i++) {
ClassDescriptor cd=ifarray[i];
ClassDescriptor supercd=cd.getSuperDesc();
if(supercd != null && supercd.isInterface()) {
- throw new Error("Super class can not be interfaces");
+ throw new Error("Super class can not be interfaces");
}
if (needcomma)
- outclassdefs.print(", ");
+ outclassdefs.print(", ");
if (supercd==null)
- outclassdefs.print("-1");
+ outclassdefs.print("-1");
else
- outclassdefs.print(supercd.getId());
+ outclassdefs.print(supercd.getId());
needcomma=true;
}
protected void generateTempStructs(FlatMethod fm) {
MethodDescriptor md=fm.getMethod();
TaskDescriptor task=fm.getTask();
- ParamsObject objectparams=md!=null ? new ParamsObject(md,tag++) : new ParamsObject(task, tag++);
+ ParamsObject objectparams=md!=null?new ParamsObject(md,tag++):new ParamsObject(task, tag++);
if (md!=null)
paramstable.put(md, objectparams);
else
objectparams.addPrim(temp);
}
- TempObject objecttemps=md!=null ? new TempObject(objectparams,md,tag++) : new TempObject(objectparams, task, tag++);
+ TempObject objecttemps=md!=null?new TempObject(objectparams,md,tag++):new TempObject(objectparams, task, tag++);
if (md!=null)
tempstable.put(md, objecttemps);
else
output.println("0};");
continue;
}
-
+
int count=0;
- for(Iterator allit=cn.getFieldTable().getAllDescriptorsIterator();allit.hasNext();) {
+ for(Iterator allit=cn.getFieldTable().getAllDescriptorsIterator(); allit.hasNext(); ) {
FieldDescriptor fd=(FieldDescriptor)allit.next();
if(fd.isStatic()) {
continue;
count++;
}
output.print(count);
- for(Iterator allit=cn.getFieldTable().getAllDescriptorsIterator();allit.hasNext();) {
+ for(Iterator allit=cn.getFieldTable().getAllDescriptorsIterator(); allit.hasNext(); ) {
FieldDescriptor fd=(FieldDescriptor)allit.next();
if(fd.isStatic()) {
continue;
}
output.println("};");
}
-
+
output.println("unsigned INTPTR * pointerarray[]={");
boolean needcomma=false;
for(int i=0; i<state.numClasses(); i++) {
}
output.println("};");
}
-
+
private int checkarraysupertype(ClassDescriptor arraycd, TypeDescriptor arraytd) {
int type=-1;
-
+
TypeDescriptor supertd=new TypeDescriptor(arraycd);
supertd.setArrayCount(arraytd.getArrayCount());
type=state.getArrayNumber(supertd);
if (type!=-1) {
return type;
}
-
+
ClassDescriptor cd = arraycd.getSuperDesc();
if(cd != null) {
type = checkarraysupertype(cd, arraytd);
if(type != -1) {
- return type;
+ return type;
}
}
ClassDescriptor ifcd = (ClassDescriptor)it_sifs.next();
type = checkarraysupertype(ifcd, arraytd);
if(type != -1) {
- return type;
+ return type;
}
}
-
+
return type;
}
for(int i=0; i<state.numClasses(); i++) {
ClassDescriptor cn=cdarray[i];
if(cn == null) {
- continue;
+ continue;
}
output.print("int supertypes" + cn.getSafeSymbol() + "[] = {");
boolean ncomma = false;
int snum = 0;
if((cn != null) && (cn.getSuperDesc() != null)) {
- snum++;
+ snum++;
}
- Iterator it_sifs = cn != null? cn.getSuperInterfaces() : null;
+ Iterator it_sifs = cn != null?cn.getSuperInterfaces():null;
while(it_sifs != null && it_sifs.hasNext()) {
- snum++;
- it_sifs.next();
+ snum++;
+ it_sifs.next();
}
output.print(snum);
ncomma = true;
if ((cn != null) && (cn.getSuperDesc()!=null)) {
- if(ncomma) {
- output.print(",");
- }
- ClassDescriptor cdsuper=cn.getSuperDesc();
- output.print(cdsuper.getId());
- }
- it_sifs = cn != null? cn.getSuperInterfaces() : null;
+ if(ncomma) {
+ output.print(",");
+ }
+ ClassDescriptor cdsuper=cn.getSuperDesc();
+ output.print(cdsuper.getId());
+ }
+ it_sifs = cn != null?cn.getSuperInterfaces():null;
while(it_sifs != null && it_sifs.hasNext()) {
- if(ncomma) {
- output.print(",");
- }
- output.print(((ClassDescriptor)it_sifs.next()).getId()+state.numClasses()+state.numArrays());
+ if(ncomma) {
+ output.print(",");
+ }
+ output.print(((ClassDescriptor)it_sifs.next()).getId()+state.numClasses()+state.numArrays());
}
-
+
output.println("};");
}
-
+
for(int i=0; i<state.numArrays(); i++) {
TypeDescriptor arraytd=arraytable[i];
ClassDescriptor arraycd=arraytd.getClassDesc();
boolean ncomma = false;
int snum = 0;
if (arraycd==null) {
- snum++;
- output.print(snum);
- output.print(", ");
- output.print(objectclass.getId());
- output.println("};");
- continue;
+ snum++;
+ output.print(snum);
+ output.print(", ");
+ output.print(objectclass.getId());
+ output.println("};");
+ continue;
}
if((arraycd != null) && (arraycd.getSuperDesc() != null)) {
- snum++;
+ snum++;
}
- Iterator it_sifs = arraycd != null? arraycd.getSuperInterfaces() : null;
+ Iterator it_sifs = arraycd != null?arraycd.getSuperInterfaces():null;
while(it_sifs != null && it_sifs.hasNext()) {
- snum++;
- it_sifs.next();
+ snum++;
+ it_sifs.next();
}
output.print(snum);
ncomma = true;
if ((arraycd != null) && (arraycd.getSuperDesc()!=null)) {
- ClassDescriptor cd=arraycd.getSuperDesc();
- int type=-1;
- if(cd!=null) {
- type = checkarraysupertype(cd, arraytd);
- if(type != -1) {
- type += state.numClasses();
- }
- }
- if (ncomma)
- output.print(", ");
- output.print(type);
- }
- it_sifs = arraycd != null? arraycd.getSuperInterfaces() : null;
+ ClassDescriptor cd=arraycd.getSuperDesc();
+ int type=-1;
+ if(cd!=null) {
+ type = checkarraysupertype(cd, arraytd);
+ if(type != -1) {
+ type += state.numClasses();
+ }
+ }
+ if (ncomma)
+ output.print(", ");
+ output.print(type);
+ }
+ it_sifs = arraycd != null?arraycd.getSuperInterfaces():null;
while(it_sifs != null && it_sifs.hasNext()) {
- ClassDescriptor ifcd = (ClassDescriptor)it_sifs.next();
- int type = checkarraysupertype(ifcd , arraytd);
- if(type != -1) {
- type += state.numClasses();
- }
- if (ncomma)
- output.print(", ");
- output.print(type);
+ ClassDescriptor ifcd = (ClassDescriptor)it_sifs.next();
+ int type = checkarraysupertype(ifcd, arraytd);
+ if(type != -1) {
+ type += state.numClasses();
+ }
+ if (ncomma)
+ output.print(", ");
+ output.print(type);
}
output.println("};");
}
-
+
for(int i=0; i<state.numInterfaces(); i++) {
ClassDescriptor cn=ifarray[i];
if(cn == null) {
- continue;
+ continue;
}
output.print("int supertypes" + cn.getSafeSymbol() + "[] = {");
boolean ncomma = false;
int snum = 0;
if((cn != null) && (cn.getSuperDesc() != null)) {
- snum++;
+ snum++;
}
- Iterator it_sifs = cn != null? cn.getSuperInterfaces() : null;
+ Iterator it_sifs = cn != null?cn.getSuperInterfaces():null;
while(it_sifs != null && it_sifs.hasNext()) {
- snum++;
- it_sifs.next();
+ snum++;
+ it_sifs.next();
}
output.print(snum);
ncomma = true;
if ((cn != null) && (cn.getSuperDesc()!=null)) {
- if(ncomma) {
- output.print(",");
- }
- ClassDescriptor cdsuper=cn.getSuperDesc();
- output.print(cdsuper.getId());
- }
- it_sifs = cn != null? cn.getSuperInterfaces() : null;
+ if(ncomma) {
+ output.print(",");
+ }
+ ClassDescriptor cdsuper=cn.getSuperDesc();
+ output.print(cdsuper.getId());
+ }
+ it_sifs = cn != null?cn.getSuperInterfaces():null;
while(it_sifs != null && it_sifs.hasNext()) {
- if(ncomma) {
- output.print(",");
- }
- output.print(((ClassDescriptor)it_sifs.next()).getId()+state.numClasses()+state.numArrays());
+ if(ncomma) {
+ output.print(",");
+ }
+ output.print(((ClassDescriptor)it_sifs.next()).getId()+state.numClasses()+state.numArrays());
}
-
+
output.println("};");
}
-
+
output.println("int* supertypes[]={");
boolean needcomma=false;
for(int i=0; i<state.numClasses(); i++) {
output.println(",");
needcomma=true;
if(cn != null) {
- output.print("supertypes" + cn.getSafeSymbol());
+ output.print("supertypes" + cn.getSafeSymbol());
} else {
- output.print(0);
+ output.print(0);
}
}
-
+
for(int i=0; i<state.numArrays(); i++) {
if (needcomma)
- output.println(",");
+ output.println(",");
needcomma = true;
output.print("supertypes___arraytype___" + (i+state.numClasses()));
}
-
+
for(int i=0; i<state.numInterfaces(); i++) {
ClassDescriptor cn=ifarray[i];
if (needcomma)
- output.println(",");
+ output.println(",");
needcomma=true;
output.print("supertypes" + cn.getSafeSymbol());
}
fieldorder.put(cn,fields);
Vector fieldvec=cn.getFieldVec();
- fldloop:
+fldloop:
for(int i=0; i<fieldvec.size(); i++) {
FieldDescriptor fd=(FieldDescriptor)fieldvec.get(i);
if((sp != null) && sp.getFieldTable().contains(fd.getSymbol())) {
FieldDescriptor fd=(FieldDescriptor)fields.get(i);
String fstring = fd.getSafeSymbol();
if(printedfieldstbl.containsKey(fstring)) {
- printedfieldstbl.put(fstring, cn);
- continue;
+ printedfieldstbl.put(fstring, cn);
+ continue;
} else {
- printedfieldstbl.put(fstring, cn);
+ printedfieldstbl.put(fstring, cn);
}
if (fd.getType().isClass()
&& fd.getType().getClassDesc().isEnum()) {
MethodDescriptor md=(MethodDescriptor)methodit.next();
FlatMethod fm=state.getMethodFlat(md);
-
+
if (!callgraph.isCallable(md)&&(!md.isStaticBlock()||!callgraph.isInit(cn))) {
if (callgraph.isCalled(md)) {
generateTempStructs(fm);
/* Output parameter structure */
if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
if(md.isInvokedByStatic() && !md.isStaticBlock() && !md.getModifiers().isNative()) {
- // generate the staticinit version
- String mdstring = md.getSafeMethodDescriptor() + "staticinit";
-
- ParamsObject objectparams=(ParamsObject) paramstable.get(md);
- output.println("struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+"_params {");
- output.println(" int size;");
- output.println(" void * next;");
- for(int i=0; i<objectparams.numPointers(); i++) {
- TempDescriptor temp=objectparams.getPointer(i);
- if(temp.getType().isClass() && temp.getType().getClassDesc().isEnum()) {
- output.println(" int " + temp.getSafeSymbol() + ";");
- } else {
- output.println(" struct "+temp.getType().getSafeSymbol()+" * "+temp.getSafeSymbol()+";");
- }
- }
- output.println("};\n");
- }
-
+ // generate the staticinit version
+ String mdstring = md.getSafeMethodDescriptor() + "staticinit";
+
+ ParamsObject objectparams=(ParamsObject) paramstable.get(md);
+ output.println("struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+"_params {");
+ output.println(" int size;");
+ output.println(" void * next;");
+ for(int i=0; i<objectparams.numPointers(); i++) {
+ TempDescriptor temp=objectparams.getPointer(i);
+ if(temp.getType().isClass() && temp.getType().getClassDesc().isEnum()) {
+ output.println(" int " + temp.getSafeSymbol() + ";");
+ } else {
+ output.println(" struct "+temp.getType().getSafeSymbol()+" * "+temp.getSafeSymbol()+";");
+ }
+ }
+ output.println("};\n");
+ }
+
ParamsObject objectparams=(ParamsObject) paramstable.get(md);
output.println("struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"_params {");
output.println(" int size;");
protected void generateMethod(ClassDescriptor cn, MethodDescriptor md, PrintWriter headersout, PrintWriter output) {
ParamsObject objectparams=(ParamsObject) paramstable.get(md);
TempObject objecttemps=(TempObject) tempstable.get(md);
-
+
boolean printcomma = false;
-
-
+
+
if(md.isInvokedByStatic() && !md.isStaticBlock() && !md.getModifiers().isNative()) {
// generate the staticinit version
String mdstring = md.getSafeMethodDescriptor() + "staticinit";
/* Output temp structure */
if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
- output.println("struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+"_locals {");
- output.println(" int size;");
- output.println(" void * next;");
- for(int i=0; i<objecttemps.numPointers(); i++) {
- TempDescriptor temp=objecttemps.getPointer(i);
- if (!temp.getType().isArray() && temp.getType().isNull())
- output.println(" void * "+temp.getSafeSymbol()+";");
- else
- output.println(" struct "+temp.getType().getSafeSymbol()+" * "+temp.getSafeSymbol()+";");
- }
- output.println("};\n");
- }
-
+ output.println("struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+"_locals {");
+ output.println(" int size;");
+ output.println(" void * next;");
+ for(int i=0; i<objecttemps.numPointers(); i++) {
+ TempDescriptor temp=objecttemps.getPointer(i);
+ if (!temp.getType().isArray() && temp.getType().isNull())
+ output.println(" void * "+temp.getSafeSymbol()+";");
+ else
+ output.println(" struct "+temp.getType().getSafeSymbol()+" * "+temp.getSafeSymbol()+";");
+ }
+ output.println("};\n");
+ }
+
headersout.println("#define D"+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+" 1");
/* First the return type */
if (md.getReturnType()!=null) {
- if(md.getReturnType().isClass() && md.getReturnType().getClassDesc().isEnum()) {
- headersout.println(" int ");
- } else if (md.getReturnType().isClass()||md.getReturnType().isArray())
- headersout.print("struct " + md.getReturnType().getSafeSymbol()+" * ");
- else
- headersout.print(md.getReturnType().getSafeSymbol()+" ");
+ if(md.getReturnType().isClass() && md.getReturnType().getClassDesc().isEnum()) {
+ headersout.println(" int ");
+ } else if (md.getReturnType().isClass()||md.getReturnType().isArray())
+ headersout.print("struct " + md.getReturnType().getSafeSymbol()+" * ");
+ else
+ headersout.print(md.getReturnType().getSafeSymbol()+" ");
} else
- //catch the constructor case
- headersout.print("void ");
+ //catch the constructor case
+ headersout.print("void ");
/* Next the method name */
headersout.print(cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+"(");
printcomma=false;
if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
- headersout.print("struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+"_params * "+paramsprefix);
- printcomma=true;
+ headersout.print("struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+"_params * "+paramsprefix);
+ printcomma=true;
}
/* Output parameter list*/
for(int i=0; i<objectparams.numPrimitives(); i++) {
- TempDescriptor temp=objectparams.getPrimitive(i);
- if (printcomma)
- headersout.print(", ");
- printcomma=true;
- if(temp.getType().isClass() && temp.getType().getClassDesc().isEnum()) {
- headersout.print("int " + temp.getSafeSymbol());
- } else if (temp.getType().isClass()||temp.getType().isArray())
- headersout.print("struct " + temp.getType().getSafeSymbol()+" * "+temp.getSafeSymbol());
- else
- headersout.print(temp.getType().getSafeSymbol()+" "+temp.getSafeSymbol());
+ TempDescriptor temp=objectparams.getPrimitive(i);
+ if (printcomma)
+ headersout.print(", ");
+ printcomma=true;
+ if(temp.getType().isClass() && temp.getType().getClassDesc().isEnum()) {
+ headersout.print("int " + temp.getSafeSymbol());
+ } else if (temp.getType().isClass()||temp.getType().isArray())
+ headersout.print("struct " + temp.getType().getSafeSymbol()+" * "+temp.getSafeSymbol());
+ else
+ headersout.print(temp.getType().getSafeSymbol()+" "+temp.getSafeSymbol());
}
if(md.getSymbol().equals("MonitorEnter") && state.OBJECTLOCKDEBUG) {
- headersout.print(", int linenum");
+ headersout.print(", int linenum");
}
headersout.println(");\n");
}
headersout.println(");\n");
}
}
-
+
protected void generateNativeFlatMethod(FlatMethod fm, PrintWriter outmethod) {
MethodDescriptor md=fm.getMethod();
ClassDescriptor cd=md.getClassDesc();
generateHeader(fm, md, outmethod);
int startindex=0;
- outmethod.println("JNIPUSHFRAME();");
+ outmethod.println("JNIPUSHFRAME();");
if (md.getModifiers().isStatic()) {
outmethod.println("jobject rec=JNIWRAP(((void **)(((char *) &(global_defs_p->classobjs->___length___))+sizeof(int)))[" + cd.getId() + "]);");
} else {
outmethod.println("jobject rec=JNIWRAP("+generateTemp(fm, fm.getParameter(0))+");");
startindex=1;
}
- for(int i=startindex;i<fm.numParameters();i++) {
+ for(int i=startindex; i<fm.numParameters(); i++) {
TempDescriptor tmp=fm.getParameter(i);
if (tmp.getType().isPtr()) {
outmethod.println("jobject param"+i+"=JNIWRAP("+generateTemp(fm, fm.getParameter(i))+");");
outmethod.println("stopforgc((struct garbagelist *)___params___);");
}
if (!md.getReturnType().isVoid()) {
- if (md.getReturnType().isPtr())
+ if (md.getReturnType().isPtr())
outmethod.print("jobject retval=");
else
outmethod.print(md.getReturnType().getSafeSymbol()+" retval=");
outmethod.print("_"+md.getSymbol()+"(");
outmethod.print("JNI_vtable, rec");
- for(int i=startindex;i<fm.numParameters();i++) {
+ for(int i=startindex; i<fm.numParameters(); i++) {
outmethod.print(", ");
TempDescriptor tmp=fm.getParameter(i);
if (tmp.getType().isPtr()) {
}
} else
outmethod.println("JNIPOPFRAME();");
-
+
outmethod.println("}");
outmethod.println("");
}
System.out.println(fm.printMethod());
MethodDescriptor md=fm.getMethod();
TaskDescriptor task=fm.getTask();
- ClassDescriptor cn=md!=null ? md.getClassDesc() : null;
- ParamsObject objectparams=(ParamsObject)paramstable.get(md!=null ? md : task);
-
+ ClassDescriptor cn=md!=null?md.getClassDesc():null;
+ ParamsObject objectparams=(ParamsObject)paramstable.get(md!=null?md:task);
+
if((md != null) && md.isInvokedByStatic() && !md.isStaticBlock() && !md.getModifiers().isNative()) {
// generate a special static init version
mgcstaticinit = true;
String mdstring = md.getSafeMethodDescriptor() + "staticinit";
-
- generateHeader(fm, md!=null ? md : task,output);
- TempObject objecttemp=(TempObject) tempstable.get(md!=null ? md : task);
-
+
+ generateHeader(fm, md!=null?md:task,output);
+ TempObject objecttemp=(TempObject) tempstable.get(md!=null?md:task);
+
if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
- output.print(" struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+"_locals "+localsprefix+"={");
- output.print(objecttemp.numPointers()+",");
- output.print(paramsprefix);
- for(int j=0; j<objecttemp.numPointers(); j++)
- output.print(", NULL");
- output.println("};");
+ output.print(" struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+"_locals "+localsprefix+"={");
+ output.print(objecttemp.numPointers()+",");
+ output.print(paramsprefix);
+ for(int j=0; j<objecttemp.numPointers(); j++)
+ output.print(", NULL");
+ output.println("};");
}
for(int i=0; i<objecttemp.numPrimitives(); i++) {
- TempDescriptor td=objecttemp.getPrimitive(i);
- TypeDescriptor type=td.getType();
- if (type.isNull() && !type.isArray())
- output.println(" void * "+td.getSafeSymbol()+";");
- else if (type.isClass() && type.getClassDesc().isEnum()) {
- output.println(" int " + td.getSafeSymbol() + ";");
- } else if (type.isClass()||type.isArray())
- output.println(" struct "+type.getSafeSymbol()+" * "+td.getSafeSymbol()+";");
- else
- output.println(" "+type.getSafeSymbol()+" "+td.getSafeSymbol()+";");
+ TempDescriptor td=objecttemp.getPrimitive(i);
+ TypeDescriptor type=td.getType();
+ if (type.isNull() && !type.isArray())
+ output.println(" void * "+td.getSafeSymbol()+";");
+ else if (type.isClass() && type.getClassDesc().isEnum()) {
+ output.println(" int " + td.getSafeSymbol() + ";");
+ } else if (type.isClass()||type.isArray())
+ output.println(" struct "+type.getSafeSymbol()+" * "+td.getSafeSymbol()+";");
+ else
+ output.println(" "+type.getSafeSymbol()+" "+td.getSafeSymbol()+";");
}
additionalCodeAtTopFlatMethodBody(output, fm);
* multi-threaded program...*/
if (((state.OOOJAVA||state.THREAD)&&GENERATEPRECISEGC) || state.MULTICOREGC) {
- //Don't bother if we aren't in recursive methods...The loops case will catch it
- if (callgraph.getAllMethods(md).contains(md)) {
- if (this.state.MULTICOREGC) {
- output.println("if(gcflag) gc("+localsprefixaddr+");");
- } else {
- output.println("if (unlikely(needtocollect)) checkcollect("+localsprefixaddr+");");
- }
- }
- }
-
+ //Don't bother if we aren't in recursive methods...The loops case will catch it
+ if (callgraph.getAllMethods(md).contains(md)) {
+ if (this.state.MULTICOREGC) {
+ output.println("if(gcflag) gc("+localsprefixaddr+");");
+ } else {
+ output.println("if (unlikely(needtocollect)) checkcollect("+localsprefixaddr+");");
+ }
+ }
+ }
+
generateCode(fm.getNext(0), fm, null, output);
output.println("}\n\n");
-
+
mgcstaticinit = false;
}
-
- generateHeader(fm, md!=null ? md : task,output);
- TempObject objecttemp=(TempObject) tempstable.get(md!=null ? md : task);
+
+ generateHeader(fm, md!=null?md:task,output);
+ TempObject objecttemp=(TempObject) tempstable.get(md!=null?md:task);
if((md != null) && (md.isStaticBlock())) {
mgcstaticinit = true;
generateCode(fm.getNext(0), fm, null, output);
output.println("}\n\n");
-
+
mgcstaticinit = false;
}
protected String generateTemp(FlatMethod fm, TempDescriptor td) {
MethodDescriptor md=fm.getMethod();
TaskDescriptor task=fm.getTask();
- TempObject objecttemps=(TempObject) tempstable.get(md!=null ? md : task);
+ TempObject objecttemps=(TempObject) tempstable.get(md!=null?md:task);
if (objecttemps.isLocalPrim(td)||objecttemps.isParamPrim(td)) {
return td.getSafeSymbol();
output.println("/* FlatOffsetNode */");
FieldDescriptor fd=fofn.getField();
if(!fd.isStatic()) {
- output.println(generateTemp(fm, fofn.getDst())+ " = (short)(int) (&((struct "+fofn.getClassType().getSafeSymbol() +" *)0)->"+
- fd.getSafeSymbol()+");");
+ output.println(generateTemp(fm, fofn.getDst())+ " = (short)(int) (&((struct "+fofn.getClassType().getSafeSymbol() +" *)0)->"+
+ fd.getSafeSymbol()+");");
}
output.println("/* offset */");
}
// is a static block or is invoked in some static block
ClassDescriptor cd = fm.getMethod().getClassDesc();
if(cd != cn && mgcstaticinit && callgraph.isInit(cn)) {
- // generate static init check code if it has not done static init in main()
+ // generate static init check code if it has not done static init in main()
if((cn.getNumStaticFields() != 0) || (cn.getNumStaticBlocks() != 0)) {
// need to check if the class' static fields have been initialized and/or
// its static blocks have been executed
if((md.getSymbol().equals("MonitorEnter") || md.getSymbol().equals("MonitorExit")) && fc.getThis().getSymbol().equals("classobj")) {
output.println("{");
if(md.getSymbol().equals("MonitorEnter") && state.OBJECTLOCKDEBUG) {
- output.println("int monitorenterline = __LINE__;");
+ output.println("int monitorenterline = __LINE__;");
}
// call MonitorEnter/MonitorExit on a class obj
if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
- output.print(" struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"_params __parameterlist__={");
- output.println("1," + localsprefixaddr + ", ((void **)(((char *) &(global_defs_p->classobjs->___length___))+sizeof(int)))[" + fc.getThis().getType().getClassDesc().getId() + "]};");
- if(md.getSymbol().equals("MonitorEnter") && state.OBJECTLOCKDEBUG) {
- output.println(" "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"(& __parameterlist__, monitorenterline);");
- } else {
- output.println(" "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"(& __parameterlist__);");
- }
+ output.print(" struct "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"_params __parameterlist__={");
+ output.println("1," + localsprefixaddr + ", ((void **)(((char *) &(global_defs_p->classobjs->___length___))+sizeof(int)))[" + fc.getThis().getType().getClassDesc().getId() + "]};");
+ if(md.getSymbol().equals("MonitorEnter") && state.OBJECTLOCKDEBUG) {
+ output.println(" "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"(& __parameterlist__, monitorenterline);");
+ } else {
+ output.println(" "+cn.getSafeSymbol()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"(& __parameterlist__);");
+ }
} else {
- output.println(" " + cn.getSafeSymbol()+md.getSafeSymbol()+"_"
- + md.getSafeMethodDescriptor() + "((struct ___Object___*)(((void **)(((char *) &(global_defs_p->classobjs->___length___))+sizeof(int)))["
- + fc.getThis().getType().getClassDesc().getId() + "]));");
+ output.println(" " + cn.getSafeSymbol()+md.getSafeSymbol()+"_"
+ + md.getSafeMethodDescriptor() + "((struct ___Object___*)(((void **)(((char *) &(global_defs_p->classobjs->___length___))+sizeof(int)))["
+ + fc.getThis().getType().getClassDesc().getId() + "]));");
}
output.println("}");
return;
}
-
+
output.println("{");
if(md.getSymbol().equals("MonitorEnter")) {
output.println("int monitorenterline = __LINE__;");
System.out.println("WARNING!!!!!!!!!!!!");
System.out.println("Source code calls static method "+md+" on an object in "+fm.getMethod()+"!");
}
-
+
for(int i=0; i<fc.numArgs(); i++) {
Descriptor var=md.getParameter(i);
TempDescriptor paramtemp=(TempDescriptor)temptovar.get(var);
}
if(md.getSymbol().equals("MonitorEnter") && state.OBJECTLOCKDEBUG) {
- output.print(", int");
+ output.print(", int");
}
output.print("))virtualtable["+generateTemp(fm,fc.getThis())+"->type*"+maxcount+"+"+virtualcalls.getMethodNumber(md)+"])");
}
// for interfaces, always need virtual dispatch
return false;
} else {
- Set subclasses=typeutil.getSubClasses(thiscd);
- if (subclasses==null)
- return true;
- for(Iterator classit=subclasses.iterator(); classit.hasNext(); ) {
- ClassDescriptor cd=(ClassDescriptor)classit.next();
- Set possiblematches=cd.getMethodTable().getSetFromSameScope(md.getSymbol());
- for(Iterator matchit=possiblematches.iterator(); matchit.hasNext(); ) {
- MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
- if (md.matches(matchmd))
- return false;
+ Set subclasses=typeutil.getSubClasses(thiscd);
+ if (subclasses==null)
+ return true;
+ for(Iterator classit=subclasses.iterator(); classit.hasNext(); ) {
+ ClassDescriptor cd=(ClassDescriptor)classit.next();
+ Set possiblematches=cd.getMethodTable().getSetFromSameScope(md.getSymbol());
+ for(Iterator matchit=possiblematches.iterator(); matchit.hasNext(); ) {
+ MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
+ if (md.matches(matchmd))
+ return false;
+ }
}
}
- }
return true;
}
protected void generateFlatFieldNode(FlatMethod fm, FlatFieldNode ffn, PrintWriter output) {
-
+
if(ffn.getField().isStatic()) {
// static field
if((fm.getMethod().isStaticBlock()) || (fm.getMethod().isInvokedByStatic())) {
ClassDescriptor cd = fm.getMethod().getClassDesc();
ClassDescriptor cn = ffn.getSrc().getType().getClassDesc();
if(cd != cn && mgcstaticinit && callgraph.isInit(cn)) {
- // generate the static init check code if has not done the static init in main()
+ // generate the static init check code if has not done the static init in main()
if((cn.getNumStaticFields() != 0) || (cn.getNumStaticBlocks() != 0)) {
// need to check if the class' static fields have been initialized and/or
// its static blocks have been executed
output.println("if(global_defsprim_p->" + cn.getSafeSymbol()+"static_block_exe_flag == 0) {");
if(cn.getNumStaticBlocks() != 0) {
MethodDescriptor t_md = (MethodDescriptor)cn.getMethodTable().get("staticblocks");
- if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
- output.print(" struct "+cn.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"_params __parameterlist__={");
- output.println("0, NULL};");
- output.println(" "+cn.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"(& __parameterlist__);");
- } else {
- output.println(" "+cn.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"();");
- }
+ if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
+ output.print(" struct "+cn.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"_params __parameterlist__={");
+ output.println("0, NULL};");
+ output.println(" "+cn.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"(& __parameterlist__);");
+ } else {
+ output.println(" "+cn.getSafeSymbol()+t_md.getSafeSymbol()+"_"+t_md.getSafeMethodDescriptor()+"();");
+ }
} else {
output.println(" global_defsprim_p->" + cn.getSafeSymbol()+"static_block_exe_flag = 1;");
}
}
}
// redirect to the global_defs_p structure
- if (ffn.getField().getType().isPtr())
- output.println(generateTemp(fm, ffn.getDst())+"=global_defs_p->"+ffn.getField().getSafeSymbol()+";");
- else
- output.println(generateTemp(fm, ffn.getDst())+"=global_defsprim_p->"+ffn.getField().getSafeSymbol()+";");
+ if (ffn.getField().getType().isPtr())
+ output.println(generateTemp(fm, ffn.getDst())+"=global_defs_p->"+ffn.getField().getSafeSymbol()+";");
+ else
+ output.println(generateTemp(fm, ffn.getDst())+"=global_defsprim_p->"+ffn.getField().getSafeSymbol()+";");
} else if (ffn.getField().isEnum()) {
// an Enum value, directly replace the field access as int
output.println(generateTemp(fm, ffn.getDst()) + "=" + ffn.getField().enumValue() + ";");
output.println("if (" + generateTemp(fm,ffn.getSrc()) + " == NULL) {");
output.println("printf(\" NULL ptr error: %s, %s, %d \\n\", __FILE__, __func__, __LINE__);");
if(state.MULTICOREGC) {
- output.println("failednullptr(&___locals___);");
+ output.println("failednullptr(&___locals___);");
} else {
- output.println("failednullptr(NULL);");
+ output.println("failednullptr(NULL);");
}
output.println("}");
output.println("#endif //MULTICORE_DEBUG");
// is a static block or is invoked in some static block
ClassDescriptor cd = fm.getMethod().getClassDesc();
ClassDescriptor cn = fsfn.getDst().getType().getClassDesc();
- if(cd != cn && mgcstaticinit && callgraph.isInit(cn)){
+ if(cd != cn && mgcstaticinit && callgraph.isInit(cn)) {
// generate static init check code if has not done the static init in main()
if((cn.getNumStaticFields() != 0) || (cn.getNumStaticBlocks() != 0)) {
// need to check if the class' static fields have been initialized and/or
if (fsfn.getField().getType().isPtr()) {
if (fsfn.getField().getType()!=fsfn.getSrc().getType())
output.println("global_defs_p->" +
- fsfn.getField().getSafeSymbol()+"=(struct "+ fsfn.getField().getType().getSafeSymbol()+" *)"+generateTemp(fm,fsfn.getSrc())+";");
+ fsfn.getField().getSafeSymbol()+"=(struct "+ fsfn.getField().getType().getSafeSymbol()+" *)"+generateTemp(fm,fsfn.getSrc())+";");
else
output.println("global_defs_p->" +
- fsfn.getField().getSafeSymbol()+"="+ generateTemp(fm,fsfn.getSrc())+";");
+ fsfn.getField().getSafeSymbol()+"="+ generateTemp(fm,fsfn.getSrc())+";");
} else
output.println("global_defsprim_p->" +
- fsfn.getField().getSafeSymbol()+"="+ generateTemp(fm,fsfn.getSrc())+";");
+ fsfn.getField().getSafeSymbol()+"="+ generateTemp(fm,fsfn.getSrc())+";");
} else {
output.println("#ifdef MULTICORE_DEBUG");
output.println("if (" + generateTemp(fm,fsfn.getDst()) + " == NULL) {");
output.println("printf(\" NULL ptr error: %s, %s, %d \\n\", __FILE__, __func__, __LINE__);");
if(state.MULTICOREGC) {
- output.println("failednullptr(&___locals___);");
+ output.println("failednullptr(&___locals___);");
} else {
- output.println("failednullptr(NULL);");
+ output.println("failednullptr(NULL);");
}
output.println("}");
output.println("#endif //MULTICORE_DEBUG");
if (fsfn.getSrc().getType().isPtr()&&fsfn.getSrc().getType()!=fsfn.getField().getType())
output.println(generateTemp(fm, fsfn.getDst())+"->"+
- fsfn.getField().getSafeSymbol()+"=(struct "+ fsfn.getField().getType().getSafeSymbol()+"*)"+generateTemp(fm,fsfn.getSrc())+";");
+ fsfn.getField().getSafeSymbol()+"=(struct "+ fsfn.getField().getType().getSafeSymbol()+"*)"+generateTemp(fm,fsfn.getSrc())+";");
else
output.println(generateTemp(fm, fsfn.getDst())+"->"+
- fsfn.getField().getSafeSymbol()+"="+ generateTemp(fm,fsfn.getSrc())+";");
+ fsfn.getField().getSafeSymbol()+"="+ generateTemp(fm,fsfn.getSrc())+";");
}
}
String str=(String)fln.getValue();
output.println("{");
output.print("short str"+flncount+"[]={");
- for(int i=0;i<str.length();i++) {
+ for(int i=0; i<str.length(); i++) {
if (i!=0)
output.print(", ");
output.print(((int)str.charAt(i)));
output.println(" global_defsprim_p->" + fm.getMethod().getClassDesc().getSafeSymbol()+"static_block_exe_flag = 1;");
output.println("");
}
-
+
if (frn.getReturnTemp()!=null) {
if (frn.getReturnTemp().getType().isPtr())
output.println("return (struct "+fm.getMethod().getReturnType().getSafeSymbol()+"*)"+generateTemp(fm, frn.getReturnTemp())+";");
md=(MethodDescriptor) des;
else
task=(TaskDescriptor) des;
- String mdstring = md != null ? md.getSafeMethodDescriptor() : null;
+ String mdstring = md != null?md.getSafeMethodDescriptor():null;
- ClassDescriptor cn=md!=null ? md.getClassDesc() : null;
+ ClassDescriptor cn=md!=null?md.getClassDesc():null;
if (md!=null&&md.getReturnType()!=null) {
if (md.getReturnType().isClass() && md.getReturnType().getClassDesc().isEnum()) {
output.print("void ");
if (md!=null) {
if(mgcstaticinit && !md.isStaticBlock() && !md.getModifiers().isNative()) {
- mdstring += "staticinit";
+ mdstring += "staticinit";
}
output.print(cn.getSafeSymbol()+md.getSafeSymbol()+"_"+mdstring+"(");
} else
}
protected void additionalCodePostNode(FlatMethod fm, FlatNode fn, PrintWriter output) {
}
-
+
private void printSourceLineNumber(FlatMethod fm, FlatNode fn, PrintWriter output) {
// we do not print out line number if no one explicitly set the number
- if(fn.getNumLine()!=-1){
-
+ if(fn.getNumLine()!=-1) {
+
int lineNum=fn.getNumLine();
// do not generate the line number if it is same as the previous one
boolean needtoprint;
- if(fn.prev.size()==0){
- needtoprint=true;
- }else{
- needtoprint=false;
+ if(fn.prev.size()==0) {
+ needtoprint=true;
+ } else {
+ needtoprint=false;
}
- for(int i=0;i<fn.prev.size();i++){
- int prevLineNum=((FlatNode)fn.prev.get(i)).getNumLine();
- if(prevLineNum!=lineNum){
- needtoprint=true;
- break;
- }
+ for(int i=0; i<fn.prev.size(); i++) {
+ int prevLineNum=((FlatNode)fn.prev.get(i)).getNumLine();
+ if(prevLineNum!=lineNum) {
+ needtoprint=true;
+ break;
+ }
}
- if(needtoprint){
- output.println("// "+fm.getMethod().getClassDesc().getSourceFileName()+":"+fn.getNumLine());
+ if(needtoprint) {
+ output.println("// "+fm.getMethod().getClassDesc().getSourceFileName()+":"+fn.getNumLine());
}
}
}
-
+
}
int startupcorenum; // record the core containing startup task, s
// uppose only one core can have startup object
- public BuildCodeMGC(State st,
- Hashtable temptovar,
- TypeUtil typeutil,
+ public BuildCodeMGC(State st,
+ Hashtable temptovar,
+ TypeUtil typeutil,
SafetyAnalysis sa,
- int coreNum,
+ int coreNum,
int tcoreNum,
int gcoreNum, CallGraph callgraph) {
super(st, temptovar, typeutil, sa, callgraph);
e.printStackTrace();
System.exit(-1);
}
-
+
/* Fix field safe symbols due to shadowing */
FieldShadow.handleFieldShadow(state);
/* Build the virtual dispatch tables */
super.buildVirtualTables(outvirtual);
-
+
/* Tag the methods that are invoked by static blocks */
super.tagMethodInvokedByStaticBlock();
outglobaldefsprim.println("#define __GLOBALDEFPRIM_H_");
outglobaldefsprim.println("");
outglobaldefsprim.println("struct global_defsprim_t {");
-
+
// Output the C class declarations
- // These could mutually reference each other
+ // These could mutually reference each other
outclassdefs.println("#ifndef __CLASSDEF_H_");
outclassdefs.println("#define __CLASSDEF_H_");
super.outputClassDeclarations(outclassdefs, outglobaldefs, outglobaldefsprim);
/* Record number of total cores */
outstructs.println("#define NUMCORES "+this.tcoreNum);
/* Record number of active cores */
- outstructs.println("#define NUMCORESACTIVE "+this.coreNum); // this.coreNum
- // can be reset by the scheduling analysis
+ outstructs.println("#define NUMCORESACTIVE "+this.coreNum); // this.coreNum
+ // can be reset by the scheduling analysis
/* Record number of garbage collection cores */
outstructs.println("#ifdef MULTICORE_GC");
outstructs.println("#define NUMCORES4GC "+this.gcoreNum);
outstructs.println("#endif");
/* Record number of core containing startup task */
outstructs.println("#define STARTUPCORE "+this.startupcorenum);
-
+
if (state.main!=null) {
- /* Generate main method */
+ /* Generate main method */
outputMainMethod(outmethod);
}
outstructs.println("#endif");
outstructs.close();
}
-
+
protected void outputMainMethod(PrintWriter outmethod) {
outmethod.println("int mgc_main(int argc, const char *argv[]) {");
outmethod.println(" int i;");
-
+
if (state.MULTICOREGC) {
outmethod.println(" global_defs_p->size="+globaldefscount+";");
outmethod.println(" global_defs_p->next=NULL;");
outmethod.println(" ((struct garbagelist *)global_defs_p)->array[i]=NULL;");
outmethod.println(" }");
}
-
+
outputStaticBlocks(outmethod);
outputClassObjects(outmethod);
-
+
if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
outmethod.println(" struct ArrayObject * stringarray=allocate_newarray(NULL, STRINGARRAYTYPE, argc-1);");
} else {
outmethod.println(" struct ___String___ *newstring=NewString(argv[i], length);");
}
outmethod.println(" ((void **)(((char *)& stringarray->___length___)+sizeof(int)))[i-1]=newstring;");
- outmethod.println(" }");
+ outmethod.println(" }");
MethodDescriptor md=typeutil.getMain();
ClassDescriptor cd=typeutil.getMainClass();
Hashtable<FlatNew, Vector<FlatNew>> m_aliasFNTbl;
Hashtable<FlatNew, Vector<Integer>> m_aliaslocksTbl4FN;
- public BuildCodeMultiCore(State st,
- Hashtable temptovar,
- TypeUtil typeutil,
- SafetyAnalysis sa,
- Vector<Schedule> scheduling,
- int coreNum,
- int gcoreNum, CallGraph callgraph) {
+ public BuildCodeMultiCore(State st,
+ Hashtable temptovar,
+ TypeUtil typeutil,
+ SafetyAnalysis sa,
+ Vector<Schedule> scheduling,
+ int coreNum,
+ int gcoreNum, CallGraph callgraph) {
super(st, temptovar, typeutil, sa, callgraph);
this.scheduling = scheduling;
this.coreNum = coreNum; // # of the active cores
e.printStackTrace();
System.exit(-1);
}
-
+
/* Fix field safe symbols due to shadowing */
FieldShadow.handleFieldShadow(state);
/* Build the virtual dispatch tables */
buildVirtualTables(outvirtual);
-
+
/* Tag the methods that are invoked by static blocks */
tagMethodInvokedByStaticBlock();
/* Output Structures */
outputStructs(outstructs);
-
+
outglobaldefs.println("#ifndef __GLOBALDEF_H_");
outglobaldefs.println("#define __GLOBALDEF_H_");
outglobaldefs.println("");
boolean comma = false;
outtaskdefs.println("struct parameterwrapper ** objectqueues[][NUMCLASSES] = {");
boolean needcomma = false;
- for(int i = 0; i < numqueues.length ; ++i) {
+ for(int i = 0; i < numqueues.length; ++i) {
if(needcomma) {
outtaskdefs.println(",");
} else {
/* parameter queue arrays for all the tasks*/
outtaskdefs.println("struct parameterwrapper *** paramqueues[] = {");
needcomma = false;
- for(int i = 0; i < this.coreNum ; ++i) {
+ for(int i = 0; i < this.coreNum; ++i) {
if(needcomma) {
outtaskdefs.println(",");
} else {
/* Record number of total cores */
outstructs.println("#define NUMCORES "+this.tcoreNum);
/* Record number of active cores */
- outstructs.println("#define NUMCORESACTIVE "+this.coreNum); // this.coreNum
- // can be reset by the scheduling analysis
+ outstructs.println("#define NUMCORESACTIVE "+this.coreNum); // this.coreNum
+ // can be reset by the scheduling analysis
/* Record number of garbage collection cores */
outstructs.println("#ifdef MULTICORE_GC");
outstructs.println("#define NUMCORES4GC "+this.gcoreNum);
}*/
outputInitStaticAndGlobalMethod(outmethod);
-
+
/* Close files */
outmethodheader.println("#endif");
outmethodheader.close();
outstructs.println("#endif");
outstructs.close();
}
-
+
private void outputInitStaticAndGlobalMethod(PrintWriter outmethod) {
outmethod.println("void initStaticAndGlobal() {");
outmethod.println(" int i;");
-
+
if (state.MULTICOREGC) {
outmethod.println(" global_defs_p->size="+globaldefscount+";");
outmethod.println(" global_defs_p->next=NULL;");
outmethod.println(" ((struct garbagelist *)global_defs_p)->array[i]=NULL;");
outmethod.println(" }");
}
-
+
outputStaticBlocks(outmethod);
outputClassObjects(outmethod);
* passed in (when PRECISE GC is enabled) and (2) function
* prototypes for the tasks */
- protected void generateTaskStructs(PrintWriter output,
- PrintWriter headersout) {
+ protected void generateTaskStructs(PrintWriter output,
+ PrintWriter headersout) {
/* Cycle through tasks */
for(int i = 0; i < this.scheduling.size(); ++i) {
Schedule tmpschedule = this.scheduling.elementAt(i);
/* This method outputs code for each task. */
- protected void outputTaskCode(PrintWriter outtaskdefs,
- PrintWriter outmethod,
- PrintWriter outtask,
- Iterator[] taskits,
- int[] numtasks,
- int[][] numqueues) {
+ protected void outputTaskCode(PrintWriter outtaskdefs,
+ PrintWriter outmethod,
+ PrintWriter outtask,
+ Iterator[] taskits,
+ int[] numtasks,
+ int[][] numqueues) {
/* Compile task based program */
outtaskdefs.println("#include \"task.h\"");
outtaskdefs.println("#include \"methodheaders.h\"");
outtask.println("#include \"structdefs.h\"");
outtask.println("#include \"Queue.h\"");
outtask.println("#include <string.h>");
- outtask.println("#include \"runtime_arch.h\"");
+ outtask.println("#include \"runtime_arch.h\"");
//outtask.println("#ifdef RAW");
//outtask.println("#include <raw.h>");
//outtask.println("#endif");
output.println();
}
- protected void generateTaskMethod(FlatMethod fm,
- PrintWriter output) {
+ protected void generateTaskMethod(FlatMethod fm,
+ PrintWriter output) {
/*if (State.PRINTFLAT)
System.out.println(fm.printMethod());*/
TaskDescriptor task=fm.getTask();
if (type.isNull())
output.println(" void * "+td.getSafeSymbol()+";");
else if (state.MGC && type.isClass() && type.getClassDesc().isEnum()) {
- output.println(" int " + td.getSafeSymbol()+";");
+ output.println(" int " + td.getSafeSymbol()+";");
} else if (type.isClass()||type.isArray())
output.println(" struct "+type.getSafeSymbol()+" * "+td.getSafeSymbol()+";");
else
/* generate print information for RAW version */
output.println("#ifdef MULTICORE");
- if(this.state.RAW) {
- output.println("{");
- output.println("int tmpsum = 0;");
- output.println("char * taskname = \"" + task.getSymbol() + "\";");
- output.println("int tmplen = " + task.getSymbol().length() + ";");
- output.println("int tmpindex = 1;");
- output.println("for(;tmpindex < tmplen; tmpindex++) {");
- output.println(" tmpsum = tmpsum * 10 + *(taskname + tmpindex) - '0';");
- output.println("}");
- }
+ if(this.state.RAW) {
+ output.println("{");
+ output.println("int tmpsum = 0;");
+ output.println("char * taskname = \"" + task.getSymbol() + "\";");
+ output.println("int tmplen = " + task.getSymbol().length() + ";");
+ output.println("int tmpindex = 1;");
+ output.println("for(;tmpindex < tmplen; tmpindex++) {");
+ output.println(" tmpsum = tmpsum * 10 + *(taskname + tmpindex) - '0';");
+ output.println("}");
+ }
output.println("#ifdef RAWPATH");
- if(this.state.RAW) {
- output.println("BAMBOO_DEBUGPRINT(0xAAAA);");
- output.println("BAMBOO_DEBUGPRINT_REG(tmpsum);");
- } else {
- //output.println("BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();");
- output.println("printf(\"(%x,%x) Process %x(%d): task %s\\n\", udn_tile_coord_x(), udn_tile_coord_y(), corenum, corenum, \"" + task.getSymbol() + "\");");
- //output.println("BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();");
- }
- //output.println("BAMBOO_DEBUGPRINT(BAMBOO_GET_EXE_TIME());");
+ if(this.state.RAW) {
+ output.println("BAMBOO_DEBUGPRINT(0xAAAA);");
+ output.println("BAMBOO_DEBUGPRINT_REG(tmpsum);");
+ } else {
+ //output.println("BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();");
+ output.println("printf(\"(%x,%x) Process %x(%d): task %s\\n\", udn_tile_coord_x(), udn_tile_coord_y(), corenum, corenum, \"" + task.getSymbol() + "\");");
+ //output.println("BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();");
+ }
+ //output.println("BAMBOO_DEBUGPRINT(BAMBOO_GET_EXE_TIME());");
output.println("#endif");
output.println("#ifdef DEBUG");
- if(this.state.RAW) {
- output.println("BAMBOO_DEBUGPRINT(0xAAAA);");
- output.println("BAMBOO_DEBUGPRINT_REG(tmpsum);");
- } else {
- //output.println("BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();");
- output.println("printf(\"(%x,%x) Process %x(%d): task %s\\n\", udn_tile_coord_x(), udn_tile_coord_y(), corenum, corenum, \"" + task.getSymbol() + "\");");
- //output.println("BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();");
- }
+ if(this.state.RAW) {
+ output.println("BAMBOO_DEBUGPRINT(0xAAAA);");
+ output.println("BAMBOO_DEBUGPRINT_REG(tmpsum);");
+ } else {
+ //output.println("BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();");
+ output.println("printf(\"(%x,%x) Process %x(%d): task %s\\n\", udn_tile_coord_x(), udn_tile_coord_y(), corenum, corenum, \"" + task.getSymbol() + "\");");
+ //output.println("BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();");
+ }
+ output.println("#endif");
+ if(this.state.RAW) {
+ output.println("}");
+ }
output.println("#endif");
- if(this.state.RAW) {
- output.println("}");
- }
- output.println("#endif");
for(int i = 0; i < fm.numParameters(); ++i) {
TempDescriptor temp = fm.getParameter(i);
}
/** This method outputs TaskDescriptor information */
- protected void generateTaskDescriptor(PrintWriter output,
- PrintWriter outtask,
- FlatMethod fm,
- TaskDescriptor task,
- Vector[] qnames) {
+ protected void generateTaskDescriptor(PrintWriter output,
+ PrintWriter outtask,
+ FlatMethod fm,
+ TaskDescriptor task,
+ Vector[] qnames) {
int num = this.currentSchedule.getCoreNum();
output.println("/* TaskDescriptor information for task " + task.getSymbol() + " on core " + num + "*/");
/** This method generates header information for the task
* referenced by the Descriptor des. */
- protected void generateTaskHeader(FlatMethod fm,
- Descriptor des,
- PrintWriter output) {
+ protected void generateTaskHeader(FlatMethod fm,
+ Descriptor des,
+ PrintWriter output) {
/* Print header */
ParamsObject objectparams=(ParamsObject)paramstable.get(des);
TaskDescriptor task=(TaskDescriptor) des;
} else output.println(") {");
}
- protected void generateFlagOrAnd(FlatFlagActionNode ffan,
- FlatMethod fm,
- TempDescriptor temp,
- PrintWriter output,
- int ormask,
+ protected void generateFlagOrAnd(FlatFlagActionNode ffan,
+ FlatMethod fm,
+ TempDescriptor temp,
+ PrintWriter output,
+ int ormask,
int andmask) {
if (ffan.getTaskType()==FlatFlagActionNode.NEWOBJECT) {
output.println("flagorandinit("+generateTemp(fm, temp)+", 0x"+Integer.toHexString(ormask)+", 0x"+Integer.toHexString(andmask)+");");
output.println("}");
}
if(ffan.getTaskType()==FlatFlagActionNode.TASKEXIT) {
- // generate codes for profiling, recording which task exit it is
- output.println("#ifdef PROFILE");
- output.println("setTaskExitIndex(" + ffan.getTaskExitIndex() + ");");
- output.println("#endif");
+ // generate codes for profiling, recording which task exit it is
+ output.println("#ifdef PROFILE");
+ output.println("setTaskExitIndex(" + ffan.getTaskExitIndex() + ");");
+ output.println("#endif");
}
}
}
- protected void generateObjectDistribute(FlatFlagActionNode ffan,
- FlatMethod fm,
- TempDescriptor temp,
+ protected void generateObjectDistribute(FlatFlagActionNode ffan,
+ FlatMethod fm,
+ TempDescriptor temp,
PrintWriter output) {
ClassDescriptor cd = temp.getType().getClassDesc();
Vector<FlagState> initfstates = null;
", " + qinfo.length + ");");
output.println("}");
} /*else {
- // TODO
- // really needed?
- output.println("/* possibly needed by multi-parameter tasks on this core*//*");
- output.println("enqueueObject("+generateTemp(fm, temp)+", NULL, 0);");
- }*/ // deleted 09/07/06, multi-param tasks are pinned to one core now
+ // TODO
+ // really needed?
+ output.println("/* possibly needed by multi-parameter tasks on this core*//*");
+ output.println("enqueueObject("+generateTemp(fm, temp)+", NULL, 0);");
+ }*/ // deleted 09/07/06, multi-param tasks are pinned to one core now
} else {
/*if(!isolate) {
- // TODO
- // Is it possible to decide the actual queues?
- output.println("/* possibly needed by multi-parameter tasks on this core*//*");
- output.println("enqueueObject("+generateTemp(fm, temp)+", NULL, 0);");
- }*/ // deleted 09/07/06, multi-param tasks are pinned to one core now
+ // TODO
+ // Is it possible to decide the actual queues?
+ output.println("/* possibly needed by multi-parameter tasks on this core*//*");
+ output.println("enqueueObject("+generateTemp(fm, temp)+", NULL, 0);");
+ }*/ // deleted 09/07/06, multi-param tasks are pinned to one core now
output.println("/* transfer to core " + targetcore.toString() + "*/");
output.println("{");
// enqueue this object and its destinations for later process
} else {
tmpinfo.fs = tmpFState;
}
- qinfo = outputtransqueues(tmpinfo.fs, targetcore, output);
- output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));");
- output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";");
- output.println("tmpObjInfo->targetcore = "+targetcore.toString()+";");
- output.println("tmpObjInfo->queues = " + qinfo.qname + ";");
- output.println("tmpObjInfo->length = " + qinfo.length + ";");
- output.println("addNewItem(totransobjqueue, (void*)tmpObjInfo);");
+ qinfo = outputtransqueues(tmpinfo.fs, targetcore, output);
+ output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));");
+ output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";");
+ output.println("tmpObjInfo->targetcore = "+targetcore.toString()+";");
+ output.println("tmpObjInfo->queues = " + qinfo.qname + ";");
+ output.println("tmpObjInfo->length = " + qinfo.length + ";");
+ output.println("addNewItem(totransobjqueue, (void*)tmpObjInfo);");
output.println("}");
}
output.println("break;");
output.println("}");
} else {
/*if(!isolate) {
- // TODO
- // Is it possible to decide the actual queues?
- output.println("/* possibly needed by multi-parameter tasks on this core*//*");
- output.println("enqueueObject("+generateTemp(fm, temp)+", NULL, 0);");
- }*/ // deleted 09/07/06, multi-param tasks are pinned to one core now
+ // TODO
+ // Is it possible to decide the actual queues?
+ output.println("/* possibly needed by multi-parameter tasks on this core*//*");
+ output.println("enqueueObject("+generateTemp(fm, temp)+", NULL, 0);");
+ }*/ // deleted 09/07/06, multi-param tasks are pinned to one core now
output.println("/* transfer to core " + targetcore.toString() + "*/");
output.println("{");
// enqueue this object and its destinations for later process
} else {
tmpinfo.fs = tmpFState;
}
- qinfo = outputtransqueues(tmpinfo.fs, targetcore, output);
- output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));");
- output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";");
- output.println("tmpObjInfo->targetcore = "+targetcore.toString()+";");
- output.println("tmpObjInfo->queues = " + qinfo.qname + ";");
- output.println("tmpObjInfo->length = " + qinfo.length + ";");
- output.println("addNewItem(totransobjqueue, (void*)tmpObjInfo);");
+ qinfo = outputtransqueues(tmpinfo.fs, targetcore, output);
+ output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));");
+ output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";");
+ output.println("tmpObjInfo->targetcore = "+targetcore.toString()+";");
+ output.println("tmpObjInfo->queues = " + qinfo.qname + ";");
+ output.println("tmpObjInfo->length = " + qinfo.length + ";");
+ output.println("addNewItem(totransobjqueue, (void*)tmpObjInfo);");
output.println("}");
}
output.println("/* increase index*/");
", " + qinfo.length + ");");
output.println("}");
} /*else {
- // TODO
- // really needed?
- output.println("enqueueObject("+generateTemp(fm, temp)+", NULL, 0);");
- }*/ // deleted 09/07/06, multi-param tasks are pinned to one core now
+ // TODO
+ // really needed?
+ output.println("enqueueObject("+generateTemp(fm, temp)+", NULL, 0);");
+ }*/ // deleted 09/07/06, multi-param tasks are pinned to one core now
}
// codes for multi-params tasks
// need to be send to other cores
Vector<Integer> targetcores = this.currentSchedule.getAllyCores(tmpFState);
output.println("/* send the shared object to possible queues on other cores*/");
- // TODO, temporary solution, send to mostly the first two
- int upperbound = targetcores.size() > 2? 2: targetcores.size();
+ // TODO, temporary solution, send to mostly the first two
+ int upperbound = targetcores.size() > 2?2:targetcores.size();
for(int k = 0; k < upperbound; ++k) {
// TODO
// add the information of exactly which queue
int targetcore = targetcores.elementAt(k).intValue();
if(!sendto.contains(targetcore)) {
- // previously not sended to this target core
- // enqueue this object and its destinations for later process
- output.println("{");
- // all the possible queues
- QueueInfo qinfo = null;
- TranObjInfo tmpinfo = new TranObjInfo();
- tmpinfo.name = generateTemp(fm, temp);
- tmpinfo.targetcore = targetcore;
- FlagState targetFS = this.currentSchedule.getTargetFState(tmpFState);
- if(targetFS != null) {
- tmpinfo.fs = targetFS;
- } else {
- tmpinfo.fs = tmpFState;
- }
+ // previously not sended to this target core
+ // enqueue this object and its destinations for later process
+ output.println("{");
+ // all the possible queues
+ QueueInfo qinfo = null;
+ TranObjInfo tmpinfo = new TranObjInfo();
+ tmpinfo.name = generateTemp(fm, temp);
+ tmpinfo.targetcore = targetcore;
+ FlagState targetFS = this.currentSchedule.getTargetFState(tmpFState);
+ if(targetFS != null) {
+ tmpinfo.fs = targetFS;
+ } else {
+ tmpinfo.fs = tmpFState;
+ }
qinfo = outputtransqueues(tmpinfo.fs, targetcore, output);
output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));");
output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";");
}
}
- protected QueueInfo outputqueues(FlagState tmpFState,
- int num,
- PrintWriter output,
- boolean isEnqueue) {
+ protected QueueInfo outputqueues(FlagState tmpFState,
+ int num,
+ PrintWriter output,
+ boolean isEnqueue) {
// queue array
QueueInfo qinfo = new QueueInfo();
qinfo.qname = "queues_" + tmpFState.getLabel() + "_" + tmpFState.getiuid();
return qinfo;
}
- protected QueueInfo outputtransqueues(FlagState tmpFState,
- int targetcore,
- PrintWriter output) {
+ protected QueueInfo outputtransqueues(FlagState tmpFState,
+ int targetcore,
+ PrintWriter output) {
// queue array
QueueInfo qinfo = new QueueInfo();
qinfo.qname = "queues_" + tmpFState.getLabel() + "_" + tmpFState.getiuid();
public String qname;
}
- protected String generateTempFlagName(FlatMethod fm,
- TempDescriptor td) {
+ protected String generateTempFlagName(FlatMethod fm,
+ TempDescriptor td) {
MethodDescriptor md=fm.getMethod();
TaskDescriptor task=fm.getTask();
- TempObject objecttemps=(TempObject) tempstable.get(md!=null ? md : task);
+ TempObject objecttemps=(TempObject) tempstable.get(md!=null?md:task);
if (objecttemps.isLocalPrim(td)||objecttemps.isParamPrim(td)) {
return td.getSafeSymbol() + "_oldflag";
//output.println("freeQueue(totransobjqueue);");
}
- protected void outputAliasLockCode(FlatMethod fm,
- PrintWriter output) {
+ protected void outputAliasLockCode(FlatMethod fm,
+ PrintWriter output) {
if(this.m_oa == null) {
return;
}
for( int j = i + 1; j < fm.numParameters(); ++j ) {
common = this.m_oa.createsPotentialAliases(td, i, j);
if(!common.isEmpty()) {
- // ith parameter and jth parameter has alias, create lock to protect them
+ // ith parameter and jth parameter has alias, create lock to protect them
if(aliasSets.elementAt(i) == null) {
aliasSets.setElementAt(new Vector<Integer>(), i);
}
AllocationSite as = (AllocationSite)allocSites[j];
common = this.m_oa.createsPotentialAliases(td, i, as);
if( !common.isEmpty() ) {
- // ith parameter and allocationsite as has alias
+ // ith parameter and allocationsite as has alias
if(aliasFNSets.elementAt(i) == null) {
aliasFNSets.setElementAt(new Vector<FlatNew>(), i);
}
common = this.m_oa.createsPotentialAliases(td, as1, as2);
if( !common.isEmpty() ) {
- // as1 and as2 has alias
+ // as1 and as2 has alias
if(!aliasFNTbl.containsKey(as1.getFlatNew())) {
aliasFNTbl.put(as1.getFlatNew(), new Vector<FlatNew>());
}
// create locks
if(numlock > 0) {
output.println("int aliaslocks[" + numlock + "];");
- output.println("int tmpi = 0;");
+ output.println("int tmpi = 0;");
// associate locks with parameters
int lockindex = 0;
for(int i = 0; i < this.m_aliasSets.size(); i++) {
- Vector<Integer> toadd = this.m_aliasSets.elementAt(i);
-
- output.println("int tmplen_" + lockindex + " = " + toadd.size());
- output.println("void * tmpptrs_" + lockindex + "[] = {");
- for(int j = 0; j < toadd.size(); j++) {
- int para = toadd.elementAt(j).intValue();
- output.print(generateTemp(fm, fm.getParameter(para)));
- if(j < toadd.size() - 1) {
- output.print(", ");
- } else {
- output.println("};");
- }
- }
- output.println("aliaslocks[tmpi++] = getAliasLock(tmpptrs_" + lockindex + ", tmplen_" + lockindex + ", lockRedirectTbl);");
-
- for(int j = 0; j < toadd.size(); j++) {
- int para = toadd.elementAt(j).intValue();
- output.println("addAliasLock(" + generateTemp(fm, fm.getParameter(para)) + ", aliaslocks[" + i + "]);");
- }
- // check if this lock is also associated with any FlatNew nodes
- if(this.m_aliasFNTbl4Para.containsKey(toadd.elementAt(0))) {
- if(this.m_aliaslocksTbl4FN == null) {
- this.m_aliaslocksTbl4FN = new Hashtable<FlatNew, Vector<Integer>>();
- }
- Vector<FlatNew> tmpv = this.m_aliasFNTbl4Para.get(toadd.elementAt(0));
- for(int j = 0; j < tmpv.size(); j++) {
- FlatNew fn = tmpv.elementAt(j);
- if(!this.m_aliaslocksTbl4FN.containsKey(fn)) {
- this.m_aliaslocksTbl4FN.put(fn, new Vector<Integer>());
- }
- this.m_aliaslocksTbl4FN.get(fn).add(i);
- }
- this.m_aliasFNTbl4Para.remove(toadd.elementAt(0));
- }
- lockindex++;
+ Vector<Integer> toadd = this.m_aliasSets.elementAt(i);
+
+ output.println("int tmplen_" + lockindex + " = " + toadd.size());
+ output.println("void * tmpptrs_" + lockindex + "[] = {");
+ for(int j = 0; j < toadd.size(); j++) {
+ int para = toadd.elementAt(j).intValue();
+ output.print(generateTemp(fm, fm.getParameter(para)));
+ if(j < toadd.size() - 1) {
+ output.print(", ");
+ } else {
+ output.println("};");
+ }
+ }
+ output.println("aliaslocks[tmpi++] = getAliasLock(tmpptrs_" + lockindex + ", tmplen_" + lockindex + ", lockRedirectTbl);");
+
+ for(int j = 0; j < toadd.size(); j++) {
+ int para = toadd.elementAt(j).intValue();
+ output.println("addAliasLock(" + generateTemp(fm, fm.getParameter(para)) + ", aliaslocks[" + i + "]);");
+ }
+ // check if this lock is also associated with any FlatNew nodes
+ if(this.m_aliasFNTbl4Para.containsKey(toadd.elementAt(0))) {
+ if(this.m_aliaslocksTbl4FN == null) {
+ this.m_aliaslocksTbl4FN = new Hashtable<FlatNew, Vector<Integer>>();
+ }
+ Vector<FlatNew> tmpv = this.m_aliasFNTbl4Para.get(toadd.elementAt(0));
+ for(int j = 0; j < tmpv.size(); j++) {
+ FlatNew fn = tmpv.elementAt(j);
+ if(!this.m_aliaslocksTbl4FN.containsKey(fn)) {
+ this.m_aliaslocksTbl4FN.put(fn, new Vector<Integer>());
+ }
+ this.m_aliaslocksTbl4FN.get(fn).add(i);
+ }
+ this.m_aliasFNTbl4Para.remove(toadd.elementAt(0));
+ }
+ lockindex++;
}
-
+
Object[] key = this.m_aliasFNTbl4Para.keySet().toArray();
for(int i = 0; i < key.length; i++) {
int para = ((Integer)key[i]).intValue();
output.println("void * tmpptrs_" + lockindex + "[] = {" + generateTemp(fm, fm.getParameter(para)) + "};");
output.println("aliaslocks[tmpi++] = getAliasLock(tmpptrs_" + lockindex + ", 1, lockRedirectTbl);");
-
+
output.println("addAliasLock(" + generateTemp(fm, fm.getParameter(para)) + ", aliaslocks[" + lockindex + "]);");
Vector<FlatNew> tmpv = this.m_aliasFNTbl4Para.get(para);
for(int j = 0; j < tmpv.size(); j++) {
}
lockindex++;
}
-
+
// check m_aliasFNTbl for locks associated with FlatNew nodes
Object[] FNkey = this.m_aliasFNTbl.keySet().toArray();
for(int i = 0; i < FNkey.length; i++) {
FlatNew fn = (FlatNew)FNkey[i];
Vector<FlatNew> tmpv = this.m_aliasFNTbl.get(fn);
-
+
output.println("aliaslocks[tmpi++] = (int)(RUNMALLOC(sizeof(int)));");
-
+
if(this.m_aliaslocksTbl4FN == null) {
this.m_aliaslocksTbl4FN = new Hashtable<FlatNew, Vector<Integer>>();
}
}
}
- protected void generateFlatReturnNode(FlatMethod fm,
- FlatReturnNode frn,
- PrintWriter output) {
+ protected void generateFlatReturnNode(FlatMethod fm,
+ FlatReturnNode frn,
+ PrintWriter output) {
if (frn.getReturnTemp()!=null) {
if (frn.getReturnTemp().getType().isPtr())
output.println("return (struct "+fm.getMethod().getReturnType().getSafeSymbol()+"*)"+generateTemp(fm, frn.getReturnTemp())+";");
}
}
- protected void generateFlatNew(FlatMethod fm,
- FlatNew fn,
+ protected void generateFlatNew(FlatMethod fm,
+ FlatNew fn,
PrintWriter output) {
if (fn.getType().isArray()) {
int arrayid = state.getArrayNumber(fn.getType())
}
}
// generate codes for profiling, recording how many new objects are created
- if(!fn.getType().isArray() &&
- (fn.getType().getClassDesc() != null)
- && (fn.getType().getClassDesc().hasFlags())) {
- output.println("#ifdef PROFILE");
- output.println("addNewObjInfo(\"" + fn.getType().getClassDesc().getSymbol() + "\");");
- output.println("#endif");
+ if(!fn.getType().isArray() &&
+ (fn.getType().getClassDesc() != null)
+ && (fn.getType().getClassDesc().hasFlags())) {
+ output.println("#ifdef PROFILE");
+ output.println("addNewObjInfo(\"" + fn.getType().getClassDesc().getSymbol() + "\");");
+ output.println("#endif");
}
}
public FlagState fs;
}
- protected boolean contains(Vector<TranObjInfo> sendto,
- TranObjInfo t) {
+ protected boolean contains(Vector<TranObjInfo> sendto,
+ TranObjInfo t) {
if(sendto.size() == 0) {
return false;
}
protected void generateTempStructs(FlatMethod fm, LocalityBinding lb) {
MethodDescriptor md=fm.getMethod();
TaskDescriptor task=fm.getTask();
- Set<TempDescriptor> saveset=lb!=null ? locality.getTempSet(lb) : null;
- ParamsObject objectparams=md!=null ? new ParamsObject(md,tag++) : new ParamsObject(task, tag++);
+ Set<TempDescriptor> saveset=lb!=null?locality.getTempSet(lb):null;
+ ParamsObject objectparams=md!=null?new ParamsObject(md,tag++):new ParamsObject(task, tag++);
if (lb!=null) {
paramstable.put(lb, objectparams);
backuptable.put(lb, new Hashtable<TempDescriptor, TempDescriptor>());
objectparams.addPrim(temp);
}
- TempObject objecttemps=md!=null ? new TempObject(objectparams,md,tag++) : new TempObject(objectparams, task, tag++);
+ TempObject objecttemps=md!=null?new TempObject(objectparams,md,tag++):new TempObject(objectparams, task, tag++);
if (lb!=null)
tempstable.put(lb, objecttemps);
else if (md!=null)
protected void generateMethodParam(ClassDescriptor cn, MethodDescriptor md, LocalityBinding lb, PrintWriter output) {
/* Output parameter structure */
if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
- ParamsObject objectparams=(ParamsObject) paramstable.get(lb!=null ? lb : md);
+ ParamsObject objectparams=(ParamsObject) paramstable.get(lb!=null?lb:md);
if ((state.DSM||state.SINGLETM)&&lb!=null)
output.println("struct "+cn.getSafeSymbol()+lb.getSignature()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"_params {");
else
System.out.println(fm.printMethod());
MethodDescriptor md=fm.getMethod();
TaskDescriptor task=fm.getTask();
- ClassDescriptor cn=md!=null ? md.getClassDesc() : null;
- ParamsObject objectparams=(ParamsObject)paramstable.get(lb!=null ? lb : md!=null ? md : task);
+ ClassDescriptor cn=md!=null?md.getClassDesc():null;
+ ParamsObject objectparams=(ParamsObject)paramstable.get(lb!=null?lb:md!=null?md:task);
HashSet<AtomicRecord> arset=null;
branchanalysis=null;
localsprefixaddr="&"+localsprefix;
localsprefixderef=localsprefix+".";
- generateHeader(fm, lb, md!=null ? md : task,output);
- TempObject objecttemp=(TempObject) tempstable.get(lb!=null ? lb : md!=null ? md : task);
+ generateHeader(fm, lb, md!=null?md:task,output);
+ TempObject objecttemp=(TempObject) tempstable.get(lb!=null?lb:md!=null?md:task);
if (state.DELAYCOMP&&!lb.isAtomic()&&lb.getHasAtomic()) {
for(Iterator<AtomicRecord> arit=arset.iterator(); arit.hasNext(); ) {
protected String generateTemp(FlatMethod fm, TempDescriptor td) {
MethodDescriptor md=fm.getMethod();
TaskDescriptor task=fm.getTask();
- TempObject objecttemps=(TempObject) tempstable.get(currlb!=null ? currlb : md!=null ? md : task);
+ TempObject objecttemps=(TempObject) tempstable.get(currlb!=null?currlb:md!=null?md:task);
if (objecttemps.isLocalPrim(td)||objecttemps.isParamPrim(td)) {
return td.getSafeSymbol();
fieldoffset.add(newfieldoffset);
}
- int base=(tuplecount>0) ? ((Short)endoffset.get(tuplecount-1)).intValue() : 0;
+ int base=(tuplecount>0)?((Short)endoffset.get(tuplecount-1)).intValue():0;
base+=pp.desc.size()-breakindex;
endoffset.add(new Short((short)base));
}
protected void generateFlatCall(FlatMethod fm, FlatCall fc, PrintWriter output) {
MethodDescriptor md=fc.getMethod();
- ParamsObject objectparams=(ParamsObject)paramstable.get(currlb!=null ? locality.getBinding(currlb, fc) : md);
+ ParamsObject objectparams=(ParamsObject)paramstable.get(currlb!=null?locality.getBinding(currlb, fc):md);
ClassDescriptor cn=md.getClassDesc();
// if the called method is a static block or a static method or a constructor
}
if((md.getSymbol().equals("MonitorEnter") || md.getSymbol().equals("MonitorExit")) && fc.getThis().getSymbol().equals("classobj")) {
// call MonitorEnter/MonitorExit on a class obj
- output.println(" " + cn.getSafeSymbol()+md.getSafeSymbol()+"_"
- + md.getSafeMethodDescriptor() + "((struct ___Object___*)(((void **)(((char *) &(global_defs_p->classobjs->___length___))+sizeof(int)))["
- + fc.getThis().getType().getClassDesc().getId() + "]));");
+ output.println(" " + cn.getSafeSymbol()+md.getSafeSymbol()+"_"
+ + md.getSafeMethodDescriptor() + "((struct ___Object___*)(((void **)(((char *) &(global_defs_p->classobjs->___length___))+sizeof(int)))["
+ + fc.getThis().getType().getClassDesc().getId() + "]));");
return;
}
}
* task referenced by the Descriptor des. */
protected void generateHeader(FlatMethod fm, LocalityBinding lb, Descriptor des, PrintWriter output) {
/* Print header */
- ParamsObject objectparams=(ParamsObject)paramstable.get(lb!=null ? lb : des);
+ ParamsObject objectparams=(ParamsObject)paramstable.get(lb!=null?lb:des);
MethodDescriptor md=null;
TaskDescriptor task=null;
if (des instanceof MethodDescriptor)
else
task=(TaskDescriptor) des;
- ClassDescriptor cn=md!=null ? md.getClassDesc() : null;
+ ClassDescriptor cn=md!=null?md.getClassDesc():null;
if (md!=null&&md.getReturnType()!=null) {
if (state.MGC && md.getReturnType().isClass() && md.getReturnType().getClassDesc().isEnum()) {
HashSet breakset;
HashSet continueset;
FlatExit fe;
-
+
// for synchronized blocks
Stack<TempDescriptor> lockStack;
o=new Float(0.0);
} else if (currmd.getReturnType().isDouble()) {
o=new Double(0.0);
- }
+ }
FlatLiteralNode fln=new FlatLiteralNode(currmd.getReturnType(),o,tmp);
rnflat=new FlatReturnNode(tmp);
fln.addNext(rnflat);
- fn.addNext(fln);
+ fn.addNext(fln);
}
return rnflat;
}
private void flattenClass(ClassDescriptor cn) {
Iterator methodit=cn.getMethods();
- while(methodit.hasNext()) {
+ while(methodit.hasNext()) {
flattenMethod(cn, (MethodDescriptor)methodit.next());
}
}
state.addFlatCode(md,fm);
}
}
-
+
public void flattenMethod(ClassDescriptor cn, MethodDescriptor md) {
// if OOOJava is on, splice a special SESE in to
// enclose the main method
currmd=md;
- boolean spliceInImplicitMain = state.OOOJAVA && currmd.equals( typeutil.getMain() );
-
+ boolean spliceInImplicitMain = state.OOOJAVA && currmd.equals(typeutil.getMain() );
+
FlatSESEEnterNode spliceSESE = null;
- FlatSESEExitNode spliceExit = null;
-
+ FlatSESEExitNode spliceExit = null;
+
if( spliceInImplicitMain ) {
- SESENode mainTree = new SESENode( "main" );
- spliceSESE = new FlatSESEEnterNode( mainTree );
- spliceExit = new FlatSESEExitNode ( mainTree );
- spliceSESE.setFlatExit ( spliceExit );
- spliceExit.setFlatEnter( spliceSESE );
+ SESENode mainTree = new SESENode("main");
+ spliceSESE = new FlatSESEEnterNode(mainTree);
+ spliceExit = new FlatSESEExitNode(mainTree);
+ spliceSESE.setFlatExit(spliceExit);
+ spliceExit.setFlatEnter(spliceSESE);
spliceSESE.setIsMainSESE();
- }
-
+ }
+
fe=new FlatExit();
BlockNode bn=state.getMethodBody(currmd);
MethodDescriptor memd=(MethodDescriptor)typeutil.getClass("Object").getMethodTable().get("MonitorEnter");
FlatNode first = null;
FlatNode end = null;
-
+
{
if (lockStack.size()!=1) {
throw new Error("TOO MANY THINGS ON LOCKSTACK");
}
TempDescriptor thistd = this.lockStack.elementAt(0);
- FlatCall fc = new FlatCall(memd, null, thistd, new TempDescriptor[0]);
- fc.setNumLine(bn.getNumLine());
- first = end = fc;
+ FlatCall fc = new FlatCall(memd, null, thistd, new TempDescriptor[0]);
+ fc.setNumLine(bn.getNumLine());
+ first = end = fc;
}
-
+
end.addNext(fn);
fn=first;
end = np.getEnd();
FlatNode rnflat=spliceReturn(end);
rnflat.addNext(fe);
} else {
- this.lockStack.clear();
+ this.lockStack.clear();
}
} else if (state.DSM&&currmd.getModifiers().isAtomic()) {
curran.addNext(fn);
np.getEnd().addNext(aen);
FlatNode rnflat=spliceReturn(aen);
rnflat.addNext(fe);
- }
+ }
} else if (np.getEnd()!=null&&np.getEnd().kind()!=FKind.FlatReturnNode) {
FlatNode rnflat=null;
if( spliceInImplicitMain ) {
}
if( spliceInImplicitMain ) {
spliceSESE.addNext(fn);
- fn=spliceSESE;
+ fn=spliceSESE;
}
FlatMethod fm=new FlatMethod(currmd, fe);
NodePair np=flattenBlockStatementNode(bn.get(i));
FlatNode np_begin=np.getBegin();
FlatNode np_end=np.getEnd();
- if(bn.getLabel()!=null){
- // interim implementation to have the labeled statement
- state.fn2labelMap.put(np_begin, bn.getLabel());
+ if(bn.getLabel()!=null) {
+ // interim implementation to have the labeled statement
+ state.fn2labelMap.put(np_begin, bn.getLabel());
}
if (begin==null) {
begin=np_begin;
FlatNode last=fn;
//handle wrapper fields
ClassDescriptor cd=td.getClassDesc();
- for(Iterator fieldit=cd.getFields();fieldit.hasNext();) {
+ for(Iterator fieldit=cd.getFields(); fieldit.hasNext(); ) {
FieldDescriptor fd=(FieldDescriptor)fieldit.next();
if (fd.getType().iswrapper()) {
TempDescriptor wrap_tmp=TempDescriptor.tempFactory("wrapper_obj",fd.getType());
return new NodePair(fn,last);
} else {
if(con.getArrayInitializer() == null) {
- FlatNode first=null;
- FlatNode last=null;
- TempDescriptor[] temps=new TempDescriptor[con.numArgs()];
- for (int i=0; i<con.numArgs(); i++) {
- ExpressionNode en=con.getArg(i);
- TempDescriptor tmp=TempDescriptor.tempFactory("arg",en.getType());
- temps[i]=tmp;
- NodePair np=flattenExpressionNode(en, tmp);
- if (first==null)
- first=np.getBegin();
- else
- last.addNext(np.getBegin());
- last=np.getEnd();
-
- TempDescriptor tmp2=(i==0) ?
- out_temp :
- TempDescriptor.tempFactory("arg",en.getType());
- }
- FlatNew fn=new FlatNew(td, out_temp, temps[0], con.isGlobal(), con.getDisjointId());
- last.addNext(fn);
- if (temps.length>1) {
- NodePair np=generateNewArrayLoop(temps, td.dereference(), out_temp, 0, con.isGlobal());
- fn.addNext(np.getBegin());
- return new NodePair(first,np.getEnd());
- } else if (td.isArray()&&td.dereference().iswrapper()) {
- NodePair np=generateNewArrayLoop(temps, td.dereference(), out_temp, 0, con.isGlobal());
- fn.addNext(np.getBegin());
- return new NodePair(first,np.getEnd());
- } else
- return new NodePair(first, fn);
+ FlatNode first=null;
+ FlatNode last=null;
+ TempDescriptor[] temps=new TempDescriptor[con.numArgs()];
+ for (int i=0; i<con.numArgs(); i++) {
+ ExpressionNode en=con.getArg(i);
+ TempDescriptor tmp=TempDescriptor.tempFactory("arg",en.getType());
+ temps[i]=tmp;
+ NodePair np=flattenExpressionNode(en, tmp);
+ if (first==null)
+ first=np.getBegin();
+ else
+ last.addNext(np.getBegin());
+ last=np.getEnd();
+
+ TempDescriptor tmp2=(i==0)?
+ out_temp:
+ TempDescriptor.tempFactory("arg",en.getType());
+ }
+ FlatNew fn=new FlatNew(td, out_temp, temps[0], con.isGlobal(), con.getDisjointId());
+ last.addNext(fn);
+ if (temps.length>1) {
+ NodePair np=generateNewArrayLoop(temps, td.dereference(), out_temp, 0, con.isGlobal());
+ fn.addNext(np.getBegin());
+ return new NodePair(first,np.getEnd());
+ } else if (td.isArray()&&td.dereference().iswrapper()) {
+ NodePair np=generateNewArrayLoop(temps, td.dereference(), out_temp, 0, con.isGlobal());
+ fn.addNext(np.getBegin());
+ return new NodePair(first,np.getEnd());
+ } else
+ return new NodePair(first, fn);
} else {
- // array creation with initializers
- return flattenArrayInitializerNode(con.getArrayInitializer(), out_temp);
+ // array creation with initializers
+ return flattenArrayInitializerNode(con.getArrayInitializer(), out_temp);
}
}
}
} else if (td.isArray()&&td.dereference().iswrapper()) {
NodePair np2=generateNewArrayLoop(temparray, td.dereference(), new_tmp, i+1, isglobal);
fsen.addNext(np2.getBegin());
- np2.getEnd().addNext(fon);
+ np2.getEnd().addNext(fon);
} else {
fsen.addNext(fon);
}
if (min.getExpression()!=null) {
TypeDescriptor mtd = min.getExpression().getType();
if(mtd.isClass() && mtd.getClassDesc().isEnum()) {
- mtd = new TypeDescriptor(TypeDescriptor.INT);
+ mtd = new TypeDescriptor(TypeDescriptor.INT);
}
thisarg=TempDescriptor.tempFactory("thisarg", mtd);
NodePair np=flattenExpressionNode(min.getExpression(),thisarg);
ExpressionNode en=min.getArg(i);
TypeDescriptor etd = en.getType();
if(etd.isClass() && etd.getClassDesc().isEnum()) {
- etd = new TypeDescriptor(TypeDescriptor.INT);
+ etd = new TypeDescriptor(TypeDescriptor.INT);
}
TempDescriptor td=TempDescriptor.tempFactory("arg", etd);
temps[i]=td;
fc=new FlatCall(md, null, thisarg, temps, min.getSuper());
else
fc=new FlatCall(md, out_temp, thisarg, temps, min.getSuper());
-
+
fc.setNumLine(min.getNumLine());
-
+
if (first==null) {
first=fc;
} else
npe.getEnd().addNext(npi.getBegin());
npi.getEnd().addNext(fn);
if (aan.iswrapper()) {
- FlatFieldNode ffn=new FlatFieldNode((FieldDescriptor)aan.getExpression().getType().dereference().getClassDesc().getFieldTable().get("value") ,arraytmp,out_temp);
+ FlatFieldNode ffn=new FlatFieldNode((FieldDescriptor)aan.getExpression().getType().dereference().getClassDesc().getFieldTable().get("value"),arraytmp,out_temp);
ffn.setNumLine(aan.getNumLine());
fn.addNext(ffn);
fn=ffn;
if (!pre) {
//rewrite the base operation
- base=base.getOp()==Operation.POSTINC ? new Operation(Operation.ADD) : new Operation(Operation.SUB);
+ base=base.getOp()==Operation.POSTINC?new Operation(Operation.ADD):new Operation(Operation.SUB);
}
FlatNode first=null;
FlatNode last=null;
- TempDescriptor src_tmp = src_tmp=an.getSrc()==null ? TempDescriptor.tempFactory("srctmp",an.getDest().getType()) : TempDescriptor.tempFactory("srctmp",an.getSrc().getType());
+ TempDescriptor src_tmp = src_tmp=an.getSrc()==null?TempDescriptor.tempFactory("srctmp",an.getDest().getType()):TempDescriptor.tempFactory("srctmp",an.getSrc().getType());
//Get src value
if (an.getSrc()!=null) {
if(an.getSrc().getEval() != null) {
- FlatLiteralNode fln=new FlatLiteralNode(an.getSrc().getType(), an.getSrc().getEval().longValue(), src_tmp);
- fln.setNumLine(an.getSrc().getNumLine());
- first = last =fln;
+ FlatLiteralNode fln=new FlatLiteralNode(an.getSrc().getType(), an.getSrc().getEval().longValue(), src_tmp);
+ fln.setNumLine(an.getSrc().getNumLine());
+ first = last =fln;
} else {
- NodePair np_src=flattenExpressionNode(an.getSrc(),src_tmp);
- first=np_src.getBegin();
- last=np_src.getEnd();
+ NodePair np_src=flattenExpressionNode(an.getSrc(),src_tmp);
+ first=np_src.getBegin();
+ last=np_src.getEnd();
}
} else if (!pre) {
FlatLiteralNode fln=new FlatLiteralNode(new TypeDescriptor(TypeDescriptor.INT),new Integer(1),src_tmp);
TempDescriptor dst_tmp=null;
NodePair np_baseexp=null;
if(en.getType().isClassNameRef()) {
- // static field dereference with class name
- dst_tmp = new TempDescriptor(en.getType().getClassDesc().getSymbol(), en.getType());
- FlatNop nop=new FlatNop();
- np_baseexp = new NodePair(nop,nop);
+ // static field dereference with class name
+ dst_tmp = new TempDescriptor(en.getType().getClassDesc().getSymbol(), en.getType());
+ FlatNop nop=new FlatNop();
+ np_baseexp = new NodePair(nop,nop);
} else {
- dst_tmp=TempDescriptor.tempFactory("dst",en.getType());
- np_baseexp=flattenExpressionNode(en, dst_tmp);
+ dst_tmp=TempDescriptor.tempFactory("dst",en.getType());
+ np_baseexp=flattenExpressionNode(en, dst_tmp);
}
if (first==null)
first=np_baseexp.getBegin();
//See if we need to perform an operation
if (base!=null) {
//If it is a preinc we need to store the initial value
- TempDescriptor src_tmp2=pre ? TempDescriptor.tempFactory("src",an.getDest().getType()) : out_temp;
+ TempDescriptor src_tmp2=pre?TempDescriptor.tempFactory("src",an.getDest().getType()):out_temp;
TempDescriptor tmp=TempDescriptor.tempFactory("srctmp3_",an.getDest().getType());
FlatFieldNode ffn=new FlatFieldNode(fan.getField(), dst_tmp, src_tmp2);
ffn.setNumLine(an.getNumLine());
//See if we need to perform an operation
if (base!=null) {
//If it is a preinc we need to store the initial value
- TempDescriptor src_tmp2=pre ? TempDescriptor.tempFactory("src",an.getDest().getType()) : out_temp;
+ TempDescriptor src_tmp2=pre?TempDescriptor.tempFactory("src",an.getDest().getType()):out_temp;
TempDescriptor tmp=TempDescriptor.tempFactory("srctmp3_",an.getDest().getType());
if (aan.iswrapper()) {
}
}
- if (aan.iswrapper()) {
+ if (aan.iswrapper()) {
TypeDescriptor arrayeltype=aan.getExpression().getType().dereference();
TempDescriptor src_tmp3=TempDescriptor.tempFactory("src3",arrayeltype);
FlatElementNode fen=new FlatElementNode(dst_tmp, index_tmp, src_tmp3);
last.addNext(fen);
fen.addNext(fsfn);
last=fsfn;
- } else {
+ } else {
FlatSetElementNode fsen=new FlatSetElementNode(dst_tmp, index_tmp, src_tmp);
fsen.setNumLine(aan.getNumLine());
last.addNext(fsen);
//It is a field
FieldAccessNode fan=(FieldAccessNode)nn.getExpression();
ExpressionNode en=fan.getExpression();
- TempDescriptor dst_tmp=null;
- NodePair np_baseexp=null;
- if(en.getType().isClassNameRef()) {
- // static field dereference with class name
- dst_tmp = new TempDescriptor(en.getType().getClassDesc().getSymbol(), en.getType());
- FlatNop nop=new FlatNop();
- np_baseexp = new NodePair(nop,nop);
- } else {
- dst_tmp=TempDescriptor.tempFactory("dst",en.getType());
- np_baseexp=flattenExpressionNode(en, dst_tmp);
- }
+ TempDescriptor dst_tmp=null;
+ NodePair np_baseexp=null;
+ if(en.getType().isClassNameRef()) {
+ // static field dereference with class name
+ dst_tmp = new TempDescriptor(en.getType().getClassDesc().getSymbol(), en.getType());
+ FlatNop nop=new FlatNop();
+ np_baseexp = new NodePair(nop,nop);
+ } else {
+ dst_tmp=TempDescriptor.tempFactory("dst",en.getType());
+ np_baseexp=flattenExpressionNode(en, dst_tmp);
+ }
if (first==null)
first=np_baseexp.getBegin();
else
//See if we need to perform an operation
if (base!=null) {
//If it is a preinc we need to store the initial value
- TempDescriptor src_tmp2=pre ? TempDescriptor.tempFactory("src",an.getDest().getType()) : out_temp;
+ TempDescriptor src_tmp2=pre?TempDescriptor.tempFactory("src",an.getDest().getType()):out_temp;
TempDescriptor tmp=TempDescriptor.tempFactory("srctmp3_",an.getDest().getType());
FlatFieldNode ffn=new FlatFieldNode(fan.getField(), dst_tmp, src_tmp2);
//See if we need to perform an operation
if (base!=null) {
//If it is a preinc we need to store the initial value
- TempDescriptor src_tmp2=pre ? TempDescriptor.tempFactory("src",an.getDest().getType()) : out_temp;
+ TempDescriptor src_tmp2=pre?TempDescriptor.tempFactory("src",an.getDest().getType()):out_temp;
TempDescriptor tmp=TempDescriptor.tempFactory("srctmp3_",an.getDest().getType());
-
- TempDescriptor ftmp= null;
- if((nn.getClassDesc() != null)) {
- // this is a static field
- ftmp = new TempDescriptor(nn.getClassDesc().getSymbol(), nn.getClassType());
-
- } else {
- ftmp=getTempforVar(nn.getVar());
- }
- FlatFieldNode ffn=new FlatFieldNode(nn.getField(), ftmp, src_tmp2);
- ffn.setNumLine(an.getNumLine());
+
+ TempDescriptor ftmp= null;
+ if((nn.getClassDesc() != null)) {
+ // this is a static field
+ ftmp = new TempDescriptor(nn.getClassDesc().getSymbol(), nn.getClassType());
+
+ } else {
+ ftmp=getTempforVar(nn.getVar());
+ }
+ FlatFieldNode ffn=new FlatFieldNode(nn.getField(), ftmp, src_tmp2);
+ ffn.setNumLine(an.getNumLine());
if (first==null)
first=ffn;
}
}
- FlatSetFieldNode fsfn=null;
- if(nn.getClassDesc()!=null) {
- // this is a static field access inside of a static block
- fsfn=new FlatSetFieldNode(new TempDescriptor("sfsb", nn.getClassType()), nn.getField(), src_tmp);
- fsfn.setNumLine(nn.getNumLine());
- } else {
- fsfn=new FlatSetFieldNode(getTempforVar(nn.getVar()), nn.getField(), src_tmp);
- fsfn.setNumLine(nn.getNumLine());
- }
+ FlatSetFieldNode fsfn=null;
+ if(nn.getClassDesc()!=null) {
+ // this is a static field access inside of a static block
+ fsfn=new FlatSetFieldNode(new TempDescriptor("sfsb", nn.getClassType()), nn.getField(), src_tmp);
+ fsfn.setNumLine(nn.getNumLine());
+ } else {
+ fsfn=new FlatSetFieldNode(getTempforVar(nn.getVar()), nn.getField(), src_tmp);
+ fsfn.setNumLine(nn.getNumLine());
+ }
if (first==null) {
first=fsfn;
} else {
} else if (nn.getField()!=null) {
TempDescriptor tmp= null;
if((nn.getClassDesc() != null)) {
- // this is a static field
- tmp = new TempDescriptor(nn.getClassDesc().getSymbol(), nn.getClassType());
-
+ // this is a static field
+ tmp = new TempDescriptor(nn.getClassDesc().getSymbol(), nn.getClassType());
+
} else {
- tmp=getTempforVar(nn.getVar());
+ tmp=getTempforVar(nn.getVar());
}
FlatFieldNode ffn=new FlatFieldNode(nn.getField(), tmp, out_temp);
ffn.setNumLine(nn.getNumLine());
return new NodePair(ffn,ffn);
} else {
- TempDescriptor tmp=getTempforVar(nn.isTag() ? nn.getTagVar() : nn.getVar());
+ TempDescriptor tmp=getTempforVar(nn.isTag()?nn.getTagVar():nn.getVar());
if (nn.isTag()) {
//propagate tag
out_temp.setTag(tmp.getTag());
return flattenNameNode((NameNode)en,out_temp);
case Kind.OpNode:
- return flattenOpNode((OpNode)en,out_temp);
+ return flattenOpNode((OpNode)en,out_temp);
case Kind.OffsetNode:
return flattenOffsetNode((OffsetNode)en,out_temp);
fcb.addTrueNext(true_np.getBegin());
fcb.addFalseNext(false_np.getBegin());
if (true_np.getEnd()!=null)
- true_np.getEnd().addNext(nopend);
+ true_np.getEnd().addNext(nopend);
if (false_np.getEnd()!=null)
- false_np.getEnd().addNext(nopend);
+ false_np.getEnd().addNext(nopend);
if (nopend.numPrev()==0)
return new NodePair(cond.getBegin(), null);
return new NodePair(cond.getBegin(), nopend);
}
-
+
private NodePair flattenSwitchStatementNode(SwitchStatementNode ssn) {
TempDescriptor cond_temp=TempDescriptor.tempFactory("condition",new TypeDescriptor(TypeDescriptor.INT));
NodePair cond=flattenExpressionNode(ssn.getCondition(),cond_temp);
NodePair sbody = flattenSwitchBodyNode(ssn.getSwitchBody(), cond_temp);
-
+
cond.getEnd().addNext(sbody.getBegin());
return new NodePair(cond.getBegin(), sbody.getEnd());
}
-
+
private NodePair flattenSwitchBodyNode(BlockNode bn, TempDescriptor cond_temp) {
FlatNode begin=null;
FlatNode end=null;
SwitchBlockNode sbn = (SwitchBlockNode)bn.get(i);
HashSet oldbs=breakset;
breakset=new HashSet();
-
+
NodePair body=flattenBlockNode(sbn.getSwitchBlockStatement());
Vector<SwitchLabelNode> slnv = sbn.getSwitchConditions();
FlatNode cond_begin = null;
NodePair prev_fnp = null;
for(int j = 0; j < slnv.size(); j++) {
- SwitchLabelNode sln = slnv.elementAt(j);
- NodePair left = null;
- NodePair false_np = null;
- if(sln.isDefault()) {
- left = body;
- } else {
- TempDescriptor cond_tmp=TempDescriptor.tempFactory("condition", new TypeDescriptor(TypeDescriptor.BOOLEAN));
- TempDescriptor temp_left=TempDescriptor.tempFactory("leftop", sln.getCondition().getType());
- Operation op=new Operation(Operation.EQUAL);
- left=flattenExpressionNode(sln.getCondition(), temp_left);
- FlatOpNode fon=new FlatOpNode(cond_tmp, temp_left, cond_temp, op);
- fon.setNumLine(sln.getNumLine());
- left.getEnd().addNext(fon);
-
- FlatCondBranch fcb=new FlatCondBranch(cond_tmp);
- fcb.setNumLine(bn.getNumLine());
- fcb.setTrueProb(State.TRUEPROB);
-
- FlatNop nop=new FlatNop();
- false_np=new NodePair(nop,nop);
-
- fon.addNext(fcb);
- fcb.addTrueNext(body.getBegin());
- fcb.addFalseNext(false_np.getBegin());
- }
- if((prev_fnp != null) && (prev_fnp.getEnd() != null)) {
- prev_fnp.getEnd().addNext(left.getBegin());
- }
- prev_fnp = false_np;
-
- if (begin==null) {
- begin = left.getBegin();
- }
- if(cond_begin == null) {
- cond_begin = left.getBegin();
- }
+ SwitchLabelNode sln = slnv.elementAt(j);
+ NodePair left = null;
+ NodePair false_np = null;
+ if(sln.isDefault()) {
+ left = body;
+ } else {
+ TempDescriptor cond_tmp=TempDescriptor.tempFactory("condition", new TypeDescriptor(TypeDescriptor.BOOLEAN));
+ TempDescriptor temp_left=TempDescriptor.tempFactory("leftop", sln.getCondition().getType());
+ Operation op=new Operation(Operation.EQUAL);
+ left=flattenExpressionNode(sln.getCondition(), temp_left);
+ FlatOpNode fon=new FlatOpNode(cond_tmp, temp_left, cond_temp, op);
+ fon.setNumLine(sln.getNumLine());
+ left.getEnd().addNext(fon);
+
+ FlatCondBranch fcb=new FlatCondBranch(cond_tmp);
+ fcb.setNumLine(bn.getNumLine());
+ fcb.setTrueProb(State.TRUEPROB);
+
+ FlatNop nop=new FlatNop();
+ false_np=new NodePair(nop,nop);
+
+ fon.addNext(fcb);
+ fcb.addTrueNext(body.getBegin());
+ fcb.addFalseNext(false_np.getBegin());
+ }
+ if((prev_fnp != null) && (prev_fnp.getEnd() != null)) {
+ prev_fnp.getEnd().addNext(left.getBegin());
+ }
+ prev_fnp = false_np;
+
+ if (begin==null) {
+ begin = left.getBegin();
+ }
+ if(cond_begin == null) {
+ cond_begin = left.getBegin();
+ }
}
if((prev_false_branch != null) && (prev_false_branch.getEnd() != null)) {
- prev_false_branch.getEnd().addNext(cond_begin);
+ prev_false_branch.getEnd().addNext(cond_begin);
}
prev_false_branch = prev_fnp;
if((prev_true_branch != null) && (prev_true_branch.getEnd() != null)) {
- prev_true_branch.getEnd().addNext(body.getBegin());
+ prev_true_branch.getEnd().addNext(body.getBegin());
}
prev_true_branch = body;
- for(Iterator breakit=breakset.iterator();breakit.hasNext();) {
- FlatNode fn=(FlatNode)breakit.next();
- breakit.remove();
+ for(Iterator breakit=breakset.iterator(); breakit.hasNext(); ) {
+ FlatNode fn=(FlatNode)breakit.next();
+ breakit.remove();
if (end==null)
end=new FlatNop();
- fn.addNext(end);
+ fn.addNext(end);
}
breakset=oldbs;
}
}
return new NodePair(begin,end);
}
-
+
private NodePair flattenLoopNode(LoopNode ln) {
HashSet oldbs=breakset;
HashSet oldcs=continueset;
breakset=new HashSet();
continueset=new HashSet();
-
+
if (ln.getType()==LoopNode.FORLOOP) {
NodePair initializer=flattenBlockNode(ln.getInitializer());
TempDescriptor cond_temp=TempDescriptor.tempFactory("condition", new TypeDescriptor(TypeDescriptor.BOOLEAN));
initializer.getEnd().addNext(nop2);
nop2.addNext(condition.getBegin());
if (body.getEnd()!=null)
- body.getEnd().addNext(update.getBegin());
+ body.getEnd().addNext(update.getBegin());
update.getEnd().addNext(backedge);
backedge.addNext(condition.getBegin());
condition.getEnd().addNext(fcb);
fcb.addFalseNext(nopend);
fcb.addTrueNext(body.getBegin());
- for(Iterator contit=continueset.iterator();contit.hasNext();) {
- FlatNode fn=(FlatNode)contit.next();
- contit.remove();
- fn.addNext(update.getBegin());
+ for(Iterator contit=continueset.iterator(); contit.hasNext(); ) {
+ FlatNode fn=(FlatNode)contit.next();
+ contit.remove();
+ fn.addNext(update.getBegin());
}
- for(Iterator breakit=breakset.iterator();breakit.hasNext();) {
- FlatNode fn=(FlatNode)breakit.next();
- breakit.remove();
- fn.addNext(nopend);
+ for(Iterator breakit=breakset.iterator(); breakit.hasNext(); ) {
+ FlatNode fn=(FlatNode)breakit.next();
+ breakit.remove();
+ fn.addNext(nopend);
}
breakset=oldbs;
continueset=oldcs;
fcb.addFalseNext(nopend);
fcb.addTrueNext(body.getBegin());
- for(Iterator contit=continueset.iterator();contit.hasNext();) {
- FlatNode fn=(FlatNode)contit.next();
- contit.remove();
- fn.addNext(backedge);
+ for(Iterator contit=continueset.iterator(); contit.hasNext(); ) {
+ FlatNode fn=(FlatNode)contit.next();
+ contit.remove();
+ fn.addNext(backedge);
}
- for(Iterator breakit=breakset.iterator();breakit.hasNext();) {
- FlatNode fn=(FlatNode)breakit.next();
- breakit.remove();
- fn.addNext(nopend);
+ for(Iterator breakit=breakset.iterator(); breakit.hasNext(); ) {
+ FlatNode fn=(FlatNode)breakit.next();
+ breakit.remove();
+ fn.addNext(nopend);
}
breakset=oldbs;
continueset=oldcs;
fcb.addTrueNext(backedge);
backedge.addNext(body.getBegin());
- for(Iterator contit=continueset.iterator();contit.hasNext();) {
- FlatNode fn=(FlatNode)contit.next();
- contit.remove();
- fn.addNext(condition.getBegin());
+ for(Iterator contit=continueset.iterator(); contit.hasNext(); ) {
+ FlatNode fn=(FlatNode)contit.next();
+ contit.remove();
+ fn.addNext(condition.getBegin());
}
- for(Iterator breakit=breakset.iterator();breakit.hasNext();) {
- FlatNode fn=(FlatNode)breakit.next();
- breakit.remove();
- fn.addNext(nopend);
+ for(Iterator breakit=breakset.iterator(); breakit.hasNext(); ) {
+ FlatNode fn=(FlatNode)breakit.next();
+ breakit.remove();
+ fn.addNext(nopend);
}
breakset=oldbs;
continueset=oldcs;
FlatNode end = null;
MethodDescriptor memdex=(MethodDescriptor)typeutil.getClass("Object").getMethodTable().get("MonitorExit");
for(int j = this.lockStack.size(); j > 0; j--) {
- TempDescriptor thistd = this.lockStack.elementAt(j-1);
- FlatCall fcunlock = new FlatCall(memdex, null, thistd, new TempDescriptor[0]);
- fcunlock.setNumLine(rntree.getNumLine());
- if(end != null) {
- end.addNext(fcunlock);
- }
- end = fcunlock;
+ TempDescriptor thistd = this.lockStack.elementAt(j-1);
+ FlatCall fcunlock = new FlatCall(memdex, null, thistd, new TempDescriptor[0]);
+ fcunlock.setNumLine(rntree.getNumLine());
+ if(end != null) {
+ end.addNext(fcunlock);
+ }
+ end = fcunlock;
}
end.addNext(ln);
ln=end;
MethodDescriptor mexmd=(MethodDescriptor)typeutil.getClass("Object").getMethodTable().get("MonitorExit");
FlatCall fcex=new FlatCall(mexmd, null, montmp, new TempDescriptor[0]);
fcex.setNumLine(sbn.getNumLine());
-
+
this.lockStack.pop();
if(first != null) {
first = fcen;
}
fcen.addNext(npblock.getBegin());
-
+
if (npblock.getEnd()!=null&&npblock.getEnd().kind()!=FKind.FlatReturnNode) {
npblock.getEnd().addNext(fcex);
return new NodePair(first, fcex);
return new NodePair(faen, faexn);
}
- private NodePair flattenGenReachNode( GenReachNode grn ) {
- FlatGenReachNode fgrn = new FlatGenReachNode( grn.getGraphName() );
- return new NodePair( fgrn, fgrn );
+ private NodePair flattenGenReachNode(GenReachNode grn) {
+ FlatGenReachNode fgrn = new FlatGenReachNode(grn.getGraphName());
+ return new NodePair(fgrn, fgrn);
}
private NodePair flattenSESENode(SESENode sn) {
FlatSESEExitNode fsexn=new FlatSESEExitNode(sn);
sn.setFlatExit(fsexn);
FlatSESEEnterNode fsen=sn.getStart().getFlatEnter();
- fsexn.setFlatEnter(fsen);
- sn.getStart().getFlatEnter().setFlatExit( fsexn );
-
+ fsexn.setFlatEnter(fsen);
+ sn.getStart().getFlatEnter().setFlatExit(fsexn);
+
return new NodePair(fsexn, fsexn);
}
private NodePair flattenContinueBreakNode(ContinueBreakNode cbn) {
- FlatNop fn=new FlatNop();
- if (cbn.isBreak())
- breakset.add(fn);
- else
- continueset.add(fn);
- return new NodePair(fn,null);
+ FlatNop fn=new FlatNop();
+ if (cbn.isBreak())
+ breakset.add(fn);
+ else
+ continueset.add(fn);
+ return new NodePair(fn,null);
}
private NodePair flattenInstanceOfNode(InstanceOfNode tn, TempDescriptor out_temp) {
FlatLiteralNode fln_tmp=new FlatLiteralNode(tmp.getType(), new Integer(ain.numVarInitializers()), tmp);
fln_tmp.setNumLine(ain.getNumLine());
first = last=fln_tmp;
-
+
// create the new array
FlatNew fn=new FlatNew(td, out_temp, tmp, isGlobal, disjointId);
last.addNext(fn);
last = fn;
-
+
// initialize the new array
for(int i = 0; i < ain.numVarInitializers(); i++) {
ExpressionNode var_init_node = ain.getVarInitializer(i);
NodePair np_init = flattenExpressionNode(var_init_node, tmp_init);
// TODO wrapper class process is missing now
/*if(td.isArray() && td.dereference().iswrapper()) {
- }*/
+ }*/
FlatSetElementNode fsen=new FlatSetElementNode(tmp_toinit, index, tmp_init);
fsen.setNumLine(ain.getNumLine());
last.addNext(fln);
np_init.getEnd().addNext(fsen);
last = fsen;
}
-
+
return new NodePair(first, last);
}
falseExpr.getEnd().addNext(fonF);
fonF.addNext(nopend);
-
+
return new NodePair(cond.getBegin(), nopend);
}
case Kind.IfStatementNode:
return flattenIfStatementNode((IfStatementNode)bsn);
-
+
case Kind.SwitchStatementNode:
return flattenSwitchStatementNode((SwitchStatementNode)bsn);
String maxTaskRecSizeStr="__maxTaskRecSize___";
- String mlperrstr =
+ String mlperrstr =
"if(status != 0) { "+
"sprintf(errmsg, \"MLP error at %s:%d\", __FILE__, __LINE__); "+
"perror(errmsg); exit(-1); }";
RuntimeConflictResolver rcr = null;
- public BuildOoOJavaCode( State st,
- Hashtable temptovar,
- TypeUtil typeutil,
- SafetyAnalysis sa,
- OoOJavaAnalysis oooa, CallGraph callgraph
- ) {
+ public BuildOoOJavaCode(State st,
+ Hashtable temptovar,
+ TypeUtil typeutil,
+ SafetyAnalysis sa,
+ OoOJavaAnalysis oooa, CallGraph callgraph
+ ) {
super( st, temptovar, typeutil, sa, callgraph);
this.oooa = oooa;
}
- protected void additionalIncludesMethodsHeader( PrintWriter outmethodheader ) {
+ protected void additionalIncludesMethodsHeader(PrintWriter outmethodheader) {
outmethodheader.println("#include <stdlib.h>");
outmethodheader.println("#include <stdio.h>");
// have to initialize some SESE compiler data before
// analyzing normal methods, which must happen before
// generating SESE internal code
-
+
Iterator<FlatSESEEnterNode> seseit = oooa.getAllSESEs().iterator();
while( seseit.hasNext() ) {
FlatSESEEnterNode fsen = seseit.next();
- initializeSESE( fsen );
+ initializeSESE(fsen);
}
-
+
//TODO signal the object that will report errors
if( state.RCR ) {
try {
- rcr = new RuntimeConflictResolver( PREFIX,
- oooa,
- state );
- System.out.println("Runtime Conflict Resolver started.");
+ rcr = new RuntimeConflictResolver(PREFIX,
+ oooa,
+ state);
+ System.out.println("Runtime Conflict Resolver started.");
} catch (FileNotFoundException e) {
- System.out.println("Runtime Conflict Resolver could not create output file.");
+ System.out.println("Runtime Conflict Resolver could not create output file.");
}
}
}
- protected void initializeSESE( FlatSESEEnterNode fsen ) {
+ protected void initializeSESE(FlatSESEEnterNode fsen) {
- FlatMethod fm = fsen.getfmEnclosing();
+ FlatMethod fm = fsen.getfmEnclosing();
MethodDescriptor md = fm.getMethod();
- ClassDescriptor cn = md.getClassDesc();
-
+ ClassDescriptor cn = md.getClassDesc();
+
// Creates bogus method descriptor to index into tables
Modifiers modBogus = new Modifiers();
- MethodDescriptor mdBogus =
- new MethodDescriptor( modBogus,
- new TypeDescriptor( TypeDescriptor.VOID ),
- "sese_"+fsen.getPrettyIdentifier()+fsen.getIdentifier()
- );
-
- mdBogus.setClassDesc( fsen.getcdEnclosing() );
- FlatMethod fmBogus = new FlatMethod( mdBogus, null );
- fsen.setfmBogus( fmBogus );
- fsen.setmdBogus( mdBogus );
-
+ MethodDescriptor mdBogus =
+ new MethodDescriptor(modBogus,
+ new TypeDescriptor(TypeDescriptor.VOID),
+ "sese_"+fsen.getPrettyIdentifier()+fsen.getIdentifier()
+ );
+
+ mdBogus.setClassDesc(fsen.getcdEnclosing());
+ FlatMethod fmBogus = new FlatMethod(mdBogus, null);
+ fsen.setfmBogus(fmBogus);
+ fsen.setmdBogus(mdBogus);
+
Set<TempDescriptor> inSetAndOutSet = new HashSet<TempDescriptor>();
- inSetAndOutSet.addAll( fsen.getInVarSet() );
- inSetAndOutSet.addAll( fsen.getOutVarSet() );
+ inSetAndOutSet.addAll(fsen.getInVarSet());
+ inSetAndOutSet.addAll(fsen.getOutVarSet());
// Build paramsobj for bogus method descriptor
- ParamsObject objectparams = new ParamsObject( mdBogus, tag++ );
- paramstable.put( mdBogus, objectparams );
-
+ ParamsObject objectparams = new ParamsObject(mdBogus, tag++);
+ paramstable.put(mdBogus, objectparams);
+
Iterator<TempDescriptor> itr = inSetAndOutSet.iterator();
while( itr.hasNext() ) {
TempDescriptor temp = itr.next();
TypeDescriptor type = temp.getType();
if( type.isPtr() ) {
- objectparams.addPtr( temp );
+ objectparams.addPtr(temp);
} else {
- objectparams.addPrim( temp );
+ objectparams.addPrim(temp);
}
}
-
+
// Build normal temp object for bogus method descriptor
- TempObject objecttemps = new TempObject( objectparams, mdBogus, tag++ );
- tempstable.put( mdBogus, objecttemps );
+ TempObject objecttemps = new TempObject(objectparams, mdBogus, tag++);
+ tempstable.put(mdBogus, objecttemps);
for( Iterator nodeit = fsen.getNodeSet().iterator(); nodeit.hasNext(); ) {
- FlatNode fn = (FlatNode)nodeit.next();
+ FlatNode fn = (FlatNode)nodeit.next();
TempDescriptor[] writes = fn.writesTemps();
for( int i = 0; i < writes.length; i++ ) {
TypeDescriptor type = temp.getType();
if( type.isPtr() ) {
- objecttemps.addPtr( temp );
+ objecttemps.addPtr(temp);
} else {
- objecttemps.addPrim( temp );
+ objecttemps.addPrim(temp);
}
}
}
}
}
-
- protected void additionalCodeGen( PrintWriter outmethodheader,
- PrintWriter outstructs,
- PrintWriter outmethod ) {
+
+ protected void additionalCodeGen(PrintWriter outmethodheader,
+ PrintWriter outstructs,
+ PrintWriter outmethod) {
// Output function prototypes and structures for SESE's and code
// spit out a global to inform all worker threads with
// the maximum size is for any task record
- outmethod.println( "int "+maxTaskRecSizeStr+" = 0;" );
+ outmethod.println("int "+maxTaskRecSizeStr+" = 0;");
- // first generate code for each sese's internals
+ // first generate code for each sese's internals
Iterator<FlatSESEEnterNode> seseit;
seseit = oooa.getAllSESEs().iterator();
-
+
while( seseit.hasNext() ) {
FlatSESEEnterNode fsen = seseit.next();
- generateMethodSESE( fsen, outstructs, outmethodheader, outmethod );
+ generateMethodSESE(fsen, outstructs, outmethodheader, outmethod);
}
// then write the invokeSESE switch to decouple scheduler
// from having to do unique details of sese invocation
- generateSESEinvocationMethod( outmethodheader, outmethod );
+ generateSESEinvocationMethod(outmethodheader, outmethod);
}
- protected void additionalCodeAtTopOfMain( PrintWriter outmethod ) {
-
+ protected void additionalCodeAtTopOfMain(PrintWriter outmethod) {
+
// do a calculation to determine which task record
// is the largest, store that as a global value for
// allocating records
" ) > "+maxTaskRecSizeStr+
" ) { "+maxTaskRecSizeStr+
" = sizeof( "+fsen.getSESErecordName()+
- " ); }" );
+ " ); }");
}
-
+
outmethod.println(" runningSESE = NULL;");
outmethod.println(" workScheduleInit( "+state.OOO_NUMCORES+", invokeSESEmethod );");
-
+
//initializes data structures needed for the RCR traverser
if( state.RCR && rcr != null ) {
outmethod.println(" initializeStructsRCR();");
}
- protected void additionalCodeAtBottomOfMain( PrintWriter outmethod ) {
+ protected void additionalCodeAtBottomOfMain(PrintWriter outmethod) {
outmethod.println(" workScheduleBegin();");
}
- protected void additionalIncludesMethodsImplementation( PrintWriter outmethod ) {
+ protected void additionalIncludesMethodsImplementation(PrintWriter outmethod) {
outmethod.println("#include <stdlib.h>");
outmethod.println("#include <stdio.h>");
outmethod.println("#include \"mlp_runtime.h\"");
outmethod.println("#include \"psemaphore.h\"");
-
+
if( state.RCR ) {
outmethod.println("#include \"trqueue.h\"");
outmethod.println("#include \"RuntimeConflictResolver.h\"");
}
- protected void additionalIncludesStructsHeader( PrintWriter outstructs ) {
+ protected void additionalIncludesStructsHeader(PrintWriter outstructs) {
outstructs.println("#include \"mlp_runtime.h\"");
outstructs.println("#include \"psemaphore.h\"");
if( state.RCR ) {
}
- protected void additionalClassObjectFields( PrintWriter outclassdefs ) {
+ protected void additionalClassObjectFields(PrintWriter outclassdefs) {
outclassdefs.println(" int oid;");
outclassdefs.println(" int allocsite;");
}
- protected void additionalCodeAtTopMethodsImplementation( PrintWriter outmethod ) {
+ protected void additionalCodeAtTopMethodsImplementation(PrintWriter outmethod) {
outmethod.print("extern __thread int oid;\n");
outmethod.print("extern int oidIncrement;\n");
}
- protected void additionalCodeAtTopFlatMethodBody( PrintWriter output, FlatMethod fm ) {
+ protected void additionalCodeAtTopFlatMethodBody(PrintWriter output, FlatMethod fm) {
// declare variables for naming static and dynamic SESE's
- ContextTaskNames context = oooa.getContextTaskNames( fm );
+ ContextTaskNames context = oooa.getContextTaskNames(fm);
output.println(" /* static SESE names */");
Iterator<SESEandAgePair> pItr = context.getNeededStaticNames().iterator();
SESEandAgePair pair = pItr.next();
output.println(" void* "+pair+" = NULL;");
}
-
+
output.println(" /* dynamic variable sources */");
Iterator<TempDescriptor> dynSrcItr = context.getDynamicVarSet().iterator();
while( dynSrcItr.hasNext() ) {
TempDescriptor dynSrcVar = dynSrcItr.next();
output.println(" SESEcommon* "+dynSrcVar+"_srcSESE = NULL;");
output.println(" INTPTR "+dynSrcVar+"_srcOffset = 0x1;");
- }
+ }
+
-
// eom - set up related allocation sites's waiting queues
// TODO: we have to do a table-based thing here...
// jjenista, I THINK WE LOSE THIS ALTOGETHER!
/*
- FlatSESEEnterNode callerSESEplaceholder = (FlatSESEEnterNode) fm.getNext( 0 );
- if(callerSESEplaceholder!= oooa.getMainSESE()){
- Analysis.OoOJava.ConflictGraph graph = oooa.getConflictGraph(callerSESEplaceholder);
- if (graph != null && graph.hasConflictEdge()) {
+ FlatSESEEnterNode callerSESEplaceholder = (FlatSESEEnterNode) fm.getNext( 0 );
+ if(callerSESEplaceholder!= oooa.getMainSESE()){
+ Analysis.OoOJava.ConflictGraph graph = oooa.getConflictGraph(callerSESEplaceholder);
+ if (graph != null && graph.hasConflictEdge()) {
output.println(" // set up waiting queues ");
output.println(" int numMemoryQueue=0;");
output.println(" int memoryQueueItemID=0;");
output.println(" runningSESE->memoryQueueArray=mlpCreateMemoryQueueArray(numMemoryQueue);");
output.println();
}
- }
- }
- */
+ }
+ }
+ */
}
protected void generateMethodSESE(FlatSESEEnterNode fsen,
- PrintWriter outputStructs,
- PrintWriter outputMethHead,
- PrintWriter outputMethods) {
+ PrintWriter outputStructs,
+ PrintWriter outputMethHead,
+ PrintWriter outputMethods) {
+
+ ParamsObject objectparams = (ParamsObject) paramstable.get(fsen.getmdBogus());
+ TempObject objecttemps = (TempObject) tempstable.get(fsen.getmdBogus());
- ParamsObject objectparams = (ParamsObject) paramstable.get( fsen.getmdBogus() );
- TempObject objecttemps = (TempObject) tempstable .get( fsen.getmdBogus() );
-
// generate locals structure
outputStructs.println("struct "+
- fsen.getcdEnclosing().getSafeSymbol()+
- fsen.getmdBogus().getSafeSymbol()+"_"+
- fsen.getmdBogus().getSafeMethodDescriptor()+
- "_locals {");
-
+ fsen.getcdEnclosing().getSafeSymbol()+
+ fsen.getmdBogus().getSafeSymbol()+"_"+
+ fsen.getmdBogus().getSafeMethodDescriptor()+
+ "_locals {");
+
outputStructs.println(" int size;");
outputStructs.println(" void * next;");
TempDescriptor temp=objecttemps.getPointer(i);
if (temp.getType().isNull())
- outputStructs.println(" void * "+temp.getSafeSymbol()+";");
+ outputStructs.println(" void * "+temp.getSafeSymbol()+";");
else
- outputStructs.println(" struct "+
- temp.getType().getSafeSymbol()+" * "+
- temp.getSafeSymbol()+";");
+ outputStructs.println(" struct "+
+ temp.getType().getSafeSymbol()+" * "+
+ temp.getSafeSymbol()+";");
}
outputStructs.println("};\n");
-
+
// divide in-set and out-set into objects and primitives to prep
// for the record generation just below
Set<TempDescriptor> inSetAndOutSet = new HashSet<TempDescriptor>();
- inSetAndOutSet.addAll( fsen.getInVarSet() );
- inSetAndOutSet.addAll( fsen.getOutVarSet() );
+ inSetAndOutSet.addAll(fsen.getInVarSet());
+ inSetAndOutSet.addAll(fsen.getOutVarSet());
Set<TempDescriptor> inSetAndOutSetObjs = new HashSet<TempDescriptor>();
Set<TempDescriptor> inSetAndOutSetPrims = new HashSet<TempDescriptor>();
TempDescriptor temp = itr.next();
TypeDescriptor type = temp.getType();
if( type.isPtr() ) {
- inSetAndOutSetObjs.add( temp );
+ inSetAndOutSetObjs.add(temp);
} else {
- inSetAndOutSetPrims.add( temp );
+ inSetAndOutSetPrims.add(temp);
}
}
// generate the SESE record structure
outputStructs.println(fsen.getSESErecordName()+" {");
-
+
// data common to any SESE, and it must be placed first so
// a module that doesn't know what kind of SESE record this
// is can cast the pointer to a common struct
// implementation unless there is actually a problem...
Vector<TempDescriptor> inset=fsen.getInVarsForDynamicCoarseConflictResolution();
- for(int i=0; i<inset.size();i++) {
+ for(int i=0; i<inset.size(); i++) {
TempDescriptor temp=inset.get(i);
if (temp.getType().isNull())
outputStructs.println(" void * "+temp.getSafeSymbol()+
- "; /* in-or-out-set obj in gl */");
+ "; /* in-or-out-set obj in gl */");
else
outputStructs.println(" struct "+temp.getType().getSafeSymbol()+" * "+
- temp.getSafeSymbol()+"; /* in-or-out-set obj in gl */");
+ temp.getSafeSymbol()+"; /* in-or-out-set obj in gl */");
}
for(int i=0; i<objectparams.numPointers(); i++) {
if (!inset.contains(temp)) {
if (temp.getType().isNull())
outputStructs.println(" void * "+temp.getSafeSymbol()+
- "; /* in-or-out-set obj in gl */");
+ "; /* in-or-out-set obj in gl */");
else
outputStructs.println(" struct "+temp.getType().getSafeSymbol()+" * "+
- temp.getSafeSymbol()+"; /* in-or-out-set obj in gl */");
+ temp.getSafeSymbol()+"; /* in-or-out-set obj in gl */");
}
}
-
+
outputStructs.println(" /* next is primitives for in-set and out-set and dynamic tracking */");
Iterator<TempDescriptor> itrPrims = inSetAndOutSetPrims.iterator();
while( itrPrims.hasNext() ) {
TempDescriptor temp = itrPrims.next();
TypeDescriptor type = temp.getType();
- if(type.isPrimitive()){
- outputStructs.println(" "+temp.getType().getSafeSymbol()+" "+
- temp.getSafeSymbol()+"; /* in-set or out-set primitive */");
- }
+ if(type.isPrimitive()) {
+ outputStructs.println(" "+temp.getType().getSafeSymbol()+" "+
+ temp.getSafeSymbol()+"; /* in-set or out-set primitive */");
+ }
}
-
+
// note that the sese record pointer will be added below, just primitive part of tracking here
Iterator<TempDescriptor> itrDynInVars = fsen.getDynamicInVarSet().iterator();
while( itrDynInVars.hasNext() ) {
TempDescriptor dynInVar = itrDynInVars.next();
outputStructs.println(" INTPTR "+dynInVar+"_srcOffset; /* dynamic tracking primitive */");
- }
-
-
- outputStructs.println(" /* everything after this should be pointers to an SESE record */" );
+ }
+
+
+ outputStructs.println(" /* everything after this should be pointers to an SESE record */");
// other half of info for dynamic tracking, the SESE record pointer
itrDynInVars = fsen.getDynamicInVarSet().iterator();
TempDescriptor dynInVar = itrDynInVars.next();
String depRecField = dynInVar+"_srcSESE";
outputStructs.println(" SESEcommon* "+depRecField+";");
- addingDepRecField( fsen, depRecField );
- }
-
+ addingDepRecField(fsen, depRecField);
+ }
+
// statically known sese sources are record pointers, too
Iterator<SESEandAgePair> itrStaticInVarSrcs = fsen.getStaticInVarSrcs().iterator();
while( itrStaticInVarSrcs.hasNext() ) {
if (state.RCR) {
if (inset.size()!=0) {
- outputStructs.println("struct rcrRecord rcrRecords["+inset.size()+"];");
- }
+ outputStructs.println("struct rcrRecord rcrRecords["+inset.size()+"];");
+ }
}
-
+
if( fsen.getFirstDepRecField() != null ) {
outputStructs.println(" /* compiler believes first dependent SESE record field above is: "+
- fsen.getFirstDepRecField()+" */" );
+ fsen.getFirstDepRecField()+" */");
}
outputStructs.println("};\n");
-
+
// write method declaration to header file
outputMethHead.print("void ");
outputMethHead.print(fsen.getSESEmethodName()+"(");
outputMethHead.println(");\n");
- generateFlatMethodSESE( fsen.getfmBogus(),
- fsen.getcdEnclosing(),
- fsen,
- fsen.getFlatExit(),
- outputMethods );
+ generateFlatMethodSESE(fsen.getfmBogus(),
+ fsen.getcdEnclosing(),
+ fsen,
+ fsen.getFlatExit(),
+ outputMethods);
}
// used when generating the specific SESE record struct
- // to remember the FIRST field name of sese records
+ // to remember the FIRST field name of sese records
// that the current SESE depends on--we need to know the
// offset to the first one for garbage collection
- protected void addingDepRecField( FlatSESEEnterNode fsen,
- String field ) {
+ protected void addingDepRecField(FlatSESEEnterNode fsen,
+ String field) {
if( fsen.getFirstDepRecField() == null ) {
- fsen.setFirstDepRecField( field );
+ fsen.setFirstDepRecField(field);
}
fsen.incNumDepRecs();
}
- private void generateFlatMethodSESE(FlatMethod fm,
- ClassDescriptor cn,
- FlatSESEEnterNode fsen,
- FlatSESEExitNode seseExit,
- PrintWriter output
+ private void generateFlatMethodSESE(FlatMethod fm,
+ ClassDescriptor cn,
+ FlatSESEEnterNode fsen,
+ FlatSESEExitNode seseExit,
+ PrintWriter output
) {
MethodDescriptor md = fm.getMethod();
// declare variables for naming static and dynamic SESE's
- ContextTaskNames context = oooa.getContextTaskNames( fsen );
+ ContextTaskNames context = oooa.getContextTaskNames(fsen);
output.println(" /* static SESE names */");
Iterator<SESEandAgePair> pItr = context.getNeededStaticNames().iterator();
SESEandAgePair pair = pItr.next();
output.println(" SESEcommon* "+pair+" = NULL;");
}
-
+
// declare variables for tracking dynamic sources
output.println(" /* dynamic variable sources */");
Iterator<TempDescriptor> dynSrcItr = context.getDynamicVarSet().iterator();
TempDescriptor dynSrcVar = dynSrcItr.next();
output.println(" SESEcommon* "+dynSrcVar+"_srcSESE = NULL;");
output.println(" INTPTR "+dynSrcVar+"_srcOffset = 0x1;");
- }
+ }
// declare local temps for in-set primitives, and if it is
TempDescriptor temp = itrInSet.next();
TypeDescriptor type = temp.getType();
if( !type.isPtr() ) {
- if( fsen.getReadyInVarSet().contains( temp ) ) {
+ if( fsen.getReadyInVarSet().contains(temp) ) {
output.println(" "+type+" "+temp+" = "+paramsprefix+"->"+temp+";");
} else {
output.println(" "+type+" "+temp+";");
}
}
- }
+ }
// declare local temps for out-set primitives if its not already
// in the in-set, and it's value will get written so no problem
while( itrOutSet.hasNext() ) {
TempDescriptor temp = itrOutSet.next();
TypeDescriptor type = temp.getType();
- if( !type.isPtr() && !fsen.getInVarSet().contains( temp ) ) {
- output.println(" "+type+" "+temp+";");
+ if( !type.isPtr() && !fsen.getInVarSet().contains(temp) ) {
+ output.println(" "+type+" "+temp+";");
}
}
output.println(" runningSESE = &(___params___->common);");
output.println(" childSESE = 0;");
output.println(" ");
-
+
// eom - setup memory queue
output.println(" // set up memory queues ");
output.println(" int numMemoryQueue=0;");
output.println(" int memoryQueueItemID=0;");
- Analysis.OoOJava.ConflictGraph graph = oooa.getConflictGraph( fsen );
+ Analysis.OoOJava.ConflictGraph graph = oooa.getConflictGraph(fsen);
if( graph != null && graph.hasConflictEdge() ) {
output.println(" {");
- Set<Analysis.OoOJava.SESELock> lockSet = oooa.getLockMappings( graph );
+ Set<Analysis.OoOJava.SESELock> lockSet = oooa.getLockMappings(graph);
System.out.println("#lockSet="+lockSet);
if( lockSet.size() > 0 ) {
- output.println(" numMemoryQueue=" + lockSet.size() + ";");
- output.println(" runningSESE->numMemoryQueue=numMemoryQueue;");
- output.println(" runningSESE->memoryQueueArray=mlpCreateMemoryQueueArray(numMemoryQueue);");
- output.println();
+ output.println(" numMemoryQueue=" + lockSet.size() + ";");
+ output.println(" runningSESE->numMemoryQueue=numMemoryQueue;");
+ output.println(" runningSESE->memoryQueueArray=mlpCreateMemoryQueueArray(numMemoryQueue);");
+ output.println();
}
output.println(" }");
}
// set up a task's mem pool to recycle the allocation of children tasks
// don't bother if the task never has children (a leaf task)
- output.println( "#ifndef OOO_DISABLE_TASKMEMPOOL" );
- output.println( "/////////////////////////////////////////////" );
- output.println( "//" );
- output.println( "// TODO: use poolcreate to make one record pool" );
- output.println( "// per WORKER THREAD and never destroy it..." );
- output.println( "//" );
- output.println( "/////////////////////////////////////////////" );
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
+ output.println("/////////////////////////////////////////////");
+ output.println("//");
+ output.println("// TODO: use poolcreate to make one record pool");
+ output.println("// per WORKER THREAD and never destroy it...");
+ output.println("//");
+ output.println("/////////////////////////////////////////////");
if( !fsen.getIsLeafSESE() ) {
output.println(" runningSESE->taskRecordMemPool = poolcreate( "+
maxTaskRecSizeStr+", freshTaskRecordInitializer );");
if (state.RCR && !rcr.hasEmptyTraversers(fsen)) {
- output.println(" createTR();");
- output.println(" runningSESE->allHashStructures=TRqueue->allHashStructures;");
+ output.println(" createTR();");
+ output.println(" runningSESE->allHashStructures=TRqueue->allHashStructures;");
}
} else {
// make it clear we purposefully did not initialize this
output.println(" runningSESE->taskRecordMemPool = (MemPool*)0x7;");
}
- output.println( "#endif // OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
- // copy in-set into place, ready vars were already
+ // copy in-set into place, ready vars were already
// copied when the SESE was issued
Iterator<TempDescriptor> tempItr;
tempItr = fsen.getStaticInVarSet().iterator();
while( tempItr.hasNext() ) {
TempDescriptor temp = tempItr.next();
- VariableSourceToken vst = fsen.getStaticInVarSrc( temp );
- SESEandAgePair srcPair = new SESEandAgePair( vst.getSESE(), vst.getAge() );
- output.println(" "+generateTemp( fsen.getfmBogus(), temp)+
- " = "+paramsprefix+"->"+srcPair+"->"+vst.getAddrVar()+";");
+ VariableSourceToken vst = fsen.getStaticInVarSrc(temp);
+ SESEandAgePair srcPair = new SESEandAgePair(vst.getSESE(), vst.getAge());
+ output.println(" "+generateTemp(fsen.getfmBogus(), temp)+
+ " = "+paramsprefix+"->"+srcPair+"->"+vst.getAddrVar()+";");
}
-
+
output.println(" // decrement references to static sources");
for( Iterator<SESEandAgePair> pairItr = fsen.getStaticInVarSrcs().iterator(); pairItr.hasNext(); ) {
SESEandAgePair srcPair = pairItr.next();
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
output.println(" {");
output.println(" SESEcommon* src = &("+paramsprefix+"->"+srcPair+"->common);");
output.println(" RELEASE_REFERENCE_TO( src );");
output.println(" }");
- output.println("#endif // OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
}
while( tempItr.hasNext() ) {
TempDescriptor temp = tempItr.next();
TypeDescriptor type = temp.getType();
-
+
// go grab it from the SESE source
output.println(" if( "+paramsprefix+"->"+temp+"_srcSESE != NULL ) {");
} else {
typeStr = type.getSafeSymbol();
}
-
- output.println(" "+generateTemp( fsen.getfmBogus(), temp)+
- " = *(("+typeStr+"*) ((void*)"+
- paramsprefix+"->"+temp+"_srcSESE + "+
- paramsprefix+"->"+temp+"_srcOffset));");
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
+ output.println(" "+generateTemp(fsen.getfmBogus(), temp)+
+ " = *(("+typeStr+"*) ((void*)"+
+ paramsprefix+"->"+temp+"_srcSESE + "+
+ paramsprefix+"->"+temp+"_srcOffset));");
+
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
output.println(" SESEcommon* src = "+paramsprefix+"->"+temp+"_srcSESE;");
output.println(" RELEASE_REFERENCE_TO( src );");
- output.println("#endif // OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
// or if the source was our parent, its already in our record to grab
output.println(" } else {");
- output.println(" "+generateTemp( fsen.getfmBogus(), temp)+
- " = "+paramsprefix+"->"+temp+";");
+ output.println(" "+generateTemp(fsen.getfmBogus(), temp)+
+ " = "+paramsprefix+"->"+temp+";");
output.println(" }");
}
// Check to see if we need to do a GC if this is a
- // multi-threaded program...
+ // multi-threaded program...
if ((GENERATEPRECISEGC) || (this.state.MULTICOREGC)) {
- output.println("if (unlikely(needtocollect)) checkcollect("+localsprefixaddr+");");
+ output.println("if (unlikely(needtocollect)) checkcollect("+localsprefixaddr+");");
//Don't bother if we aren't in recursive methods...The loops case will catch it
// if (callgraph.getAllMethods(md).contains(md)) {
// if(this.state.MULTICOREGC) {
// output.println("if (unlikely(needtocollect)) checkcollect("+localsprefixaddr+");");
// }
// }
- }
+ }
if( state.COREPROF ) {
output.println("#ifdef CP_EVENTID_TASKEXECUTE");
}
HashSet<FlatNode> exitset=new HashSet<FlatNode>();
- exitset.add(seseExit);
+ exitset.add(seseExit);
generateCode(fsen.getNext(0), fm, exitset, output);
- output.println("}\n\n");
+ output.println("}\n\n");
}
) {
outmethodheader.println("void* invokeSESEmethod( void* seseRecord );");
- outmethod.println( "void* invokeSESEmethod( void* seseRecord ) {");
- outmethod.println( " int status;");
- outmethod.println( " char errmsg[128];");
+ outmethod.println("void* invokeSESEmethod( void* seseRecord ) {");
+ outmethod.println(" int status;");
+ outmethod.println(" char errmsg[128];");
// generate a case for each SESE class that can be invoked
- outmethod.println( " switch( ((SESEcommon*)seseRecord)->classID ) {");
- outmethod.println( " ");
+ outmethod.println(" switch( ((SESEcommon*)seseRecord)->classID ) {");
+ outmethod.println(" ");
Iterator<FlatSESEEnterNode> seseit;
seseit = oooa.getAllSESEs().iterator();
while( seseit.hasNext() ) {
FlatSESEEnterNode fsen = seseit.next();
- outmethod.println( " /* "+fsen.getPrettyIdentifier()+" */");
- outmethod.println( " case "+fsen.getIdentifier()+":");
- outmethod.println( " "+fsen.getSESEmethodName()+"( seseRecord );");
-
+ outmethod.println(" /* "+fsen.getPrettyIdentifier()+" */");
+ outmethod.println(" case "+fsen.getIdentifier()+":");
+ outmethod.println(" "+fsen.getSESEmethodName()+"( seseRecord );");
+
if( fsen.getIsMainSESE() ) {
- outmethod.println( " workScheduleExit();");
+ outmethod.println(" workScheduleExit();");
}
- outmethod.println( " break;");
- outmethod.println( "");
+ outmethod.println(" break;");
+ outmethod.println("");
}
// default case should never be taken, error out
- outmethod.println( " default:");
- outmethod.println( " printf(\"Error: unknown SESE class ID in invoke method.\\n\");");
- outmethod.println( " exit(-30);");
- outmethod.println( " break;");
- outmethod.println( " }");
- outmethod.println( " return NULL;");
- outmethod.println( "}\n\n");
+ outmethod.println(" default:");
+ outmethod.println(" printf(\"Error: unknown SESE class ID in invoke method.\\n\");");
+ outmethod.println(" exit(-30);");
+ outmethod.println(" break;");
+ outmethod.println(" }");
+ outmethod.println(" return NULL;");
+ outmethod.println("}\n\n");
}
- protected void stallMEMRCR( FlatMethod fm,
- FlatNode fn,
- Set<WaitingElement> waitingElementSet, PrintWriter output) {
+ protected void stallMEMRCR(FlatMethod fm,
+ FlatNode fn,
+ Set<WaitingElement> waitingElementSet, PrintWriter output) {
output.println("// stall on parent's stall sites ");
output.println(" {");
output.println(" REntry* rentry;");
output.println(" // stallrecord sometimes is used as a task record for instance ");
output.println(" // when you call RELEASE_REFERENCE_TO on a stall record.");
output.println(" // so the parent field must be initialized.");
- output.println(" SESEstall * stallrecord=(SESEstall *) poolalloc(runningSESE->taskRecordMemPool);");
+ output.println(" SESEstall * stallrecord=(SESEstall *) poolalloc(runningSESE->taskRecordMemPool);");
output.println(" stallrecord->common.parent=runningSESE;");
output.println(" stallrecord->common.unresolvedDependencies=10000;");
output.println(" stallrecord->common.rcrstatus=1;");
output.println(" stallrecord->tag=runningSESEstallSem.tag;");
TempDescriptor stalltd=null;
- for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext(); ) {
WaitingElement waitingElement =(WaitingElement) iterator.next();
if (waitingElement.getStatus() >= ConflictNode.COARSE) {
output.println(" rentry=mlpCreateREntry(runningSESE->memoryQueueArray["
- + waitingElement.getQueueID() + "]," + waitingElement.getStatus()
- + ", (SESEcommon *) stallrecord, 1LL);");
+ + waitingElement.getQueueID() + "]," + waitingElement.getStatus()
+ + ", (SESEcommon *) stallrecord, 1LL);");
} else {
throw new Error("Fine-grained conflict: This should not happen in RCR");
}
output.println(" rentry->queue=runningSESE->memoryQueueArray["
- + waitingElement.getQueueID() + "];");
+ + waitingElement.getQueueID() + "];");
output.println(" if(ADDRENTRY(runningSESE->memoryQueueArray["
- + waitingElement.getQueueID() + "],rentry)==NOTREADY) {");
+ + waitingElement.getQueueID() + "],rentry)==NOTREADY) {");
output.println(" localCount--;");
output.println(" }");
output.println("#if defined(RCR)&&!defined(OOO_DISABLE_TASKMEMPOOL)");
output.println(" if(!atomic_sub_and_test(localCount, &(stallrecord->common.unresolvedDependencies))) {");
//have to do fine-grained work also
output.println(" stallrecord->___obj___=(struct ___Object___ *)"
- + generateTemp(fm, stalltd) + ";");
+ + generateTemp(fm, stalltd) + ";");
output.println(" stallrecord->common.classID=-"
- + rcr.getTraverserID(stalltd, fn) + ";");
-
+ + rcr.getTraverserID(stalltd, fn) + ";");
+
output.println(" enqueueTR(TRqueue, (void *)stallrecord);");
if (state.COREPROF) {
output.println("#ifdef CP_EVENTID_TASKSTALLMEM");
output
- .println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_BEGIN );");
+ .println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_BEGIN );");
output.println("#endif");
- }
-
+ }
+
output.println(" psem_take( &runningSESEstallSem, (struct garbagelist *)&___locals___ );");
-
+
if (state.COREPROF) {
output.println("#ifdef CP_EVENTID_TASKSTALLMEM");
output
- .println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_END );");
+ .println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_END );");
output.println("#endif");
}
- output.println(" } else {");//exit if condition
+ output.println(" } else {"); //exit if condition
//release traversers reference if we didn't use traverser
output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
output.println(" RELEASE_REFERENCES_TO((SESEcommon *)stallrecord, 2);");
output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
output.println(" RELEASE_REFERENCE_TO((SESEcommon *)stallrecord);");
output.println("#endif");
- output.println(" }");//exit block
+ output.println(" }"); //exit block
}
- protected void additionalCodePreNode( FlatMethod fm,
- FlatNode fn,
- PrintWriter output ) {
+ protected void additionalCodePreNode(FlatMethod fm,
+ FlatNode fn,
+ PrintWriter output) {
// insert pre-node actions from the code plan
-
+
CodePlan cp = oooa.getCodePlan(fn);
if( cp != null ) {
FlatMethod fmContext;
if( currentSESE.getIsCallerProxySESE() ) {
- fmContext = oooa.getContainingFlatMethod( fn );
+ fmContext = oooa.getContainingFlatMethod(fn);
} else {
- fmContext = currentSESE.getfmBogus();
+ fmContext = currentSESE.getfmBogus();
}
ContextTaskNames contextTaskNames;
if( currentSESE.getIsCallerProxySESE() ) {
- contextTaskNames = oooa.getContextTaskNames( oooa.getContainingFlatMethod( fn ) );
+ contextTaskNames = oooa.getContextTaskNames(oooa.getContainingFlatMethod(fn));
} else {
- contextTaskNames = oooa.getContextTaskNames( currentSESE );
+ contextTaskNames = oooa.getContextTaskNames(currentSESE);
}
// for each sese and age pair that this parent statement
// copying of values comes after the statement
Iterator<VariableSourceToken> vstItr = cp.getStallTokens().iterator();
while( vstItr.hasNext() ) {
- VariableSourceToken vst = vstItr.next();
+ VariableSourceToken vst = vstItr.next();
- SESEandAgePair pair = new SESEandAgePair( vst.getSESE(), vst.getAge() );
+ SESEandAgePair pair = new SESEandAgePair(vst.getSESE(), vst.getAge());
- output.println(" {");
- output.println(" "+
- pair.getSESE().getSESErecordName()+"* child = ("+
- pair.getSESE().getSESErecordName()+"*) "+pair+";");
+ output.println(" {");
+ output.println(" "+
+ pair.getSESE().getSESErecordName()+"* child = ("+
+ pair.getSESE().getSESErecordName()+"*) "+pair+";");
- output.println(" SESEcommon* childCom = (SESEcommon*) "+pair+";");
+ output.println(" SESEcommon* childCom = (SESEcommon*) "+pair+";");
- if( state.COREPROF ) {
- output.println("#ifdef CP_EVENTID_TASKSTALLVAR");
- output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_BEGIN );");
- output.println("#endif");
- }
+ if( state.COREPROF ) {
+ output.println("#ifdef CP_EVENTID_TASKSTALLVAR");
+ output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_BEGIN );");
+ output.println("#endif");
+ }
- output.println(" pthread_mutex_lock( &(childCom->lock) );");
- output.println(" if( childCom->doneExecuting == FALSE ) {");
- output.println(" psem_reset( &runningSESEstallSem );");
- output.println(" childCom->parentsStallSem = &runningSESEstallSem;");
- output.println(" pthread_mutex_unlock( &(childCom->lock) );");
- output.println(" psem_take( &runningSESEstallSem, (struct garbagelist *)&___locals___ );");
- output.println(" } else {");
- output.println(" pthread_mutex_unlock( &(childCom->lock) );");
- output.println(" }");
-
- // copy things we might have stalled for
- Iterator<TempDescriptor> tdItr = cp.getCopySet( vst ).iterator();
- while( tdItr.hasNext() ) {
- TempDescriptor td = tdItr.next();
- output.println(" "+generateTemp( fmContext, td)+
- " = child->"+vst.getAddrVar().getSafeSymbol()+";");
- }
+ output.println(" pthread_mutex_lock( &(childCom->lock) );");
+ output.println(" if( childCom->doneExecuting == FALSE ) {");
+ output.println(" psem_reset( &runningSESEstallSem );");
+ output.println(" childCom->parentsStallSem = &runningSESEstallSem;");
+ output.println(" pthread_mutex_unlock( &(childCom->lock) );");
+ output.println(" psem_take( &runningSESEstallSem, (struct garbagelist *)&___locals___ );");
+ output.println(" } else {");
+ output.println(" pthread_mutex_unlock( &(childCom->lock) );");
+ output.println(" }");
- if( state.COREPROF ) {
- output.println("#ifdef CP_EVENTID_TASKSTALLVAR");
- output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_END );");
- output.println("#endif");
- }
+ // copy things we might have stalled for
+ Iterator<TempDescriptor> tdItr = cp.getCopySet(vst).iterator();
+ while( tdItr.hasNext() ) {
+ TempDescriptor td = tdItr.next();
+ output.println(" "+generateTemp(fmContext, td)+
+ " = child->"+vst.getAddrVar().getSafeSymbol()+";");
+ }
+
+ if( state.COREPROF ) {
+ output.println("#ifdef CP_EVENTID_TASKSTALLVAR");
+ output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_END );");
+ output.println("#endif");
+ }
- output.println(" }");
+ output.println(" }");
}
-
+
// for each variable with a dynamic source, stall just for that variable
Iterator<TempDescriptor> dynItr = cp.getDynamicStallSet().iterator();
while( dynItr.hasNext() ) {
- TempDescriptor dynVar = dynItr.next();
+ TempDescriptor dynVar = dynItr.next();
- // only stall if the dynamic source is not yourself, denoted by src==NULL
- // otherwise the dynamic write nodes will have the local var up-to-date
- output.println(" {");
- output.println(" if( "+dynVar+"_srcSESE != NULL ) {");
+ // only stall if the dynamic source is not yourself, denoted by src==NULL
+ // otherwise the dynamic write nodes will have the local var up-to-date
+ output.println(" {");
+ output.println(" if( "+dynVar+"_srcSESE != NULL ) {");
- output.println(" SESEcommon* childCom = (SESEcommon*) "+dynVar+"_srcSESE;");
+ output.println(" SESEcommon* childCom = (SESEcommon*) "+dynVar+"_srcSESE;");
- if( state.COREPROF ) {
- output.println("#ifdef CP_EVENTID_TASKSTALLVAR");
- output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_BEGIN );");
- output.println("#endif");
- }
+ if( state.COREPROF ) {
+ output.println("#ifdef CP_EVENTID_TASKSTALLVAR");
+ output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_BEGIN );");
+ output.println("#endif");
+ }
- output.println(" pthread_mutex_lock( &(childCom->lock) );");
- output.println(" if( childCom->doneExecuting == FALSE ) {");
- output.println(" psem_reset( &runningSESEstallSem );");
- output.println(" childCom->parentsStallSem = &runningSESEstallSem;");
- output.println(" pthread_mutex_unlock( &(childCom->lock) );");
- output.println(" psem_take( &runningSESEstallSem, (struct garbagelist *)&___locals___ );");
- output.println(" } else {");
- output.println(" pthread_mutex_unlock( &(childCom->lock) );");
- output.println(" }");
-
- TypeDescriptor type = dynVar.getType();
- String typeStr;
- if( type.isNull() ) {
- typeStr = "void*";
- } else if( type.isClass() || type.isArray() ) {
- typeStr = "struct "+type.getSafeSymbol()+"*";
- } else {
- typeStr = type.getSafeSymbol();
- }
-
- output.println(" "+generateTemp( fmContext, dynVar )+
- " = *(("+typeStr+"*) ((void*)"+
- dynVar+"_srcSESE + "+dynVar+"_srcOffset));");
-
- if( state.COREPROF ) {
- output.println("#ifdef CP_EVENTID_TASKSTALLVAR");
- output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_END );");
- output.println("#endif");
- }
+ output.println(" pthread_mutex_lock( &(childCom->lock) );");
+ output.println(" if( childCom->doneExecuting == FALSE ) {");
+ output.println(" psem_reset( &runningSESEstallSem );");
+ output.println(" childCom->parentsStallSem = &runningSESEstallSem;");
+ output.println(" pthread_mutex_unlock( &(childCom->lock) );");
+ output.println(" psem_take( &runningSESEstallSem, (struct garbagelist *)&___locals___ );");
+ output.println(" } else {");
+ output.println(" pthread_mutex_unlock( &(childCom->lock) );");
+ output.println(" }");
+
+ TypeDescriptor type = dynVar.getType();
+ String typeStr;
+ if( type.isNull() ) {
+ typeStr = "void*";
+ } else if( type.isClass() || type.isArray() ) {
+ typeStr = "struct "+type.getSafeSymbol()+"*";
+ } else {
+ typeStr = type.getSafeSymbol();
+ }
+
+ output.println(" "+generateTemp(fmContext, dynVar)+
+ " = *(("+typeStr+"*) ((void*)"+
+ dynVar+"_srcSESE + "+dynVar+"_srcOffset));");
- output.println(" }");
- output.println(" }");
+ if( state.COREPROF ) {
+ output.println("#ifdef CP_EVENTID_TASKSTALLVAR");
+ output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_END );");
+ output.println("#endif");
+ }
+
+ output.println(" }");
+ output.println(" }");
}
// for each assignment of a variable to rhs that has a dynamic source,
// copy the dynamic sources
Iterator dynAssignItr = cp.getDynAssigns().entrySet().iterator();
while( dynAssignItr.hasNext() ) {
- Map.Entry me = (Map.Entry) dynAssignItr.next();
- TempDescriptor lhs = (TempDescriptor) me.getKey();
- TempDescriptor rhs = (TempDescriptor) me.getValue();
-
- output.println(" {");
- output.println(" SESEcommon* oldSrc = "+lhs+"_srcSESE;");
-
- output.println(" "+lhs+"_srcSESE = "+rhs+"_srcSESE;");
- output.println(" "+lhs+"_srcOffset = "+rhs+"_srcOffset;");
-
- // no matter what we did above, track reference count of whatever
- // this variable pointed to, do release last in case we're just
- // copying the same value in because 1->2->1 is safe but ref count
- // 1->0->1 has a window where it looks like it should be free'd
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
- output.println(" if( "+rhs+"_srcSESE != NULL ) {");
- output.println(" ADD_REFERENCE_TO( "+rhs+"_srcSESE );");
- output.println(" }");
- output.println(" if( oldSrc != NULL ) {");
- output.println(" RELEASE_REFERENCE_TO( oldSrc );");
- output.println(" }");
- output.println(" }");
- output.println("#endif // OOO_DISABLE_TASKMEMPOOL" );
+ Map.Entry me = (Map.Entry)dynAssignItr.next();
+ TempDescriptor lhs = (TempDescriptor) me.getKey();
+ TempDescriptor rhs = (TempDescriptor) me.getValue();
+
+ output.println(" {");
+ output.println(" SESEcommon* oldSrc = "+lhs+"_srcSESE;");
+
+ output.println(" "+lhs+"_srcSESE = "+rhs+"_srcSESE;");
+ output.println(" "+lhs+"_srcOffset = "+rhs+"_srcOffset;");
+
+ // no matter what we did above, track reference count of whatever
+ // this variable pointed to, do release last in case we're just
+ // copying the same value in because 1->2->1 is safe but ref count
+ // 1->0->1 has a window where it looks like it should be free'd
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
+ output.println(" if( "+rhs+"_srcSESE != NULL ) {");
+ output.println(" ADD_REFERENCE_TO( "+rhs+"_srcSESE );");
+ output.println(" }");
+ output.println(" if( oldSrc != NULL ) {");
+ output.println(" RELEASE_REFERENCE_TO( oldSrc );");
+ output.println(" }");
+ output.println(" }");
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
}
// for each lhs that is dynamic from a non-dynamic source, set the
// dynamic source vars to the current SESE
dynItr = cp.getDynAssignCurr().iterator();
while( dynItr.hasNext() ) {
- TempDescriptor dynVar = dynItr.next();
+ TempDescriptor dynVar = dynItr.next();
- assert contextTaskNames.getDynamicVarSet().contains( dynVar );
+ assert contextTaskNames.getDynamicVarSet().contains(dynVar);
- // first release a reference to current record
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
- output.println(" if( "+dynVar+"_srcSESE != NULL ) {");
- output.println(" RELEASE_REFERENCE_TO( oldSrc );");
- output.println(" }");
- output.println("#endif // OOO_DISABLE_TASKMEMPOOL" );
+ // first release a reference to current record
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
+ output.println(" if( "+dynVar+"_srcSESE != NULL ) {");
+ output.println(" RELEASE_REFERENCE_TO( "+dynVar+"_srcSESE );");
+ output.println(" }");
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
- output.println(" "+dynVar+"_srcSESE = NULL;");
+ output.println(" "+dynVar+"_srcSESE = NULL;");
}
-
+
// handling stall site, consider that one of several tasks might be
// executing, so create a switch on task ID, because waiting elements
// generated by this stall site should be inserted into possibly a
boolean atLeastOneCase = false;
// create a case for each class of task that might be executing
- Iterator<FlatSESEEnterNode> taskItr = oooa.getPossibleExecutingRBlocks( fn ).iterator();
+ Iterator<FlatSESEEnterNode> taskItr = oooa.getPossibleExecutingRBlocks(fn).iterator();
while( taskItr.hasNext() ) {
- FlatSESEEnterNode parent = taskItr.next();
- ConflictGraph graph = oooa.getConflictGraph( parent );
+ FlatSESEEnterNode parent = taskItr.next();
+ ConflictGraph graph = oooa.getConflictGraph(parent);
- if( graph == null ) {
- continue;
- }
+ if( graph == null ) {
+ continue;
+ }
- Set<SESELock> seseLockSet = oooa.getLockMappings( graph );
- Set<WaitingElement> waitingElementSet = graph.getStallSiteWaitingElementSet( fn, seseLockSet );
-
- if( waitingElementSet.size() == 0 ) {
- continue;
- }
+ Set<SESELock> seseLockSet = oooa.getLockMappings(graph);
+ Set<WaitingElement> waitingElementSet = graph.getStallSiteWaitingElementSet(fn, seseLockSet);
- // TODO: THIS STRATEGY CAN BE OPTIMIZED EVEN FURTHER, IF THERE
- // IS EXACTLY ONE CASE, DON'T GENERATE A SWITCH AT ALL
- if( atLeastOneCase == false ) {
- atLeastOneCase = true;
- output.println(" // potential stall site ");
- output.println(" switch( runningSESE->classID ) {");
- }
+ if( waitingElementSet.size() == 0 ) {
+ continue;
+ }
- output.println(" case "+parent.getIdentifier()+": {");
-
- if( state.RCR ) {
- stallMEMRCR(fm, fn, waitingElementSet, output);
- } else {
-
- output.println(" REntry* rentry;");
-
- for( Iterator iterator = waitingElementSet.iterator(); iterator.hasNext(); ) {
- WaitingElement waitingElement = (WaitingElement) iterator.next();
-
- if (waitingElement.getStatus() >= ConflictNode.COARSE) {
- output.println(" rentry=mlpCreateREntry(runningSESE->memoryQueueArray["
- + waitingElement.getQueueID() + "]," + waitingElement.getStatus()
- + ", runningSESE);");
- } else {
- output.println(" rentry=mlpCreateFineREntry(runningSESE->memoryQueueArray["
- + waitingElement.getQueueID() + "]," + waitingElement.getStatus()
- + ", runningSESE, (void*)&"
- + generateTemp(fm, waitingElement.getTempDesc()) + ");");
- }
- output.println(" rentry->parentStallSem=&runningSESEstallSem;");
- output.println(" psem_reset( &runningSESEstallSem);");
- output.println(" rentry->tag=runningSESEstallSem.tag;");
- output.println(" rentry->queue=runningSESE->memoryQueueArray["
- + waitingElement.getQueueID() + "];");
- output.println(" if(ADDRENTRY(runningSESE->memoryQueueArray["
- + waitingElement.getQueueID() + "],rentry)==NOTREADY){");
- if (state.COREPROF) {
- output.println("#ifdef CP_EVENTID_TASKSTALLMEM");
- output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_BEGIN );");
- output.println("#endif");
- }
-
- output.println(" psem_take( &runningSESEstallSem, (struct garbagelist *)&___locals___ );");
-
- if (state.COREPROF) {
- output.println("#ifdef CP_EVENTID_TASKSTALLMEM");
- output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_END );");
- output.println("#endif");
- }
- output.println(" } ");
- }
+ // TODO: THIS STRATEGY CAN BE OPTIMIZED EVEN FURTHER, IF THERE
+ // IS EXACTLY ONE CASE, DON'T GENERATE A SWITCH AT ALL
+ if( atLeastOneCase == false ) {
+ atLeastOneCase = true;
+ output.println(" // potential stall site ");
+ output.println(" switch( runningSESE->classID ) {");
+ }
- }
- output.println(" } break; // end case "+parent.getIdentifier());
+ output.println(" case "+parent.getIdentifier()+": {");
+
+ if( state.RCR ) {
+ stallMEMRCR(fm, fn, waitingElementSet, output);
+ } else {
+
+ output.println(" REntry* rentry;");
+
+ for( Iterator iterator = waitingElementSet.iterator(); iterator.hasNext(); ) {
+ WaitingElement waitingElement = (WaitingElement) iterator.next();
+
+ if (waitingElement.getStatus() >= ConflictNode.COARSE) {
+ output.println(" rentry=mlpCreateREntry(runningSESE->memoryQueueArray["
+ + waitingElement.getQueueID() + "]," + waitingElement.getStatus()
+ + ", runningSESE);");
+ } else {
+ output.println(" rentry=mlpCreateFineREntry(runningSESE->memoryQueueArray["
+ + waitingElement.getQueueID() + "]," + waitingElement.getStatus()
+ + ", runningSESE, (void*)&"
+ + generateTemp(fm, waitingElement.getTempDesc()) + ");");
+ }
+ output.println(" rentry->parentStallSem=&runningSESEstallSem;");
+ output.println(" psem_reset( &runningSESEstallSem);");
+ output.println(" rentry->tag=runningSESEstallSem.tag;");
+ output.println(" rentry->queue=runningSESE->memoryQueueArray["
+ + waitingElement.getQueueID() + "];");
+ output.println(" if(ADDRENTRY(runningSESE->memoryQueueArray["
+ + waitingElement.getQueueID() + "],rentry)==NOTREADY){");
+ if (state.COREPROF) {
+ output.println("#ifdef CP_EVENTID_TASKSTALLMEM");
+ output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_BEGIN );");
+ output.println("#endif");
+ }
+
+ output.println(" psem_take( &runningSESEstallSem, (struct garbagelist *)&___locals___ );");
+
+ if (state.COREPROF) {
+ output.println("#ifdef CP_EVENTID_TASKSTALLMEM");
+ output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_END );");
+ output.println("#endif");
+ }
+ output.println(" } ");
+ }
+
+ }
+ output.println(" } break; // end case "+parent.getIdentifier());
}
if( atLeastOneCase ) {
- output.println(" } // end stall site switch");
+ output.println(" } // end stall site switch");
}
}
}
-
- protected void additionalCodePostNode( FlatMethod fm,
- FlatNode fn,
- PrintWriter output ) {
+
+ protected void additionalCodePostNode(FlatMethod fm,
+ FlatNode fn,
+ PrintWriter output) {
// insert post-node actions from the code-plan (none right now...)
}
- public void generateFlatSESEEnterNode( FlatMethod fm,
- FlatSESEEnterNode fsen,
- PrintWriter output ) {
+ public void generateFlatSESEEnterNode(FlatMethod fm,
+ FlatSESEEnterNode fsen,
+ PrintWriter output) {
// there may be an SESE in an unreachable method, skip over
- if( !oooa.getAllSESEs().contains( fsen ) ) {
+ if( !oooa.getAllSESEs().contains(fsen) ) {
return;
}
}
// allocate the space for this record
- output.println( "#ifndef OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
- output.println( "#ifdef CP_EVENTID_POOLALLOC");
- output.println( " CP_LOGEVENT( CP_EVENTID_POOLALLOC, CP_EVENTTYPE_BEGIN );");
- output.println( "#endif");
+ output.println("#ifdef CP_EVENTID_POOLALLOC");
+ output.println(" CP_LOGEVENT( CP_EVENTID_POOLALLOC, CP_EVENTTYPE_BEGIN );");
+ output.println("#endif");
if( !fsen.getIsMainSESE() ) {
output.println(" "+
fsen.getSESErecordName()+"* seseToIssue = ("+
fsen.getSESErecordName()+"*) mlpAllocSESErecord( sizeof( "+
fsen.getSESErecordName()+" ) );");
}
- output.println( "#ifdef CP_EVENTID_POOLALLOC");
- output.println( " CP_LOGEVENT( CP_EVENTID_POOLALLOC, CP_EVENTTYPE_END );");
- output.println( "#endif");
+ output.println("#ifdef CP_EVENTID_POOLALLOC");
+ output.println(" CP_LOGEVENT( CP_EVENTID_POOLALLOC, CP_EVENTTYPE_END );");
+ output.println("#endif");
- output.println( "#else // OOO_DISABLE_TASKMEMPOOL" );
- output.println(" "+
- fsen.getSESErecordName()+"* seseToIssue = ("+
- fsen.getSESErecordName()+"*) mlpAllocSESErecord( sizeof( "+
- fsen.getSESErecordName()+" ) );");
- output.println( "#endif // OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#else // OOO_DISABLE_TASKMEMPOOL");
+ output.println(" "+
+ fsen.getSESErecordName()+"* seseToIssue = ("+
+ fsen.getSESErecordName()+"*) mlpAllocSESErecord( sizeof( "+
+ fsen.getSESErecordName()+" ) );");
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
// set up the SESE in-set and out-set objects, which look
// record to the first dependent record pointer?
output.println(" seseToIssue->common.numDependentSESErecords="+
fsen.getNumDepRecs()+";");
-
+
// we only need this (and it will only compile) when the number of dependent
// SESE records is non-zero
if( fsen.getFirstDepRecField() != null ) {
fsen.getSESErecordName()+"*)0)->"+fsen.getFirstDepRecField()+");"
);
}
-
+
if( state.RCR &&
- fsen.getInVarsForDynamicCoarseConflictResolution().size() > 0
+ fsen.getInVarsForDynamicCoarseConflictResolution().size() > 0
) {
output.println(" seseToIssue->common.offsetToParamRecords=(INTPTR) & ((("+
fsen.getSESErecordName()+"*)0)->rcrRecords);");
output.println(" seseToIssue->common.unresolvedDependencies = 10000;");
output.println(" seseToIssue->common.parentsStallSem = NULL;");
output.println(" initQueue(&seseToIssue->common.forwardList);");
- output.println(" seseToIssue->common.doneExecuting = FALSE;");
+ output.println(" seseToIssue->common.doneExecuting = FALSE;");
output.println(" seseToIssue->common.numRunningChildren = 0;");
- output.println( "#ifdef OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#ifdef OOO_DISABLE_TASKMEMPOOL");
output.println(" pthread_cond_init( &(seseToIssue->common.runningChildrenCond), NULL );");
output.println("#endif");
output.println(" seseToIssue->common.parent = runningSESE;");
if( state.RCR ) {
// if we're using RCR, ref count is 3 because the traverser has
// a reference, too
- if( !fsen.getIsMainSESE() && fsen.getInVarsForDynamicCoarseConflictResolution().size()>0){
- output.println(" seseToIssue->common.refCount = 10003;");
+ if( !fsen.getIsMainSESE() && fsen.getInVarsForDynamicCoarseConflictResolution().size()>0) {
+ output.println(" seseToIssue->common.refCount = 10003;");
} else {
- output.println(" seseToIssue->common.refCount = 10002;");
+ output.println(" seseToIssue->common.refCount = 10002;");
}
output.println(" int refCount=10000;");
} else {
FlatSESEEnterNode parent = fsen.getLocalParent();
if( parent != null && !parent.getIsCallerProxySESE() ) {
output.println(" seseToIssue->"+temp+" = "+
- generateTemp( parent.getfmBogus(), temp )+";");
+ generateTemp(parent.getfmBogus(), temp)+";");
} else {
output.println(" seseToIssue->"+temp+" = "+
- generateTemp( fsen.getfmEnclosing(), temp )+";");
+ generateTemp(fsen.getfmEnclosing(), temp)+";");
}
}
-
+
// before potentially adding this SESE to other forwarding lists,
// create it's lock
- output.println( "#ifdef OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#ifdef OOO_DISABLE_TASKMEMPOOL");
output.println(" pthread_mutex_init( &(seseToIssue->common.lock), NULL );");
output.println("#endif");
-
+
if( !fsen.getIsMainSESE() ) {
// count up outstanding dependencies, static first, then dynamic
Iterator<SESEandAgePair> staticSrcsItr = fsen.getStaticInVarSrcs().iterator();
output.println(" {");
output.println(" SESEcommon* src = (SESEcommon*)"+srcPair+";");
output.println(" pthread_mutex_lock( &(src->lock) );");
- // FORWARD TODO - ...what? make it a chain of arrays instead of true linked-list?
+ // FORWARD TODO - ...what? make it a chain of arrays instead of true linked-list?
output.println(" if( !src->doneExecuting ) {");
- output.println(" addNewItem( &src->forwardList, seseToIssue );");
+ output.println(" addNewItem( &src->forwardList, seseToIssue );");
output.println(" ++(localCount);");
output.println(" }");
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
- output.println(" ADD_REFERENCE_TO( src );");
- output.println("#endif" );
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
+ output.println(" ADD_REFERENCE_TO( src );");
+ output.println("#endif");
output.println(" pthread_mutex_unlock( &(src->lock) );");
output.println(" }");
// whether or not it is an outstanding dependency, make sure
// to pass the static name to the child's record
output.println(" seseToIssue->"+srcPair+" = "+
- "("+srcPair.getSESE().getSESErecordName()+"*)"+
- srcPair+";");
+ "("+srcPair.getSESE().getSESErecordName()+"*)"+
+ srcPair+";");
}
-
+
// dynamic sources might already be accounted for in the static list,
// so only add them to forwarding lists if they're not already there
Iterator<TempDescriptor> dynVarsItr = fsen.getDynamicInVarSet().iterator();
output.println(" if( src != NULL ) {");
output.println(" pthread_mutex_lock( &(src->lock) );");
- // FORWARD TODO
+ // FORWARD TODO
output.println(" if( isEmpty( &src->forwardList ) ||");
output.println(" seseToIssue != peekItem( &src->forwardList ) ) {");
output.println(" ++(localCount);");
output.println(" }");
output.println(" }");
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
- output.println(" ADD_REFERENCE_TO( src );");
- output.println("#endif" );
- output.println(" pthread_mutex_unlock( &(src->lock) );");
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
+ output.println(" ADD_REFERENCE_TO( src );");
+ output.println("#endif");
+ output.println(" pthread_mutex_unlock( &(src->lock) );");
output.println(" seseToIssue->"+dynInVar+"_srcOffset = "+dynInVar+"_srcOffset;");
output.println(" } else {");
- // determine whether this new task instance is in a method context,
- // or within the body of another task
- assert !fsen.getIsCallerProxySESE();
- FlatSESEEnterNode parent = fsen.getLocalParent();
- if( parent != null && !parent.getIsCallerProxySESE() ) {
- output.println(" seseToIssue->"+dynInVar+" = "+
- generateTemp( parent.getfmBogus(), dynInVar )+";");
- } else {
- output.println(" seseToIssue->"+dynInVar+" = "+
- generateTemp( fsen.getfmEnclosing(), dynInVar )+";");
- }
-
+ // determine whether this new task instance is in a method context,
+ // or within the body of another task
+ assert !fsen.getIsCallerProxySESE();
+ FlatSESEEnterNode parent = fsen.getLocalParent();
+ if( parent != null && !parent.getIsCallerProxySESE() ) {
+ output.println(" seseToIssue->"+dynInVar+" = "+
+ generateTemp(parent.getfmBogus(), dynInVar)+";");
+ } else {
+ output.println(" seseToIssue->"+dynInVar+" = "+
+ generateTemp(fsen.getfmEnclosing(), dynInVar)+";");
+ }
+
output.println(" }");
output.println(" }");
-
+
// even if the value is already copied, make sure your NULL source
// gets passed so child knows it already has the dynamic value
output.println(" seseToIssue->"+dynInVar+"_srcSESE = "+dynInVar+"_srcSESE;");
}
- // maintain pointers for finding dynamic SESE
+ // maintain pointers for finding dynamic SESE
// instances from static names, do a shuffle as instances age
// and also release references that have become too old
if( !fsen.getIsMainSESE() ) {
- FlatSESEEnterNode currentSESE = fsen.getLocalParent();
+ FlatSESEEnterNode currentSESE = fsen.getLocalParent();
- ContextTaskNames contextTaskNames;
- if( currentSESE == null ) {
- contextTaskNames = oooa.getContextTaskNames( oooa.getContainingFlatMethod( fsen ) );
- } else {
- contextTaskNames = oooa.getContextTaskNames( currentSESE );
- }
+ ContextTaskNames contextTaskNames;
+ if( currentSESE == null ) {
+ contextTaskNames = oooa.getContextTaskNames(oooa.getContainingFlatMethod(fsen));
+ } else {
+ contextTaskNames = oooa.getContextTaskNames(currentSESE);
+ }
- SESEandAgePair pairNewest = new SESEandAgePair( fsen, 0 );
- SESEandAgePair pairOldest = new SESEandAgePair( fsen, fsen.getOldestAgeToTrack() );
- if( contextTaskNames.getNeededStaticNames().contains( pairNewest ) ) {
- output.println(" {");
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
- output.println(" SESEcommon* oldest = "+pairOldest+";");
- output.println("#endif // OOO_DISABLE_TASKMEMPOOL" );
-
- for( int i = fsen.getOldestAgeToTrack(); i > 0; --i ) {
- SESEandAgePair pair1 = new SESEandAgePair( fsen, i );
- SESEandAgePair pair2 = new SESEandAgePair( fsen, i-1 );
- output.println(" "+pair1+" = "+pair2+";");
- }
- output.println(" "+pairNewest+" = &(seseToIssue->common);");
-
- // no need to add a reference to whatever is the newest record, because
- // we initialized seseToIssue->refCount to *2*
- // but release a reference to whatever was the oldest BEFORE the shift
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
- output.println(" if( oldest != NULL ) {");
- output.println(" RELEASE_REFERENCE_TO( oldest );");
- output.println(" }");
- output.println("#endif // OOO_DISABLE_TASKMEMPOOL" );
- output.println(" }");
- }
+ SESEandAgePair pairNewest = new SESEandAgePair(fsen, 0);
+ SESEandAgePair pairOldest = new SESEandAgePair(fsen, fsen.getOldestAgeToTrack());
+ if( contextTaskNames.getNeededStaticNames().contains(pairNewest) ) {
+ output.println(" {");
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
+ output.println(" SESEcommon* oldest = "+pairOldest+";");
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
+
+ for( int i = fsen.getOldestAgeToTrack(); i > 0; --i ) {
+ SESEandAgePair pair1 = new SESEandAgePair(fsen, i);
+ SESEandAgePair pair2 = new SESEandAgePair(fsen, i-1);
+ output.println(" "+pair1+" = "+pair2+";");
+ }
+ output.println(" "+pairNewest+" = &(seseToIssue->common);");
+
+ // no need to add a reference to whatever is the newest record, because
+ // we initialized seseToIssue->refCount to *2*
+ // but release a reference to whatever was the oldest BEFORE the shift
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
+ output.println(" if( oldest != NULL ) {");
+ output.println(" RELEASE_REFERENCE_TO( oldest );");
+ output.println(" }");
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
+ output.println(" }");
+ }
}
}
if( !fsen.getIsMainSESE() ) {
if( state.COREPROF ) {
- output.println("#ifdef CP_EVENTID_PREPAREMEMQ");
- output.println(" CP_LOGEVENT( CP_EVENTID_PREPAREMEMQ, CP_EVENTTYPE_BEGIN );");
- output.println("#endif");
+ output.println("#ifdef CP_EVENTID_PREPAREMEMQ");
+ output.println(" CP_LOGEVENT( CP_EVENTID_PREPAREMEMQ, CP_EVENTTYPE_BEGIN );");
+ output.println("#endif");
}
if(state.RCR) {
- dispatchMEMRC(fm, fsen, output);
+ dispatchMEMRC(fm, fsen, output);
} else {
- // there may be several task types that can get to this
- // program point (issue this new task) so create a switch
- // based on task ID, each type of task has a different index
- // scheme for its memory queue's, and the cases here drop the
- // new task instance in the right bucket
- boolean atLeastOneCase = false;
-
- // create a case for each class of task that might be executing
- Iterator<FlatSESEEnterNode> taskItr = oooa.getPossibleExecutingRBlocks( fsen ).iterator();
- while( taskItr.hasNext() ) {
- FlatSESEEnterNode parent = taskItr.next();
- ConflictGraph graph = oooa.getConflictGraph( parent );
-
- if( graph == null || !graph.hasConflictEdge() ) {
- continue;
- }
-
- Set<SESELock> seseLockSet = oooa.getLockMappings(graph);
-
- SESEWaitingQueue seseWaitingQueue =
- graph.getWaitingElementSetBySESEID(fsen.getIdentifier(), seseLockSet);
-
- if( seseWaitingQueue.getWaitingElementSize() == 0 ) {
- continue;
- }
-
- if( atLeastOneCase == false ) {
- atLeastOneCase = true;
- output.println(" // add new task instance to current task's memory queues if needed ");
- output.println(" switch( runningSESE->classID ) {");
- }
-
- output.println(" case "+parent.getIdentifier()+": {");
- output.println(" REntry* rentry=NULL;");
- output.println(" INTPTR* pointer=NULL;");
- output.println(" seseToIssue->common.rentryIdx=0;");
-
- Set<Integer> queueIDSet=seseWaitingQueue.getQueueIDSet();
- for (Iterator iterator = queueIDSet.iterator(); iterator.hasNext();) {
- Integer key = (Integer) iterator.next();
- int queueID=key.intValue();
- Set<WaitingElement> waitingQueueSet =
- seseWaitingQueue.getWaitingElementSet(queueID);
- int enqueueType=seseWaitingQueue.getType(queueID);
- if(enqueueType==SESEWaitingQueue.EXCEPTION) {
- output.println(" INITIALIZEBUF(runningSESE->memoryQueueArray[" + queueID+ "]);");
- }
- for (Iterator iterator2 = waitingQueueSet.iterator(); iterator2.hasNext();) {
- WaitingElement waitingElement
- = (WaitingElement) iterator2.next();
- if (waitingElement.getStatus() >= ConflictNode.COARSE) {
- output.println(" rentry=mlpCreateREntry(runningSESE->memoryQueueArray["+ queueID+ "],"
- + waitingElement.getStatus()
- + ", &(seseToIssue->common));");
- } else {
- TempDescriptor td = waitingElement.getTempDesc();
- // decide whether waiting element is dynamic or static
- if (fsen.getDynamicInVarSet().contains(td)) {
- // dynamic in-var case
- output.println(" pointer=seseToIssue->"
- + waitingElement.getDynID()
- + "_srcSESE+seseToIssue->"
- + waitingElement.getDynID()
- + "_srcOffset;");
- output.println(" rentry=mlpCreateFineREntry(runningSESE->memoryQueueArray["+ queueID+ "],"
- + waitingElement.getStatus()
- + ", &(seseToIssue->common), pointer );");
- } else if (fsen.getStaticInVarSet().contains(td)) {
- // static in-var case
- VariableSourceToken vst = fsen.getStaticInVarSrc(td);
- if (vst != null) {
-
- String srcId = "SESE_" + vst.getSESE().getPrettyIdentifier()
- + vst.getSESE().getIdentifier()
- + "_" + vst.getAge();
- output.println(" pointer=(void*)&seseToIssue->"
- + srcId
- + "->"
- + waitingElement
- .getDynID()
- + ";");
- output.println(" rentry=mlpCreateFineREntry(runningSESE->memoryQueueArray["+ queueID+ "],"
- + waitingElement.getStatus()
- + ", &(seseToIssue->common), pointer );");
- }
- } else {
- output.println(" rentry=mlpCreateFineREntry(runningSESE->memoryQueueArray["+ queueID+ "],"
- + waitingElement.getStatus()
- + ", &(seseToIssue->common), (void*)&seseToIssue->"
- + waitingElement.getDynID()
- + ");");
- }
- }
- output.println(" rentry->queue=runningSESE->memoryQueueArray["
- + waitingElement.getQueueID()
- + "];");
-
- if(enqueueType==SESEWaitingQueue.NORMAL){
- output.println(" seseToIssue->common.rentryArray[seseToIssue->common.rentryIdx++]=rentry;");
- output.println(" if(ADDRENTRY(runningSESE->memoryQueueArray["
- + waitingElement.getQueueID()
- + "],rentry)==NOTREADY) {");
- output.println(" localCount++;");
- output.println(" }");
- } else {
- output.println(" ADDRENTRYTOBUF(runningSESE->memoryQueueArray[" + waitingElement.getQueueID() + "],rentry);");
- }
- }
- if(enqueueType!=SESEWaitingQueue.NORMAL){
- output.println(" localCount+=RESOLVEBUF(runningSESE->memoryQueueArray["
- + queueID+ "],&seseToIssue->common);");
- }
- }
- output.println(" } break; // end case "+parent.getIdentifier());
- output.println();
- }
+ // there may be several task types that can get to this
+ // program point (issue this new task) so create a switch
+ // based on task ID, each type of task has a different index
+ // scheme for its memory queue's, and the cases here drop the
+ // new task instance in the right bucket
+ boolean atLeastOneCase = false;
+
+ // create a case for each class of task that might be executing
+ Iterator<FlatSESEEnterNode> taskItr = oooa.getPossibleExecutingRBlocks(fsen).iterator();
+ while( taskItr.hasNext() ) {
+ FlatSESEEnterNode parent = taskItr.next();
+ ConflictGraph graph = oooa.getConflictGraph(parent);
+
+ if( graph == null || !graph.hasConflictEdge() ) {
+ continue;
+ }
+
+ Set<SESELock> seseLockSet = oooa.getLockMappings(graph);
+
+ SESEWaitingQueue seseWaitingQueue =
+ graph.getWaitingElementSetBySESEID(fsen.getIdentifier(), seseLockSet);
+
+ if( seseWaitingQueue.getWaitingElementSize() == 0 ) {
+ continue;
+ }
+
+ if( atLeastOneCase == false ) {
+ atLeastOneCase = true;
+ output.println(" // add new task instance to current task's memory queues if needed ");
+ output.println(" switch( runningSESE->classID ) {");
+ }
+
+ output.println(" case "+parent.getIdentifier()+": {");
+ output.println(" REntry* rentry=NULL;");
+ output.println(" INTPTR* pointer=NULL;");
+ output.println(" seseToIssue->common.rentryIdx=0;");
+
+ Set<Integer> queueIDSet=seseWaitingQueue.getQueueIDSet();
+ for (Iterator iterator = queueIDSet.iterator(); iterator.hasNext(); ) {
+ Integer key = (Integer) iterator.next();
+ int queueID=key.intValue();
+ Set<WaitingElement> waitingQueueSet =
+ seseWaitingQueue.getWaitingElementSet(queueID);
+ int enqueueType=seseWaitingQueue.getType(queueID);
+ if(enqueueType==SESEWaitingQueue.EXCEPTION) {
+ output.println(" INITIALIZEBUF(runningSESE->memoryQueueArray[" + queueID+ "]);");
+ }
+ for (Iterator iterator2 = waitingQueueSet.iterator(); iterator2.hasNext(); ) {
+ WaitingElement waitingElement
+ = (WaitingElement) iterator2.next();
+ if (waitingElement.getStatus() >= ConflictNode.COARSE) {
+ output.println(" rentry=mlpCreateREntry(runningSESE->memoryQueueArray["+ queueID+ "],"
+ + waitingElement.getStatus()
+ + ", &(seseToIssue->common));");
+ } else {
+ TempDescriptor td = waitingElement.getTempDesc();
+ // decide whether waiting element is dynamic or static
+ if (fsen.getDynamicInVarSet().contains(td)) {
+ // dynamic in-var case
+ output.println(" pointer=seseToIssue->"
+ + waitingElement.getDynID()
+ + "_srcSESE+seseToIssue->"
+ + waitingElement.getDynID()
+ + "_srcOffset;");
+ output.println(" rentry=mlpCreateFineREntry(runningSESE->memoryQueueArray["+ queueID+ "],"
+ + waitingElement.getStatus()
+ + ", &(seseToIssue->common), pointer );");
+ } else if (fsen.getStaticInVarSet().contains(td)) {
+ // static in-var case
+ VariableSourceToken vst = fsen.getStaticInVarSrc(td);
+ if (vst != null) {
+
+ String srcId = "SESE_" + vst.getSESE().getPrettyIdentifier()
+ + vst.getSESE().getIdentifier()
+ + "_" + vst.getAge();
+ output.println(" pointer=(void*)&seseToIssue->"
+ + srcId
+ + "->"
+ + waitingElement
+ .getDynID()
+ + ";");
+ output.println(" rentry=mlpCreateFineREntry(runningSESE->memoryQueueArray["+ queueID+ "],"
+ + waitingElement.getStatus()
+ + ", &(seseToIssue->common), pointer );");
+ }
+ } else {
+ output.println(" rentry=mlpCreateFineREntry(runningSESE->memoryQueueArray["+ queueID+ "],"
+ + waitingElement.getStatus()
+ + ", &(seseToIssue->common), (void*)&seseToIssue->"
+ + waitingElement.getDynID()
+ + ");");
+ }
+ }
+ output.println(" rentry->queue=runningSESE->memoryQueueArray["
+ + waitingElement.getQueueID()
+ + "];");
+
+ if(enqueueType==SESEWaitingQueue.NORMAL) {
+ output.println(" seseToIssue->common.rentryArray[seseToIssue->common.rentryIdx++]=rentry;");
+ output.println(" if(ADDRENTRY(runningSESE->memoryQueueArray["
+ + waitingElement.getQueueID()
+ + "],rentry)==NOTREADY) {");
+ output.println(" localCount++;");
+ output.println(" }");
+ } else {
+ output.println(" ADDRENTRYTOBUF(runningSESE->memoryQueueArray[" + waitingElement.getQueueID() + "],rentry);");
+ }
+ }
+ if(enqueueType!=SESEWaitingQueue.NORMAL) {
+ output.println(" localCount+=RESOLVEBUF(runningSESE->memoryQueueArray["
+ + queueID+ "],&seseToIssue->common);");
+ }
+ }
+ output.println(" } break; // end case "+parent.getIdentifier());
+ output.println();
+ }
+
+ if( atLeastOneCase ) {
+ output.println(" } // end stall site switch");
+ }
+ }
- if( atLeastOneCase ) {
- output.println(" } // end stall site switch");
- }
- }
-
if( state.COREPROF ) {
- output.println("#ifdef CP_EVENTID_PREPAREMEMQ");
- output.println(" CP_LOGEVENT( CP_EVENTID_PREPAREMEMQ, CP_EVENTTYPE_END );");
- output.println("#endif");
+ output.println("#ifdef CP_EVENTID_PREPAREMEMQ");
+ output.println(" CP_LOGEVENT( CP_EVENTID_PREPAREMEMQ, CP_EVENTTYPE_END );");
+ output.println("#endif");
}
}
// Enqueue Task Record
if (state.RCR) {
- if( fsen != oooa.getMainSESE() && fsen.getInVarsForDynamicCoarseConflictResolution().size()>0){
- output.println(" enqueueTR(TRqueue, (void *)seseToIssue);");
+ if( fsen != oooa.getMainSESE() && fsen.getInVarsForDynamicCoarseConflictResolution().size()>0) {
+ output.println(" enqueueTR(TRqueue, (void *)seseToIssue);");
}
}
output.println(" workScheduleSubmit( (void*)seseToIssue );");
output.println(" }");
-
+
if( state.COREPROF ) {
output.println("#ifdef CP_EVENTID_TASKDISPATCH");
}
- void dispatchMEMRC( FlatMethod fm,
- FlatSESEEnterNode newChild,
- PrintWriter output ) {
+ void dispatchMEMRC(FlatMethod fm,
+ FlatSESEEnterNode newChild,
+ PrintWriter output) {
// what we need to do here is create RCR records for the
// new task and insert it into the appropriate parent queues
// IF NEEDED!!!!!!!!
while( pItr.hasNext() ) {
FlatSESEEnterNode parent = pItr.next();
- ConflictGraph graph = oooa.getConflictGraph( parent );
+ ConflictGraph graph = oooa.getConflictGraph(parent);
if( graph != null && graph.hasConflictEdge() ) {
- Set<SESELock> seseLockSet = oooa.getLockMappings(graph);
- SESEWaitingQueue seseWaitingQueue=graph.getWaitingElementSetBySESEID(newChild.getIdentifier(), seseLockSet);
- if(seseWaitingQueue.getWaitingElementSize()>0) {
-
- output.println(" /* "+parent.getPrettyIdentifier()+" */");
- output.println(" case "+parent.getIdentifier()+": {");
-
- output.println(" REntry* rentry=NULL;");
- output.println(" INTPTR* pointer=NULL;");
- output.println(" seseToIssue->common.rentryIdx=0;");
- Vector<TempDescriptor> invars=newChild.getInVarsForDynamicCoarseConflictResolution();
- //System.out.println(fm.getMethod()+"["+invars+"]");
-
- Vector<Long> queuetovar=new Vector<Long>();
-
- for(int i=0;i<invars.size();i++) {
- TempDescriptor td=invars.get(i);
- Set<WaitingElement> weset=seseWaitingQueue.getWaitingElementSet(td);
-
- //TODO FIX MEEEEE!!!!
- //Weset is sometimes null which breaks the following code and
- //we don't know what weset = null means. For now, we bail when it's null
- //until we find out what to do....
+ Set<SESELock> seseLockSet = oooa.getLockMappings(graph);
+ SESEWaitingQueue seseWaitingQueue=graph.getWaitingElementSetBySESEID(newChild.getIdentifier(), seseLockSet);
+ if(seseWaitingQueue.getWaitingElementSize()>0) {
+
+ output.println(" /* "+parent.getPrettyIdentifier()+" */");
+ output.println(" case "+parent.getIdentifier()+": {");
+
+ output.println(" REntry* rentry=NULL;");
+ output.println(" INTPTR* pointer=NULL;");
+ output.println(" seseToIssue->common.rentryIdx=0;");
+ Vector<TempDescriptor> invars=newChild.getInVarsForDynamicCoarseConflictResolution();
+ //System.out.println(fm.getMethod()+"["+invars+"]");
+
+ Vector<Long> queuetovar=new Vector<Long>();
+
+ for(int i=0; i<invars.size(); i++) {
+ TempDescriptor td=invars.get(i);
+ Set<WaitingElement> weset=seseWaitingQueue.getWaitingElementSet(td);
+
+ //TODO FIX MEEEEE!!!!
+ //Weset is sometimes null which breaks the following code and
+ //we don't know what weset = null means. For now, we bail when it's null
+ //until we find out what to do....
// if(weset == null) {
// continue;
// }
- //UPDATE: This hack DOES NOT FIX IT!.
-
-
-
- int numqueues=0;
- Set<Integer> queueSet=new HashSet<Integer>();
- for (Iterator iterator = weset.iterator(); iterator.hasNext();) {
- WaitingElement we = (WaitingElement) iterator.next();
- Integer queueID=new Integer( we.getQueueID());
- if(!queueSet.contains(queueID)){
- numqueues++;
- queueSet.add(queueID);
- }
- }
-
- output.println(" seseToIssue->rcrRecords["+i+"].flag="+numqueues+";");
- output.println(" seseToIssue->rcrRecords["+i+"].index=0;");
- output.println(" seseToIssue->rcrRecords["+i+"].next=NULL;");
- output.println(" int dispCount"+i+"=0;");
-
- for (Iterator<WaitingElement> wtit = weset.iterator(); wtit.hasNext();) {
- WaitingElement waitingElement = wtit.next();
- int queueID = waitingElement.getQueueID();
- if (queueID >= queuetovar.size())
- queuetovar.setSize(queueID + 1);
- Long l = queuetovar.get(queueID);
- long val = (l != null) ? l.longValue() : 0;
- val = val | (1 << i);
- queuetovar.set(queueID, new Long(val));
- }
- }
-
- HashSet generatedqueueentry=new HashSet();
- for(int i=0;i<invars.size();i++) {
- TempDescriptor td=invars.get(i);
- Set<WaitingElement> weset=seseWaitingQueue.getWaitingElementSet(td);
-
-
-
- //TODO FIX MEEEEE!!!!
- //Weset is sometimes null which breaks the following code and
- //we don't know what weset = null means. For now, we bail when it's null
- //until we find out what to do....
+ //UPDATE: This hack DOES NOT FIX IT!.
+
+
+
+ int numqueues=0;
+ Set<Integer> queueSet=new HashSet<Integer>();
+ for (Iterator iterator = weset.iterator(); iterator.hasNext(); ) {
+ WaitingElement we = (WaitingElement) iterator.next();
+ Integer queueID=new Integer(we.getQueueID());
+ if(!queueSet.contains(queueID)) {
+ numqueues++;
+ queueSet.add(queueID);
+ }
+ }
+
+ output.println(" seseToIssue->rcrRecords["+i+"].flag="+numqueues+";");
+ output.println(" seseToIssue->rcrRecords["+i+"].index=0;");
+ output.println(" seseToIssue->rcrRecords["+i+"].next=NULL;");
+ output.println(" int dispCount"+i+"=0;");
+
+ for (Iterator<WaitingElement> wtit = weset.iterator(); wtit.hasNext(); ) {
+ WaitingElement waitingElement = wtit.next();
+ int queueID = waitingElement.getQueueID();
+ if (queueID >= queuetovar.size())
+ queuetovar.setSize(queueID + 1);
+ Long l = queuetovar.get(queueID);
+ long val = (l != null)?l.longValue():0;
+ val = val | (1 << i);
+ queuetovar.set(queueID, new Long(val));
+ }
+ }
+
+ HashSet generatedqueueentry=new HashSet();
+ for(int i=0; i<invars.size(); i++) {
+ TempDescriptor td=invars.get(i);
+ Set<WaitingElement> weset=seseWaitingQueue.getWaitingElementSet(td);
+
+
+
+ //TODO FIX MEEEEE!!!!
+ //Weset is sometimes null which breaks the following code and
+ //we don't know what weset = null means. For now, we bail when it's null
+ //until we find out what to do....
// if(weset == null) {
// continue;
// }
- //UPDATE: This hack DOES NOT FIX IT!.
-
-
-
- for(Iterator<WaitingElement> wtit=weset.iterator();wtit.hasNext();) {
- WaitingElement waitingElement=wtit.next();
- int queueID=waitingElement.getQueueID();
-
- if(waitingElement.isBogus()){
- continue;
- }
-
- if (generatedqueueentry.contains(queueID))
- continue;
- else
- generatedqueueentry.add(queueID);
-
- assert(waitingElement.getStatus()>=ConflictNode.COARSE);
- long mask=queuetovar.get(queueID);
- output.println(" rentry=mlpCreateREntry(runningSESE->memoryQueueArray["+ waitingElement.getQueueID()+ "]," + waitingElement.getStatus() + ", &(seseToIssue->common), "+mask+"LL);");
- output.println(" rentry->count=2;");
- output.println(" seseToIssue->common.rentryArray[seseToIssue->common.rentryIdx++]=rentry;");
- output.println(" rentry->queue=runningSESE->memoryQueueArray[" + waitingElement.getQueueID()+"];");
-
- output.println(" if(ADDRENTRY(runningSESE->memoryQueueArray["+ waitingElement.getQueueID()+ "],rentry)==READY) {");
- for(int j=0;mask!=0;j++) {
- if ((mask&1)==1)
- output.println(" dispCount"+j+"++;");
- mask=mask>>1;
- }
- output.println(" } else ");
- output.println(" refCount--;");
+ //UPDATE: This hack DOES NOT FIX IT!.
+
+
+
+ for(Iterator<WaitingElement> wtit=weset.iterator(); wtit.hasNext(); ) {
+ WaitingElement waitingElement=wtit.next();
+ int queueID=waitingElement.getQueueID();
+
+ if(waitingElement.isBogus()) {
+ continue;
+ }
+
+ if (generatedqueueentry.contains(queueID))
+ continue;
+ else
+ generatedqueueentry.add(queueID);
+
+ assert(waitingElement.getStatus()>=ConflictNode.COARSE);
+ long mask=queuetovar.get(queueID);
+ output.println(" rentry=mlpCreateREntry(runningSESE->memoryQueueArray["+ waitingElement.getQueueID()+ "]," + waitingElement.getStatus() + ", &(seseToIssue->common), "+mask+"LL);");
+ output.println(" rentry->count=2;");
+ output.println(" seseToIssue->common.rentryArray[seseToIssue->common.rentryIdx++]=rentry;");
+ output.println(" rentry->queue=runningSESE->memoryQueueArray[" + waitingElement.getQueueID()+"];");
+
+ output.println(" if(ADDRENTRY(runningSESE->memoryQueueArray["+ waitingElement.getQueueID()+ "],rentry)==READY) {");
+ for(int j=0; mask!=0; j++) {
+ if ((mask&1)==1)
+ output.println(" dispCount"+j+"++;");
+ mask=mask>>1;
+ }
+ output.println(" } else ");
+ output.println(" refCount--;");
+ }
+
+ if (newChild.getDynamicInVarSet().contains(td)) {
+ // dynamic in-var case
+ //output.println(" pointer=seseToIssue->"+waitingElement.getDynID()+
+ // "_srcSESE+seseToIssue->"+waitingElement.getDynID()+
+ // "_srcOffset;");
+ //output.println(" rentry=mlpCreateFineREntry("+ waitingElement.getStatus()+
+ // ", &(seseToIssue->common), pointer );");
+ }
}
-
- if (newChild.getDynamicInVarSet().contains(td)) {
- // dynamic in-var case
- //output.println(" pointer=seseToIssue->"+waitingElement.getDynID()+
- // "_srcSESE+seseToIssue->"+waitingElement.getDynID()+
- // "_srcOffset;");
- //output.println(" rentry=mlpCreateFineREntry("+ waitingElement.getStatus()+
- // ", &(seseToIssue->common), pointer );");
- }
- }
- for(int i=0;i<invars.size();i++) {
- output.println(" if(!dispCount"+i+" || !atomic_sub_and_test(dispCount"+i+",&(seseToIssue->rcrRecords["+i+"].flag)))");
- output.println(" localCount++;");
- }
- output.println(" } break;");
- }
+ for(int i=0; i<invars.size(); i++) {
+ output.println(" if(!dispCount"+i+" || !atomic_sub_and_test(dispCount"+i+",&(seseToIssue->rcrRecords["+i+"].flag)))");
+ output.println(" localCount++;");
+ }
+ output.println(" } break;");
+ }
}
}
}
- public void generateFlatSESEExitNode( FlatMethod fm,
- FlatSESEExitNode fsexn,
- PrintWriter output ) {
+ public void generateFlatSESEExitNode(FlatMethod fm,
+ FlatSESEExitNode fsexn,
+ PrintWriter output) {
// get the enter node for this exit that has meta data embedded
FlatSESEEnterNode fsen = fsexn.getFlatEnter();
// there may be an SESE in an unreachable method, skip over
- if( !oooa.getAllSESEs().contains( fsen ) ) {
+ if( !oooa.getAllSESEs().contains(fsen) ) {
return;
}
// it should only appear in analysis results
assert !fsen.getIsCallerProxySESE();
-
+
if( state.COREPROF ) {
output.println("#ifdef CP_EVENTID_TASKEXECUTE");
output.println(" CP_LOGEVENT( CP_EVENTID_TASKEXECUTE, CP_EVENTTYPE_END );");
output.println(" CP_LOGEVENT( CP_EVENTID_TASKRETIRE, CP_EVENTTYPE_BEGIN );");
output.println("#endif");
}
-
+
// this SESE cannot be done until all of its children are done
// so grab your own lock with the condition variable for watching
- // that the number of your running children is greater than zero
+ // that the number of your running children is greater than zero
output.println(" atomic_add(childSESE, &runningSESE->numRunningChildren);");
output.println(" pthread_mutex_lock( &(runningSESE->lock) );");
output.println(" if( runningSESE->numRunningChildren > 0 ) {");
continue;
}
- String from = generateTemp( fsen.getfmBogus(), temp );
+ String from = generateTemp(fsen.getfmBogus(), temp);
output.println(" "+paramsprefix+
- "->"+temp.getSafeSymbol()+
- " = "+from+";");
+ "->"+temp.getSafeSymbol()+
+ " = "+from+";");
}
// static vars are from a known SESE
output.println(" // copy out-set from static sources");
tempItr = fsen.getStaticOutVarSet().iterator();
while( tempItr.hasNext() ) {
- TempDescriptor temp = tempItr.next();
- VariableSourceToken vst = fsen.getStaticOutVarSrc( temp );
- SESEandAgePair srcPair = new SESEandAgePair( vst.getSESE(), vst.getAge() );
+ TempDescriptor temp = tempItr.next();
+ VariableSourceToken vst = fsen.getStaticOutVarSrc(temp);
+ SESEandAgePair srcPair = new SESEandAgePair(vst.getSESE(), vst.getAge());
output.println(" "+paramsprefix+
- "->"+temp.getSafeSymbol()+
- " = "+paramsprefix+"->"+srcPair+"->"+vst.getAddrVar()+";");
+ "->"+temp.getSafeSymbol()+
+ " = "+paramsprefix+"->"+srcPair+"->"+vst.getAddrVar()+";");
}
-
+
//output.println(" // decrement references to static sources");
//for( Iterator<SESEandAgePair> pairItr = fsen.getStaticOutVarSrcs().iterator(); pairItr.hasNext(); ) {
// SESEandAgePair srcPair = pairItr.next();
while( tempItr.hasNext() ) {
TempDescriptor temp = tempItr.next();
TypeDescriptor type = temp.getType();
-
+
// go grab it from the SESE source, when the source is NULL it is
// this exiting task, so nothing to do!
output.println(" if( "+temp+"_srcSESE != NULL ) {");
output.println(" "+paramsprefix+
- "->"+temp.getSafeSymbol()+
- " = *(void**)( (void*)"+
- temp+"_srcSESE + "+
- temp+"_srcOffset);");
+ "->"+temp.getSafeSymbol()+
+ " = *(void**)( (void*)"+
+ temp+"_srcSESE + "+
+ temp+"_srcOffset);");
//output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
//output.println(" SESEcommon* src = "+paramsprefix+"->"+temp+"_srcSESE;");
//output.println(" RELEASE_REFERENCE_TO( src );");
//output.println("#endif // OOO_DISABLE_TASKMEMPOOL" );
-
+
output.println(" }");
}
-
-
+
+
// mark yourself done, your task data is now read-only
output.println(" runningSESE->doneExecuting = TRUE;");
// FORWARD TODO
output.println(" while( !isEmpty( &runningSESE->forwardList ) ) {");
output.println(" SESEcommon* consumer = (SESEcommon*) getItem( &runningSESE->forwardList );");
-
-
+
+
if (!state.RCR) {
output.println(" if(consumer->rentryIdx>0){");
output.println(" // resolved null pointer");
output.println(" workScheduleSubmit( (void*)consumer );");
output.println(" }");
output.println(" }");
-
-
+
+
// clean up its lock element from waiting queue, and decrement dependency count for next SESE block
if( !fsen.getIsMainSESE() ) {
output.println();
output.println(" }");
output.println(" }");
}
-
+
Vector<TempDescriptor> inset=fsen.getInVarsForDynamicCoarseConflictResolution();
if (state.RCR && inset.size() > 0) {
/* Make sure the running SESE is finished */
output.println(" struct Hashtable_rcr ** hashstruct=runningSESE->parent->allHashStructures;");
for (int i = 0; i < inset.size(); i++) {
- output.println(" rec=&" + paramsprefix + "->rcrRecords[" + i + "];");
- output.println(" while(rec!=NULL) {");
- output.println(" for(idx2=0;idx2<rec->index;idx2++) {");
+ output.println(" rec=&" + paramsprefix + "->rcrRecords[" + i + "];");
+ output.println(" while(rec!=NULL) {");
+ output.println(" for(idx2=0;idx2<rec->index;idx2++) {");
- int weaklyConnectedComponentIndex = rcr.getWeakID(inset.get(i), fsen);
+ int weaklyConnectedComponentIndex = rcr.getWeakID(inset.get(i), fsen);
- output.println(" rcr_RETIREHASHTABLE(hashstruct[" + weaklyConnectedComponentIndex
- + "],&(___params___->common), rec->array[idx2], (BinItem_rcr *) rec->ptrarray[idx2]);");
+ output.println(" rcr_RETIREHASHTABLE(hashstruct[" + weaklyConnectedComponentIndex
+ + "],&(___params___->common), rec->array[idx2], (BinItem_rcr *) rec->ptrarray[idx2]);");
- output.println(" }");// exit idx2 for loop
- output.println(" rec=rec->next;");
- output.println(" }");// exit rec while loop
+ output.println(" }"); // exit idx2 for loop
+ output.println(" rec=rec->next;");
+ output.println(" }"); // exit rec while loop
}
output.println("}");
}
// that serve as sources, release the parent's ref of each
// non-null var of these types
output.println(" // releasing static SESEs");
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
- ContextTaskNames contextTaskNames = oooa.getContextTaskNames( fsen );
+ ContextTaskNames contextTaskNames = oooa.getContextTaskNames(fsen);
Iterator<SESEandAgePair> pItr = contextTaskNames.getNeededStaticNames().iterator();
while( pItr.hasNext() ) {
output.println(" if( "+dynSrcVar+"_srcSESE != NULL ) {");
output.println(" RELEASE_REFERENCE_TO( "+dynSrcVar+"_srcSESE );");
output.println(" }");
- }
+ }
// destroy this task's mempool if it is not a leaf task
if( !fsen.getIsLeafSESE() ) {
output.println(" pooldestroy( runningSESE->taskRecordMemPool );");
if (state.RCR && fsen.getInVarsForDynamicCoarseConflictResolution().size() > 0 ) {
- output.println(" returnTR();");
+ output.println(" returnTR();");
}
}
- output.println("#endif // OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
output.println("{");
// THIS task's record to the PARENT'S task record pool, and only if
// the reference count is now zero
if( !fsen.getIsMainSESE() ) {
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
output.println(" RELEASE_REFERENCE_TO( runningSESE );");
- output.println("#endif // OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
} else {
// the main task has no parent, just free its record
output.println(" mlpFreeSESErecord( runningSESE );");
}
- // last of all, decrement your parent's number of running children
+ // last of all, decrement your parent's number of running children
output.println(" if( myparent != NULL ) {");
output.println(" if( atomic_sub_and_test( 1, &(myparent->numRunningChildren) ) ) {");
output.println(" pthread_mutex_lock ( &(myparent->lock) );");
output.println(" }");
output.println("}");
-
+
// as this thread is wrapping up the task, make sure the thread-local var
// for the currently running task record references an invalid task
output.println(" runningSESE = (SESEcommon*) 0x1;");
}
}
-
- public void generateFlatWriteDynamicVarNode( FlatMethod fm,
- FlatWriteDynamicVarNode fwdvn,
- PrintWriter output
- ) {
-
+
+ public void generateFlatWriteDynamicVarNode(FlatMethod fm,
+ FlatWriteDynamicVarNode fwdvn,
+ PrintWriter output
+ ) {
+
Hashtable<TempDescriptor, VSTWrapper> writeDynamic = fwdvn.getVar2src();
assert writeDynamic != null;
Iterator wdItr = writeDynamic.entrySet().iterator();
while( wdItr.hasNext() ) {
- Map.Entry me = (Map.Entry) wdItr.next();
- TempDescriptor refVar = (TempDescriptor) me.getKey();
- VSTWrapper vstW = (VSTWrapper) me.getValue();
+ Map.Entry me = (Map.Entry)wdItr.next();
+ TempDescriptor refVar = (TempDescriptor) me.getKey();
+ VSTWrapper vstW = (VSTWrapper) me.getValue();
VariableSourceToken vst = vstW.vst;
output.println(" {");
// mark src pointer NULL to signify that the var is up-to-date
output.println(" "+refVar+"_srcSESE = NULL;");
} else {
- // otherwise we track where it will come from
- SESEandAgePair instance = new SESEandAgePair( vst.getSESE(), vst.getAge() );
- output.println(" "+refVar+"_srcSESE = "+instance+";");
- output.println(" "+refVar+"_srcOffset = (INTPTR) &((("+
- vst.getSESE().getSESErecordName()+"*)0)->"+vst.getAddrVar()+");");
+ // otherwise we track where it will come from
+ SESEandAgePair instance = new SESEandAgePair(vst.getSESE(), vst.getAge() );
+ output.println(" "+refVar+"_srcSESE = "+instance+";");
+ output.println(" "+refVar+"_srcOffset = (INTPTR) &((("+
+ vst.getSESE().getSESErecordName()+"*)0)->"+vst.getAddrVar()+");");
}
// no matter what we did above, track reference count of whatever
// this variable pointed to, do release last in case we're just
// copying the same value in because 1->2->1 is safe but ref count
// 1->0->1 has a window where it looks like it should be free'd
- output.println("#ifndef OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
output.println(" if( "+refVar+"_srcSESE != NULL ) {");
output.println(" ADD_REFERENCE_TO( "+refVar+"_srcSESE );");
output.println(" }");
output.println(" if( oldSrc != NULL ) {");
output.println(" RELEASE_REFERENCE_TO( oldSrc );");
output.println(" }");
- output.println("#endif // OOO_DISABLE_TASKMEMPOOL" );
+ output.println("#endif // OOO_DISABLE_TASKMEMPOOL");
output.println(" }");
- }
+ }
}
- protected void generateFlatNew( FlatMethod fm,
- FlatNew fn,
- PrintWriter output ) {
+ protected void generateFlatNew(FlatMethod fm,
+ FlatNew fn,
+ PrintWriter output) {
if( fn.getType().isArray() ) {
- int arrayid = state.getArrayNumber( fn.getType() )+state.numClasses();
+ int arrayid = state.getArrayNumber(fn.getType() )+state.numClasses();
if( GENERATEPRECISEGC ) {
- output.println(generateTemp( fm, fn.getDst())+
- "=allocate_newarray_mlp("+localsprefixaddr+
- ", "+arrayid+", "+generateTemp( fm, fn.getSize())+
- ", oid, "+
- oooa.getDisjointAnalysis().getAllocationSiteFromFlatNew( fn ).getUniqueAllocSiteID()+
- ");");
- output.println(" oid += oidIncrement;");
+ output.println(generateTemp(fm, fn.getDst())+
+ "=allocate_newarray_mlp("+localsprefixaddr+
+ ", "+arrayid+", "+generateTemp(fm, fn.getSize())+
+ ", oid, "+
+ oooa.getDisjointAnalysis().getAllocationSiteFromFlatNew(fn).getUniqueAllocSiteID()+
+ ");");
+ output.println(" oid += oidIncrement;");
} else {
- output.println(generateTemp( fm, fn.getDst())+
- "=allocate_newarray("+arrayid+
- ", "+generateTemp( fm, fn.getSize())+
- ");");
+ output.println(generateTemp(fm, fn.getDst())+
+ "=allocate_newarray("+arrayid+
+ ", "+generateTemp(fm, fn.getSize())+
+ ");");
}
} else {
// not an array
if( GENERATEPRECISEGC ) {
- output.println( generateTemp( fm, fn.getDst())+
- "=allocate_new_mlp("+localsprefixaddr+
- ", "+fn.getType().getClassDesc().getId()+
- ", oid, "+
- oooa.getDisjointAnalysis().getAllocationSiteFromFlatNew( fn ).getUniqueAllocSiteID()+
- ");");
- output.println(" oid += oidIncrement;");
+ output.println(generateTemp(fm, fn.getDst())+
+ "=allocate_new_mlp("+localsprefixaddr+
+ ", "+fn.getType().getClassDesc().getId()+
+ ", oid, "+
+ oooa.getDisjointAnalysis().getAllocationSiteFromFlatNew(fn).getUniqueAllocSiteID()+
+ ");");
+ output.println(" oid += oidIncrement;");
} else {
- output.println( generateTemp( fm, fn.getDst())+
- "=allocate_new("+fn.getType().getClassDesc().getId()+
- ");");
+ output.println(generateTemp(fm, fn.getDst())+
+ "=allocate_new("+fn.getType().getClassDesc().getId()+
+ ");");
}
}
}
- private int calculateSizeOfSESEParamList(FlatSESEEnterNode fsen){
-
+ private int calculateSizeOfSESEParamList(FlatSESEEnterNode fsen) {
+
Set<TempDescriptor> tdSet=new HashSet<TempDescriptor>();
-
- for (Iterator iterator = fsen.getInVarSet().iterator(); iterator.hasNext();) {
+
+ for (Iterator iterator = fsen.getInVarSet().iterator(); iterator.hasNext(); ) {
TempDescriptor tempDescriptor = (TempDescriptor) iterator.next();
- if(!tempDescriptor.getType().isPrimitive() || tempDescriptor.getType().isArray()){
- tdSet.add(tempDescriptor);
- }
+ if(!tempDescriptor.getType().isPrimitive() || tempDescriptor.getType().isArray()) {
+ tdSet.add(tempDescriptor);
+ }
}
-
- for (Iterator iterator = fsen.getOutVarSet().iterator(); iterator.hasNext();) {
+
+ for (Iterator iterator = fsen.getOutVarSet().iterator(); iterator.hasNext(); ) {
TempDescriptor tempDescriptor = (TempDescriptor) iterator.next();
- if(!tempDescriptor.getType().isPrimitive() || tempDescriptor.getType().isArray()){
- tdSet.add(tempDescriptor);
- }
- }
-
+ if(!tempDescriptor.getType().isPrimitive() || tempDescriptor.getType().isArray()) {
+ tdSet.add(tempDescriptor);
+ }
+ }
+
return tdSet.size();
}
-
- private String calculateSizeOfSESEParamSize(FlatSESEEnterNode fsen){
+
+ private String calculateSizeOfSESEParamSize(FlatSESEEnterNode fsen) {
HashMap <String,Integer> map=new HashMap();
HashSet <TempDescriptor> processed=new HashSet<TempDescriptor>();
String rtr="";
-
+
// space for all in and out set primitives
Set<TempDescriptor> inSetAndOutSet = new HashSet<TempDescriptor>();
- inSetAndOutSet.addAll( fsen.getInVarSet() );
- inSetAndOutSet.addAll( fsen.getOutVarSet() );
-
+ inSetAndOutSet.addAll(fsen.getInVarSet() );
+ inSetAndOutSet.addAll(fsen.getOutVarSet() );
+
Set<TempDescriptor> inSetAndOutSetPrims = new HashSet<TempDescriptor>();
Iterator<TempDescriptor> itr = inSetAndOutSet.iterator();
TempDescriptor temp = itr.next();
TypeDescriptor type = temp.getType();
if( !type.isPtr() ) {
- inSetAndOutSetPrims.add( temp );
+ inSetAndOutSetPrims.add(temp);
}
}
-
+
Iterator<TempDescriptor> itrPrims = inSetAndOutSetPrims.iterator();
while( itrPrims.hasNext() ) {
TempDescriptor temp = itrPrims.next();
TypeDescriptor type = temp.getType();
- if(type.isPrimitive()){
- Integer count=map.get(type.getSymbol());
- if(count==null){
- count=new Integer(1);
- map.put(type.getSymbol(), count);
- }else{
- map.put(type.getSymbol(), new Integer(count.intValue()+1));
- }
- }
+ if(type.isPrimitive()) {
+ Integer count=map.get(type.getSymbol());
+ if(count==null) {
+ count=new Integer(1);
+ map.put(type.getSymbol(), count);
+ } else {
+ map.put(type.getSymbol(), new Integer(count.intValue()+1));
+ }
+ }
}
-
+
Set<String> keySet=map.keySet();
- for (Iterator iterator = keySet.iterator(); iterator.hasNext();) {
+ for (Iterator iterator = keySet.iterator(); iterator.hasNext(); ) {
String key = (String) iterator.next();
rtr+="+sizeof("+key+")*"+map.get(key);
}
- return rtr;
+ return rtr;
}
}
handleClass(cd, state, namemap);
}
}
-
+
private static void handleClass(ClassDescriptor cd, State state, HashMap<ClassDescriptor, HashMap<String, Integer>> namemap) {
if (cd.getSuperDesc()!=null&&!namemap.containsKey(cd.getSuperDesc()))
handleClass(cd.getSuperDesc(), state, namemap);
-
+
Iterator it_sifs = cd.getSuperInterfaces();
while(it_sifs.hasNext()) {
ClassDescriptor sif = (ClassDescriptor)it_sifs.next();
if (!namemap.containsKey(sif))
- handleClass(sif, state, namemap);
+ handleClass(sif, state, namemap);
}
-
+
HashMap<String, Integer> supermap=cd.getSuperDesc()!=null?namemap.get(cd.getSuperDesc()):new HashMap<String, Integer>();
-
+
Vector<HashMap<String, Integer>> superifmaps = new Vector<HashMap<String, Integer>>();
it_sifs = cd.getSuperInterfaces();
while(it_sifs.hasNext()) {
ClassDescriptor sif = (ClassDescriptor)it_sifs.next();
superifmaps.addElement(namemap.get(sif));
}
-
+
HashMap<String, Integer> fieldmap=new HashMap<String, Integer>();
namemap.put(cd, fieldmap);
-
- for(Iterator fieldit=cd.getFields();fieldit.hasNext();) {
+
+ for(Iterator fieldit=cd.getFields(); fieldit.hasNext(); ) {
FieldDescriptor fd=(FieldDescriptor)fieldit.next();
if (supermap.containsKey(fd.getSymbol())) {
Integer oldint=supermap.get(fd.getSymbol());
fieldmap.put(fd.getSymbol(), new Integer(newint));
fd.changeSafeSymbol(newint);
} else {
- // the fields in interfaces are defaultely static & final, so do not need to
- // check them, they will always have the interface name as prefix
+ // the fields in interfaces are static & final by default, so we do not need to
+ // check them; they will always have the interface name as prefix
fieldmap.put(fd.getSymbol(), new Integer(0));
}
}
return "backedge";
}
public FlatNode clone(TempMap t) {
- return new FlatBackEdge();
+ return new FlatBackEdge();
}
public int kind() {
this.isSuper=isSuper;
}
public void rewriteUse(TempMap t) {
- for(int i=0;i<args.length;i++)
+ for(int i=0; i<args.length; i++)
args[i]=t.tempMap(args[i]);
this_temp=t.tempMap(this_temp);
}
TempDescriptor ndst=t.tempMap(dst);
TempDescriptor nthis=t.tempMap(this_temp);
TempDescriptor[] nargs=new TempDescriptor[args.length];
- for(int i=0;i<nargs.length;i++)
+ for(int i=0; i<nargs.length; i++)
nargs[i]=t.tempMap(args[i]);
-
+
return new FlatCall(method, ndst, nthis, nargs);
}
public boolean getSuper() {
if( i == 0 ) {
return this_temp;
}
-
+
return args[i-1];
}
// return the temp for the argument in caller that
// becomes the given parameter
- public TempDescriptor getArgMatchingParam(FlatMethod fm,
+ public TempDescriptor getArgMatchingParam(FlatMethod fm,
TempDescriptor tdParam) {
// in non-static methods the "this" pointer
// affects the matching index
}
for( int i = 0; i < fm.numParameters(); ++i ) {
- TempDescriptor tdParamI = fm.getParameter( i );
-
- if( tdParamI.equals( tdParam ) ) {
-
- if( method.isStatic() ) {
- return args[i];
- }
-
- if( i == 0 ) {
- return this_temp;
- }
-
- return args[i-1];
+ TempDescriptor tdParamI = fm.getParameter(i);
+
+ if( tdParamI.equals(tdParam) ) {
+
+ if( method.isStatic() ) {
+ return args[i];
+ }
+
+ if( i == 0 ) {
+ return this_temp;
+ }
+
+ return args[i-1];
}
}
protected FlatNode tail;
protected FlatNode head;
- public FlatEdge( FlatNode t, FlatNode h ) {
+ public FlatEdge(FlatNode t, FlatNode h) {
assert t != null;
assert h != null;
tail = t;
head = h;
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
-
+
if( !(o instanceof FlatEdge) ) {
return false;
}
FlatEdge fe = (FlatEdge) o;
- return tail.equals( fe.tail ) && head.equals( fe.head );
+ return tail.equals(fe.tail) && head.equals(fe.head);
}
public int hashCode() {
}
public int getTaskExitIndex() {
- return m_taskexitindex;
+ return m_taskexitindex;
}
public void setTaskExitIndex(int taskexitindex) {
- this.m_taskexitindex = taskexitindex;
+ this.m_taskexitindex = taskexitindex;
}
public int getTaskType() {
return new TempDescriptor [0];
else {
HashSet temps=new HashSet();
- for(Iterator it=tempflagpairs.keySet().iterator(); it.hasNext();) {
+ for(Iterator it=tempflagpairs.keySet().iterator(); it.hasNext(); ) {
TempFlagPair tfp=(TempFlagPair)it.next();
temps.add(tfp.getTemp());
}
- for(Iterator it=temptagpairs.keySet().iterator(); it.hasNext();) {
+ for(Iterator it=temptagpairs.keySet().iterator(); it.hasNext(); ) {
TempTagPair ttp=(TempTagPair)it.next();
temps.add(ttp.getTemp());
temps.add(ttp.getTagTemp());
public String toString() {
String st="FlatFlagActionNode_";
- for(Iterator it=tempflagpairs.keySet().iterator(); it.hasNext();) {
+ for(Iterator it=tempflagpairs.keySet().iterator(); it.hasNext(); ) {
TempFlagPair tfp=(TempFlagPair)it.next();
- st+=getFlagChange(tfp) ? "" : "!";
+ st+=getFlagChange(tfp)?"":"!";
st+=tfp.getTemp()+" "+tfp.getFlag()+",";
}
- for(Iterator it=temptagpairs.keySet().iterator(); it.hasNext();) {
+ for(Iterator it=temptagpairs.keySet().iterator(); it.hasNext(); ) {
TempTagPair ttp=(TempTagPair)it.next();
- st+=getTagChange(ttp) ? "" : "!";
+ st+=getTagChange(ttp)?"":"!";
st+=ttp.getTemp()+" "+ttp.getTag()+"("+ttp.getTagTemp()+"),";
}
public class FlatGenReachNode extends FlatNode {
String graphName;
- public FlatGenReachNode( String graphName ) {
+ public FlatGenReachNode(String graphName) {
this.graphName = graphName;
}
return graphName;
}
- public FlatNode clone(TempMap t){ return new FlatGenReachNode( graphName ); }
- public void rewriteUse(TempMap t){}
- public void rewriteDst(TempMap t) {}
+ public FlatNode clone(TempMap t) {
+ return new FlatGenReachNode(graphName);
+ }
+ public void rewriteUse(TempMap t) {
+ }
+ public void rewriteDst(TempMap t) {
+ }
public String toString() {
}
ret+="(";
boolean first=true;
- for(int i=0;i<numParameters();i++) {
+ for(int i=0; i<numParameters(); i++) {
if (first) {
first=false;
} else
public void check() {
Set<FlatNode> set=getNodeSet();
- for(Iterator<FlatNode> setit=set.iterator();setit.hasNext();) {
+ for(Iterator<FlatNode> setit=set.iterator(); setit.hasNext(); ) {
FlatNode fn=setit.next();
- for(int i=0;i<fn.numPrev();i++) {
+ for(int i=0; i<fn.numPrev(); i++) {
FlatNode fnprev=fn.getPrev(i);
if (!set.contains(fnprev)) {
System.out.println(fn+" has unreachable parent:"+i+" "+fnprev);
visited.add(current_node);
if (nodetolabel.containsKey(current_node)) {
st+="L"+nodetolabel.get(current_node)+":\n";
- for(int i=0;i<current_node.numPrev();i++) {
+ for(int i=0; i<current_node.numPrev(); i++) {
st+="i="+i+" "+current_node.getPrev(i);
}
st+="\n";
/** This function modifies the graph */
public void setNewNext(int i, FlatNode n) {
if (next.size()<=i)
- next.setSize(i+1);
+ next.setSize(i+1);
next.set(i, n);
n.addPrev(this);
}
public void replace(FlatNode fnnew) {
fnnew.prev.setSize(prev.size());
fnnew.next.setSize(next.size());
- for(int i=0;i<prev.size();i++) {
+ for(int i=0; i<prev.size(); i++) {
FlatNode nprev=(FlatNode)prev.get(i);
fnnew.prev.set(i,nprev);
- for(int j=0;j<nprev.numNext();j++) {
+ for(int j=0; j<nprev.numNext(); j++) {
FlatNode n=nprev.getNext(j);
if (n==this)
nprev.next.set(j, fnnew);
}
}
- for(int i=0;i<next.size();i++) {
+ for(int i=0; i<next.size(); i++) {
FlatNode nnext=(FlatNode)next.get(i);
fnnew.next.set(i,nnext);
- for(int j=0;j<nnext.numPrev();j++) {
+ for(int j=0; j<nnext.numPrev(); j++) {
FlatNode n=nnext.getPrev(j);
if (n==this)
nnext.prev.set(j, fnnew);
next=null;
prev=null;
}
-
- public void setNumLine(int lineNum){
+
+ public void setNumLine(int lineNum) {
this.numLine=lineNum;
}
-
- public int getNumLine(){
+
+ public int getNumLine() {
return this.numLine;
}
}
public String toString() {
String st="prefetch(";
boolean first=true;
- for(Iterator<PrefetchPair> it=hspp.iterator(); it.hasNext();) {
+ for(Iterator<PrefetchPair> it=hspp.iterator(); it.hasNext(); ) {
PrefetchPair pp=it.next();
if (!first)
st+=", ";
import IR.Tree.SESENode;
public class FlatSESEEnterNode extends FlatNode {
-
+
// SESE class identifiers should be numbered
// sequentially from 0 to 1-(total # SESE's)
private static int identifier=0;
- private int id;
- protected FlatSESEExitNode exit;
- protected SESENode treeNode;
+ private int id;
+ protected FlatSESEExitNode exit;
+ protected SESENode treeNode;
// a leaf tasks simply has no children, ever
protected static final int ISLEAF_UNINIT = 1;
// all children tasks, INCLUDING those that are reachable
// by calling methods
protected Set<FlatSESEEnterNode> children;
-
+
// all possible parents
protected Set<FlatSESEEnterNode> parents;
// parent or children of an SESE for various analysis,
// and by local it is one SESE nested within another
// in a single method context
- protected Set<FlatSESEEnterNode> localChildren;
+ protected Set<FlatSESEEnterNode> localChildren;
protected FlatSESEEnterNode localParent;
// code gen for issuing this task
protected Set<TempDescriptor> readyInVars;
protected Set<TempDescriptor> staticInVars;
- protected Set<TempDescriptor> dynamicInVars;
+ protected Set<TempDescriptor> dynamicInVars;
protected Set<SESEandAgePair> staticInVarSrcs;
protected Hashtable<TempDescriptor, VariableSourceToken> staticInVar2src;
// to know how to acquire those values before it can truly exit
protected Set<TempDescriptor> readyOutVars;
protected Set<TempDescriptor> staticOutVars;
- protected Set<TempDescriptor> dynamicOutVars;
+ protected Set<TempDescriptor> dynamicOutVars;
protected Set<SESEandAgePair> staticOutVarSrcs;
protected Hashtable<TempDescriptor, VariableSourceToken> staticOutVar2src;
// get the oldest age of this task that other contexts
// have a static name for when tracking variables
protected Integer oldestAgeToTrack;
-
+
// a subset of the in-set variables that shouuld be traversed during
// the dynamic coarse grained conflict strategy, remember them here so
// scope info for this SESE
- protected FlatMethod fmEnclosing;
+ protected FlatMethod fmEnclosing;
protected MethodDescriptor mdEnclosing;
- protected ClassDescriptor cdEnclosing;
+ protected ClassDescriptor cdEnclosing;
// structures that allow SESE to appear as
// a normal method to code generation
- protected FlatMethod fmBogus;
+ protected FlatMethod fmBogus;
protected MethodDescriptor mdBogus;
// used during code generation to calculate an offset
// first field in a sequence of pointers to other SESE
// records which is relevant to garbage collection
protected String firstDepRecField;
- protected int numDepRecs;
-
+ protected int numDepRecs;
+
- public FlatSESEEnterNode( SESENode sn ) {
+ public FlatSESEEnterNode(SESENode sn) {
this.id = identifier++;
treeNode = sn;
children = new HashSet<FlatSESEEnterNode>();
staticOutVars = new HashSet<TempDescriptor>();
dynamicOutVars = new HashSet<TempDescriptor>();
staticOutVarSrcs = new HashSet<SESEandAgePair>();
- oldestAgeToTrack = new Integer( 0 );
+ oldestAgeToTrack = new Integer(0);
staticInVar2src = new Hashtable<TempDescriptor, VariableSourceToken>();
staticOutVar2src = new Hashtable<TempDescriptor, VariableSourceToken>();
inVarsForDynamicCoarseConflictResolution = new Vector<TempDescriptor>();
-
-
+
+
fmEnclosing = null;
mdEnclosing = null;
cdEnclosing = null;
public void rewriteDef() {
}
- public void setFlatExit( FlatSESEExitNode fsexn ) {
+ public void setFlatExit(FlatSESEExitNode fsexn) {
exit = fsexn;
}
return id;
}
- public String getPrettyIdentifier() {
- if(isCallerProxySESE){
+ public String getPrettyIdentifier() {
+ if(isCallerProxySESE) {
return "proxy";
- }
+ }
if( treeNode != null && treeNode.getID() != null ) {
return treeNode.getID();
- }
+ }
return ""+id;
}
public String toString() {
return "sese "+getPrettyIdentifier()+" enter";
}
-
+
public String toPrettyString() {
return "sese "+getPrettyIdentifier()+getIdentifier();
}
- public void mustTrackAtLeastAge( Integer age ) {
+ public void mustTrackAtLeastAge(Integer age) {
if( age > oldestAgeToTrack ) {
- oldestAgeToTrack = new Integer( age );
- }
+ oldestAgeToTrack = new Integer(age);
+ }
}
public Integer getOldestAgeToTrack() {
}
- public void addParent( FlatSESEEnterNode parent ) {
- parents.add( parent );
+ public void addParent(FlatSESEEnterNode parent) {
+ parents.add(parent);
}
public Set<FlatSESEEnterNode> getParents() {
return parents;
}
- public void setLocalParent( FlatSESEEnterNode parent ) {
+ public void setLocalParent(FlatSESEEnterNode parent) {
localParent = parent;
}
return localParent;
}
- public void addChild( FlatSESEEnterNode child ) {
- children.add( child );
+ public void addChild(FlatSESEEnterNode child) {
+ children.add(child);
}
- public void addChildren( Set<FlatSESEEnterNode> batch ) {
- children.addAll( batch );
+ public void addChildren(Set<FlatSESEEnterNode> batch) {
+ children.addAll(batch);
}
public Set<FlatSESEEnterNode> getChildren() {
return children;
}
- public void addLocalChild( FlatSESEEnterNode child ) {
- localChildren.add( child );
+ public void addLocalChild(FlatSESEEnterNode child) {
+ localChildren.add(child);
}
public Set<FlatSESEEnterNode> getLocalChildren() {
- public void addInVar( TempDescriptor td ) {
+ public void addInVar(TempDescriptor td) {
if (!inVars.contains(td))
- inVars.add( td );
+ inVars.add(td);
}
- public void addOutVar( TempDescriptor td ) {
- outVars.add( td );
+ public void addOutVar(TempDescriptor td) {
+ outVars.add(td);
}
- public void addInVarSet( Set<TempDescriptor> s ) {
+ public void addInVarSet(Set<TempDescriptor> s) {
inVars.addAll(s);
}
- public void addOutVarSet( Set<TempDescriptor> s ) {
- outVars.addAll( s );
+ public void addOutVarSet(Set<TempDescriptor> s) {
+ outVars.addAll(s);
}
public Set<TempDescriptor> getInVarSet() {
FlatNode fn=tovisit.iterator().next();
tovisit.remove(fn);
visited.add(fn);
-
+
if (fn!=exit) {
for(int i=0; i<fn.numNext(); i++) {
FlatNode nn=fn.getNext(i);
return outVars;
}
- public void addStaticInVarSrc( SESEandAgePair p ) {
- staticInVarSrcs.add( p );
+ public void addStaticInVarSrc(SESEandAgePair p) {
+ staticInVarSrcs.add(p);
}
public Set<SESEandAgePair> getStaticInVarSrcs() {
return staticInVarSrcs;
}
- public void addReadyInVar( TempDescriptor td ) {
- readyInVars.add( td );
+ public void addReadyInVar(TempDescriptor td) {
+ readyInVars.add(td);
}
public Set<TempDescriptor> getReadyInVarSet() {
return readyInVars;
}
- public void addStaticInVar( TempDescriptor td ) {
- staticInVars.add( td );
+ public void addStaticInVar(TempDescriptor td) {
+ staticInVars.add(td);
}
public Set<TempDescriptor> getStaticInVarSet() {
return staticInVars;
}
- public void putStaticInVar2src( TempDescriptor staticInVar,
- VariableSourceToken vst ) {
- staticInVar2src.put( staticInVar, vst );
+ public void putStaticInVar2src(TempDescriptor staticInVar,
+ VariableSourceToken vst) {
+ staticInVar2src.put(staticInVar, vst);
}
- public VariableSourceToken getStaticInVarSrc( TempDescriptor staticInVar ) {
- return staticInVar2src.get( staticInVar );
+ public VariableSourceToken getStaticInVarSrc(TempDescriptor staticInVar) {
+ return staticInVar2src.get(staticInVar);
}
- public void addDynamicInVar( TempDescriptor td ) {
- dynamicInVars.add( td );
+ public void addDynamicInVar(TempDescriptor td) {
+ dynamicInVars.add(td);
}
public Set<TempDescriptor> getDynamicInVarSet() {
- public void addReadyOutVar( TempDescriptor td ) {
- readyOutVars.add( td );
+ public void addReadyOutVar(TempDescriptor td) {
+ readyOutVars.add(td);
}
public Set<TempDescriptor> getReadyOutVarSet() {
return readyOutVars;
}
- public void addStaticOutVarSrc( SESEandAgePair p ) {
- staticOutVarSrcs.add( p );
+ public void addStaticOutVarSrc(SESEandAgePair p) {
+ staticOutVarSrcs.add(p);
}
public Set<SESEandAgePair> getStaticOutVarSrcs() {
return staticOutVarSrcs;
}
- public void addStaticOutVar( TempDescriptor td ) {
- staticOutVars.add( td );
+ public void addStaticOutVar(TempDescriptor td) {
+ staticOutVars.add(td);
}
public Set<TempDescriptor> getStaticOutVarSet() {
return staticOutVars;
}
- public void putStaticOutVar2src( TempDescriptor staticOutVar,
- VariableSourceToken vst ) {
- staticOutVar2src.put( staticOutVar, vst );
+ public void putStaticOutVar2src(TempDescriptor staticOutVar,
+ VariableSourceToken vst) {
+ staticOutVar2src.put(staticOutVar, vst);
}
- public VariableSourceToken getStaticOutVarSrc( TempDescriptor staticOutVar ) {
- return staticOutVar2src.get( staticOutVar );
+ public VariableSourceToken getStaticOutVarSrc(TempDescriptor staticOutVar) {
+ return staticOutVar2src.get(staticOutVar);
}
- public void addDynamicOutVar( TempDescriptor td ) {
- dynamicOutVars.add( td );
+ public void addDynamicOutVar(TempDescriptor td) {
+ dynamicOutVars.add(td);
}
public Set<TempDescriptor> getDynamicOutVarSet() {
- public void setfmEnclosing( FlatMethod fm ) { fmEnclosing = fm; }
- public FlatMethod getfmEnclosing() { return fmEnclosing; }
+ public void setfmEnclosing(FlatMethod fm) {
+ fmEnclosing = fm;
+ }
+ public FlatMethod getfmEnclosing() {
+ return fmEnclosing;
+ }
- public void setmdEnclosing( MethodDescriptor md ) { mdEnclosing = md; }
- public MethodDescriptor getmdEnclosing() { return mdEnclosing; }
+ public void setmdEnclosing(MethodDescriptor md) {
+ mdEnclosing = md;
+ }
+ public MethodDescriptor getmdEnclosing() {
+ return mdEnclosing;
+ }
- public void setcdEnclosing( ClassDescriptor cd ) { cdEnclosing = cd; }
- public ClassDescriptor getcdEnclosing() { return cdEnclosing; }
+ public void setcdEnclosing(ClassDescriptor cd) {
+ cdEnclosing = cd;
+ }
+ public ClassDescriptor getcdEnclosing() {
+ return cdEnclosing;
+ }
- public void setfmBogus( FlatMethod fm ) { fmBogus = fm; }
- public FlatMethod getfmBogus() { return fmBogus; }
+ public void setfmBogus(FlatMethod fm) {
+ fmBogus = fm;
+ }
+ public FlatMethod getfmBogus() {
+ return fmBogus;
+ }
- public void setmdBogus( MethodDescriptor md ) { mdBogus = md; }
- public MethodDescriptor getmdBogus() { return mdBogus; }
+ public void setmdBogus(MethodDescriptor md) {
+ mdBogus = md;
+ }
+ public MethodDescriptor getmdBogus() {
+ return mdBogus;
+ }
public String getSESEmethodName() {
assert cdEnclosing != null;
assert mdBogus != null;
- return
+ return
cdEnclosing.getSafeSymbol()+
mdBogus.getSafeSymbol()+
"_"+
"_SESErec";
}
- public boolean equals( Object o ) {
+ public boolean equals(Object o) {
if( o == null ) {
return false;
}
public int hashCode() {
return 31*id;
}
-
- public void setFirstDepRecField( String field ) {
+
+ public void setFirstDepRecField(String field) {
firstDepRecField = field;
}
public int getNumDepRecs() {
return numDepRecs;
}
-
+
public Vector<TempDescriptor> getInVarsForDynamicCoarseConflictResolution() {
return inVarsForDynamicCoarseConflictResolution;
}
-
+
public void addInVarForDynamicCoarseConflictResolution(TempDescriptor inVar) {
if (!inVarsForDynamicCoarseConflictResolution.contains(inVar))
inVarsForDynamicCoarseConflictResolution.add(inVar);
}
-
- public void setIsLeafSESE( boolean isLeaf ) {
+
+ public void setIsLeafSESE(boolean isLeaf) {
if( isLeaf ) {
isLeafSESE = ISLEAF_TRUE;
} else {
public boolean getIsLeafSESE() {
if( isLeafSESE == ISLEAF_UNINIT ) {
- throw new Error( "isLeafSESE uninitialized" );
+ throw new Error("isLeafSESE uninitialized");
}
return isLeafSESE == ISLEAF_TRUE;
protected SESENode treeNode;
FlatSESEEnterNode enter;
- public FlatSESEExitNode( SESENode sn ) {
+ public FlatSESEExitNode(SESENode sn) {
treeNode = sn;
}
public void rewriteUse() {
return treeNode;
}
- public void setFlatEnter( FlatSESEEnterNode fsen ) {
+ public void setFlatEnter(FlatSESEEnterNode fsen) {
enter = fsen;
}
protected FlatNode tailNode;
protected FlatNode headNode;
-
+
protected Hashtable<TempDescriptor, VSTWrapper> var2src;
protected FlatSESEEnterNode enclosingSESE;
- public FlatWriteDynamicVarNode( FlatNode t,
- FlatNode h,
- Hashtable<TempDescriptor, VSTWrapper> v2s,
- FlatSESEEnterNode c
- ) {
+ public FlatWriteDynamicVarNode(FlatNode t,
+ FlatNode h,
+ Hashtable<TempDescriptor, VSTWrapper> v2s,
+ FlatSESEEnterNode c
+ ) {
tailNode = t;
headNode = h;
var2src = v2s;
}
public void spliceIntoIR() {
-
- if(tailNode instanceof FlatCondBranch){
-
- headNode.removePrev( tailNode );
-
- if(tailNode.next.elementAt(0).equals(headNode)){
- tailNode.removeNext( headNode );
- ((FlatCondBranch)tailNode).addTrueNext(this);
- }else{
- tailNode.removeNext( headNode );
- ((FlatCondBranch)tailNode).addFalseNext(this);
+
+ if(tailNode instanceof FlatCondBranch) {
+
+ headNode.removePrev(tailNode);
+
+ if(tailNode.next.elementAt(0).equals(headNode)) {
+ tailNode.removeNext(headNode);
+ ((FlatCondBranch)tailNode).addTrueNext(this);
+ } else {
+ tailNode.removeNext(headNode);
+ ((FlatCondBranch)tailNode).addFalseNext(this);
}
-
- this.addNext( headNode );
- }else{
- tailNode.removeNext( headNode );
- headNode.removePrev( tailNode );
-
- tailNode.addNext( this );
- this.addNext( headNode );
+
+ this.addNext(headNode);
+ } else {
+ tailNode.removeNext(headNode);
+ headNode.removePrev(tailNode);
+
+ tailNode.addNext(this);
+ this.addNext(headNode);
}
}
- public void addMoreVar2Src( Hashtable<TempDescriptor, VSTWrapper> more ) {
- var2src.putAll( more );
+ public void addMoreVar2Src(Hashtable<TempDescriptor, VSTWrapper> more) {
+ var2src.putAll(more);
}
public Hashtable<TempDescriptor, VSTWrapper> getVar2src() {
}
public FlatNode clone(TempMap t) {
- return new FlatWriteDynamicVarNode( tailNode,
- headNode,
- var2src,
- enclosingSESE
- );
+ return new FlatWriteDynamicVarNode(tailNode,
+ headNode,
+ var2src,
+ enclosingSESE
+ );
}
public void rewriteUse(TempMap t) {
}
atomicval++;
else if(fn.kind()==FKind.FlatAtomicExitNode)
atomicval--;
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode fnext=fn.getNext(i);
if (!visited.contains(fnext)) {
atomictable.put(fnext, new Integer(atomicval));
System.out.println("Inlining methods into "+fm.getMethod());
recursive(state, typeutil, atomicset, depth, new Stack<MethodDescriptor>());
}
-
+
public static void recursive(State state, TypeUtil typeutil, Set<FlatNode> fnset, int depth, Stack<MethodDescriptor> toexclude) {
- for(Iterator<FlatNode> fnit=fnset.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=fnset.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
if (fn.kind()==FKind.FlatCall) {
FlatCall fc=(FlatCall)fn;
Set<FlatNode> inlinefnset=inline(fc, typeutil, state);
if (inlinefnset==null)
continue;
-
+
toexclude.push(md);
if (depth>1)
recursive(state, typeutil, inlinefnset, depth-1, toexclude);
HashSet<FlatNode> newnodes=new HashSet<FlatNode>();
//Build the clones
- for(Iterator<FlatNode> fnit=nodeset.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=nodeset.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
if (fn.kind()==FKind.FlatReturnNode) {
//Convert FlatReturn node into move
last.addNext(fon);
last=fon;
}
- for(int j=0;j<fc.numArgs();i++,j++) {
+ for(int j=0; j<fc.numArgs(); i++,j++) {
FlatOpNode fon=new FlatOpNode(fm.getParameter(i), fc.getArg(j), null, new Operation(Operation.ASSIGN));
newnodes.add(fon);
last.addNext(fon);
}
//Add the edges
- for(Iterator<FlatNode> fnit=nodeset.iterator();fnit.hasNext();) {
+ for(Iterator<FlatNode> fnit=nodeset.iterator(); fnit.hasNext(); ) {
FlatNode fn=fnit.next();
FlatNode fnclone=flatmap.get(fn);
if (fn.kind()!=FKind.FlatReturnNode) {
//don't build old edges out of a flat return node
- for(int i=0;i<fn.numNext();i++) {
+ for(int i=0; i<fn.numNext(); i++) {
FlatNode fnnext=fn.getNext(i);
FlatNode fnnextclone=flatmap.get(fnnext);
fnclone.setNewNext(i, fnnextclone);
}
//Add edges to beginning of move chain
- for(int i=0;i<fc.numPrev();i++) {
+ for(int i=0; i<fc.numPrev(); i++) {
FlatNode fnprev=fc.getPrev(i);
- for(int j=0;j<fnprev.numNext();j++) {
+ for(int j=0; j<fnprev.numNext(); j++) {
if (fnprev.getNext(j)==fc) {
//doing setnewnext to avoid changing the node we are
//iterating over
Set subclasses=typeutil.getSubClasses(thiscd);
if (subclasses==null)
return true;
- for(Iterator classit=subclasses.iterator(); classit.hasNext();) {
+ for(Iterator classit=subclasses.iterator(); classit.hasNext(); ) {
ClassDescriptor cd=(ClassDescriptor)classit.next();
Set possiblematches=cd.getMethodTable().getSet(md.getSymbol());
- for(Iterator matchit=possiblematches.iterator(); matchit.hasNext();) {
- MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
- if (md.matches(matchmd))
- return false;
+ for(Iterator matchit=possiblematches.iterator(); matchit.hasNext(); ) {
+ MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
+ if (md.matches(matchmd))
+ return false;
}
}
return true;
/* An instance of this class manages all OoOJava coarse-grained runtime conflicts
* by generating C-code to either rule out the conflict at runtime or resolve one.
- *
+ *
* How to Use:
* 1) Instantiate singleton object (String input is to specify output dir)
- * 2) Call void close()
+ * 2) Call void close()
*/
public class RuntimeConflictResolver {
private CodePrinter headerFile, cFile;
private static final String hashAndQueueCFileDir = "oooJava/";
-
+
//This keeps track of taints we've traversed to prevent printing duplicate traverse functions
//The Integer keeps track of the weakly connected group it's in (used in enumerateHeapRoots)
//private Hashtable<Taint, Integer> doneTaints;
private Hashtable<Pair, Integer> idMap=new Hashtable<Pair,Integer>();
-
- //Keeps track of stallsites that we've generated code for.
+
+ //Keeps track of stallsites that we've generated code for.
protected Hashtable <FlatNode, TempDescriptor> processedStallSites = new Hashtable <FlatNode, TempDescriptor>();
-
+
public int currentID=1;
private int totalWeakGroups;
- private OoOJavaAnalysis oooa;
+ private OoOJavaAnalysis oooa;
private State globalState;
-
+
// initializing variables can be found in printHeader()
private static final String allocSite = "allocsite";
private static final String queryAndAddToVisitedHashtable = "hashRCRInsert";
private static final String deallocVisitedHashTable = "hashRCRDelete()";
private static final String resetVisitedHashTable = "hashRCRreset()";
- public RuntimeConflictResolver( String buildir,
- OoOJavaAnalysis oooa,
- State state)
+ public RuntimeConflictResolver(String buildir,
+ OoOJavaAnalysis oooa,
+ State state)
throws FileNotFoundException {
this.oooa = oooa;
this.globalState = state;
processedStallSites = new Hashtable <FlatNode, TempDescriptor>();
BuildStateMachines bsm = oooa.getBuildStateMachines();
totalWeakGroups = bsm.getTotalNumOfWeakGroups();
-
+
setupOutputFiles(buildir);
- for( Pair<FlatNode, TempDescriptor> p: bsm.getAllMachineNames() ) {
- FlatNode taskOrStallSite = p.getFirst();
- TempDescriptor var = p.getSecond();
- StateMachineForEffects stateMachine = bsm.getStateMachine( taskOrStallSite, var );
+ for( Pair<FlatNode, TempDescriptor> p : bsm.getAllMachineNames() ) {
+ FlatNode taskOrStallSite = p.getFirst();
+ TempDescriptor var = p.getSecond();
+ StateMachineForEffects stateMachine = bsm.getStateMachine(taskOrStallSite, var);
//prints the traversal code
- printCMethod( taskOrStallSite, var, stateMachine);
+ printCMethod(taskOrStallSite, var, stateMachine);
}
-
- //IMPORTANT must call .close() elsewhere to finish printing the C files.
+
+ //IMPORTANT must call .close() elsewhere to finish printing the C files.
}
-
+
/*
- * This method generates a C method for every inset variable and rblock.
- *
- * The C method works by generating a large switch statement that will run the appropriate
- * checking code for each object based on the current state. The switch statement is
+ * This method generates a C method for every inset variable and rblock.
+ *
+ * The C method works by generating a large switch statement that will run the appropriate
+ * checking code for each object based on the current state. The switch statement is
* surrounded by a while statement which dequeues objects to be checked from a queue. An
* object is added to a queue only if it contains a conflict (in itself or in its referencees)
- * and we came across it while checking through it's referencer. Because of this property,
- * conflicts will be signaled by the referencer; the only exception is the inset variable which can
- * signal a conflict within itself.
+ * and we came across it while checking through its referencer. Because of this property,
+ * conflicts will be signaled by the referencer; the only exception is the inset variable which can
+ * signal a conflict within itself.
*/
-
- private void printCMethod( FlatNode taskOrStallSite,
- TempDescriptor var,
- StateMachineForEffects smfe) {
+
+ private void printCMethod(FlatNode taskOrStallSite,
+ TempDescriptor var,
+ StateMachineForEffects smfe) {
// collect info for code gen
FlatSESEEnterNode task = null;
- String inVar = var.getSafeSymbol();
- SMFEState initialState = smfe.getInitialState();
- boolean isStallSite = !(taskOrStallSite instanceof FlatSESEEnterNode);
- int weakID = smfe.getWeaklyConnectedGroupID(taskOrStallSite);
-
- String blockName;
+ String inVar = var.getSafeSymbol();
+ SMFEState initialState = smfe.getInitialState();
+ boolean isStallSite = !(taskOrStallSite instanceof FlatSESEEnterNode);
+ int weakID = smfe.getWeaklyConnectedGroupID(taskOrStallSite);
+
+ String blockName;
//No need generate code for empty traverser
if (smfe.isEmpty())
return;
processedStallSites.put(taskOrStallSite, var);
} else {
task = (FlatSESEEnterNode) taskOrStallSite;
-
+
//if the task is the main task, there's no traverser
if(task.isMainSESE)
- return;
-
+ return;
+
blockName = task.getPrettyIdentifier();
}
-
+
String methodName = "void traverse___" + inVar + removeInvalidChars(blockName) + "___(void * InVar, ";
- int index = -1;
+ int index = -1;
if( isStallSite ) {
methodName += "SESEstall *record)";
methodName += task.getSESErecordName() +" *record)";
//TODO check that this HACK is correct (i.e. adding and then polling immediately afterwards)
task.addInVarForDynamicCoarseConflictResolution(var);
- index = task.getInVarsForDynamicCoarseConflictResolution().indexOf( var );
+ index = task.getInVarsForDynamicCoarseConflictResolution().indexOf(var);
}
-
- cFile.println( methodName + " {");
- headerFile.println( methodName + ";" );
- cFile.println( " int totalcount = RUNBIAS;");
+ cFile.println(methodName + " {");
+ headerFile.println(methodName + ";");
+
+ cFile.println(" int totalcount = RUNBIAS;");
if( isStallSite ) {
cFile.println(" record->rcrRecords[0].count = RUNBIAS;");
} else {
cFile.println(" record->rcrRecords["+index+"].count = RUNBIAS;");
}
- //clears queue and hashtable that keeps track of where we've been.
+ //clears queue and hashtable that keep track of where we've been.
cFile.println(clearQueue + ";");
- cFile.println(resetVisitedHashTable + ";");
+ cFile.println(resetVisitedHashTable + ";");
cFile.println(" RCRQueueEntry * queueEntry; //needed for dequeuing");
-
+
cFile.println(" int traverserState = "+initialState.getID()+";");
- //generic cast to ___Object___ to access ptr->allocsite field.
+ //generic cast to ___Object___ to access ptr->allocsite field.
cFile.println(" struct ___Object___ * ptr = (struct ___Object___ *) InVar;");
cFile.println(" if (InVar != NULL) {");
cFile.println(" " + queryAndAddToVisitedHashtable + "(ptr, "+initialState.getID()+");");
cFile.println(" }");
}
-
+
// Traverse the StateMachineForEffects (a graph)
// that serves as a plan for building the heap examiner code.
// SWITCH on the states in the state machine, THEN
// SWITCH on the concrete object's allocation site THEN
// consider conflicts, enqueue more work, inline more SWITCHES, etc.
-
+
boolean needswitch=smfe.getStates().size()>1;
if (needswitch) {
cFile.println(" switch( traverserState ) {");
}
- for(SMFEState state: smfe.getStates()) {
+ for(SMFEState state : smfe.getStates()) {
if(state.getRefCount() != 1 || initialState == state) {
if (needswitch) {
} else {
cFile.println(" if(traverserState=="+state.getID()+") {");
}
-
- printAllocChecksInsideState(smfe, state, taskOrStallSite, var, "ptr", 0, weakID);
-
+
+ printAllocChecksInsideState(smfe, state, taskOrStallSite, var, "ptr", 0, weakID);
+
cFile.println(" break;");
}
}
-
+
if (needswitch) {
cFile.println(" default: break;");
}
cFile.println(" traverserState = queueEntry->traverserState;");
cFile.println(" } while(ptr != NULL);");
cFile.println(" } // end if inVar not null");
-
+
if( isStallSite ) {
cFile.println(" if(atomic_sub_and_test(totalcount,&(record->rcrRecords[0].count))) {");
cFile.println("}");
cFile.flush();
}
-
+
public void printAllocChecksInsideState(StateMachineForEffects smfe, SMFEState state, FlatNode fn, TempDescriptor tmp, String prefix, int depth, int weakID) {
EffectsTable et = new EffectsTable(state);
boolean needswitch=et.getAllAllocs().size()>1;
cFile.println(" switch(" + prefix+"->"+allocSite + ") {");
}
- //we assume that all allocs given in the effects are starting locs.
- for(Alloc a: et.getAllAllocs()) {
+ //we assume that all allocs given in the effects are starting locs.
+ for(Alloc a : et.getAllAllocs()) {
if (needswitch) {
cFile.println(" case "+a.getUniqueAllocSiteID()+": {");
} else {
}
cFile.println(" }");
}
-
+
public void addChecker(StateMachineForEffects smfe, Alloc a, FlatNode fn, TempDescriptor tmp, SMFEState state, EffectsTable et, String prefix, int depth, int weakID) {
if (depth>30) {
System.out.println(fn+" "+state+" "+state.toStringDOT());
}
insertEntriesIntoHashStructureNew(fn, tmp, et, a, prefix, depth, weakID);
-
+
int pdepth = depth+1;
-
+
if(a.getType().isArray()) {
String childPtr = "((struct ___Object___ **)(((char *) &(((struct ArrayObject *)"+ prefix+")->___length___))+sizeof(int)))[i]";
String currPtr = "arrayElement" + pdepth;
-
+
boolean first=true;
-
- for(Effect e: et.getEffects(a)) {
+
+ for(Effect e : et.getEffects(a)) {
if (!state.transitionsTo(e).isEmpty()) {
if (first) {
cFile.println(" int i;");
}
printRefSwitch(smfe, fn, tmp, pdepth, childPtr, currPtr, state.transitionsTo(e), weakID);
- // only if we are traversing for a new task, not a stall site
- if( (fn instanceof FlatSESEEnterNode) &&
- smfe.getPossiblyEvilEffects().contains( e ) ) {
+ // only if we are traversing for a new task, not a stall site
+ if( (fn instanceof FlatSESEEnterNode) &&
+ smfe.getPossiblyEvilEffects().contains(e) ) {
+
+ FlatSESEEnterNode evilTask = (FlatSESEEnterNode)fn;
- FlatSESEEnterNode evilTask = (FlatSESEEnterNode)fn;
-
- detectPossiblyEvilExecution( evilTask,
- evilTask.getInVarsForDynamicCoarseConflictResolution().indexOf( tmp )
- );
- }
+ detectPossiblyEvilExecution(evilTask,
+ evilTask.getInVarsForDynamicCoarseConflictResolution().indexOf(tmp)
+ );
+ }
}
}
if (!first)
//All other cases
String currPtr = "myPtr" + pdepth;
cFile.println(" struct ___Object___ * "+currPtr+";");
-
- for(Effect e: et.getEffects(a)) {
+
+ for(Effect e : et.getEffects(a)) {
if (!state.transitionsTo(e).isEmpty()) {
String childPtr = "((struct "+a.getType().getSafeSymbol()+" *)"+prefix +")->" + e.getField().getSafeSymbol();
printRefSwitch(smfe, fn, tmp, pdepth, childPtr, currPtr, state.transitionsTo(e), weakID);
- // only if we are traversing for a new task, not a stall site
- if( (fn instanceof FlatSESEEnterNode) &&
- smfe.getPossiblyEvilEffects().contains( e ) ) {
+ // only if we are traversing for a new task, not a stall site
+ if( (fn instanceof FlatSESEEnterNode) &&
+ smfe.getPossiblyEvilEffects().contains(e) ) {
- FlatSESEEnterNode evilTask = (FlatSESEEnterNode)fn;
-
- detectPossiblyEvilExecution( evilTask,
- evilTask.getInVarsForDynamicCoarseConflictResolution().indexOf( tmp )
- );
- }
+ FlatSESEEnterNode evilTask = (FlatSESEEnterNode)fn;
+
+ detectPossiblyEvilExecution(evilTask,
+ evilTask.getInVarsForDynamicCoarseConflictResolution().indexOf(tmp)
+ );
+ }
}
}
}
}
- private void printRefSwitch(StateMachineForEffects smfe, FlatNode fn, TempDescriptor tmp, int pdepth, String childPtr, String currPtr, Set<SMFEState> transitions, int weakID) {
-
- for(SMFEState tr: transitions) {
+ private void printRefSwitch(StateMachineForEffects smfe, FlatNode fn, TempDescriptor tmp, int pdepth, String childPtr, String currPtr, Set<SMFEState> transitions, int weakID) {
+
+ for(SMFEState tr : transitions) {
if(tr.getRefCount() == 1) { //in-lineable case
//Don't need to update state counter since we don't care really if it's inlined...
cFile.println(" "+currPtr+"= (struct ___Object___ * ) " + childPtr + ";");
cFile.println(" if (" + currPtr + " != NULL) { ");
-
+
printAllocChecksInsideState(smfe, tr, fn, tmp, currPtr, pdepth+1, weakID);
-
+
cFile.println(" }"); //break for internal switch and if
} else { //non-inlineable cases
- cFile.println(" "+currPtr+"= (struct ___Object___ * ) " + childPtr + ";");
+ cFile.println(" "+currPtr+"= (struct ___Object___ * ) " + childPtr + ";");
cFile.println(" if("+queryAndAddToVisitedHashtable+"("+currPtr+","+tr.getID()+"))");
cFile.println(" " + enqueueInC +"("+ currPtr + ", "+tr.getID()+");");
- }
+ }
}
}
-
-
+
+
//FlatNode and TempDescriptor are what are used to make the taint
private void insertEntriesIntoHashStructureNew(FlatNode fn, TempDescriptor tmp, EffectsTable et, Alloc a, String prefix, int depth, int weakID) {
int index = 0;
FlatSESEEnterNode fsese = (FlatSESEEnterNode) fn;
index = fsese.getInVarsForDynamicCoarseConflictResolution().indexOf(tmp);
}
-
- String strrcr = isRblock ? "&record->rcrRecords[" + index + "], " : "NULL, ";
- String tasksrc =isRblock ? "(SESEcommon *) record, ":"(SESEcommon *)(((INTPTR)record)|1LL), ";
+
+ String strrcr = isRblock?"&record->rcrRecords[" + index + "], ":"NULL, ";
+ String tasksrc =isRblock?"(SESEcommon *) record, ":"(SESEcommon *)(((INTPTR)record)|1LL), ";
if(et.hasWriteConflict(a)) {
cFile.append(" int tmpkey" + depth + " = rcr_generateKey(" + prefix + ");\n");
if (et.conflictDereference(a))
- cFile.append(" int tmpvar" + depth + " = rcr_WTWRITEBINCASE(allHashStructures[" + weakID + "], tmpkey" + depth + ", " + tasksrc + strrcr + index + ");\n");
+ cFile.append(" int tmpvar" + depth + " = rcr_WTWRITEBINCASE(allHashStructures[" + weakID + "], tmpkey" + depth + ", " + tasksrc + strrcr + index + ");\n");
else
- cFile.append(" int tmpvar" + depth + " = rcr_WRITEBINCASE(allHashStructures["+ weakID + "], tmpkey" + depth + ", " + tasksrc + strrcr + index + ");\n");
- } else if(et.hasReadConflict(a)) {
+ cFile.append(" int tmpvar" + depth + " = rcr_WRITEBINCASE(allHashStructures["+ weakID + "], tmpkey" + depth + ", " + tasksrc + strrcr + index + ");\n");
+ } else if(et.hasReadConflict(a)) {
cFile.append(" int tmpkey" + depth + " = rcr_generateKey(" + prefix + ");\n");
if (et.conflictDereference(a))
- cFile.append(" int tmpvar" + depth + " = rcr_WTREADBINCASE(allHashStructures[" + weakID + "], tmpkey" + depth + ", " + tasksrc + strrcr + index + ");\n");
+ cFile.append(" int tmpvar" + depth + " = rcr_WTREADBINCASE(allHashStructures[" + weakID + "], tmpkey" + depth + ", " + tasksrc + strrcr + index + ");\n");
else
- cFile.append(" int tmpvar" + depth + " = rcr_READBINCASE(allHashStructures["+ weakID + "], tmpkey" + depth + ", " + tasksrc + strrcr + index + ");\n");
+ cFile.append(" int tmpvar" + depth + " = rcr_READBINCASE(allHashStructures["+ weakID + "], tmpkey" + depth + ", " + tasksrc + strrcr + index + ");\n");
}
if (et.hasReadConflict(a) || et.hasWriteConflict(a)) {
}
- private void detectPossiblyEvilExecution( FlatSESEEnterNode possiblyEvilTask,
- int rcrRecordIndex
- ) {
+ private void detectPossiblyEvilExecution(FlatSESEEnterNode possiblyEvilTask,
+ int rcrRecordIndex
+ ) {
// We have a situation in which a task can start executing and
// "evil-ly" destroy the paths to some objects it will access as
// it goes along. If this is the case, a traverser should not
cFile.append("// ^^^ \n");
cFile.append("BARRIER();\n");
cFile.append("if( unlikely( record->common.unresolvedDependencies == 0 &&");
- cFile.append( "BARRIER() &&");
- cFile.append( "record->common.doneExecuting == FALSE ) ) {\n");
+ cFile.append("BARRIER() &&");
+ cFile.append("record->common.doneExecuting == FALSE ) ) {\n");
cFile.append(" // first abort this traversal, doesn't matter what the flag is because\n");
cFile.append(" // the traverser is not going to clear the task, it's already running...\n");
cFile.println(" record->rcrstatus=0;");
private void setupOutputFiles(String buildir) throws FileNotFoundException {
cFile = new CodePrinter(new File(buildir + "RuntimeConflictResolver" + ".c"));
headerFile = new CodePrinter(new File(buildir + "RuntimeConflictResolver" + ".h"));
-
+
cFile.println("#include \"" + hashAndQueueCFileDir + "hashRCR.h\"\n#include \""
- + hashAndQueueCFileDir + "Queue_RCR.h\"\n#include <stdlib.h>");
+ + hashAndQueueCFileDir + "Queue_RCR.h\"\n#include <stdlib.h>");
cFile.println("#include \"classdefs.h\"");
cFile.println("#include \"structdefs.h\"");
cFile.println("#include \"mlp_runtime.h\"");
cFile.println("#include \"RuntimeConflictResolver.h\"");
cFile.println("#include \"hashStructure.h\"");
-
+
headerFile.println("#ifndef __3_RCR_H_");
headerFile.println("#define __3_RCR_H_");
}
-
+
//The official way to generate the name for a traverser call
public String getTraverserInvocation(TempDescriptor invar, String varString, FlatNode fn) {
String flatname;
} else { //is stallsite
flatname = fn.toString();
}
-
+
return "traverse___" + invar.getSafeSymbol() + removeInvalidChars(flatname) + "___("+varString+");";
}
-
+
public String removeInvalidChars(String in) {
StringBuilder s = new StringBuilder(in);
for(int i = 0; i < s.length(); i++) {
- if(s.charAt(i) == ' ' ||
- s.charAt(i) == '.' ||
+ if(s.charAt(i) == ' ' ||
+ s.charAt(i) == '.' ||
s.charAt(i) == '=' ||
s.charAt(i) == '[' ||
s.charAt(i) == ']' ) {
- s.deleteCharAt(i);
- i--;
+ s.deleteCharAt(i);
+ i--;
}
}
return s.toString();
idMap.put(t, new Integer(value));
return value;
}
-
+
public void close() {
//Prints out the master traverser Invocation that'll call all other traversers
//based on traverserID
- printMasterTraverserInvocation();
+ printMasterTraverserInvocation();
createMasterHashTableArray();
-
+
// Adds Extra supporting methods
cFile.println("void initializeStructsRCR() {\n " + mallocVisitedHashtable + ";\n " + clearQueue + ";\n}");
cFile.println("void destroyRCR() {\n " + deallocVisitedHashTable + ";\n}");
-
+
headerFile.println("void initializeStructsRCR();\nvoid destroyRCR();");
headerFile.println("#endif\n");
cFile.println(" return;");
cFile.println(" }");
cFile.println(" switch(record->classID) {");
-
- for(Iterator<FlatSESEEnterNode> seseit=oooa.getAllSESEs().iterator();seseit.hasNext();) {
+
+ for(Iterator<FlatSESEEnterNode> seseit=oooa.getAllSESEs().iterator(); seseit.hasNext(); ) {
FlatSESEEnterNode fsen=seseit.next();
- cFile.println( " /* "+fsen.getPrettyIdentifier()+" */");
- cFile.println( " case "+fsen.getIdentifier()+": {");
- cFile.println( " "+fsen.getSESErecordName()+" * rec=("+fsen.getSESErecordName()+" *) record;");
+ cFile.println(" /* "+fsen.getPrettyIdentifier()+" */");
+ cFile.println(" case "+fsen.getIdentifier()+": {");
+ cFile.println(" "+fsen.getSESErecordName()+" * rec=("+fsen.getSESErecordName()+" *) record;");
Vector<TempDescriptor> invars=fsen.getInVarsForDynamicCoarseConflictResolution();
- for(int i=0;i<invars.size();i++) {
- TempDescriptor tmp=invars.get(i);
-
+ for(int i=0; i<invars.size(); i++) {
+ TempDescriptor tmp=invars.get(i);
+
/* In some cases we don't want to a dynamic traversal if it is
* unlikely to increase parallelism...these are cases where we
* are just enabling a stall site to possible clear faster*/
boolean isValidToPrune=true;
- for( FlatSESEEnterNode parentSESE: fsen.getParents() ) {
- ConflictGraph graph = oooa.getConflictGraph(parentSESE);
- if(graph!=null){
- String id = tmp + "_sese" + fsen.getPrettyIdentifier();
- ConflictNode node = graph.getId2cn().get(id);
- isValidToPrune &= node.IsValidToPrune();
+ for( FlatSESEEnterNode parentSESE : fsen.getParents() ) {
+ ConflictGraph graph = oooa.getConflictGraph(parentSESE);
+ if(graph!=null) {
+ String id = tmp + "_sese" + fsen.getPrettyIdentifier();
+ ConflictNode node = graph.getId2cn().get(id);
+ isValidToPrune &= node.IsValidToPrune();
}
}
-
- if(isValidToPrune){
- // if node is valid to prune examiner,
+
+ if(isValidToPrune) {
+ // if this node's examiner is valid to prune,
// also needs to turn off stall site examiners connected to this node
- for( FlatSESEEnterNode parentSESE: fsen.getParents() ) {
- ConflictGraph graph = oooa.getConflictGraph(parentSESE);
- String id = tmp + "_sese" + fsen.getPrettyIdentifier();
- ConflictNode node = graph.getId2cn().get(id);
-
- for (Iterator iterator = node.getEdgeSet().iterator(); iterator.hasNext();) {
- ConflictEdge edge = (ConflictEdge) iterator.next();
- if (edge.getVertexU() == node) {
- if (edge.getVertexV().isStallSiteNode()) {
- edge.getVertexV().setToBePruned(true);
- }
- } else {
- if (edge.getVertexU().isStallSiteNode()) {
- edge.getVertexU().setToBePruned(true);
- }
- }
- }
+ for( FlatSESEEnterNode parentSESE : fsen.getParents() ) {
+ ConflictGraph graph = oooa.getConflictGraph(parentSESE);
+ String id = tmp + "_sese" + fsen.getPrettyIdentifier();
+ ConflictNode node = graph.getId2cn().get(id);
+
+ for (Iterator iterator = node.getEdgeSet().iterator(); iterator.hasNext(); ) {
+ ConflictEdge edge = (ConflictEdge) iterator.next();
+ if (edge.getVertexU() == node) {
+ if (edge.getVertexV().isStallSiteNode()) {
+ edge.getVertexV().setToBePruned(true);
+ }
+ } else {
+ if (edge.getVertexU().isStallSiteNode()) {
+ edge.getVertexU().setToBePruned(true);
+ }
+ }
+ }
}
}
-
+
if (i!=0) {
cFile.println(" if (record->rcrstatus!=0)");
}
-
+
if(globalState.NOSTALLTR && isValidToPrune) {
cFile.println(" /* " + getTraverserInvocation(tmp, "rec->"+tmp+", rec", fsen)+"*/");
} else {
cFile.println("#ifndef OOO_DISABLE_TASKMEMPOOL");
cFile.println(" RELEASE_REFERENCE_TO(record);");
cFile.println("#endif");
- cFile.println( " }");
- cFile.println( " break;");
+ cFile.println(" }");
+ cFile.println(" break;");
}
-
- for(FlatNode stallsite: processedStallSites.keySet()) {
-
+
+ for(FlatNode stallsite : processedStallSites.keySet()) {
+
TempDescriptor var = processedStallSites.get(stallsite);
Set<FlatSESEEnterNode> seseSet=oooa.getPossibleExecutingRBlocks(stallsite);
boolean isValidToPrune=true;
- for (Iterator iterator = seseSet.iterator(); iterator.hasNext();) {
- FlatSESEEnterNode sese = (FlatSESEEnterNode) iterator.next();
- ConflictGraph graph = oooa.getConflictGraph(sese);
- if(graph!=null){
- String id = var + "_fn" + stallsite.hashCode();
- ConflictNode node = graph.getId2cn().get(id);
- isValidToPrune &= node.isTobePruned();
- }
+ for (Iterator iterator = seseSet.iterator(); iterator.hasNext(); ) {
+ FlatSESEEnterNode sese = (FlatSESEEnterNode) iterator.next();
+ ConflictGraph graph = oooa.getConflictGraph(sese);
+ if(graph!=null) {
+ String id = var + "_fn" + stallsite.hashCode();
+ ConflictNode node = graph.getId2cn().get(id);
+ isValidToPrune &= node.isTobePruned();
+ }
}
-
- cFile.println( " case -" + getTraverserID(var, stallsite)+ ": {");
- cFile.println( " SESEstall * rec=(SESEstall*) record;");
- if(globalState.NOSTALLTR && isValidToPrune){
- cFile.println( " /*" + getTraverserInvocation(var, "rec->___obj___, rec", stallsite)+";*/");
- }else{
- cFile.println( " " + getTraverserInvocation(var, "rec->___obj___, rec", stallsite)+";");
- }
- cFile.println( " record->rcrstatus=0;");
- cFile.println( " }");
+
+ cFile.println(" case -" + getTraverserID(var, stallsite)+ ": {");
+ cFile.println(" SESEstall * rec=(SESEstall*) record;");
+ if(globalState.NOSTALLTR && isValidToPrune) {
+ cFile.println(" /*" + getTraverserInvocation(var, "rec->___obj___, rec", stallsite)+";*/");
+ } else {
+ cFile.println(" " + getTraverserInvocation(var, "rec->___obj___, rec", stallsite)+";");
+ }
+ cFile.println(" record->rcrstatus=0;");
+ cFile.println(" }");
cFile.println(" break;");
}
cFile.println(" }");
cFile.println("}");
}
-
+
private void createMasterHashTableArray() {
headerFile.println("struct Hashtable_rcr ** createAndFillMasterHashStructureArray();");
cFile.println("struct Hashtable_rcr ** createAndFillMasterHashStructureArray() {");
cFile.println(" struct Hashtable_rcr **table=rcr_createMasterHashTableArray("+totalWeakGroups + ");");
-
+
for(int i = 0; i < totalWeakGroups; i++) {
cFile.println(" table["+i+"] = (struct Hashtable_rcr *) rcr_createHashtable();");
}
public boolean hasEmptyTraversers(FlatSESEEnterNode fsen) {
boolean hasEmpty = true;
-
+
Set<FlatSESEEnterNode> children = fsen.getChildren();
- for (Iterator<FlatSESEEnterNode> iterator = children.iterator(); iterator.hasNext();) {
+ for (Iterator<FlatSESEEnterNode> iterator = children.iterator(); iterator.hasNext(); ) {
FlatSESEEnterNode child = (FlatSESEEnterNode) iterator.next();
hasEmpty &= child.getInVarsForDynamicCoarseConflictResolution().size() == 0;
}
return hasEmpty;
- }
+ }
+
-
//Simply rehashes and combines all effects for a AffectedAllocSite + Field.
private class EffectsTable {
private Hashtable<Alloc,Set<Effect>> table;
public EffectsTable(SMFEState state) {
table = new Hashtable<Alloc, Set<Effect>>();
this.state=state;
- for(Effect e: state.getEffectsAllowed()) {
+ for(Effect e : state.getEffectsAllowed()) {
Set<Effect> eg;
- if((eg = table.get(e.getAffectedAllocSite())) == null) {
- eg = new HashSet<Effect>();
- table.put(e.getAffectedAllocSite(), eg);
- }
- eg.add(e);
+ if((eg = table.get(e.getAffectedAllocSite())) == null) {
+ eg = new HashSet<Effect>();
+ table.put(e.getAffectedAllocSite(), eg);
+ }
+ eg.add(e);
}
}
-
+
public boolean conflictDereference(Alloc a) {
- for(Effect e:getEffects(a)) {
+ for(Effect e : getEffects(a)) {
if (!state.transitionsTo(e).isEmpty()&&state.getConflicts().contains(e))
return true;
}
}
public boolean hasWriteConflict(Alloc a) {
- for(Effect e:getEffects(a)) {
+ for(Effect e : getEffects(a)) {
if (e.isWrite() && state.getConflicts().contains(e))
return true;
}
}
public boolean hasReadConflict(Alloc a) {
- for(Effect e:getEffects(a)) {
+ for(Effect e : getEffects(a)) {
if (e.isRead() && state.getConflicts().contains(e))
return true;
}
if (!params.containsTemp(t)) {
Position p=(Position)temptostore.get(t);
if (p==null)
- System.out.println(t);
+ System.out.println(t);
return !p.inStruct;
}
return false;
protected boolean isglobal;
protected boolean isstaticblock; // flag to indicate if this is a static block
protected boolean isinvokedbystatic; // flag to indicate if this method is invoked by some static block
-
+
protected boolean isdefaultconstructor; // flag to indicate if this is a default constructor
public MethodDescriptor(Modifiers m, TypeDescriptor rt, String identifier) {
for(int i=0; i<numParameters(); i++) {
Descriptor d1=getParameter(i);
Descriptor d2=md.getParameter(i);
- TypeDescriptor td1=(d1 instanceof TagVarDescriptor) ? ((TagVarDescriptor)d1).getType() : ((VarDescriptor)d1).getType();
- TypeDescriptor td2=(d2 instanceof TagVarDescriptor) ? ((TagVarDescriptor)d2).getType() : ((VarDescriptor)d2).getType();
+ TypeDescriptor td1=(d1 instanceof TagVarDescriptor)?((TagVarDescriptor)d1).getType():((VarDescriptor)d1).getType();
+ TypeDescriptor td2=(d2 instanceof TagVarDescriptor)?((TagVarDescriptor)d2).getType():((VarDescriptor)d2).getType();
if (!td1.equals(td2))
return false;
}
public boolean isGlobal() {
return isglobal;
}
-
+
public boolean isStaticBlock() {
return isstaticblock;
}
-
+
public void setAsStaticBlock() {
isstaticblock = true;
}
-
+
public boolean isInvokedByStatic() {
return this.isinvokedbystatic;
}
-
+
public void setIsInvokedByStatic(boolean isinvokedbystatic) {
this.isinvokedbystatic = isinvokedbystatic;
}
public boolean isStatic() {
return modifier.isStatic();
}
-
+
public boolean isAbstract() {
return modifier.isAbstract();
}
st+=")";
return st;
}
-
+
public boolean isDefaultConstructor() {
return this.isdefaultconstructor;
}
-
+
public void setDefaultConstructor() {
this.isdefaultconstructor = true;
}
else
return nd.getRoot();
}
-
+
public String getPathFromRootToHere() {
return getPathFromRootToHere(identifier);
}
-
+
public String getPathFromRootToHere(String id) {
String path = id;
NameDescriptor temp = this.nd;
path = temp.identifier + "." + path;
temp = temp.nd;
}
-
+
return path;
}
-
+
public String toString() {
if (nd==null)
return identifier;
this.classpath=new Vector();
this.cd2locationOrderMap=new Hashtable();
this.cd2locationPropertyMap=new Hashtable();
- this.fn2labelMap=new Hashtable();
+ this.fn2labelMap=new Hashtable();
this.lines=0;
}
public String OWNERSHIPDEBUGCALLEE=null;
public String OWNERSHIPDEBUGCALLER=null;
-
+
public boolean DISJOINT=false;
public int DISJOINTALLOCDEPTH=1;
public boolean RCR_DEBUG=false;
public boolean RCR_DEBUG_VERBOSE=false;
public boolean NOSTALLTR=false;
-
+
//SSJava
public boolean SSJAVA=false;
public boolean METHODEFFECTS=false;
public static double TRUEPROB=0.8;
public static boolean PRINTFLAT=false;
-
+
//DSM options
public boolean DSM=false;
public boolean DSMTASK=false;
public boolean DELAYCOMP=false;
public boolean DUALVIEW=false;
public boolean HYBRID=false;
-
+
// Bamboo options
public boolean MULTICORE=false;
public boolean MULTICOREGC=false;
// MGC options
public boolean MGC=false;
public boolean OBJECTLOCKDEBUG=false;
-
+
//Other options
public String structfile;
public String main;
TypeDescriptor td=new TypeDescriptor(n);
return td;
}
-
+
public static TypeDescriptor getTypeDescriptor(String n) {
TypeDescriptor td=new TypeDescriptor(n);
return td;
sclasses.add(tdn);
}
}
-
+
public int numClasses() {
return numclasses;
}
-
+
public int numInterfaces() {
return numinterfaces;
}
-
+
public int numStaticBlocks() {
return numstaticblocks;
}
public SymbolTable getTaskSymbolTable() {
return tasks;
}
-
+
public SymbolTable getSClassSymbolTable() {
return sclasses;
}
tasks.add(td);
numtasks++;
}
-
- public void addLocationOrder(ClassDescriptor cd, Lattice order){
+
+ public void addLocationOrder(ClassDescriptor cd, Lattice order) {
cd2locationOrderMap.put(cd,order);
}
-
- public Hashtable getCd2LocationOrder(){
+
+ public Hashtable getCd2LocationOrder() {
return cd2locationOrderMap;
}
-
- public void addLocationPropertySet(ClassDescriptor cd, Set<String> set){
+
+ public void addLocationPropertySet(ClassDescriptor cd, Set<String> set) {
cd2locationPropertyMap.put(cd,set);
}
-
- public Hashtable getCd2LocationPropertyMap(){
+
+ public Hashtable getCd2LocationPropertyMap() {
return cd2locationPropertyMap;
}
-
+
}
private Hashtable table;
private SymbolTable parent;
private HashSet valueset;
-
+
private Vector<SymbolTable> parentIFs;
public SymbolTable() {
hs=new HashSet();
if(this.parentIFs != null) {
for(int i = 0; i < this.parentIFs.size(); i++) {
- if(this.parentIFs.elementAt(i).contains(name)) {
- hs.addAll((HashSet)(this.parentIFs.elementAt(i).getPSet(name)));
- }
+ if(this.parentIFs.elementAt(i).contains(name)) {
+ hs.addAll((HashSet)(this.parentIFs.elementAt(i).getPSet(name)));
+ }
}
}
if (table.containsKey(name)) {
public Descriptor get(String name) {
Descriptor d = getFromSameScope(name);
- if (d == null){
+ if (d == null) {
if(parent != null) {
- d = parent.get(name);
+ d = parent.get(name);
}
if((d == null) && (this.parentIFs != null)) {
- for(int i = 0; i < this.parentIFs.size(); i++) {
- d = this.parentIFs.elementAt(i).get(name);
- if(d != null) {
- return d;
- }
- }
+ for(int i = 0; i < this.parentIFs.size(); i++) {
+ d = this.parentIFs.elementAt(i).get(name);
+ if(d != null) {
+ return d;
+ }
+ }
}
}
return d;
return null;
}
-
+
public Enumeration getNames() {
return table.keys();
}
hs=new HashSet();
if (this.parentIFs != null) {
for(int i = 0; i < this.parentIFs.size(); i++) {
- hs.addAll(this.parentIFs.elementAt(i).getAllValueSet());
+ hs.addAll(this.parentIFs.elementAt(i).getAllValueSet());
}
}
hs.addAll(valueset);
public void setParent(SymbolTable parent) {
this.parent = parent;
}
-
+
public Vector<SymbolTable> getParentIFs() {
return this.parentIFs;
}
else
return left.getType().dereference();
}
-
+
public Long evaluate() {
eval = null;
return eval; //null;
public ExpressionNode getVarInitializer(int i) {
return (ExpressionNode) varInitList.get(i);
}
-
+
public void setType(TypeDescriptor type) {
this.type = type;
}
-
+
public TypeDescriptor getType() {
return this.type;
}
public int kind() {
return Kind.ArrayInitializerNode;
}
-
+
public Long evaluate() {
eval = null;
return eval; //null;
public int kind() {
return Kind.AssignmentNode;
}
-
+
public Long evaluate() {
eval = left.evaluate();
return eval;
public final static int NORMAL=0;
public final static int NOBRACES=1;
public final static int EXPRLIST=2;
-
+
String label=null;
public BlockNode() {
public void addFirstBlockStatement(BlockStatementNode bsn) {
blockstatements.insertElementAt(bsn,0);
}
-
+
public void addBlockStatementAt(BlockStatementNode bsn, int i) {
blockstatements.insertElementAt(bsn,i);
}
public int kind() {
return Kind.BlockNode;
}
-
- public void setLabel(String l){
+
+ public void setLabel(String l) {
label=l;
}
-
- public String getLabel(){
+
+ public String getLabel() {
return label;
}
public void buildtree(ParseNode pn, Set toanalyze, String sourcefile) {
parseFile(pn, toanalyze, sourcefile);
-
+
  // numbering the interfaces
int if_num = 0;
Iterator it_classes = state.getClassSymbolTable().getValueSet().iterator();
while(it_classes.hasNext()) {
ClassDescriptor cd = (ClassDescriptor)it_classes.next();
if(cd.isInterface()) {
- cd.setInterfaceId(if_num++);
+ cd.setInterfaceId(if_num++);
}
}
}
//This is all single imports and a subset of the
- //multi imports that have been resolved.
+ //multi imports that have been resolved.
Hashtable mandatoryImports;
//maps class names in file to full name
- //Note may map a name to an ERROR.
+ //Note may map a name to an ERROR.
Hashtable multiimports;
NameDescriptor packages;
public void parseFile(ParseNode pn, Set toanalyze, String sourcefile) {
mandatoryImports = new Hashtable();
multiimports = new Hashtable();
-
+
if(state.JNI) {
//add java.lang as our default multi-import
this.addMultiImport(sourcefile, "java.lang", false);
}
-
+
ParseNode ipn = pn.getChild("imports").getChild("import_decls_list");
if (ipn != null) {
ParseNodeVector pnv = ipn.getChildren();
for (int i = 0; i < pnv.size(); i++) {
- ParseNode pnimport = pnv.elementAt(i);
- NameDescriptor nd = parseName(pnimport.getChild("name"));
- if (isNode(pnimport, "import_single")) {
- if (!mandatoryImports.containsKey(nd.getIdentifier())) {
- // map name to full name (includes package/directory
- mandatoryImports.put(nd.getIdentifier(), nd.getPathFromRootToHere());
- } else {
- throw new Error("An ambiguous class "+ nd.getIdentifier() +" has been found. It is included for " +
- ((String)mandatoryImports.get(nd.getIdentifier())) + " and " +
- nd.getPathFromRootToHere());
- }
- }
- else {
- addMultiImport(sourcefile, nd.getPathFromRootToHere(), false);
- }
+ ParseNode pnimport = pnv.elementAt(i);
+ NameDescriptor nd = parseName(pnimport.getChild("name"));
+ if (isNode(pnimport, "import_single")) {
+ if (!mandatoryImports.containsKey(nd.getIdentifier())) {
+ // map name to full name (includes package/directory)
+ mandatoryImports.put(nd.getIdentifier(), nd.getPathFromRootToHere());
+ } else {
+ throw new Error("An ambiguous class "+ nd.getIdentifier() +" has been found. It is included for " +
+ ((String)mandatoryImports.get(nd.getIdentifier())) + " and " +
+ nd.getPathFromRootToHere());
+ }
+ } else {
+ addMultiImport(sourcefile, nd.getPathFromRootToHere(), false);
+ }
}
}
-
+
ParseNode ppn=pn.getChild("packages").getChild("package");
String packageName = null;
if (ppn!=null) {
NameDescriptor nd = parseClassName(ppn.getChild("name"));
- packageName = nd.getPathFromRootToHere();
- //Trick -> import the package directory as a multi-import and it'll
+ packageName = nd.getPathFromRootToHere();
+ //Trick -> import the package directory as a multi-import and it'll
//automatically recognize files in the same directory.
addMultiImport(sourcefile, packageName, true);
}
-
+
ParseNode tpn=pn.getChild("type_declaration_list");
if (tpn != null) {
ParseNodeVector pnv = tpn.getChildren();
for (int i = 0; i < pnv.size(); i++) {
- ParseNode type_pn = pnv.elementAt(i);
- if (isEmpty(type_pn)) /* Skip the semicolon */
- continue;
- if (isNode(type_pn, "class_declaration")) {
- ClassDescriptor cn = parseTypeDecl(type_pn, packageName);
- cn.setSourceFileName(sourcefile);
- parseInitializers(cn);
- if (toanalyze != null)
- toanalyze.add(cn);
- state.addClass(cn);
- // for inner classes/enum
- HashSet tovisit = new HashSet();
- Iterator it_icds = cn.getInnerClasses();
- while (it_icds.hasNext()) {
- tovisit.add(it_icds.next());
- }
-
- while (!tovisit.isEmpty()) {
- ClassDescriptor cd = (ClassDescriptor) tovisit.iterator().next();
- tovisit.remove(cd);
- parseInitializers(cd);
- if (toanalyze != null) {
- toanalyze.add(cd);
- }
- cd.setSourceFileName(sourcefile);
- state.addClass(cd);
-
- Iterator it_ics = cd.getInnerClasses();
- while (it_ics.hasNext()) {
- tovisit.add(it_ics.next());
- }
-
- Iterator it_ienums = cd.getEnum();
- while (it_ienums.hasNext()) {
- ClassDescriptor iecd = (ClassDescriptor) it_ienums.next();
- if (toanalyze != null) {
- toanalyze.add(iecd);
- }
- iecd.setSourceFileName(sourcefile);
- state.addClass(iecd);
- }
- }
-
- Iterator it_enums = cn.getEnum();
- while (it_enums.hasNext()) {
- ClassDescriptor ecd = (ClassDescriptor) it_enums.next();
- if (toanalyze != null) {
- toanalyze.add(ecd);
- }
- ecd.setSourceFileName(sourcefile);
- state.addClass(ecd);
- }
- } else if (isNode(type_pn, "task_declaration")) {
- TaskDescriptor td = parseTaskDecl(type_pn);
- if (toanalyze != null)
- toanalyze.add(td);
- state.addTask(td);
- } else if (isNode(type_pn, "interface_declaration")) {
- // TODO add version for normal Java later
- ClassDescriptor cn = parseInterfaceDecl(type_pn, packageName);
- if (toanalyze != null)
- toanalyze.add(cn);
- cn.setSourceFileName(sourcefile);
- state.addClass(cn);
-
- // for enum
- Iterator it_enums = cn.getEnum();
- while (it_enums.hasNext()) {
- ClassDescriptor ecd = (ClassDescriptor) it_enums.next();
- if (toanalyze != null) {
- toanalyze.add(ecd);
- }
- ecd.setSourceFileName(sourcefile);
- state.addClass(ecd);
- }
- } else if (isNode(type_pn, "enum_declaration")) {
- // TODO add version for normal Java later
- ClassDescriptor cn = parseEnumDecl(null, type_pn);
- if (toanalyze != null)
- toanalyze.add(cn);
- cn.setSourceFileName(sourcefile);
- state.addClass(cn);
- } else if(isNode(type_pn,"annotation_type_declaration")){
- ClassDescriptor cn=parseAnnotationTypeDecl(type_pn);
- if (toanalyze != null)
- toanalyze.add(cn);
- cn.setSourceFileName(sourcefile);
- state.addClass(cn);
- } else {
- throw new Error(type_pn.getLabel());
- }
+ ParseNode type_pn = pnv.elementAt(i);
+ if (isEmpty(type_pn)) /* Skip the semicolon */
+ continue;
+ if (isNode(type_pn, "class_declaration")) {
+ ClassDescriptor cn = parseTypeDecl(type_pn, packageName);
+ cn.setSourceFileName(sourcefile);
+ parseInitializers(cn);
+ if (toanalyze != null)
+ toanalyze.add(cn);
+ state.addClass(cn);
+ // for inner classes/enum
+ HashSet tovisit = new HashSet();
+ Iterator it_icds = cn.getInnerClasses();
+ while (it_icds.hasNext()) {
+ tovisit.add(it_icds.next());
+ }
+
+ while (!tovisit.isEmpty()) {
+ ClassDescriptor cd = (ClassDescriptor) tovisit.iterator().next();
+ tovisit.remove(cd);
+ parseInitializers(cd);
+ if (toanalyze != null) {
+ toanalyze.add(cd);
+ }
+ cd.setSourceFileName(sourcefile);
+ state.addClass(cd);
+
+ Iterator it_ics = cd.getInnerClasses();
+ while (it_ics.hasNext()) {
+ tovisit.add(it_ics.next());
+ }
+
+ Iterator it_ienums = cd.getEnum();
+ while (it_ienums.hasNext()) {
+ ClassDescriptor iecd = (ClassDescriptor) it_ienums.next();
+ if (toanalyze != null) {
+ toanalyze.add(iecd);
+ }
+ iecd.setSourceFileName(sourcefile);
+ state.addClass(iecd);
+ }
+ }
+
+ Iterator it_enums = cn.getEnum();
+ while (it_enums.hasNext()) {
+ ClassDescriptor ecd = (ClassDescriptor) it_enums.next();
+ if (toanalyze != null) {
+ toanalyze.add(ecd);
+ }
+ ecd.setSourceFileName(sourcefile);
+ state.addClass(ecd);
+ }
+ } else if (isNode(type_pn, "task_declaration")) {
+ TaskDescriptor td = parseTaskDecl(type_pn);
+ if (toanalyze != null)
+ toanalyze.add(td);
+ state.addTask(td);
+ } else if (isNode(type_pn, "interface_declaration")) {
+ // TODO add version for normal Java later
+ ClassDescriptor cn = parseInterfaceDecl(type_pn, packageName);
+ if (toanalyze != null)
+ toanalyze.add(cn);
+ cn.setSourceFileName(sourcefile);
+ state.addClass(cn);
+
+ // for enum
+ Iterator it_enums = cn.getEnum();
+ while (it_enums.hasNext()) {
+ ClassDescriptor ecd = (ClassDescriptor) it_enums.next();
+ if (toanalyze != null) {
+ toanalyze.add(ecd);
+ }
+ ecd.setSourceFileName(sourcefile);
+ state.addClass(ecd);
+ }
+ } else if (isNode(type_pn, "enum_declaration")) {
+ // TODO add version for normal Java later
+ ClassDescriptor cn = parseEnumDecl(null, type_pn);
+ if (toanalyze != null)
+ toanalyze.add(cn);
+ cn.setSourceFileName(sourcefile);
+ state.addClass(cn);
+ } else if(isNode(type_pn,"annotation_type_declaration")) {
+ ClassDescriptor cn=parseAnnotationTypeDecl(type_pn);
+ if (toanalyze != null)
+ toanalyze.add(cn);
+ cn.setSourceFileName(sourcefile);
+ state.addClass(cn);
+ } else {
+ throw new Error(type_pn.getLabel());
+ }
}
}
}
-
-
-
+
+
+
//This kind of breaks away from tradition a little bit by doing the file checks here
// instead of in Semantic check, but doing it here is easier because we have a mapping early on
// if I wait until semantic check, I have to change ALL the type descriptors to match the new
String path = (String) state.classpath.get(j);
File folder = new File(path, importPath.replace('.', '/'));
if (folder.exists()) {
- found = true;
- for (String file : folder.list()) {
- // if the file is of type *.java add to multiImport list.
- if (file.lastIndexOf('.') != -1 && file.substring(file.lastIndexOf('.')).equalsIgnoreCase(".java")) {
- String classname = file.substring(0, file.length() - 5);
- // single imports have precedence over multi-imports
- if (!mandatoryImports.containsKey(classname)) {
- //package files have precedence over multi-imports.
- if (multiimports.containsKey(classname) && !isPackageDirectory) {
- // put error in for later, in case we try to import
- multiimports.put(classname, new Error("Error: class " + classname + " is defined more than once in a multi-import in " + currentSource));
- } else {
- multiimports.put(classname, importPath + "." + classname);
- }
- }
- }
- }
+ found = true;
+ for (String file : folder.list()) {
+ // if the file is of type *.java add to multiImport list.
+ if (file.lastIndexOf('.') != -1 && file.substring(file.lastIndexOf('.')).equalsIgnoreCase(".java")) {
+ String classname = file.substring(0, file.length() - 5);
+ // single imports have precedence over multi-imports
+ if (!mandatoryImports.containsKey(classname)) {
+ //package files have precedence over multi-imports.
+ if (multiimports.containsKey(classname) && !isPackageDirectory) {
+ // put error in for later, in case we try to import
+ multiimports.put(classname, new Error("Error: class " + classname + " is defined more than once in a multi-import in " + currentSource));
+ } else {
+ multiimports.put(classname, importPath + "." + classname);
+ }
+ }
+ }
+ }
}
}
-
-
+
+
if(!found) {
throw new Error("Import package " + importPath + " in " + currentSource
- + " cannot be resolved.");
+ + " cannot be resolved.");
}
}
- public void parseInitializers(ClassDescriptor cn){
+ public void parseInitializers(ClassDescriptor cn) {
Vector fv=cn.getFieldVec();
int pos = 0;
- for(int i=0;i<fv.size();i++) {
+ for(int i=0; i<fv.size(); i++) {
FieldDescriptor fd=(FieldDescriptor)fv.get(i);
if(fd.getExpressionNode()!=null) {
Iterator methodit = cn.getMethods();
- while(methodit.hasNext()){
+ while(methodit.hasNext()) {
MethodDescriptor currmd=(MethodDescriptor)methodit.next();
- if(currmd.isConstructor()){
+ if(currmd.isConstructor()) {
BlockNode bn=state.getMethodBody(currmd);
NameNode nn=new NameNode(new NameDescriptor(fd.getSymbol()));
AssignmentNode an=new AssignmentNode(nn,fd.getExpressionNode(),new AssignOperation(1));
pos++;
}
}
- }
-
+ }
+
private ClassDescriptor parseEnumDecl(ClassDescriptor cn, ParseNode pn) {
ClassDescriptor ecd=new ClassDescriptor(pn.getChild("name").getTerminal(), false);
ecd.setImports(mandatoryImports);
cn.addEnum(ecd);
}
if (!(ecd.getSymbol().equals(TypeUtil.ObjectClass)||
- ecd.getSymbol().equals(TypeUtil.TagClass))) {
+ ecd.getSymbol().equals(TypeUtil.TagClass))) {
ecd.setSuper(TypeUtil.ObjectClass);
}
ecd.setModifiers(parseModifiersList(pn.getChild("modifiers")));
parseEnumBody(ecd, pn.getChild("enumbody"));
return ecd;
}
-
+
private void parseEnumBody(ClassDescriptor cn, ParseNode pn) {
ParseNode decls=pn.getChild("enum_constants_list");
if (decls!=null) {
ParseNodeVector pnv=decls.getChildren();
for(int i=0; i<pnv.size(); i++) {
- ParseNode decl=pnv.elementAt(i);
- if (isNode(decl,"enum_constant")) {
- parseEnumConstant(cn,decl);
- } else throw new Error();
+ ParseNode decl=pnv.elementAt(i);
+ if (isNode(decl,"enum_constant")) {
+ parseEnumConstant(cn,decl);
+ } else throw new Error();
}
}
}
-
+
private void parseEnumConstant(ClassDescriptor cn, ParseNode pn) {
cn.addEnumConstant(pn.getChild("name").getTerminal());
}
-
- private ClassDescriptor parseAnnotationTypeDecl(ParseNode pn){
+
+ private ClassDescriptor parseAnnotationTypeDecl(ParseNode pn) {
ClassDescriptor cn=new ClassDescriptor(pn.getChild("name").getTerminal(), true);
cn.setImports(mandatoryImports);
ParseNode modifiers=pn.getChild("modifiers");
- if(modifiers!=null){
+ if(modifiers!=null) {
cn.setModifiers(parseModifiersList(modifiers));
}
parseAnnotationTypeBody(cn,pn.getChild("body"));
return cn;
}
-
- private void parseAnnotationTypeBody(ClassDescriptor cn, ParseNode pn){
+
+ private void parseAnnotationTypeBody(ClassDescriptor cn, ParseNode pn) {
ParseNode list_node=pn.getChild("annotation_type_element_list");
- if(list_node!=null){
+ if(list_node!=null) {
ParseNodeVector pnv = list_node.getChildren();
for (int i = 0; i < pnv.size(); i++) {
- ParseNode element_node = pnv.elementAt(i);
- if (isNode(element_node, "annotation_type_element_declaration")) {
- ParseNodeVector elementProps = element_node.getChildren();
- String identifier=null;
- TypeDescriptor type=null;
- Modifiers modifiers=new Modifiers();
- for(int eidx=0; eidx<elementProps.size(); eidx++) {
- ParseNode prop_node=elementProps.elementAt(eidx);
- if(isNode(prop_node,"name")){
- identifier=prop_node.getTerminal();
- }else if(isNode(prop_node,"type")){
- type=parseTypeDescriptor(prop_node);
- }else if(isNode(prop_node,"modifier")){
- modifiers=parseModifiersList(prop_node);
- }
- }
- cn.addField(new FieldDescriptor(modifiers, type, identifier, null, false));
- }
+ ParseNode element_node = pnv.elementAt(i);
+ if (isNode(element_node, "annotation_type_element_declaration")) {
+ ParseNodeVector elementProps = element_node.getChildren();
+ String identifier=null;
+ TypeDescriptor type=null;
+ Modifiers modifiers=new Modifiers();
+ for(int eidx=0; eidx<elementProps.size(); eidx++) {
+ ParseNode prop_node=elementProps.elementAt(eidx);
+ if(isNode(prop_node,"name")) {
+ identifier=prop_node.getTerminal();
+ } else if(isNode(prop_node,"type")) {
+ type=parseTypeDescriptor(prop_node);
+ } else if(isNode(prop_node,"modifier")) {
+ modifiers=parseModifiersList(prop_node);
+ }
+ }
+ cn.addField(new FieldDescriptor(modifiers, type, identifier, null, false));
+ }
}
}
}
-
+
public ClassDescriptor parseInterfaceDecl(ParseNode pn, String packageName) {
ClassDescriptor cn;
if(packageName == null) {
- cn=new ClassDescriptor(pn.getChild("name").getTerminal(), true);
+ cn=new ClassDescriptor(pn.getChild("name").getTerminal(), true);
} else {
String newClassname = packageName + "." + pn.getChild("name").getTerminal();
cn= new ClassDescriptor(packageName, newClassname, true);
}
-
+
cn.setImports(mandatoryImports);
//cn.setAsInterface();
if (!isEmpty(pn.getChild("superIF").getTerminal())) {
ParseNode snlist=pn.getChild("superIF").getChild("extend_interface_list");
ParseNodeVector pnv=snlist.getChildren();
for(int i=0; i<pnv.size(); i++) {
- ParseNode decl=pnv.elementAt(i);
- if (isNode(decl,"type")) {
- NameDescriptor nd=parseClassName(decl.getChild("class").getChild("name"));
- cn.addSuperInterface(nd.toString());
- }
+ ParseNode decl=pnv.elementAt(i);
+ if (isNode(decl,"type")) {
+ NameDescriptor nd=parseClassName(decl.getChild("class").getChild("name"));
+ cn.addSuperInterface(nd.toString());
+ }
}
}
cn.setModifiers(parseModifiersList(pn.getChild("modifiers")));
parseInterfaceBody(cn, pn.getChild("interfacebody"));
return cn;
}
-
+
private void parseInterfaceBody(ClassDescriptor cn, ParseNode pn) {
assert(cn.isInterface());
ParseNode decls=pn.getChild("interface_member_declaration_list");
if (decls!=null) {
ParseNodeVector pnv=decls.getChildren();
for(int i=0; i<pnv.size(); i++) {
- ParseNode decl=pnv.elementAt(i);
- if (isNode(decl,"constant")) {
- parseInterfaceConstant(cn,decl);
- } else if (isNode(decl,"method")) {
- parseInterfaceMethod(cn,decl.getChild("method_declaration"));
- } else throw new Error();
+ ParseNode decl=pnv.elementAt(i);
+ if (isNode(decl,"constant")) {
+ parseInterfaceConstant(cn,decl);
+ } else if (isNode(decl,"method")) {
+ parseInterfaceMethod(cn,decl.getChild("method_declaration"));
+ } else throw new Error();
}
}
}
-
-
-
+
+
+
private void parseInterfaceConstant(ClassDescriptor cn, ParseNode pn) {
if (pn!=null) {
parseFieldDecl(cn,pn.getChild("field_declaration"));
}
throw new Error();
}
-
+
private void parseInterfaceMethod(ClassDescriptor cn, ParseNode pn) {
ParseNode headern=pn.getChild("header");
ParseNode bodyn=pn.getChild("body");
// at the AST level, someday should evolve into a nice compiler
// option *wink*
//if( cn.getSymbol().equals( ***put a class in here like: "Test" ) &&
- // md.getSymbol().equals( ***put your method in here like: "main" )
+ // md.getSymbol().equals( ***put your method in here like: "main" )
//) {
// bn.setStyle( BlockNode.NORMAL );
// System.out.println( bn.printNode( 0 ) );
paramn = paramn.getChild("optional").getFirstChild();
System.out.println("OPTIONAL FOUND!!!!!!!");
} else { optional = false;
- System.out.println("NOT OPTIONAL");}
-
+ System.out.println("NOT OPTIONAL"); }
+
TypeDescriptor type=parseTypeDescriptor(paramn);
String paramname=paramn.getChild("single").getTerminal();
ParseNode snlist=pn.getChild("superIF").getChild("interface_type_list");
ParseNodeVector pnv=snlist.getChildren();
for(int i=0; i<pnv.size(); i++) {
- ParseNode decl=pnv.elementAt(i);
- if (isNode(decl,"type")) {
- NameDescriptor nd=parseClassName(decl.getChild("class").getChild("name"));
- cn.addSuperInterface(nd.toString());
- }
+ ParseNode decl=pnv.elementAt(i);
+ if (isNode(decl,"type")) {
+ NameDescriptor nd=parseClassName(decl.getChild("class").getChild("name"));
+ cn.addSuperInterface(nd.toString());
+ }
}
}
cn.setModifiers(parseModifiersList(pn.getChild("modifiers")));
parseClassBody(cn, pn.getChild("classbody"));
-
+
boolean hasConstructor = false;
- for(Iterator method_it=cn.getMethods(); method_it.hasNext();) {
+ for(Iterator method_it=cn.getMethods(); method_it.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor)method_it.next();
hasConstructor |= md.isConstructor();
}
if((!hasConstructor) && (!cn.isEnum())) {
// add a default constructor for this class
MethodDescriptor md = new MethodDescriptor(new Modifiers(Modifiers.PUBLIC),
- cn.getSymbol(), false);
+ cn.getSymbol(), false);
BlockNode bn=new BlockNode();
state.addTreeCode(md,bn);
md.setDefaultConstructor();
}
}
}
-
+
private void parseLocationOrder(ClassDescriptor cd, ParseNode pn) {
ParseNodeVector pnv = pn.getChildren();
Lattice<String> locOrder =
- new Lattice<String>("_top_","_bottom_");
+ new Lattice<String>("_top_","_bottom_");
Set<String> spinLocSet=new HashSet<String>();
for (int i = 0; i < pnv.size(); i++) {
ParseNode loc = pnv.elementAt(i);
- if(isNode(loc,"location_property")){
- String spinLoc=loc.getChildren().elementAt(0).getLabel();
- spinLocSet.add(spinLoc);
+ if(isNode(loc,"location_property")) {
+ String spinLoc=loc.getChildren().elementAt(0).getLabel();
+ spinLocSet.add(spinLoc);
} else {
- String lowerLoc=loc.getChildren().elementAt(0).getLabel();
- String higherLoc= loc.getChildren().elementAt(1).getLabel();
- locOrder.put(higherLoc, lowerLoc);
- if (locOrder.isIntroducingCycle(higherLoc)) {
- throw new Error("Error: the order relation " + lowerLoc + " < " + higherLoc
- + " introduces a cycle.");
- }
+ String lowerLoc=loc.getChildren().elementAt(0).getLabel();
+ String higherLoc= loc.getChildren().elementAt(1).getLabel();
+ locOrder.put(higherLoc, lowerLoc);
+ if (locOrder.isIntroducingCycle(higherLoc)) {
+ throw new Error("Error: the order relation " + lowerLoc + " < " + higherLoc
+ + " introduces a cycle.");
+ }
}
}
- if(spinLocSet.size()>0){
+ if(spinLocSet.size()>0) {
//checking if location is actually defined in the hierarchy
- for (Iterator iterator = spinLocSet.iterator(); iterator.hasNext();) {
- String locID = (String) iterator.next();
- if(!locOrder.containsKey(locID)){
- throw new Error("Error: The spinning location '"+
- locID + "' is not defined in the hierarchy of the class '"+cd +"'.");
- }
+ for (Iterator iterator = spinLocSet.iterator(); iterator.hasNext(); ) {
+ String locID = (String) iterator.next();
+ if(!locOrder.containsKey(locID)) {
+ throw new Error("Error: The spinning location '"+
+ locID + "' is not defined in the hierarchy of the class '"+cd +"'.");
+ }
}
state.addLocationPropertySet(cd, spinLocSet);
}
state.addLocationOrder(cd, locOrder);
}
-
+
private void parseClassMember(ClassDescriptor cn, ParseNode pn) {
ParseNode fieldnode=pn.getChild("field");
if (fieldnode!=null) {
}
throw new Error();
}
-
+
private ClassDescriptor parseInnerClassDecl(ClassDescriptor cn, ParseNode pn) {
ClassDescriptor icn=new ClassDescriptor(pn.getChild("name").getTerminal(), false);
icn.setImports(mandatoryImports);
icn.setSuper(nd.toString());
} else {
if (!(icn.getSymbol().equals(TypeUtil.ObjectClass)||
- icn.getSymbol().equals(TypeUtil.TagClass)))
- icn.setSuper(TypeUtil.ObjectClass);
+ icn.getSymbol().equals(TypeUtil.TagClass)))
+ icn.setSuper(TypeUtil.ObjectClass);
}
// check inherited interfaces
if (!isEmpty(pn.getChild("superIF").getTerminal())) {
ParseNode snlist=pn.getChild("superIF").getChild("interface_type_list");
ParseNodeVector pnv=snlist.getChildren();
for(int i=0; i<pnv.size(); i++) {
- ParseNode decl=pnv.elementAt(i);
- if (isNode(decl,"type")) {
- NameDescriptor nd=parseClassName(decl.getChild("class").getChild("name"));
- icn.addSuperInterface(nd.toString());
- }
+ ParseNode decl=pnv.elementAt(i);
+ if (isNode(decl,"type")) {
+ NameDescriptor nd=parseClassName(decl.getChild("class").getChild("name"));
+ icn.addSuperInterface(nd.toString());
+ }
}
}
icn.setModifiers(parseModifiersList(pn.getChild("modifiers")));
if(!icn.isStatic()) {
- throw new Error("Error: inner class " + icn.getSymbol() + " in Class " +
- cn.getSymbol() + " is not a nested class and is not supported yet!");
+ throw new Error("Error: inner class " + icn.getSymbol() + " in Class " +
+ cn.getSymbol() + " is not a nested class and is not supported yet!");
}
parseClassBody(icn, pn.getChild("classbody"));
return icn;
}
}
- //Needed to separate out top level call since if a base exists,
+ //Needed to separate out top level call since if a base exists,
//we do not want to apply our resolveName function (i.e. deal with imports)
- //otherwise, if base == null, we do just want to resolve name.
+ //otherwise, if base == null, we just want to resolve the name.
private NameDescriptor parseClassName(ParseNode nn) {
ParseNode base=nn.getChild("base");
ParseNode id=nn.getChild("identifier");
}
return new NameDescriptor(parseClassNameRecursive(base.getChild("name")),classname);
}
-
+
private NameDescriptor parseClassNameRecursive(ParseNode nn) {
ParseNode base=nn.getChild("base");
ParseNode id=nn.getChild("identifier");
}
return new NameDescriptor(parseClassNameRecursive(base.getChild("name")),classname);
}
-
+
//This will get the mapping of a terminal class name
//to a canonical classname (with imports/package locations in them)
private String resolveName(String terminal) {
if(mandatoryImports.containsKey(terminal)) {
- return (String) mandatoryImports.get(terminal);
+ return (String) mandatoryImports.get(terminal);
} else {
if(multiimports.containsKey(terminal)) {
- //Test for error
- Object o = multiimports.get(terminal);
- if(o instanceof Error) {
- throw new Error("Class " + terminal + " is ambiguous. Cause: more than 1 package import contain the same class.");
- } else {
- //At this point, if we found a unique class
- //we can treat it as a single, mandatory import.
- mandatoryImports.put(terminal, o);
- return (String) o;
- }
+ //Test for error
+ Object o = multiimports.get(terminal);
+ if(o instanceof Error) {
+ throw new Error("Class " + terminal + " is ambiguous. Cause: more than 1 package import contain the same class.");
+ } else {
+ //At this point, if we found a unique class
+ //we can treat it as a single, mandatory import.
+ mandatoryImports.put(terminal, o);
+ return (String) o;
+ }
}
}
-
+
return terminal;
}
-
+
//only function difference between this and parseName() is that this
- //does not look for a import mapping.
+ //does not look for an import mapping.
private NameDescriptor parseName(ParseNode nn) {
ParseNode base=nn.getChild("base");
ParseNode id=nn.getChild("identifier");
if(cn.isInterface()) {
// TODO add version for normal Java later
// Can only be PUBLIC or STATIC or FINAL
- if((m.isAbstract()) || (m.isAtomic()) || (m.isNative())
- || (m.isSynchronized())) {
- throw new Error("Error: field in Interface " + cn.getSymbol() + "can only be PUBLIC or STATIC or FINAL");
+ if((m.isAbstract()) || (m.isAtomic()) || (m.isNative())
+ || (m.isSynchronized())) {
+ throw new Error("Error: field in Interface " + cn.getSymbol() + " can only be PUBLIC or STATIC or FINAL");
}
m.addModifier(Modifiers.PUBLIC);
m.addModifier(Modifiers.STATIC);
ExpressionNode en=null;
if (epn!=null) {
- en=parseExpression(epn.getFirstChild());
- en.setNumLine(epn.getFirstChild().getLine());
- if(m.isStatic()) {
- // for static field, the initializer should be considered as a
- // static block
- boolean isfirst = false;
- MethodDescriptor md = (MethodDescriptor)cn.getMethodTable().getFromSameScope("staticblocks");
- if(md == null) {
- // the first static block for this class
- Modifiers m_i=new Modifiers();
- m_i.addModifier(Modifiers.STATIC);
- md = new MethodDescriptor(m_i, "staticblocks", false);
- md.setAsStaticBlock();
- isfirst = true;
- }
- if(isfirst) {
- cn.addMethod(md);
- }
- cn.incStaticBlocks();
- BlockNode bn=new BlockNode();
- NameNode nn=new NameNode(new NameDescriptor(identifier));
- nn.setNumLine(en.getNumLine());
- AssignmentNode an=new AssignmentNode(nn,en,new AssignOperation(1));
- an.setNumLine(pn.getLine());
- bn.addBlockStatement(new BlockExpressionNode(an));
- if(isfirst) {
- state.addTreeCode(md,bn);
- } else {
- BlockNode obn = state.getMethodBody(md);
- for(int ii = 0; ii < bn.size(); ii++) {
- BlockStatementNode bsn = bn.get(ii);
- obn.addBlockStatement(bsn);
- }
- state.addTreeCode(md, obn);
- bn = null;
- }
- en = null;
- }
+ en=parseExpression(epn.getFirstChild());
+ en.setNumLine(epn.getFirstChild().getLine());
+ if(m.isStatic()) {
+ // for static field, the initializer should be considered as a
+ // static block
+ boolean isfirst = false;
+ MethodDescriptor md = (MethodDescriptor)cn.getMethodTable().getFromSameScope("staticblocks");
+ if(md == null) {
+ // the first static block for this class
+ Modifiers m_i=new Modifiers();
+ m_i.addModifier(Modifiers.STATIC);
+ md = new MethodDescriptor(m_i, "staticblocks", false);
+ md.setAsStaticBlock();
+ isfirst = true;
+ }
+ if(isfirst) {
+ cn.addMethod(md);
+ }
+ cn.incStaticBlocks();
+ BlockNode bn=new BlockNode();
+ NameNode nn=new NameNode(new NameDescriptor(identifier));
+ nn.setNumLine(en.getNumLine());
+ AssignmentNode an=new AssignmentNode(nn,en,new AssignOperation(1));
+ an.setNumLine(pn.getLine());
+ bn.addBlockStatement(new BlockExpressionNode(an));
+ if(isfirst) {
+ state.addTreeCode(md,bn);
+ } else {
+ BlockNode obn = state.getMethodBody(md);
+ for(int ii = 0; ii < bn.size(); ii++) {
+ BlockStatementNode bsn = bn.get(ii);
+ obn.addBlockStatement(bsn);
+ }
+ state.addTreeCode(md, obn);
+ bn = null;
+ }
+ en = null;
+ }
}
cn.addField(new FieldDescriptor(m, arrayt, identifier, en, isglobal));
}
}
-
- private void assignAnnotationsToType(Modifiers modifiers, TypeDescriptor type){
+
+ // Copies every annotation collected on the modifier list onto the given
+ // TypeDescriptor as marker annotations. Only marker annotations are
+ // supported at this point (see comment inside the loop).
+ private void assignAnnotationsToType(Modifiers modifiers, TypeDescriptor type) {
Vector<AnnotationDescriptor> annotations=modifiers.getAnnotations();
for(int i=0; i<annotations.size(); i++) {
// it only supports a marker annotation
AnnotationDescriptor an=annotations.elementAt(i);
- type.addAnnotationMarker(an);
- }
+ type.addAnnotationMarker(an);
+ }
}
int innerCount=0;
} else if (isNode(pn,"preinc")||
isNode(pn,"predec")) {
ParseNode left=pn.getFirstChild();
- AssignOperation op=isNode(pn,"preinc") ? new AssignOperation(AssignOperation.PLUSEQ) : new AssignOperation(AssignOperation.MINUSEQ);
+ AssignOperation op=isNode(pn,"preinc")?new AssignOperation(AssignOperation.PLUSEQ):new AssignOperation(AssignOperation.MINUSEQ);
AssignmentNode an=new AssignmentNode(parseExpression(left),
- new LiteralNode("integer",new Integer(1)),op);
+ new LiteralNode("integer",new Integer(1)),op);
an.setNumLine(pn.getLine());
return an;
} else if (isNode(pn,"literal")) {
String literaltype=pn.getTerminal();
ParseNode literalnode=pn.getChild(literaltype);
- Object literal_obj=literalnode.getLiteral();
+ Object literal_obj=literalnode.getLiteral();
LiteralNode ln=new LiteralNode(literaltype, literal_obj);
ln.setNumLine(pn.getLine());
return ln;
boolean isglobal = pn.getChild("global") != null || pn.getChild("scratch") != null;
String disjointId = null;
if (pn.getChild("disjoint") != null) {
- disjointId = pn.getChild("disjoint").getTerminal();
+ disjointId = pn.getChild("disjoint").getTerminal();
}
CreateObjectNode con = new CreateObjectNode(td, isglobal, disjointId);
con.setNumLine(pn.getLine());
for (int i = 0; i < args.size(); i++) {
- con.addArgument((ExpressionNode) args.get(i));
+ con.addArgument((ExpressionNode) args.get(i));
}
/* Could have flag set or tag added here */
if (pn.getChild("flag_list") != null || pn.getChild("tag_list") != null) {
- FlagEffects fe = new FlagEffects(null);
- if (pn.getChild("flag_list") != null)
- parseFlagEffect(fe, pn.getChild("flag_list"));
+ FlagEffects fe = new FlagEffects(null);
+ if (pn.getChild("flag_list") != null)
+ parseFlagEffect(fe, pn.getChild("flag_list"));
- if (pn.getChild("tag_list") != null)
- parseTagEffect(fe, pn.getChild("tag_list"));
- con.addFlagEffects(fe);
+ if (pn.getChild("tag_list") != null)
+ parseTagEffect(fe, pn.getChild("tag_list"));
+ con.addFlagEffects(fe);
}
return con;
} else if (isNode(pn,"createarray")) {
//System.out.println(pn.PPrint(3,true));
boolean isglobal=pn.getChild("global")!=null||
- pn.getChild("scratch")!=null;
+ pn.getChild("scratch")!=null;
String disjointId=null;
if( pn.getChild("disjoint") != null) {
disjointId = pn.getChild("disjoint").getTerminal();
con.addArgument((ExpressionNode)args.get(i));
}
return con;
- } if (isNode(pn,"createarray2")) {
+ }
+ if (isNode(pn,"createarray2")) {
TypeDescriptor td=parseTypeDescriptor(pn);
int num=0;
if (pn.getChild("dims_opt").getLiteral()!=null)
- num=((Integer)pn.getChild("dims_opt").getLiteral()).intValue();
+ num=((Integer)pn.getChild("dims_opt").getLiteral()).intValue();
for(int i=0; i<num; i++)
- td=td.makeArray(state);
+ td=td.makeArray(state);
CreateObjectNode con=new CreateObjectNode(td, false, null);
con.setNumLine(pn.getLine());
- ParseNode ipn = pn.getChild("initializer");
+ ParseNode ipn = pn.getChild("initializer");
Vector initializers=parseVariableInitializerList(ipn);
ArrayInitializerNode ain = new ArrayInitializerNode(initializers);
ain.setNumLine(pn.getLine());
} else if (isNode(pn,"fieldaccess")) {
ExpressionNode en=parseExpression(pn.getChild("base").getFirstChild());
String fieldname=pn.getChild("field").getTerminal();
-
+
FieldAccessNode fan=new FieldAccessNode(en,fieldname);
fan.setNumLine(pn.getLine());
return fan;
} else if (isNode(pn,"cast1")) {
try {
CastNode cn=new CastNode(parseTypeDescriptor(pn.getChild("type")),parseExpression(pn.getChild("exp").getFirstChild()));
- cn.setNumLine(pn.getLine());
+ cn.setNumLine(pn.getLine());
return cn;
} catch (Exception e) {
System.out.println(pn.PPrint(1,true));
//System.out.println("Checking the values of: "+ " td.toString()= " + td.toString()+ " fieldname= " + fieldname);
return new OffsetNode(td, fieldname);
} else if (isNode(pn, "tert")) {
-
+
TertiaryNode tn=new TertiaryNode(parseExpression(pn.getChild("cond").getFirstChild()),
- parseExpression(pn.getChild("trueexpr").getFirstChild()),
- parseExpression(pn.getChild("falseexpr").getFirstChild()) );
+ parseExpression(pn.getChild("trueexpr").getFirstChild()),
+ parseExpression(pn.getChild("falseexpr").getFirstChild()) );
tn.setNumLine(pn.getLine());
-
+
return tn;
} else if (isNode(pn, "instanceof")) {
ExpressionNode exp=parseExpression(pn.getChild("exp").getFirstChild());
InstanceOfNode ion=new InstanceOfNode(exp,t);
ion.setNumLine(pn.getLine());
return ion;
- } else if (isNode(pn, "array_initializer")) {
+ } else if (isNode(pn, "array_initializer")) {
Vector initializers=parseVariableInitializerList(pn);
return new ArrayInitializerNode(initializers);
} else if (isNode(pn, "class_type")) {
// at the AST level, someday should evolve into a nice compiler
// option *wink*
//if( cn.getSymbol().equals( ***put a class in here like: "Test" ) &&
- // md.getSymbol().equals( ***put your method in here like: "main" )
+ // md.getSymbol().equals( ***put your method in here like: "main" )
//) {
// bn.setStyle( BlockNode.NORMAL );
// System.out.println( bn.printNode( 0 ) );
}
state.addTreeCode(md,bn);
}
-
+
private void parseStaticBlockDecl(ClassDescriptor cn, ParseNode pn) {
- // Each class maintains one MethodDecscriptor which combines all its
+ // Each class maintains one MethodDecscriptor which combines all its
// static blocks in their declaration order
boolean isfirst = false;
MethodDescriptor md = (MethodDescriptor)cn.getMethodTable().getFromSameScope("staticblocks");
} else {
BlockNode obn = state.getMethodBody(md);
for(int i = 0; i < bn.size(); i++) {
- BlockStatementNode bsn = bn.get(i);
- obn.addBlockStatement(bsn);
+ BlockStatementNode bsn = bn.get(i);
+ obn.addBlockStatement(bsn);
}
state.addTreeCode(md, obn);
bn = null;
TagDeclarationNode tdn=new TagDeclarationNode(name, type);
tdn.setNumLine(pn.getLine());
-
+
blockstatements.add(tdn);
} else if (isNode(pn,"local_variable_declaration")) {
-
- ParseNode mn=pn.getChild("modifiers");
+
+ ParseNode mn=pn.getChild("modifiers");
TypeDescriptor t=parseTypeDescriptor(pn);
- if(mn!=null){
- Modifiers m=parseModifiersList(mn);
- assignAnnotationsToType(m, t);
- }
+ if(mn!=null) {
+ Modifiers m=parseModifiersList(mn);
+ assignAnnotationsToType(m, t);
+ }
ParseNode vn=pn.getChild("variable_declarators_list");
ParseNodeVector pnv=vn.getChildren();
for(int i=0; i<pnv.size(); i++) {
ExpressionNode en=null;
if (epn!=null)
en=parseExpression(epn.getFirstChild());
-
+
DeclarationNode dn=new DeclarationNode(new VarDescriptor(arrayt, identifier),en);
dn.setNumLine(tmp.getLine());
blockstatements.add(ben);
} else if (isNode(pn,"ifstatement")) {
IfStatementNode isn=new IfStatementNode(parseExpression(pn.getChild("condition").getFirstChild()),
- parseSingleBlock(pn.getChild("statement").getFirstChild()),
- pn.getChild("else_statement")!=null ? parseSingleBlock(pn.getChild("else_statement").getFirstChild()) : null);
+ parseSingleBlock(pn.getChild("statement").getFirstChild()),
+ pn.getChild("else_statement")!=null?parseSingleBlock(pn.getChild("else_statement").getFirstChild()):null);
isn.setNumLine(pn.getLine());
-
+
blockstatements.add(isn);
} else if (isNode(pn,"switch_statement")) {
// TODO add version for normal Java later
SwitchStatementNode ssn=new SwitchStatementNode(parseExpression(pn.getChild("condition").getFirstChild()),
- parseSingleBlock(pn.getChild("statement").getFirstChild()));
+ parseSingleBlock(pn.getChild("statement").getFirstChild()));
ssn.setNumLine(pn.getLine());
blockstatements.add(ssn);
} else if (isNode(pn,"switch_block_list")) {
// TODO add version for normal Java later
ParseNodeVector pnv=pn.getChildren();
for(int i=0; i<pnv.size(); i++) {
- ParseNode sblockdecl=pnv.elementAt(i);
-
- if(isNode(sblockdecl, "switch_block")) {
- ParseNode lpn=sblockdecl.getChild("switch_labels").getChild("switch_label_list");
- ParseNodeVector labelv=lpn.getChildren();
- Vector<SwitchLabelNode> slv = new Vector<SwitchLabelNode>();
- for(int j=0; j<labelv.size(); j++) {
- ParseNode labeldecl=labelv.elementAt(j);
- if(isNode(labeldecl, "switch_label")) {
- SwitchLabelNode sln=new SwitchLabelNode(parseExpression(labeldecl.getChild("constant_expression").getFirstChild()), false);
- sln.setNumLine(labeldecl.getLine());
- slv.addElement(sln);
- } else if(isNode(labeldecl, "default_switch_label")) {
- SwitchLabelNode sln=new SwitchLabelNode(null, true);
- sln.setNumLine(labeldecl.getLine());
- slv.addElement(sln);
- }
- }
-
- SwitchBlockNode sbn=new SwitchBlockNode(slv,
- parseSingleBlock(sblockdecl.getChild("switch_statements").getFirstChild()));
- sbn.setNumLine(sblockdecl.getLine());
-
- blockstatements.add(sbn);
-
- }
+ ParseNode sblockdecl=pnv.elementAt(i);
+
+ if(isNode(sblockdecl, "switch_block")) {
+ ParseNode lpn=sblockdecl.getChild("switch_labels").getChild("switch_label_list");
+ ParseNodeVector labelv=lpn.getChildren();
+ Vector<SwitchLabelNode> slv = new Vector<SwitchLabelNode>();
+ for(int j=0; j<labelv.size(); j++) {
+ ParseNode labeldecl=labelv.elementAt(j);
+ if(isNode(labeldecl, "switch_label")) {
+ SwitchLabelNode sln=new SwitchLabelNode(parseExpression(labeldecl.getChild("constant_expression").getFirstChild()), false);
+ sln.setNumLine(labeldecl.getLine());
+ slv.addElement(sln);
+ } else if(isNode(labeldecl, "default_switch_label")) {
+ SwitchLabelNode sln=new SwitchLabelNode(null, true);
+ sln.setNumLine(labeldecl.getLine());
+ slv.addElement(sln);
+ }
+ }
+
+ SwitchBlockNode sbn=new SwitchBlockNode(slv,
+ parseSingleBlock(sblockdecl.getChild("switch_statements").getFirstChild()));
+ sbn.setNumLine(sblockdecl.getLine());
+
+ blockstatements.add(sbn);
+
+ }
}
} else if (isNode(pn, "trycatchstatement")) {
// TODO add version for normal Java later
ParseNode tpn = pn.getChild("tryblock").getFirstChild();
BlockNode bn=parseBlockHelper(tpn);
blockstatements.add(new SubBlockNode(bn));
-
+
ParseNode fbk = pn.getChild("finallyblock");
if(fbk != null) {
- ParseNode fpn = fbk.getFirstChild();
- BlockNode fbn=parseBlockHelper(fpn);
- blockstatements.add(new SubBlockNode(fbn));
+ ParseNode fpn = fbk.getFirstChild();
+ BlockNode fbn=parseBlockHelper(fpn);
+ blockstatements.add(new SubBlockNode(fbn));
}
} else if (isNode(pn, "throwstatement")) {
// TODO Simply return here
ExpressionNode condition=parseExpression(pn.getChild("condition").getFirstChild());
BlockNode body=parseSingleBlock(pn.getChild("statement").getFirstChild());
if(condition == null) {
- // no condition clause, make a 'true' expression as the condition
- condition = (ExpressionNode)new LiteralNode("boolean", new Boolean(true));
+ // no condition clause, make a 'true' expression as the condition
+ condition = (ExpressionNode) new LiteralNode("boolean", new Boolean(true));
}
LoopNode ln=new LoopNode(init,condition,update,body);
ln.setNumLine(pn.getLine());
ExpressionNode condition=parseExpression(pn.getChild("condition").getFirstChild());
BlockNode body=parseSingleBlock(pn.getChild("statement").getFirstChild());
if(condition == null) {
- // no condition clause, make a 'true' expression as the condition
- condition = (ExpressionNode)new LiteralNode("boolean", new Boolean(true));
+ // no condition clause, make a 'true' expression as the condition
+ condition = (ExpressionNode) new LiteralNode("boolean", new Boolean(true));
}
blockstatements.add(new LoopNode(condition,body,LoopNode.WHILELOOP));
} else if (isNode(pn,"dowhilestatement")) {
ExpressionNode condition=parseExpression(pn.getChild("condition").getFirstChild());
BlockNode body=parseSingleBlock(pn.getChild("statement").getFirstChild());
if(condition == null) {
- // no condition clause, make a 'true' expression as the condition
- condition = (ExpressionNode)new LiteralNode("boolean", new Boolean(true));
+ // no condition clause, make a 'true' expression as the condition
+ condition = (ExpressionNode) new LiteralNode("boolean", new Boolean(true));
}
blockstatements.add(new LoopNode(condition,body,LoopNode.DOWHILELOOP));
} else if (isNode(pn,"sese")) {
ParseNode pnID=pn.getChild("identifier");
String stID=null;
- if( pnID != null ) { stID=pnID.getFirstChild().getTerminal(); }
+ if( pnID != null ) {
+ stID=pnID.getFirstChild().getTerminal();
+ }
SESENode start=new SESENode(stID);
start.setNumLine(pn.getLine());
SESENode end =new SESENode(stID);
- start.setEnd( end );
- end.setStart( start );
+ start.setEnd(end);
+ end.setStart(start);
blockstatements.add(start);
blockstatements.addAll(parseSESEBlock(blockstatements,pn.getChild("body").getFirstChild()));
blockstatements.add(end);
// name_pn.getTerminal() gives you the label
} else if (isNode(pn,"genreach")) {
String graphName = pn.getChild("graphName").getTerminal();
- blockstatements.add( new GenReachNode( graphName ) );
+ blockstatements.add(new GenReachNode(graphName) );
- } else if(isNode(pn,"labeledstatement")){
+ } else if(isNode(pn,"labeledstatement")) {
String label = pn.getChild("name").getTerminal();
BlockNode bn=parseSingleBlock(pn.getChild("statement").getFirstChild());
bn.setLabel(label);
- blockstatements.add(new SubBlockNode(bn));
+ blockstatements.add(new SubBlockNode(bn));
} else {
System.out.println("---------------");
System.out.println(pn.PPrint(3,true));
TypeDescriptor type=new TypeDescriptor(TypeDescriptor.TAG);
md.addTagParameter(type, paramname);
} else {
-
+
TypeDescriptor type=parseTypeDescriptor(paramn);
ParseNode tmp=paramn;
String paramname=tmp.getChild("single").getTerminal();
md.addParameter(type, paramname);
- if(isNode(paramn, "annotation_parameter")){
- ParseNode bodynode=paramn.getChild("annotation_body");
- parseParameterAnnotation(bodynode,type);
- }
-
+ if(isNode(paramn, "annotation_parameter")) {
+ ParseNode bodynode=paramn.getChild("annotation_body");
+ parseParameterAnnotation(bodynode,type);
+ }
+
}
}
}
if (modlist!=null) {
ParseNodeVector pnv=modlist.getChildren();
for(int i=0; i<pnv.size(); i++) {
- ParseNode modn=pnv.elementAt(i);
+ ParseNode modn=pnv.elementAt(i);
if (isNode(modn,"public"))
m.addModifier(Modifiers.PUBLIC);
else if (isNode(modn,"protected"))
m.addModifier(Modifiers.SYNCHRONIZED);
else if (isNode(modn,"atomic"))
m.addModifier(Modifiers.ATOMIC);
- else if (isNode(modn,"abstract"))
- m.addModifier(Modifiers.ABSTRACT);
- else if (isNode(modn,"volatile"))
- m.addModifier(Modifiers.VOLATILE);
- else if (isNode(modn,"transient"))
- m.addModifier(Modifiers.TRANSIENT);
- else if(isNode(modn,"annotation_list"))
- parseAnnotationList(modn,m);
- else{
- throw new Error("Unrecognized Modifier:"+modn.getLabel());}
+ else if (isNode(modn,"abstract"))
+ m.addModifier(Modifiers.ABSTRACT);
+ else if (isNode(modn,"volatile"))
+ m.addModifier(Modifiers.VOLATILE);
+ else if (isNode(modn,"transient"))
+ m.addModifier(Modifiers.TRANSIENT);
+ else if(isNode(modn,"annotation_list"))
+ parseAnnotationList(modn,m);
+ else {
+ throw new Error("Unrecognized Modifier:"+modn.getLabel());
+ }
}
}
return m;
}
-
+
+ // Walks an annotation_list parse node and records each annotation on the
+ // Modifiers object. Handles marker annotations (@A) and single-element
+ // annotations (@A(value)); normal (multi-member) annotations are not yet
+ // supported and raise an Error.
private void parseAnnotationList(ParseNode pn, Modifiers m) {
ParseNodeVector pnv = pn.getChildren();
for (int i = 0; i < pnv.size(); i++) {
ParseNode body_list = pnv.elementAt(i);
if (isNode(body_list, "annotation_body")) {
- ParseNode body_node = body_list.getFirstChild();
- if (isNode(body_node, "marker_annotation")) {
- m.addAnnotation(new AnnotationDescriptor(body_node.getChild("name").getTerminal()));
- } else if (isNode(body_node, "single_annotation")) {
- m.addAnnotation(new AnnotationDescriptor(body_node.getChild("name").getTerminal(),
- body_node.getChild("element_value").getTerminal()));
- } else if (isNode(body_node, "normal_annotation")) {
- throw new Error("Annotation with multiple data members is not supported yet.");
- }
+ ParseNode body_node = body_list.getFirstChild();
+ if (isNode(body_node, "marker_annotation")) {
+ m.addAnnotation(new AnnotationDescriptor(body_node.getChild("name").getTerminal()));
+ } else if (isNode(body_node, "single_annotation")) {
+ m.addAnnotation(new AnnotationDescriptor(body_node.getChild("name").getTerminal(),
+ body_node.getChild("element_value").getTerminal()));
+ } else if (isNode(body_node, "normal_annotation")) {
+ throw new Error("Annotation with multiple data members is not supported yet.");
+ }
}
}
}
-
- private void parseParameterAnnotation(ParseNode body_list,TypeDescriptor type){
+
+ // Attaches a single parameter annotation (parsed from an annotation_body
+ // node) to the parameter's TypeDescriptor. Mirrors parseAnnotationList:
+ // marker and single-element annotations are accepted, multi-member
+ // annotations raise an Error.
+ private void parseParameterAnnotation(ParseNode body_list,TypeDescriptor type) {
ParseNode body_node = body_list.getFirstChild();
if (isNode(body_node, "marker_annotation")) {
type.addAnnotationMarker(new AnnotationDescriptor(body_node.getChild("name").getTerminal()));
} else if (isNode(body_node, "single_annotation")) {
type.addAnnotationMarker(new AnnotationDescriptor(body_node.getChild("name").getTerminal(),
- body_node.getChild("element_value").getTerminal()));
+ body_node.getChild("element_value").getTerminal()));
} else if (isNode(body_node, "normal_annotation")) {
throw new Error("Annotation with multiple data members is not supported yet.");
}
}
-
+
private boolean isNode(ParseNode pn, String label) {
if (pn.getLabel().equals(label))
return true;
public int kind() {
return Kind.CastNode;
}
-
+
public Long evaluate() {
eval = exp.evaluate();
return eval;
public ClassTypeNode(TypeDescriptor td) {
this.td=td;
}
-
+
public TypeDescriptor getTypeDesc() {
return this.td;
}
-
+
public void setTypeDesc(TypeDescriptor td) {
this.td = td;
}
public int kind() {
return Kind.ClassTypeNode;
}
-
+
public Long evaluate() {
eval = null;
return eval; //null;
package IR.Tree;
public class ContinueBreakNode extends BlockStatementNode {
- LoopNode ln;
- boolean isbreak;
-
- public ContinueBreakNode(boolean isbreak) {
- this.isbreak=isbreak;
- }
-
- public boolean isBreak() {
- return isbreak;
- }
-
- public void setLoop(LoopNode l) {
- this.ln=l;
- }
-
- public String printNode(int indent) {
- if( isbreak )
- return "break;";
- else
- return "continue;";
- }
-
- public int kind() {
- return Kind.ContinueBreakNode;
- }
+ // AST statement node representing a 'break' or 'continue'.
+ // The enclosing loop is attached after construction via setLoop().
+ LoopNode ln;
+ // true => 'break'; false => 'continue'
+ boolean isbreak;
+
+ public ContinueBreakNode(boolean isbreak) {
+ this.isbreak=isbreak;
+ }
+
+ // Returns true when this node represents a 'break' statement.
+ public boolean isBreak() {
+ return isbreak;
+ }
+
+ // Records the LoopNode this statement transfers control relative to.
+ public void setLoop(LoopNode l) {
+ this.ln=l;
+ }
+
+ // Pretty-prints the statement; 'indent' is unused for this leaf node.
+ public String printNode(int indent) {
+ if( isbreak )
+ return "break;";
+ else
+ return "continue;";
+ }
+
+ // Kind tag used by visitors to dispatch on node type.
+ public int kind() {
+ return Kind.ContinueBreakNode;
+ }
}
public ExpressionNode getArg(int i) {
return (ExpressionNode) argumentlist.get(i);
}
-
+
public void addArrayInitializer(ArrayInitializerNode ain) {
- this.ain = ain;
+ this.ain = ain;
}
-
+
public ArrayInitializerNode getArrayInitializer() {
return this.ain;
}
public int kind() {
return Kind.CreateObjectNode;
}
-
+
public Long evaluate() {
eval = null;
return eval; //null;
public class ExpressionNode extends TreeNode {
Long eval = null;
-
+
public TypeDescriptor getType() {
throw new Error();
}
public String printNode(int indentlevel) {
return null;
}
-
+
public Long evaluate() {
throw new Error();
}
-
+
public Long getEval() {
return this.eval;
}
public DNFFlag getDNF() {
DNFFlag leftflag=left.getDNF();
- DNFFlag rightflag=right!=null ? right.getDNF() : null;
+ DNFFlag rightflag=right!=null?right.getDNF():null;
if (op.getOp()==Operation.LOGIC_NOT) {
return leftflag.not();
public class GenReachNode extends BlockStatementNode {
String graphName;
- public GenReachNode( String graphName ) {
+ public GenReachNode(String graphName) {
assert graphName != null;
this.graphName = graphName;
}
public int kind() {
return Kind.InstanceOfNode;
}
-
+
public Long evaluate() {
eval = null;
return eval; //null;
HashMap<ClassDescriptor, Set<ClassDescriptor>> implementationMap=new HashMap<ClassDescriptor, Set<ClassDescriptor>>();
/* Maps methods to the methods they call */
-
+
HashMap<MethodDescriptor, Set<MethodDescriptor>> callMap=new HashMap<MethodDescriptor, Set<MethodDescriptor>>();
HashMap<MethodDescriptor, Set<MethodDescriptor>> revCallMap=new HashMap<MethodDescriptor, Set<MethodDescriptor>>();
Descriptor md=(Descriptor)tovisit.iterator().next();
tovisit.remove(md);
Set s=getCalleeSet(md);
-
+
if (s!=null) {
- for(Iterator it=s.iterator(); it.hasNext();) {
+ for(Iterator it=s.iterator(); it.hasNext(); ) {
MethodDescriptor md2=(MethodDescriptor)it.next();
if( !callable.contains(md2) ) {
callable.add(md2);
set.add(d);
return set;
}
-
+
public boolean isCalled(MethodDescriptor md) {
return !getMethods(md).isEmpty();
}
}
public Set getFirstReachableMethodContainingSESE(Descriptor d,
- Set<MethodDescriptor> methodsContainingSESEs) {
+ Set<MethodDescriptor> methodsContainingSESEs) {
throw new Error("");
}
try {
sc.checkMethodBody(md.getClassDesc(), md);
} catch( Error e ) {
- System.out.println( "Error in "+md );
+ System.out.println("Error in "+md);
throw e;
}
}
-
+
public boolean isInit(ClassDescriptor cd) {
return classStatus.get(cd)!=null&&classStatus.get(cd)>=CDINIT;
}
void initClassDesc(ClassDescriptor cd, int init) {
if (classStatus.get(cd)==null||classStatus.get(cd)!=init) {
if (classStatus.get(cd)==null) {
- MethodDescriptor mdstaticinit = (MethodDescriptor)cd.getMethodTable().get("staticblocks");
+ MethodDescriptor mdstaticinit = (MethodDescriptor)cd.getMethodTable().get("staticblocks");
if (mdstaticinit!=null) {
discovered.add(mdstaticinit);
toprocess.push(mdstaticinit);
classStatus.put(cd, init);
}
}
-
+
void computeFixPoint() {
while(!toprocess.isEmpty()) {
MethodDescriptor md=toprocess.pop();
}
//make sure every called method descriptor has a flat method
- for(MethodDescriptor callmd:canCall.keySet())
+ for(MethodDescriptor callmd : canCall.keySet())
bf.addJustFlatMethod(callmd);
}
-
+
void processCall(MethodDescriptor md, FlatCall fcall) {
MethodDescriptor callmd=fcall.getMethod();
//make sure we have a FlatMethod for the base method...
if (!invocationMap.containsKey(cn))
invocationMap.put(cn, new HashSet<Pair<MethodDescriptor,MethodDescriptor>>());
invocationMap.get(cn).add(new Pair<MethodDescriptor, MethodDescriptor>(md, callmd));
-
+
if (impSet!=null) {
- for(ClassDescriptor cdactual:impSet) {
- searchimp:
+ for(ClassDescriptor cdactual : impSet) {
+ searchimp :
while(cdactual!=null) {
Set possiblematches=cdactual.getMethodTable().getSetFromSameScope(callmd.getSymbol());
-
- for(Iterator matchit=possiblematches.iterator(); matchit.hasNext();) {
+
+ for(Iterator matchit=possiblematches.iterator(); matchit.hasNext(); ) {
MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
if (callmd.matches(matchmd)) {
//Found the method that will be called
break searchimp;
}
}
-
+
//Didn't find method...look in super class
cdactual=cdactual.getSuperDesc();
}
Stack<ClassDescriptor> tovisit=new Stack<ClassDescriptor>();
tovisit.add(cdnew);
-
+
while(!tovisit.isEmpty()) {
ClassDescriptor cdcurr=tovisit.pop();
if (!implementationMap.containsKey(cdcurr))
if (implementationMap.get(cdcurr).add(cdnew)) {
//new implementation...see if it affects implementationmap
if (invocationMap.containsKey(cdcurr)) {
- for(Pair<MethodDescriptor, MethodDescriptor> mdpair:invocationMap.get(cdcurr)) {
+ for(Pair<MethodDescriptor, MethodDescriptor> mdpair : invocationMap.get(cdcurr)) {
MethodDescriptor md=mdpair.getFirst();
MethodDescriptor callmd=mdpair.getSecond();
ClassDescriptor cdactual=cdnew;
-
- searchimp:
+
+searchimp:
while(cdactual!=null) {
Set possiblematches=cdactual.getMethodTable().getSetFromSameScope(callmd.getSymbol());
- for(Iterator matchit=possiblematches.iterator(); matchit.hasNext();) {
+ for(Iterator matchit=possiblematches.iterator(); matchit.hasNext(); ) {
MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
if (callmd.matches(matchmd)) {
//Found the method that will be called
break searchimp;
}
}
-
+
//Didn't find method...look in super class
cdactual=cdactual.getSuperDesc();
}
}
if (cdcurr.getSuperDesc()!=null)
tovisit.push(cdcurr.getSuperDesc());
- for(Iterator interit=cdcurr.getSuperInterfaces();interit.hasNext();) {
+ for(Iterator interit=cdcurr.getSuperInterfaces(); interit.hasNext(); ) {
ClassDescriptor cdinter=(ClassDescriptor) interit.next();
tovisit.push(cdinter);
}
void processFlatMethod(MethodDescriptor md) {
if (!callMap.containsKey(md))
callMap.put(md, new HashSet<MethodDescriptor>());
-
+
FlatMethod fm=state.getMethodFlat(md);
- for(FlatNode fn:fm.getNodeSet()) {
+ for(FlatNode fn : fm.getNodeSet()) {
switch(fn.kind()) {
- case FKind.FlatCall: {
- FlatCall fcall=(FlatCall)fn;
- processCall(md, fcall);
- break;
+ case FKind.FlatCall : {
+ FlatCall fcall=(FlatCall)fn;
+ processCall(md, fcall);
+ break;
}
+
case FKind.FlatNew: {
FlatNew fnew=(FlatNew)fn;
processNew(fnew);
public int kind() {
return Kind.LiteralNode;
}
-
+
public Long evaluate() {
eval = null;
- if(this.type.isChar() || this.type.isInt()) {
+ if(this.type.isChar() || this.type.isInt()) {
eval = Long.parseLong(this.value.toString());
}
return eval;
public void addArgument(ExpressionNode en) {
argumentlist.add(en);
}
-
+
public void setArgument(ExpressionNode en, int index) {
argumentlist.setElementAt(en, index);
}
public int kind() {
return Kind.MethodInvokeNode;
}
-
+
public Long evaluate() {
eval = null;
return eval; //null;
// java annotation can be intermixed freely with modifiers
// so Modifiers maintains the list of annotations for later usage
- Vector<AnnotationDescriptor> annotations;
+ Vector<AnnotationDescriptor> annotations;
private int value;
public Modifiers() {
public Modifiers(int v) {
value=v;
}
-
- public void addAnnotation(AnnotationDescriptor an){
+
+ public void addAnnotation(AnnotationDescriptor an) {
annotations.add(an);
}
-
- public Vector<AnnotationDescriptor> getAnnotations(){
+
+ public Vector<AnnotationDescriptor> getAnnotations() {
return annotations;
}
public boolean isAtomic() {
return ((value&ATOMIC)!=0);
}
-
+
public boolean isAbstract() {
return ((value&ABSTRACT)!=0);
}
public boolean isFinal() {
return ((value&FINAL)!=0);
}
-
+
public boolean isVolatile() {
return ((value&VOLATILE)!= 0);
}
-
+
public boolean isTransient() {
return ((value & TRANSIENT) != 0);
}
public ExpressionNode getExpression() {
return en;
}
-
+
public ClassDescriptor getClassDesc() {
return this.cd;
}
-
+
public void setClassDesc(ClassDescriptor cd) {
this.cd = cd;
}
return new TypeDescriptor(TypeDescriptor.TAG);
else if(vd != null) {
return ((VarDescriptor)vd).getType();
- } if(cd != null) {
+ }
+ if(cd != null) {
TypeDescriptor tp = new TypeDescriptor(cd);
tp.setClassNameRef();
return tp;
} else {
return null;
}
-
+
}
-
+
public TypeDescriptor getClassType() {
if(cd != null) {
TypeDescriptor tp = new TypeDescriptor(cd);
public int kind() {
return Kind.NameNode;
}
-
+
public Long evaluate() {
eval = null;
if(fd != null ) {
if(fd.isFinal() && fd.isStatic()) {
- eval = fd.getExpressionNode().evaluate();
+ eval = fd.getExpressionNode().evaluate();
} else if(fd.isEnum()) {
- eval = Long.valueOf((long)fd.enumValue());
- }
+ eval = Long.valueOf((long)fd.enumValue());
+ }
} else if(en!= null) {
eval = en.evaluate();
}
public int kind() {
return Kind.OffsetNode;
}
-
+
public Long evaluate() {
eval = null;
return eval; //null;
public int kind() {
return Kind.OpNode;
}
-
+
public Long evaluate() {
eval = null;
Long l = this.left.evaluate();
if(l != null) {
if (this.op.getOp() == Operation.LOGIC_NOT)
- eval = Long.valueOf(l.longValue() > 0 ? 0 : 1);
+ eval = Long.valueOf(l.longValue() > 0?0:1);
else if (this.op.getOp() == Operation.COMP)
- eval = Long.valueOf((long)(~l.longValue()));
+ eval = Long.valueOf((long)(~l.longValue()));
else if (this.op.getOp() == Operation.UNARYMINUS)
- eval = Long.valueOf(-l.longValue() );
+ eval = Long.valueOf(-l.longValue() );
else if (this.op.getOp() == Operation.UNARYPLUS)
- eval = Long.valueOf(+l.longValue());
+ eval = Long.valueOf(+l.longValue());
else {
- Long r = this.right.evaluate();
- if(r != null) {
- //if (this.op.getOp() == Operation.LOGIC_OR)
- // return Long.valueOf((long)(l.longValue() || r.longValue()));
- //else if (this.op.getOp() == Operation.LOGIC_AND)
- // return Long.valueOf((long)(l.longValue() && r.longValue()));
- /*else */if (this.op.getOp() == Operation.BIT_OR)
- eval = Long.valueOf(l.longValue() | r.longValue());
- else if (this.op.getOp() == Operation.BIT_XOR)
- eval = Long.valueOf(l.longValue() ^ r.longValue());
- else if (this.op.getOp() == Operation.BIT_AND)
- eval = Long.valueOf(l.longValue() & r.longValue());
- else if (this.op.getOp() == Operation.EQUAL)
- eval = Long.valueOf((l.longValue() == r.longValue())?1:0);
- else if (this.op.getOp() == Operation.NOTEQUAL)
- eval = Long.valueOf((l.longValue() != r.longValue())?1:0);
- else if (this.op.getOp() == Operation.LT)
- eval = Long.valueOf((l.longValue() < r.longValue())?1:0);
- else if (this.op.getOp() == Operation.GT)
- eval = Long.valueOf((l.longValue() > r.longValue())?1:0);
- else if (this.op.getOp() == Operation.LTE)
- eval = Long.valueOf((l.longValue() <= r.longValue())?1:0);
- else if (this.op.getOp() == Operation.GTE)
- eval = Long.valueOf((l.longValue() >= r.longValue())?1:0);
- else if (this.op.getOp() == Operation.LEFTSHIFT)
- eval = Long.valueOf(l.longValue() << r.longValue());
- else if (this.op.getOp() == Operation.RIGHTSHIFT)
- eval = Long.valueOf(l.longValue() >> r.longValue());
- else if (this.op.getOp() == Operation.URIGHTSHIFT)
- eval = Long.valueOf(l.longValue() >>> r.longValue());
- else if (this.op.getOp() == Operation.SUB)
- eval = Long.valueOf(l.longValue() - r.longValue());
- else if (this.op.getOp() == Operation.ADD)
- eval = Long.valueOf(l.longValue() + r.longValue());
- else if (this.op.getOp() == Operation.MULT)
- eval = Long.valueOf(l.longValue() * r.longValue());
- else if (this.op.getOp() == Operation.DIV)
- eval = Long.valueOf(l.longValue() / r.longValue());
- else if (this.op.getOp() == Operation.MOD)
- eval = Long.valueOf(l.longValue() % r.longValue());
- else if (this.op.getOp() == Operation.ASSIGN)
- eval = Long.valueOf(r.longValue());
- }
+ Long r = this.right.evaluate();
+ if(r != null) {
+ //if (this.op.getOp() == Operation.LOGIC_OR)
+ // return Long.valueOf((long)(l.longValue() || r.longValue()));
+ //else if (this.op.getOp() == Operation.LOGIC_AND)
+ // return Long.valueOf((long)(l.longValue() && r.longValue()));
+ /*else */ if (this.op.getOp() == Operation.BIT_OR)
+ eval = Long.valueOf(l.longValue() | r.longValue());
+ else if (this.op.getOp() == Operation.BIT_XOR)
+ eval = Long.valueOf(l.longValue() ^ r.longValue());
+ else if (this.op.getOp() == Operation.BIT_AND)
+ eval = Long.valueOf(l.longValue() & r.longValue());
+ else if (this.op.getOp() == Operation.EQUAL)
+ eval = Long.valueOf((l.longValue() == r.longValue())?1:0);
+ else if (this.op.getOp() == Operation.NOTEQUAL)
+ eval = Long.valueOf((l.longValue() != r.longValue())?1:0);
+ else if (this.op.getOp() == Operation.LT)
+ eval = Long.valueOf((l.longValue() < r.longValue())?1:0);
+ else if (this.op.getOp() == Operation.GT)
+ eval = Long.valueOf((l.longValue() > r.longValue())?1:0);
+ else if (this.op.getOp() == Operation.LTE)
+ eval = Long.valueOf((l.longValue() <= r.longValue())?1:0);
+ else if (this.op.getOp() == Operation.GTE)
+ eval = Long.valueOf((l.longValue() >= r.longValue())?1:0);
+ else if (this.op.getOp() == Operation.LEFTSHIFT)
+ eval = Long.valueOf(l.longValue() << r.longValue());
+ else if (this.op.getOp() == Operation.RIGHTSHIFT)
+ eval = Long.valueOf(l.longValue() >> r.longValue());
+ else if (this.op.getOp() == Operation.URIGHTSHIFT)
+ eval = Long.valueOf(l.longValue() >>> r.longValue());
+ else if (this.op.getOp() == Operation.SUB)
+ eval = Long.valueOf(l.longValue() - r.longValue());
+ else if (this.op.getOp() == Operation.ADD)
+ eval = Long.valueOf(l.longValue() + r.longValue());
+ else if (this.op.getOp() == Operation.MULT)
+ eval = Long.valueOf(l.longValue() * r.longValue());
+ else if (this.op.getOp() == Operation.DIV)
+ eval = Long.valueOf(l.longValue() / r.longValue());
+ else if (this.op.getOp() == Operation.MOD)
+ eval = Long.valueOf(l.longValue() % r.longValue());
+ else if (this.op.getOp() == Operation.ASSIGN)
+ eval = Long.valueOf(r.longValue());
+ }
}
}
return eval;
}
public ParseNode getRoot() {
- return (parent == null) ? this : parent.getRoot();
+ return (parent == null)?this:parent.getRoot();
}
public String getTerminal() {
protected SESENode end;
protected FlatSESEEnterNode enter;
- protected FlatSESEExitNode exit;
+ protected FlatSESEExitNode exit;
- public SESENode( String id ) {
+ public SESENode(String id) {
this.id = id;
start = null;
end = null;
return id;
}
- public void setStart( SESENode n ) {
+ public void setStart(SESENode n) {
start = n;
}
- public void setEnd( SESENode n ) {
+ public void setEnd(SESENode n) {
end = n;
}
return end;
}
- public void setFlatEnter( FlatSESEEnterNode fsen ) {
+ public void setFlatEnter(FlatSESEEnterNode fsen) {
enter = fsen;
}
- public void setFlatExit( FlatSESEExitNode fsexn ) {
+ public void setFlatExit(FlatSESEExitNode fsexn) {
exit = fsexn;
}
Stack loopstack;
HashSet toanalyze;
HashMap<ClassDescriptor, Integer> completed;
-
- //This is the class mappings for a particular file based
- //on the import names. Maps class to canonical class name.
+
+ //This is the class mappings for a particular file based
+ //on the import names. Maps class to canonical class name.
static Hashtable singleImportMap;
public static final int NOCHECK=0;
public static final int REFERENCE=1;
public static final int INIT=2;
-
+
boolean checkAll;
public boolean hasLayout(ClassDescriptor cd) {
}
ClassDescriptor cd=typeutil.getClass(classname, toanalyze);
checkClass(cd, fullcheck);
-
+
return cd;
}
-
+
public void checkClass(ClassDescriptor cd) {
checkClass(cd, INIT);
}
if (!completed.containsKey(cd)||completed.get(cd)<fullcheck) {
int oldstatus=completed.containsKey(cd)?completed.get(cd):0;
completed.put(cd, fullcheck);
-
+
if (fullcheck>=REFERENCE&&oldstatus<INIT) {
//Set superclass link up
if (cd.getSuper()!=null) {
cd.getFlagTable().setParent(cd.getSuperDesc().getFlagTable());
}
}
- // Link together Field, Method tables do classes inherit these from
+ // Link together Field, Method tables do classes inherit these from
// their ancestor interfaces
Vector<String> sifv = cd.getSuperInterface();
for(int i = 0; i < sifv.size(); i++) {
}
if (oldstatus<INIT&&fullcheck>=INIT) {
/* Check to see that fields are well typed */
- for(Iterator field_it=cd.getFields(); field_it.hasNext();) {
+ for(Iterator field_it=cd.getFields(); field_it.hasNext(); ) {
FieldDescriptor fd=(FieldDescriptor)field_it.next();
checkField(cd,fd);
}
- for(Iterator method_it=cd.getMethods(); method_it.hasNext();) {
+ for(Iterator method_it=cd.getMethods(); method_it.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor)method_it.next();
checkMethod(cd,md);
}
while (!toanalyze.isEmpty()) {
Object obj = toanalyze.iterator().next();
if (obj instanceof TaskDescriptor) {
- toanalyze.remove(obj);
- TaskDescriptor td = (TaskDescriptor) obj;
- try {
- checkTask(td);
- } catch (Error e) {
- System.out.println("Error in " + td);
- throw e;
- }
+ toanalyze.remove(obj);
+ TaskDescriptor td = (TaskDescriptor) obj;
+ try {
+ checkTask(td);
+ } catch (Error e) {
+ System.out.println("Error in " + td);
+ throw e;
+ }
} else {
- ClassDescriptor cd = (ClassDescriptor) obj;
- toanalyze.remove(cd);
- //set the class mappings based on imports.
- singleImportMap = cd.getSingleImportMappings();
-
- // need to initialize typeutil object here...only place we can
- // get class descriptors without first calling getclass
- getClass(cd, cd.getSymbol());
- for (Iterator method_it = cd.getMethods(); method_it.hasNext();) {
- MethodDescriptor md = (MethodDescriptor) method_it.next();
- try {
- checkMethodBody(cd, md);
- } catch (Error e) {
- System.out.println("Error in " + md);
- throw e;
- }
- }
+ ClassDescriptor cd = (ClassDescriptor) obj;
+ toanalyze.remove(cd);
+ //set the class mappings based on imports.
+ singleImportMap = cd.getSingleImportMappings();
+
+ // need to initialize typeutil object here...only place we can
+ // get class descriptors without first calling getclass
+ getClass(cd, cd.getSymbol());
+ for (Iterator method_it = cd.getMethods(); method_it.hasNext(); ) {
+ MethodDescriptor md = (MethodDescriptor) method_it.next();
+ try {
+ checkMethodBody(cd, md);
+ } catch (Error e) {
+ System.out.println("Error in " + md);
+ throw e;
+ }
+ }
}
}
}
ClassDescriptor superdesc=cd.getSuperDesc();
if (superdesc!=null) {
Set possiblematches=superdesc.getMethodTable().getSet(md.getSymbol());
- for(Iterator methodit=possiblematches.iterator(); methodit.hasNext();) {
+ for(Iterator methodit=possiblematches.iterator(); methodit.hasNext(); ) {
MethodDescriptor matchmd=(MethodDescriptor)methodit.next();
if (md.matches(matchmd)) {
if (matchmd.getModifiers().isFinal()) {
case Kind.IfStatementNode:
checkIfStatementNode(md, nametable, (IfStatementNode)bsn);
return;
-
+
case Kind.SwitchStatementNode:
checkSwitchStatementNode(md, nametable, (SwitchStatementNode)bsn);
return;
return;
case Kind.ContinueBreakNode:
- checkContinueBreakNode(md, nametable, (ContinueBreakNode) bsn);
- return;
+ checkContinueBreakNode(md, nametable, (ContinueBreakNode) bsn);
+ return;
case Kind.SESENode:
case Kind.GenReachNode:
}
void checkContinueBreakNode(Descriptor md, SymbolTable nametable, ContinueBreakNode cbn) {
- if (loopstack.empty())
- throw new Error("continue/break outside of loop");
- Object o = loopstack.peek();
- if(o instanceof LoopNode) {
- LoopNode ln=(LoopNode)o;
- cbn.setLoop(ln);
- }
+ if (loopstack.empty())
+ throw new Error("continue/break outside of loop");
+ Object o = loopstack.peek();
+ if(o instanceof LoopNode) {
+ LoopNode ln=(LoopNode)o;
+ cbn.setLoop(ln);
+ }
}
void checkReturnNode(Descriptor d, SymbolTable nametable, ReturnNode rn) {
if (isn.getFalseBlock()!=null)
checkBlockNode(md, nametable, isn.getFalseBlock());
}
-
+
void checkSwitchStatementNode(Descriptor md, SymbolTable nametable, SwitchStatementNode ssn) {
checkExpressionNode(md, nametable, ssn.getCondition(), new TypeDescriptor(TypeDescriptor.INT));
-
+
BlockNode sbn = ssn.getSwitchBody();
boolean hasdefault = false;
for(int i = 0; i < sbn.size(); i++) {
boolean containdefault = checkSwitchBlockNode(md, nametable, (SwitchBlockNode)sbn.get(i));
if(hasdefault && containdefault) {
- throw new Error("Error: duplicate default branch in switch-case statement in Method: " + md.getSymbol());
+ throw new Error("Error: duplicate default branch in switch-case statement in Method: " + md.getSymbol());
}
hasdefault = containdefault;
}
}
-
+
boolean checkSwitchBlockNode(Descriptor md, SymbolTable nametable, SwitchBlockNode sbn) {
Vector<SwitchLabelNode> slnv = sbn.getSwitchConditions();
int defaultb = 0;
for(int i = 0; i < slnv.size(); i++) {
if(slnv.elementAt(i).isdefault) {
- defaultb++;
+ defaultb++;
} else {
- checkConstantExpressionNode(md, nametable, slnv.elementAt(i).getCondition(), new TypeDescriptor(TypeDescriptor.INT));
+ checkConstantExpressionNode(md, nametable, slnv.elementAt(i).getCondition(), new TypeDescriptor(TypeDescriptor.INT));
}
}
if(defaultb > 1) {
return (defaultb > 0);
}
}
-
+
void checkConstantExpressionNode(Descriptor md, SymbolTable nametable, ExpressionNode en, TypeDescriptor td) {
switch(en.kind()) {
case Kind.FieldAccessNode:
checkFieldAccessNode(md,nametable,(FieldAccessNode)en,td);
return;
-
+
case Kind.LiteralNode:
checkLiteralNode(md,nametable,(LiteralNode)en,td);
return;
-
+
case Kind.NameNode:
checkNameNode(md,nametable,(NameNode)en,td);
return;
-
+
case Kind.OpNode:
checkOpNode(md, nametable, (OpNode)en, td);
return;
case Kind.TertiaryNode:
checkTertiaryNode(md, nametable, (TertiaryNode)en, td);
return;
-
+
case Kind.InstanceOfNode:
checkInstanceOfNode(md, nametable, (InstanceOfNode) en, td);
return;
case Kind.ArrayInitializerNode:
checkArrayInitializerNode(md, nametable, (ArrayInitializerNode) en, td);
return;
-
+
case Kind.ClassTypeNode:
checkClassTypeNode(md, nametable, (ClassTypeNode) en, td);
return;
void checkClassTypeNode(Descriptor md, SymbolTable nametable, ClassTypeNode tn, TypeDescriptor td) {
checkTypeDescriptor(((md instanceof MethodDescriptor)?((MethodDescriptor)md).getClassDesc():null), tn.getType());
}
-
+
void checkCastNode(Descriptor md, SymbolTable nametable, CastNode cn, TypeDescriptor td) {
/* Get type descriptor */
if (cn.getType()==null) {
if(ltd.isClassNameRef()) {
// the field access is using a class name directly
if(ltd.getClassDesc().isEnum()) {
- int value = ltd.getClassDesc().getEnumConstant(fieldname);
- if(-1 == value) {
- // check if this field is an enum constant
- throw new Error(fieldname + " is not an enum constant in "+fan.printNode(0)+" in "+md);
- }
- fd = new FieldDescriptor(new Modifiers(Modifiers.PUBLIC|Modifiers.FINAL), new TypeDescriptor(TypeDescriptor.INT), fieldname, null, false);
- fd.setAsEnum();
- fd.setEnumValue(value);
+ int value = ltd.getClassDesc().getEnumConstant(fieldname);
+ if(-1 == value) {
+ // check if this field is an enum constant
+ throw new Error(fieldname + " is not an enum constant in "+fan.printNode(0)+" in "+md);
+ }
+ fd = new FieldDescriptor(new Modifiers(Modifiers.PUBLIC|Modifiers.FINAL), new TypeDescriptor(TypeDescriptor.INT), fieldname, null, false);
+ fd.setAsEnum();
+ fd.setEnumValue(value);
} else if(fd == null) {
- throw new Error("Could not find field "+ fieldname + " in "+fan.printNode(0)+" in "+md + " (Line: "+fan.getNumLine()+")");
+ throw new Error("Could not find field "+ fieldname + " in "+fan.printNode(0)+" in "+md + " (Line: "+fan.getNumLine()+")");
} else if(fd.isStatic()) {
- // check if this field is a static field
- if(fd.getExpressionNode() != null) {
- checkExpressionNode(md,nametable,fd.getExpressionNode(),null);
- }
+ // check if this field is a static field
+ if(fd.getExpressionNode() != null) {
+ checkExpressionNode(md,nametable,fd.getExpressionNode(),null);
+ }
} else {
- throw new Error("Dereference of the non-static field "+ fieldname + " in "+fan.printNode(0)+" in "+md);
+ throw new Error("Dereference of the non-static field "+ fieldname + " in "+fan.printNode(0)+" in "+md);
}
- }
+ }
if (fd==null)
throw new Error("Unknown field "+fieldname + " in "+fan.printNode(0)+" in "+md);
FieldDescriptor fdwr=(FieldDescriptor) ltdwr.getClassDesc().getFieldTable().get(fieldnamewr);
fan.setField(fdwr);
if (fdwr==null)
- throw new Error("Unknown field "+fieldnamewr + " in "+fan.printNode(0)+" in "+md);
+ throw new Error("Unknown field "+fieldnamewr + " in "+fan.printNode(0)+" in "+md);
} else {
fan.setField(fd);
}
if (td!=null)
if (!typeutil.isSuperorType(td,ln.getType())) {
- Long l = ln.evaluate();
- if((ln.getType().isByte() || ln.getType().isShort()
- || ln.getType().isChar() || ln.getType().isInt())
- && (l != null)
- && (td.isByte() || td.isShort() || td.isChar()
- || td.isInt() || td.isLong())) {
- long lnvalue = l.longValue();
- if((td.isByte() && ((lnvalue > 127) || (lnvalue < -128)))
- || (td.isShort() && ((lnvalue > 32767) || (lnvalue < -32768)))
- || (td.isChar() && ((lnvalue > 65535) || (lnvalue < 0)))
- || (td.isInt() && ((lnvalue > 2147483647) || (lnvalue < -2147483648)))
- || (td.isLong() && ((lnvalue > 9223372036854775807L) || (lnvalue < -9223372036854775808L)))) {
- throw new Error("Field node returns "+ln.getType()+", but need "+td+" in "+md);
- }
- } else {
- throw new Error("Field node returns "+ln.getType()+", but need "+td+" in "+md);
- }
+ Long l = ln.evaluate();
+ if((ln.getType().isByte() || ln.getType().isShort()
+ || ln.getType().isChar() || ln.getType().isInt())
+ && (l != null)
+ && (td.isByte() || td.isShort() || td.isChar()
+ || td.isInt() || td.isLong())) {
+ long lnvalue = l.longValue();
+ if((td.isByte() && ((lnvalue > 127) || (lnvalue < -128)))
+ || (td.isShort() && ((lnvalue > 32767) || (lnvalue < -32768)))
+ || (td.isChar() && ((lnvalue > 65535) || (lnvalue < 0)))
+ || (td.isInt() && ((lnvalue > 2147483647) || (lnvalue < -2147483648)))
+ || (td.isLong() && ((lnvalue > 9223372036854775807L) || (lnvalue < -9223372036854775808L)))) {
+ throw new Error("Field node returns "+ln.getType()+", but need "+td+" in "+md);
+ }
+ } else {
+ throw new Error("Field node returns "+ln.getType()+", but need "+td+" in "+md);
+ }
}
}
} else {
String varname=nd.toString();
if(varname.equals("this")) {
- // "this"
- nn.setVar((VarDescriptor)nametable.get("this"));
- return;
+ // "this"
+ nn.setVar((VarDescriptor)nametable.get("this"));
+ return;
}
Descriptor d=(Descriptor)nametable.get(varname);
if (d==null) {
- ClassDescriptor cd = null;
- if((md instanceof MethodDescriptor) && ((MethodDescriptor)md).isStaticBlock()) {
- // this is a static block, all the accessed fields should be static field
- cd = ((MethodDescriptor)md).getClassDesc();
- SymbolTable fieldtbl = cd.getFieldTable();
- FieldDescriptor fd=(FieldDescriptor)fieldtbl.get(varname);
- if((fd == null) || (!fd.isStatic())){
- // no such field in the class, check if this is a class
- if(varname.equals("this")) {
- throw new Error("Error: access this obj in a static block");
- }
- cd=getClass(((md instanceof MethodDescriptor)?((MethodDescriptor)md).getClassDesc():null), varname);
- if(cd != null) {
- // this is a class name
- nn.setClassDesc(cd);
- return;
- } else {
- throw new Error("Name "+varname+" should not be used in static block: "+md);
- }
- } else {
- // this is a static field
- nn.setField(fd);
- nn.setClassDesc(cd);
- return;
- }
- } else {
- // check if the var is a static field of the class
- if(md instanceof MethodDescriptor) {
- cd = ((MethodDescriptor)md).getClassDesc();
- FieldDescriptor fd = (FieldDescriptor)cd.getFieldTable().get(varname);
- if((fd != null) && (fd.isStatic())) {
- nn.setField(fd);
- nn.setClassDesc(cd);
- if (td!=null)
- if (!typeutil.isSuperorType(td,nn.getType()))
- throw new Error("Field node returns "+nn.getType()+", but need "+td);
- return;
- } else if(fd != null) {
- throw new Error("Name "+varname+" should not be used in " + md);
- }
- }
- cd=getClass(((md instanceof MethodDescriptor)?((MethodDescriptor)md).getClassDesc():null), varname);
- if(cd != null) {
- // this is a class name
- nn.setClassDesc(cd);
- return;
- } else {
- throw new Error("Name "+varname+" undefined in: "+md);
- }
- }
+ ClassDescriptor cd = null;
+ if((md instanceof MethodDescriptor) && ((MethodDescriptor)md).isStaticBlock()) {
+ // this is a static block, all the accessed fields should be static field
+ cd = ((MethodDescriptor)md).getClassDesc();
+ SymbolTable fieldtbl = cd.getFieldTable();
+ FieldDescriptor fd=(FieldDescriptor)fieldtbl.get(varname);
+ if((fd == null) || (!fd.isStatic())) {
+ // no such field in the class, check if this is a class
+ if(varname.equals("this")) {
+ throw new Error("Error: access this obj in a static block");
+ }
+ cd=getClass(((md instanceof MethodDescriptor)?((MethodDescriptor)md).getClassDesc():null), varname);
+ if(cd != null) {
+ // this is a class name
+ nn.setClassDesc(cd);
+ return;
+ } else {
+ throw new Error("Name "+varname+" should not be used in static block: "+md);
+ }
+ } else {
+ // this is a static field
+ nn.setField(fd);
+ nn.setClassDesc(cd);
+ return;
+ }
+ } else {
+ // check if the var is a static field of the class
+ if(md instanceof MethodDescriptor) {
+ cd = ((MethodDescriptor)md).getClassDesc();
+ FieldDescriptor fd = (FieldDescriptor)cd.getFieldTable().get(varname);
+ if((fd != null) && (fd.isStatic())) {
+ nn.setField(fd);
+ nn.setClassDesc(cd);
+ if (td!=null)
+ if (!typeutil.isSuperorType(td,nn.getType()))
+ throw new Error("Field node returns "+nn.getType()+", but need "+td);
+ return;
+ } else if(fd != null) {
+ throw new Error("Name "+varname+" should not be used in " + md);
+ }
+ }
+ cd=getClass(((md instanceof MethodDescriptor)?((MethodDescriptor)md).getClassDesc():null), varname);
+ if(cd != null) {
+ // this is a class name
+ nn.setClassDesc(cd);
+ return;
+ } else {
+ throw new Error("Name "+varname+" undefined in: "+md);
+ }
+ }
}
if (d instanceof VarDescriptor) {
nn.setVar(d);
void checkOffsetNode(Descriptor md, SymbolTable nameTable, OffsetNode ofn, TypeDescriptor td) {
TypeDescriptor ltd=ofn.td;
checkTypeDescriptor(((md instanceof MethodDescriptor)?((MethodDescriptor)md).getClassDesc():null),ltd);
-
+
String fieldname = ofn.fieldname;
FieldDescriptor fd=null;
if (ltd.isArray()&&fieldname.equals("length")) {
void checkTertiaryNode(Descriptor md, SymbolTable nametable, TertiaryNode tn, TypeDescriptor td) {
checkExpressionNode(md, nametable, tn.getCond(), new TypeDescriptor(TypeDescriptor.BOOLEAN));
- checkExpressionNode(md, nametable, tn.getTrueExpr(), td );
- checkExpressionNode(md, nametable, tn.getFalseExpr(), td );
+ checkExpressionNode(md, nametable, tn.getTrueExpr(), td);
+ checkExpressionNode(md, nametable, tn.getFalseExpr(), td);
}
void checkInstanceOfNode(Descriptor md, SymbolTable nametable, InstanceOfNode tn, TypeDescriptor td) {
if (td!=null&&!td.isBoolean())
throw new Error("Expecting type "+td+"for instanceof expression");
-
+
checkTypeDescriptor(((md instanceof MethodDescriptor)?((MethodDescriptor)md).getClassDesc():null), tn.getExprType());
checkExpressionNode(md, nametable, tn.getExpr(), null);
}
for(int i = 1; i < vec_type.size(); i++) {
TypeDescriptor tmp_type = vec_type.elementAt(i);
if(out_type == null) {
- if(tmp_type != null) {
- out_type = tmp_type;
- }
+ if(tmp_type != null) {
+ out_type = tmp_type;
+ }
} else if(out_type.isNull()) {
- if(!tmp_type.isNull() ) {
- if(!tmp_type.isArray()) {
- throw new Error("Error: mixed type in var initializer list");
- } else {
- out_type = tmp_type;
- }
- }
+ if(!tmp_type.isNull() ) {
+ if(!tmp_type.isArray()) {
+ throw new Error("Error: mixed type in var initializer list");
+ } else {
+ out_type = tmp_type;
+ }
+ }
} else if(out_type.isArray()) {
- if(tmp_type.isArray()) {
- if(tmp_type.getArrayCount() > out_type.getArrayCount()) {
- out_type = tmp_type;
- }
- } else if((tmp_type != null) && (!tmp_type.isNull())) {
- throw new Error("Error: mixed type in var initializer list");
- }
+ if(tmp_type.isArray()) {
+ if(tmp_type.getArrayCount() > out_type.getArrayCount()) {
+ out_type = tmp_type;
+ }
+ } else if((tmp_type != null) && (!tmp_type.isNull())) {
+ throw new Error("Error: mixed type in var initializer list");
+ }
} else if(out_type.isInt()) {
- if(!tmp_type.isInt()) {
- throw new Error("Error: mixed type in var initializer list");
- }
+ if(!tmp_type.isInt()) {
+ throw new Error("Error: mixed type in var initializer list");
+ }
} else if(out_type.isString()) {
- if(!tmp_type.isString()) {
- throw new Error("Error: mixed type in var initializer list");
- }
+ if(!tmp_type.isString()) {
+ throw new Error("Error: mixed type in var initializer list");
+ }
}
}
if(out_type != null) {
(an.getOperation().getBaseOp().getOp()!=Operation.POSTINC&&
an.getOperation().getBaseOp().getOp()!=Operation.POSTDEC))
postinc=false;
- if (!postinc)
+ if (!postinc)
checkExpressionNode(md, nametable, an.getSrc(),td);
//TODO: Need check on validity of operation here
if (!((an.getDest() instanceof FieldAccessNode)||
TypeDescriptor dt = an.getDest().getType();
TypeDescriptor st = an.getSrc().getType();
if(an.getSrc().kind() == Kind.ArrayInitializerNode) {
- if(dt.getArrayCount() != st.getArrayCount()) {
- throw new Error("Type of rside ("+an.getSrc().getType().toPrettyString()+") not compatible with type of lside ("+an.getDest().getType().toPrettyString()+")"+an.printNode(0));
- } else {
- do {
- dt = dt.dereference();
- st = st.dereference();
- } while(dt.isArray());
- if((st.isByte() || st.isShort() || st.isChar() || st.isInt())
- && (dt.isByte() || dt.isShort() || dt.isChar() || dt.isInt() || dt.isLong())) {
- return;
- } else {
- throw new Error("Type of rside ("+an.getSrc().getType().toPrettyString()+") not compatible with type of lside ("+an.getDest().getType().toPrettyString()+")"+an.printNode(0));
- }
- }
+ if(dt.getArrayCount() != st.getArrayCount()) {
+ throw new Error("Type of rside ("+an.getSrc().getType().toPrettyString()+") not compatible with type of lside ("+an.getDest().getType().toPrettyString()+")"+an.printNode(0));
+ } else {
+ do {
+ dt = dt.dereference();
+ st = st.dereference();
+ } while(dt.isArray());
+ if((st.isByte() || st.isShort() || st.isChar() || st.isInt())
+ && (dt.isByte() || dt.isShort() || dt.isChar() || dt.isInt() || dt.isLong())) {
+ return;
+ } else {
+ throw new Error("Type of rside ("+an.getSrc().getType().toPrettyString()+") not compatible with type of lside ("+an.getDest().getType().toPrettyString()+")"+an.printNode(0));
+ }
+ }
} else {
- Long l = an.getSrc().evaluate();
- if((st.isByte() || st.isShort() || st.isChar() || st.isInt())
- && (l != null)
- && (dt.isByte() || dt.isShort() || dt.isChar() || dt.isInt() || dt.isLong())) {
- long lnvalue = l.longValue();
- if((dt.isByte() && ((lnvalue > 127) || (lnvalue < -128)))
- || (dt.isShort() && ((lnvalue > 32767) || (lnvalue < -32768)))
- || (dt.isChar() && ((lnvalue > 65535) || (lnvalue < 0)))
- || (dt.isInt() && ((lnvalue > 2147483647) || (lnvalue < -2147483648)))
- || (dt.isLong() && ((lnvalue > 9223372036854775807L) || (lnvalue < -9223372036854775808L)))) {
- throw new Error("Type of rside ("+an.getSrc().getType().toPrettyString()+") not compatible with type of lside ("+an.getDest().getType().toPrettyString()+")"+an.printNode(0));
- }
- } else {
- throw new Error("Type of rside ("+an.getSrc().getType().toPrettyString()+") not compatible with type of lside ("+an.getDest().getType().toPrettyString()+")"+an.printNode(0));
- }
+ Long l = an.getSrc().evaluate();
+ if((st.isByte() || st.isShort() || st.isChar() || st.isInt())
+ && (l != null)
+ && (dt.isByte() || dt.isShort() || dt.isChar() || dt.isInt() || dt.isLong())) {
+ long lnvalue = l.longValue();
+ if((dt.isByte() && ((lnvalue > 127) || (lnvalue < -128)))
+ || (dt.isShort() && ((lnvalue > 32767) || (lnvalue < -32768)))
+ || (dt.isChar() && ((lnvalue > 65535) || (lnvalue < 0)))
+ || (dt.isInt() && ((lnvalue > 2147483647) || (lnvalue < -2147483648)))
+ || (dt.isLong() && ((lnvalue > 9223372036854775807L) || (lnvalue < -9223372036854775808L)))) {
+ throw new Error("Type of rside ("+an.getSrc().getType().toPrettyString()+") not compatible with type of lside ("+an.getDest().getType().toPrettyString()+")"+an.printNode(0));
+ }
+ } else {
+ throw new Error("Type of rside ("+an.getSrc().getType().toPrettyString()+") not compatible with type of lside ("+an.getDest().getType().toPrettyString()+")"+an.printNode(0));
+ }
}
}
}
void checkLoopNode(Descriptor md, SymbolTable nametable, LoopNode ln) {
- loopstack.push(ln);
+ loopstack.push(ln);
if (ln.getType()==LoopNode.WHILELOOP||ln.getType()==LoopNode.DOWHILELOOP) {
checkExpressionNode(md, nametable, ln.getCondition(), new TypeDescriptor(TypeDescriptor.BOOLEAN));
checkBlockNode(md, nametable, ln.getBody());
void checkCreateObjectNode(Descriptor md, SymbolTable nametable, CreateObjectNode con,
- TypeDescriptor td) {
+ TypeDescriptor td) {
TypeDescriptor[] tdarray = new TypeDescriptor[con.numArgs()];
for (int i = 0; i < con.numArgs(); i++) {
ExpressionNode en = con.getArg(i);
ClassDescriptor cd = typetolookin.getClassDesc();
for (int j = 0; j < fe.numEffects(); j++) {
- FlagEffect flag = fe.getEffect(j);
- String name = flag.getName();
- FlagDescriptor flag_d = (FlagDescriptor) cd.getFlagTable().get(name);
- // Make sure the flag is declared
- if (flag_d == null)
- throw new Error("Flag descriptor " + name + " undefined in class: " + cd.getSymbol());
- if (flag_d.getExternal())
- throw new Error("Attempting to modify external flag: " + name);
- flag.setFlag(flag_d);
+ FlagEffect flag = fe.getEffect(j);
+ String name = flag.getName();
+ FlagDescriptor flag_d = (FlagDescriptor) cd.getFlagTable().get(name);
+ // Make sure the flag is declared
+ if (flag_d == null)
+ throw new Error("Flag descriptor " + name + " undefined in class: " + cd.getSymbol());
+ if (flag_d.getExternal())
+ throw new Error("Attempting to modify external flag: " + name);
+ flag.setFlag(flag_d);
}
for (int j = 0; j < fe.numTagEffects(); j++) {
- TagEffect tag = fe.getTagEffect(j);
- String name = tag.getName();
-
- Descriptor d = (Descriptor) nametable.get(name);
- if (d == null)
- throw new Error("Tag descriptor " + name + " undeclared");
- else if (!(d instanceof TagVarDescriptor))
- throw new Error(name + " is not a tag descriptor");
- tag.setTag((TagVarDescriptor) d);
+ TagEffect tag = fe.getTagEffect(j);
+ String name = tag.getName();
+
+ Descriptor d = (Descriptor) nametable.get(name);
+ if (d == null)
+ throw new Error("Tag descriptor " + name + " undeclared");
+ else if (!(d instanceof TagVarDescriptor))
+ throw new Error(name + " is not a tag descriptor");
+ tag.setTag((TagVarDescriptor) d);
}
}
Set methoddescriptorset = classtolookin.getMethodTable().getSet(typetolookin.getSymbol());
MethodDescriptor bestmd = null;
- NextMethod: for (Iterator methodit = methoddescriptorset.iterator(); methodit.hasNext();) {
- MethodDescriptor currmd = (MethodDescriptor) methodit.next();
- /* Need correct number of parameters */
- if (con.numArgs() != currmd.numParameters())
- continue;
- for (int i = 0; i < con.numArgs(); i++) {
- if (!typeutil.isSuperorType(currmd.getParamType(i), tdarray[i]))
- continue NextMethod;
- }
- /* Local allocations can't call global allocator */
- if (!con.isGlobal() && currmd.isGlobal())
- continue;
-
- /* Method okay so far */
- if (bestmd == null)
- bestmd = currmd;
- else {
- if (typeutil.isMoreSpecific(currmd, bestmd)) {
- bestmd = currmd;
- } else if (con.isGlobal() && match(currmd, bestmd)) {
- if (currmd.isGlobal() && !bestmd.isGlobal())
- bestmd = currmd;
- else if (currmd.isGlobal() && bestmd.isGlobal())
- throw new Error();
- } else if (!typeutil.isMoreSpecific(bestmd, currmd)) {
- throw new Error("No method is most specific:" + bestmd + " and " + currmd);
- }
-
- /* Is this more specific than bestmd */
- }
+NextMethod: for (Iterator methodit = methoddescriptorset.iterator(); methodit.hasNext(); ) {
+ MethodDescriptor currmd = (MethodDescriptor) methodit.next();
+ /* Need correct number of parameters */
+ if (con.numArgs() != currmd.numParameters())
+ continue;
+ for (int i = 0; i < con.numArgs(); i++) {
+ if (!typeutil.isSuperorType(currmd.getParamType(i), tdarray[i]))
+ continue NextMethod;
+ }
+ /* Local allocations can't call global allocator */
+ if (!con.isGlobal() && currmd.isGlobal())
+ continue;
+
+ /* Method okay so far */
+ if (bestmd == null)
+ bestmd = currmd;
+ else {
+ if (typeutil.isMoreSpecific(currmd, bestmd)) {
+ bestmd = currmd;
+ } else if (con.isGlobal() && match(currmd, bestmd)) {
+ if (currmd.isGlobal() && !bestmd.isGlobal())
+ bestmd = currmd;
+ else if (currmd.isGlobal() && bestmd.isGlobal())
+ throw new Error();
+ } else if (!typeutil.isMoreSpecific(bestmd, currmd)) {
+ throw new Error("No method is most specific:" + bestmd + " and " + currmd);
+ }
+
+ /* Is this more specific than bestmd */
+ }
}
if (bestmd == null)
- throw new Error("No method found for " + con.printNode(0) + " in " + md);
+ throw new Error("No method found for " + con.printNode(0) + " in " + md);
con.setConstructor(bestmd);
}
}
ExpressionNode translateNameDescriptorintoExpression(NameDescriptor nd, int numLine) {
String id=nd.getIdentifier();
NameDescriptor base=nd.getBase();
- if (base==null){
+ if (base==null) {
NameNode nn=new NameNode(nd);
nn.setNumLine(numLine);
return nn;
- }else{
+ } else {
FieldAccessNode fan=new FieldAccessNode(translateNameDescriptorintoExpression(base,numLine),id);
fan.setNumLine(numLine);
return fan;
checkExpressionNode(md,nametable,en,null);
tdarray[i]=en.getType();
if(en.getType().isClass() && en.getType().getClassDesc().isEnum()) {
- tdarray[i] = new TypeDescriptor(TypeDescriptor.INT);
+ tdarray[i] = new TypeDescriptor(TypeDescriptor.INT);
}
}
TypeDescriptor typetolookin=null;
typetolookin=new TypeDescriptor(supercd);
min.setSuper();
} else if (rootname.equals("this")) {
- if(isstatic) {
- throw new Error("use this object in static method md = "+ md.toString());
- }
- ClassDescriptor cd=((MethodDescriptor)md).getClassDesc();
- typetolookin=new TypeDescriptor(cd);
+ if(isstatic) {
+ throw new Error("use this object in static method md = "+ md.toString());
+ }
+ ClassDescriptor cd=((MethodDescriptor)md).getClassDesc();
+ typetolookin=new TypeDescriptor(cd);
} else if (nametable.get(rootname)!=null) {
//we have an expression
min.setExpression(translateNameDescriptorintoExpression(min.getBaseName(),min.getNumLine()));
typetolookin = nn.getType();
if(!((nn.kind()== Kind.NameNode) && (((NameNode)nn).getField() == null)
&& (((NameNode)nn).getVar() == null) && (((NameNode)nn).getExpression() == null))) {
- // this is not a pure class name, need to add to
+ // this is not a pure class name, need to add to
min.setExpression(nn);
}
} else {
//if (min.getBaseName().getSymbol().equals("System.out"))
cd=getClass(null, "System");
/*else {
- cd=getClass(min.getBaseName().getSymbol());
- }*/
+ cd=getClass(min.getBaseName().getSymbol());
+ }*/
if (cd==null)
throw new Error("md = "+ md.toString()+ " "+min.getBaseName()+" undefined");
typetolookin=new TypeDescriptor(cd);
Set methoddescriptorset=classtolookin.getMethodTable().getSet(min.getMethodName());
MethodDescriptor bestmd=null;
NextMethod:
- for(Iterator methodit=methoddescriptorset.iterator(); methodit.hasNext();) {
+ for(Iterator methodit=methoddescriptorset.iterator(); methodit.hasNext(); ) {
MethodDescriptor currmd=(MethodDescriptor)methodit.next();
/* Need correct number of parameters */
if (min.numArgs()!=currmd.numParameters())
continue;
for(int i=0; i<min.numArgs(); i++) {
if (!typeutil.isSuperorType(currmd.getParamType(i),tdarray[i]))
- if(((!tdarray[i].isArray() &&(tdarray[i].isInt() || tdarray[i].isLong()))
+ if(((!tdarray[i].isArray() &&(tdarray[i].isInt() || tdarray[i].isLong()))
&& currmd.getParamType(i).isClass() && currmd.getParamType(i).getClassDesc().getSymbol().equals("Object"))) {
// primitive parameters vs object
} else {
if ((td!=null)&&(min.getType()!=null)&&!typeutil.isSuperorType(td, min.getType()))
throw new Error(min.getType()+ " is not equal to or a subclass of "+td);
/* Check whether we need to set this parameter to implied this */
- if (! isstatic && !bestmd.isStatic()) {
+ if (!isstatic && !bestmd.isStatic()) {
if (min.getExpression()==null) {
ExpressionNode en=new NameNode(new NameDescriptor("this"));
min.setExpression(en);
checkExpressionNode(md, nametable, min.getExpression(), null);
}
}
-
+
/* Check if we need to wrap primitive paratmeters to objects */
for(int i=0; i<min.numArgs(); i++) {
if(!tdarray[i].isArray() && (tdarray[i].isInt() || tdarray[i].isLong())
- && min.getMethod().getParamType(i).isClass() && min.getMethod().getParamType(i).getClassDesc().getSymbol().equals("Object")) {
+ && min.getMethod().getParamType(i).isClass() && min.getMethod().getParamType(i).getClassDesc().getSymbol().equals("Object")) {
// Shall wrap this primitive parameter as a object
ExpressionNode exp = min.getArg(i);
TypeDescriptor ptd = null;
if (on.getRight()!=null)
checkExpressionNode(md, nametable, on.getRight(), null);
TypeDescriptor ltd=on.getLeft().getType();
- TypeDescriptor rtd=on.getRight()!=null ? on.getRight().getType() : null;
+ TypeDescriptor rtd=on.getRight()!=null?on.getRight().getType():null;
TypeDescriptor lefttype=null;
TypeDescriptor righttype=null;
Operation op=on.getOp();
righttype=lefttype=new TypeDescriptor(TypeDescriptor.BOOLEAN);
} else if (ltd.isPtr()||rtd.isPtr()) {
if (!(ltd.isPtr()&&rtd.isPtr())) {
- if(!rtd.isEnum()) {
- throw new Error();
- }
- }
+ if(!rtd.isEnum()) {
+ throw new Error();
+ }
+ }
righttype=rtd;
lefttype=ltd;
} else if (ltd.isDouble()||rtd.isDouble())
throw new Error("Type of rside not compatible with type of lside"+on.printNode(0));
}
}
-
+
}
public Vector<SwitchLabelNode> getSwitchConditions() {
return this.switch_conds;
}
-
+
public BlockNode getSwitchBlockStatement() {
return this.switch_st;
}
result += this.switch_st.printNode(indent);
return result;
}
-
+
public int kind() {
return Kind.SwitchBlockNode;
}
public ExpressionNode getCondition() {
return cond;
}
-
+
public boolean isDefault() {
return this.isdefault;
}
}
return "case " + cond.printNode(indent) + ": ";
}
-
+
public int kind() {
return Kind.SwitchLabelNode;
}
public ExpressionNode getCondition() {
return cond;
}
-
+
public BlockNode getSwitchBody() {
return this.switch_st;
}
public String printNode(int indent) {
return "switch(" + cond.printNode(indent) + ") " + switch_st.printNode(indent);
}
-
+
public int kind() {
return Kind.SwitchStatementNode;
}
ExpressionNode trueExpr;
ExpressionNode falseExpr;
- public TertiaryNode( ExpressionNode cond,
- ExpressionNode trueExpr,
- ExpressionNode falseExpr ) {
+ public TertiaryNode(ExpressionNode cond,
+ ExpressionNode trueExpr,
+ ExpressionNode falseExpr) {
this.cond = cond;
this.trueExpr = trueExpr;
this.falseExpr = falseExpr;
public ExpressionNode getFalseExpr() {
return falseExpr;
}
-
+
public String printNode(int indent) {
return cond.printNode(indent)+" ? "+trueExpr.printNode(indent)+" : "+falseExpr.printNode(indent);
}
public int kind() {
return Kind.TertiaryNode;
}
-
+
public Long evaluate() {
eval = null;
Long c = this.cond.evaluate();
if(c != null) {
Long t = this.trueExpr.evaluate();
if(t != null) {
- Long f = this.falseExpr.evaluate();
- if(f != null) {
- if(c.intValue() > 0) {
- eval = t;
- } else {
- eval = f;
- }
- }
+ Long f = this.falseExpr.evaluate();
+ if(f != null) {
+ if(c.intValue() > 0) {
+ eval = t;
+ } else {
+ eval = f;
+ }
+ }
}
}
return eval;
public int kind() {
throw new Error();
}
-
- public void setNumLine(int numLine){
+
+ public void setNumLine(int numLine) {
this.numLine=numLine;
}
-
- public int getNumLine(){
+
+ public int getNumLine() {
return this.numLine;
}
-
+
}
private int type;
ClassDescriptor class_desc;
boolean isClassNameRef = false;
-
+
private Vector<AnnotationDescriptor> annotationSet;
private TypeExtension typeExtension;
if (t.arraycount!=arraycount)
return false;
if (t.isClassNameRef != this.isClassNameRef)
- return false;
+ return false;
return true;
}
return false;
return false;
return true;
}
-
+
public boolean isClassNameRef() {
return this.isClassNameRef;
}
-
+
public void setClassNameRef() {
this.isClassNameRef = true;
}
if (arraycount!=0||!isClass())
return false;
return (name.equals("bytewrapper")||
- name.equals("booleanwrapper")||
- name.equals("shortwrapper")||
- name.equals("intwrapper")||
- name.equals("longwrapper")||
- name.equals("charwrapper")||
- name.equals("floatwrapper")||
- name.equals("doublewrapper")||
- name.equals("Objectwrapper"));
+ name.equals("booleanwrapper")||
+ name.equals("shortwrapper")||
+ name.equals("intwrapper")||
+ name.equals("longwrapper")||
+ name.equals("charwrapper")||
+ name.equals("floatwrapper")||
+ name.equals("doublewrapper")||
+ name.equals("Objectwrapper"));
}
public TypeDescriptor makeArray(State state) {
return "float";
else if (isOffset())
return "short";
- else
+ else
throw new Error("Error Type: "+type);
}
public boolean isEnum() {
if(this.type != CLASS) {
return false;
- } else if(this.class_desc != null){
+ } else if(this.class_desc != null) {
return this.class_desc.isEnum();
}
return false;
}
-
+
public boolean isClass() {
return (type==CLASS && !isEnum());
}
}
for(int i=0; i<arraycount; i++)
str+="[]";
- return str;
+ return str;
}
private static String decodeInt(int type) {
return "offset";
else throw new Error();
}
-
- public void addAnnotationMarker(AnnotationDescriptor an){
+
+ public void addAnnotationMarker(AnnotationDescriptor an) {
annotationSet.add(an);
}
-
- public Vector<AnnotationDescriptor> getAnnotationMarkers(){
+
+ public Vector<AnnotationDescriptor> getAnnotationMarkers() {
return annotationSet;
}
-
- public void setExtension(TypeExtension te){
+
+ public void setExtension(TypeExtension te) {
typeExtension=te;
}
-
- public TypeExtension getExtension(){
+
+ public TypeExtension getExtension() {
return typeExtension;
}
-
+
}
Hashtable supertable;
Hashtable subclasstable;
BuildIR bir;
-
+
// for interfaces
Hashtable<ClassDescriptor, Set<ClassDescriptor>> superIFtbl;
createTables();
}
- public void addNewClass(String cl, Set todo) {
+ public void addNewClass(String cl, Set todo) {
//search through the default locations for the file.
for (int i = 0; i < state.classpath.size(); i++) {
String path = (String) state.classpath.get(i);
File f = new File(path, cl.replace('.', '/') + ".java");
if (f.exists()) {
- try {
- ParseNode pn = Main.readSourceFile(state, f.getCanonicalPath());
- bir.buildtree(pn, todo, f.getCanonicalPath());
- return;
- } catch (Exception e) {
- throw new Error(e);
- }
+ try {
+ ParseNode pn = Main.readSourceFile(state, f.getCanonicalPath());
+ bir.buildtree(pn, todo, f.getCanonicalPath());
+ return;
+ } catch (Exception e) {
+ throw new Error(e);
+ }
}
- }
+ }
throw new Error("Couldn't find class " + cl);
}
//have to find class
addNewClass(classname, todo);
cd=(ClassDescriptor)state.getClassSymbolTable().get(classname);
-
+
System.out.println("Build class:"+cd);
todo.add(cd);
}
HashSet hs=(HashSet)superIFtbl.get(cd);
Vector<String> superifv = cd.getSuperInterface();
for(int i = 0; i < superifv.size(); i++) {
- String superif = superifv.elementAt(i);
- ClassDescriptor if_super = getClass(superif, todo);
- hs.add(if_super);
+ String superif = superifv.elementAt(i);
+ ClassDescriptor if_super = getClass(superif, todo);
+ hs.add(if_super);
}
}
return cd;
public MethodDescriptor getRun() {
ClassDescriptor cd=getClass(TypeUtil.ThreadClass);
- for(Iterator methodit=cd.getMethodTable().getSet("run").iterator(); methodit.hasNext();) {
+ for(Iterator methodit=cd.getMethodTable().getSet("run").iterator(); methodit.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor) methodit.next();
if (md.numParameters()!=0||md.getModifiers().isStatic())
continue;
public MethodDescriptor getStaticStart() {
ClassDescriptor cd=getClass(TypeUtil.ThreadClass);
- for(Iterator methodit=cd.getMethodTable().getSet("staticStart").iterator(); methodit.hasNext();) {
+ for(Iterator methodit=cd.getMethodTable().getSet("staticStart").iterator(); methodit.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor) methodit.next();
if (md.numParameters()!=1||!md.getModifiers().isStatic()||!md.getParamType(0).isClass()||md.getParamType(0).getClassDesc()!=cd)
continue;
}
throw new Error("Can't find Thread.run");
}
-
+
public MethodDescriptor getExecute() {
ClassDescriptor cd = getClass(TypeUtil.TaskClass);
if(cd == null && state.DSMTASK)
throw new Error("Task.java is not included");
- for(Iterator methodit = cd.getMethodTable().getSet("execute").iterator(); methodit.hasNext();) {
+ for(Iterator methodit = cd.getMethodTable().getSet("execute").iterator(); methodit.hasNext(); ) {
MethodDescriptor md = (MethodDescriptor) methodit.next();
if (md.numParameters()!=0 || md.getModifiers().isStatic())
- continue;
+ continue;
return md;
}
throw new Error("Can't find Task.execute");
ClassDescriptor cd=getMainClass();
Set mainset=cd.getMethodTable().getSet("main");
- for(Iterator mainit=mainset.iterator(); mainit.hasNext();) {
+ for(Iterator mainit=mainset.iterator(); mainit.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor)mainit.next();
if (md.numParameters()!=1)
continue;
Descriptor pd=md.getParameter(0);
- TypeDescriptor tpd=(pd instanceof TagVarDescriptor) ? ((TagVarDescriptor)pd).getType() : ((VarDescriptor)pd)
+ TypeDescriptor tpd=(pd instanceof TagVarDescriptor)?((TagVarDescriptor)pd).getType():((VarDescriptor)pd)
.getType();
if (tpd.getArrayCount()!=1)
continue;
throw new Error();
for(int i=0; i<md1.numParameters(); i++) {
if (!this.isSuperorType(md2.getParamType(i), md1.getParamType(i))) {
- if(((!md1.getParamType(i).isArray() &&
- (md1.getParamType(i).isInt() || md1.getParamType(i).isLong() || md1.getParamType(i).isDouble() || md1.getParamType(i).isFloat()))
- && md2.getParamType(i).isClass() && md2.getParamType(i).getClassDesc().getSymbol().equals("Object"))) {
- // primitive parameters vs Object
- } else {
- return false;
- }
+ if(((!md1.getParamType(i).isArray() &&
+ (md1.getParamType(i).isInt() || md1.getParamType(i).isLong() || md1.getParamType(i).isDouble() || md1.getParamType(i).isFloat()))
+ && md2.getParamType(i).isClass() && md2.getParamType(i).getClassDesc().getSymbol().equals("Object"))) {
+ // primitive parameters vs Object
+ } else {
+ return false;
+ }
}
}
if (md1.getReturnType()==null||md2.getReturnType()==null) {
- if (md1.getReturnType()!=md2.getReturnType())
- return false;
+ if (md1.getReturnType()!=md2.getReturnType())
+ return false;
} else
- if (!this.isSuperorType(md2.getReturnType(), md1.getReturnType()))
- return false;
+ if (!this.isSuperorType(md2.getReturnType(), md1.getReturnType()))
+ return false;
if (!this.isSuperorType(md2.getClassDesc(), md1.getClassDesc()))
return false;
Set methoddescriptorset=cd.getMethodTable().getSet(name);
MethodDescriptor bestmd=null;
NextMethod:
- for(Iterator methodit=methoddescriptorset.iterator(); methodit.hasNext();) {
+ for(Iterator methodit=methoddescriptorset.iterator(); methodit.hasNext(); ) {
MethodDescriptor currmd=(MethodDescriptor)methodit.next();
/* Need correct number of parameters */
if (types.length!=currmd.numParameters())
visited.clear();
ClassDescriptor cd=(ClassDescriptor)classit.next();
ClassDescriptor tmp=cd.getSuperDesc();
-
+
// check cd's interface ancestors
{
Iterator it_sifs = cd.getSuperInterfaces();
while(it_sifs.hasNext()) {
ClassDescriptor cdt = (ClassDescriptor)it_sifs.next();
- if(!tovisit.contains(cdt)){
+ if(!tovisit.contains(cdt)) {
tovisit.add(cdt);
}
}
}
while(tmp!=null) {
- if (!subclasstable.containsKey(tmp))
- subclasstable.put(tmp,new HashSet());
- HashSet hs=(HashSet)subclasstable.get(tmp);
- hs.add(cd);
+ if (!subclasstable.containsKey(tmp))
+ subclasstable.put(tmp,new HashSet());
+ HashSet hs=(HashSet)subclasstable.get(tmp);
+ hs.add(cd);
// check tmp's interface ancestors
Iterator it_sifs = tmp.getSuperInterfaces();
while(it_sifs.hasNext()) {
ClassDescriptor cdt = (ClassDescriptor)it_sifs.next();
- if(!tovisit.contains(cdt)){
+ if(!tovisit.contains(cdt)) {
tovisit.add(cdt);
}
}
-
- tmp=tmp.getSuperDesc();
+
+ tmp=tmp.getSuperDesc();
}
-
+
while(!tovisit.isEmpty()) {
ClassDescriptor sif = (ClassDescriptor)tovisit.iterator().next();
tovisit.remove(sif);
-
+
if(!visited.contains(sif)) {
if(!this.subclasstable.containsKey(sif)) {
this.subclasstable.put(sif, new HashSet());
}
- HashSet hs = (HashSet)this.subclasstable.get(sif);
+ HashSet hs = (HashSet) this.subclasstable.get(sif);
hs.add(cd);
-
+
Iterator it_sifs = sif.getSuperInterfaces();
while(it_sifs.hasNext()) {
ClassDescriptor siftmp = (ClassDescriptor)it_sifs.next();
- if(!tovisit.contains(siftmp)){
+ if(!tovisit.contains(siftmp)) {
tovisit.add(siftmp);
}
}
public ClassDescriptor getSuper(ClassDescriptor cd) {
return (ClassDescriptor)supertable.get(cd);
}
-
+
public Set<ClassDescriptor> getSuperIFs(ClassDescriptor cd) {
return superIFtbl.get(cd);
}
return true;
if (cd2.isEnum()&&(possiblesuper.isInt()||possiblesuper.isLong()||
possiblesuper.isFloat()||possiblesuper.isDouble()))
- return true;
+ return true;
if(cd2.isEnum()&&possiblesuper.isEnum()&&cd2.class_desc.equals(possiblesuper.class_desc))
- return true;
+ return true;
if (cd2.isLong()&&(possiblesuper.isLong()||
possiblesuper.isFloat()||possiblesuper.isDouble()))
return true;
}
public TypeDescriptor mostSpecific(TypeDescriptor td1, TypeDescriptor td2) {
- if( isSuperorType( td1, td2 ) ) {
+ if( isSuperorType(td1, td2) ) {
return td2;
}
- if( isSuperorType( td2, td1 ) ) {
+ if( isSuperorType(td2, td1) ) {
return td1;
}
- throw new Error( td1+" and "+td2+" have no superclass relationship" );
+ throw new Error(td1+" and "+td2+" have no superclass relationship");
}
public TypeDescriptor mostSpecific(TypeDescriptor td1, TypeDescriptor td2, TypeDescriptor td3) {
- return mostSpecific( td1, mostSpecific( td2, td3 ) );
+ return mostSpecific(td1, mostSpecific(td2, td3) );
}
public boolean isSuperorType(ClassDescriptor possiblesuper, ClassDescriptor cd2) {
private boolean isSuper(ClassDescriptor possiblesuper, ClassDescriptor cd2) {
HashSet tovisit=new HashSet();
HashSet visited=new HashSet();
-
+
{
// check cd2's interface ancestors
Iterator<ClassDescriptor> it_sifs = getSuperIFs(cd2).iterator();
ClassDescriptor cd = it_sifs.next();
if(cd == possiblesuper) {
return true;
- } else if(!tovisit.contains(cd)){
+ } else if(!tovisit.contains(cd)) {
tovisit.add(cd);
}
}
cd2=getSuper(cd2);
if (cd2==possiblesuper)
return true;
-
+
// check cd2's interface ancestors
if(cd2 != null) {
Iterator it_sifs = getSuperIFs(cd2).iterator();
ClassDescriptor cd = (ClassDescriptor)it_sifs.next();
if(cd == possiblesuper) {
return true;
- } else if(!tovisit.contains(cd)){
+ } else if(!tovisit.contains(cd)) {
tovisit.add(cd);
}
}
}
}
-
+
while(!tovisit.isEmpty()) {
ClassDescriptor cd = (ClassDescriptor)tovisit.iterator().next();
tovisit.remove(cd);
-
+
if(!visited.contains(cd)) {
Iterator it_sifs = getSuperIFs(cd).iterator();
while(it_sifs.hasNext()) {
ClassDescriptor cdt = (ClassDescriptor)it_sifs.next();
if(cdt == possiblesuper) {
return true;
- } else if(!tovisit.contains(cdt)){
+ } else if(!tovisit.contains(cdt)) {
tovisit.add(cdt);
}
}
while(classit.hasNext()) {
ClassDescriptor cd=(ClassDescriptor)classit.next();
if(!cd.isInterface()) {
- int count = classmethodcount.get(cd).intValue();
- classmethodcount.put(cd, new Integer(count+this.if_starts));
+ int count = classmethodcount.get(cd).intValue();
+ classmethodcount.put(cd, new Integer(count+this.if_starts));
}
}
}
start=numberLocality(superdesc);
if (locality.getClassBindings(cd)!=null)
- for(Iterator<LocalityBinding> lbit=locality.getClassBindings(cd).iterator(); lbit.hasNext();) {
+ for(Iterator<LocalityBinding> lbit=locality.getClassBindings(cd).iterator(); lbit.hasNext(); ) {
LocalityBinding lb=lbit.next();
MethodDescriptor md=lb.getMethod();
//Is it a static method or constructor
if (superdesc!=null) {
Set possiblematches=superdesc.getMethodTable().getSet(md.getSymbol());
boolean foundmatch=false;
- for(Iterator matchit=possiblematches.iterator(); matchit.hasNext();) {
+ for(Iterator matchit=possiblematches.iterator(); matchit.hasNext(); ) {
MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
if (md.matches(matchmd)) {
Set<LocalityBinding> lbset=locality.getMethodBindings(matchmd);
if (lbset!=null)
- for(Iterator<LocalityBinding> suplbit=lbset.iterator(); suplbit.hasNext();) {
+ for(Iterator<LocalityBinding> suplbit=lbset.iterator(); suplbit.hasNext(); ) {
LocalityBinding suplb=suplbit.next();
if (lb.contextMatches(suplb)) {
foundmatch=true;
classmethodcount.put(cd, new Integer(start));
return start;
}
-
+
private int numberMethodsIF(ClassDescriptor cd) {
if(!cd.isInterface()) {
return 0;
ClassDescriptor superif = (ClassDescriptor)it_sifs.next();
mnum += numberMethodsIF(superif);
}
- for(Iterator it=cd.getMethods(); it.hasNext();) {
+ for(Iterator it=cd.getMethods(); it.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor)it.next();
if (md.isStatic()||md.getReturnType()==null)
- continue;
+ continue;
if (!callgraph.isCallable(md)&&!callgraph.isCalled(md))
continue;
boolean foundmatch=false;
// check if there is a matched method that has been assigned method num
Set possiblematches_if = if_methods.getSet(md.getSymbol());
- for(Iterator matchit=possiblematches_if.iterator(); matchit.hasNext();) {
- MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
- if (md.matches(matchmd)) {
- int num=methodnumber.get(matchmd);
- methodnumber.put(md, new Integer(num));
- foundmatch=true;
- break;
- }
+ for(Iterator matchit=possiblematches_if.iterator(); matchit.hasNext(); ) {
+ MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
+ if (md.matches(matchmd)) {
+ int num=methodnumber.get(matchmd);
+ methodnumber.put(md, new Integer(num));
+ foundmatch=true;
+ break;
+ }
}
if (!foundmatch) {
methodnumber.put(md, new Integer(if_starts++));
mnum = numberMethods(superdesc);
start += mnum;
}
- methodit:
- for(Iterator it=cd.getMethods(); it.hasNext();) {
+methodit:
+ for(Iterator it=cd.getMethods(); it.hasNext(); ) {
MethodDescriptor md=(MethodDescriptor)it.next();
if (md.isStatic()||md.getReturnType()==null)
- continue;
+ continue;
if (!callgraph.isCallable(md)&&!callgraph.isCalled(md))
continue;
// check if there is a matched method in methods defined in interfaces
Set possiblematches_if=if_methods.getSet(md.getSymbol());
- for(Iterator matchit=possiblematches_if.iterator(); matchit.hasNext();) {
- MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
- if (md.matches(matchmd)) {
+ for(Iterator matchit=possiblematches_if.iterator(); matchit.hasNext(); ) {
+ MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
+ if (md.matches(matchmd)) {
int num;
if (!methodnumber.containsKey(matchmd)) {
num=start++;
methodnumber.put(matchmd,num);
} else
num = methodnumber.get(matchmd);
- methodnumber.put(md, new Integer(num));
+ methodnumber.put(md, new Integer(num));
continue methodit;
- }
+ }
}
if (superdesc!=null) {
- Set possiblematches=superdesc.getMethodTable().getSet(md.getSymbol());
- for(Iterator matchit=possiblematches.iterator(); matchit.hasNext();) {
- MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
- if (md.matches(matchmd)) {
+ Set possiblematches=superdesc.getMethodTable().getSet(md.getSymbol());
+ for(Iterator matchit=possiblematches.iterator(); matchit.hasNext(); ) {
+ MethodDescriptor matchmd=(MethodDescriptor)matchit.next();
+ if (md.matches(matchmd)) {
int num;
if (!methodnumber.containsKey(matchmd)) {
num=start++;
methodnumber.put(matchmd,num);
} else
num = methodnumber.get(matchmd);
- methodnumber.put(md, new Integer(num));
+ methodnumber.put(md, new Integer(num));
continue methodit;
- }
- }
+ }
+ }
}
methodnumber.put(md, new Integer(start++));
taskmap = new Hashtable();
sourcenodemap=new Hashtable();
- for(Iterator it_tasks=state.getTaskSymbolTable().getDescriptorsIterator(); it_tasks.hasNext();) {
+ for(Iterator it_tasks=state.getTaskSymbolTable().getDescriptorsIterator(); it_tasks.hasNext(); ) {
TaskDescriptor td=(TaskDescriptor)it_tasks.next();
taskmap.put("/"+td.getSymbol()+".html",td);
}
- for(Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator(); it_classes.hasNext();) {
+ for(Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator(); it_classes.hasNext(); ) {
ClassDescriptor cd=(ClassDescriptor) it_classes.next();
if(cd.hasFlags()) {
Vector rootnodes=taskanalysis.getRootNodes(cd);
if(rootnodes!=null)
- for(Iterator it_rootnodes=rootnodes.iterator(); it_rootnodes.hasNext();) {
+ for(Iterator it_rootnodes=rootnodes.iterator(); it_rootnodes.hasNext(); ) {
FlagState root=(FlagState)it_rootnodes.next();
Vector cd_nodeid=new Vector(); //Vector is designed to contain only 2 elements: ClassDescriptor,Node label
// Both the values are required to correctly resolve the rootnode.
//printing out the classes that are instantiated by this task
pw.println("<br><h3>Instantiated Classes:</h3>");
Set newstates=taganalysis.getFlagStates(td);
- for(Iterator fsit=newstates.iterator(); fsit.hasNext();) {
+ for(Iterator fsit=newstates.iterator(); fsit.hasNext(); ) {
FlagState fsnew=(FlagState) fsit.next();
ClassDescriptor cd=fsnew.getClassDescriptor();
pw.println(" <a href=\"/"+cd.getSymbol()+".html\">"+cd.getSymbol()+"</a><br>");
private String sourcenode(Vector cd_nodeid,OutputStream out, HTTPResponse resp) {
Vector rootnodes=taskanalysis.getRootNodes((ClassDescriptor)cd_nodeid.elementAt(0));
- for(Iterator it_rootnodes=rootnodes.iterator(); it_rootnodes.hasNext();) {
+ for(Iterator it_rootnodes=rootnodes.iterator(); it_rootnodes.hasNext(); ) {
FlagState root=(FlagState)it_rootnodes.next();
if (root.getLabel().equals((String)cd_nodeid.elementAt(1))) {
try {
PrintWriter pw=new PrintWriter(out);
pw.println("<br><br><h3>Allocating tasks for "+root.getTextLabel()+":</h3><br>");
Vector tasks=root.getAllocatingTasks();
- for(Iterator it_tasks=tasks.iterator(); it_tasks.hasNext();) {
+ for(Iterator it_tasks=tasks.iterator(); it_tasks.hasNext(); ) {
TaskDescriptor td=(TaskDescriptor)it_tasks.next();
pw.println("<br><strong>Task: "+td.toString()+"</strong><br>");
printTask(td,pw);
private String indexpage(OutputStream out, HTTPResponse resp) {
PrintWriter pw=new PrintWriter(out);
- for(Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator(); it_classes.hasNext();) {
+ for(Iterator it_classes=state.getClassSymbolTable().getDescriptorsIterator(); it_classes.hasNext(); ) {
ClassDescriptor cd=(ClassDescriptor) it_classes.next();
if (cd.hasFlags()) {
if (taskanalysis.getFlagStates(cd)!=null) {
super(in);
}
public int read() throws IOException {
- int r = (pushback==-1) ? in.read() : pushback; pushback=-1;
+ int r = (pushback==-1)?in.read():pushback; pushback=-1;
if (r!='\\') {
isEvenSlash=true;
public int read(char cbuf[], int off, int len) throws IOException {
for (int i=0; i<len; i++) {
int c = read();
- if (c==-1) return (i==0) ? -1 : i; // end of stream reached.
+ if (c==-1) return (i==0)?-1:i; // end of stream reached.
else cbuf[i+off] = (char) c;
}
return len;
return start==end+1 || (start==0 && end==backing.length-1);
}
private int size() {
- return ((end<start) ? end+backing.length : end)-start;
+ return ((end<start)?end+backing.length:end)-start;
}
public void put(java_cup.runtime.Symbol o) {
if (isFull()) {
public java_cup.runtime.Symbol nextToken() throws java.io.IOException {
java_cup.runtime.Symbol sym =
- lookahead==null ? _nextToken() : lookahead.get();
+ lookahead==null?_nextToken():lookahead.get();
last = sym;
return sym;
}
switch (line.charAt(line_pos)) {
- // White space:
+ // White space:
case ' ': // ASCII SP
case '\t': // ASCII HT
case '\f': // ASCII FF
case '\n': // LineTerminator
return new WhiteSpace(consume());
- // EOF character:
+ // EOF character:
case '\020': // ASCII SUB
consume();
return new EOF();
- // Comment prefix:
+ // Comment prefix:
case '/':
return getComment();
- // else, a Token
+ // else, a Token
default:
return getToken();
}
Token getToken() throws java.io.IOException {
// Tokens are: Identifiers, Keywords, Literals, Separators, Operators.
switch (line.charAt(line_pos)) {
- // Separators: (period is a special case)
+ // Separators: (period is a special case)
case '(':
case ')':
case '{':
case '@':
return new Separator(consume());
- // Operators:
+ // Operators:
case '=':
case '>':
case '<':
case '\"':
return getStringLiteral();
- // a period is a special case:
+ // a period is a special case:
case '.':
if (Character.digit(line.charAt(line_pos+1),10)!=-1)
return getNumericLiteral();
"import", "instanceof", "int",
"interface",
"isavailable",
- "locdef", "long",
- "native", "new", "newflag", "optional", "package", "private", "protected", "public",
+ "locdef", "long",
+ "native", "new", "newflag", "optional", "package", "private", "protected", "public",
"rblock", "return",
"scratch", "sese", "short", "static", "strictfp", "super", "switch", "synchronized",
"tag", "task", "taskexit", //keywords for failure aware computation
// use binary search.
for (int l=0, r=keywords.length; r > l; ) {
int x = (l+r)/2, cmp = s.compareTo(keywords[x]);
- if (cmp < 0) r=x;else l=x+1;
+ if (cmp < 0) r=x; else l=x+1;
if (cmp== 0) return new Keyword(s);
}
// not a keyword.
case 'D':
consume();
- /* falls through */
+ /* falls through */
default:
return new DoubleLiteral(Double.valueOf(rep).doubleValue());
}
char second= line.charAt(line_pos);
switch(first) {
- // single-character operators.
+ // single-character operators.
case '~':
case '?':
case ':':
return new Operator(new String(new char[] {first}));
- // doubled operators
+ // doubled operators
case '+':
case '-':
case '&':
boolean isDistributeInfo = false;
boolean isDisAll = false;
int startnum = 0;
-
+
for(int i=0; i<args.length; i++) {
String option=args[i];
state.FASTCHECK=true;
else if (option.equals("-selfloop"))
state.selfloops.add(args[++i]);
- else if (option.equals("-outputdir"))
+ else if (option.equals("-outputdir"))
state.outputdir = args[++i];
else if (option.equals("-excprefetch"))
state.excprefetch.add(args[++i]);
++i;
state.CORENUM = Integer.parseInt(args[i]);
} else if(option.equals("-numcore4gc")) {
- ++i;
- state.CORENUM4GC = Integer.parseInt(args[i]);
+ ++i;
+ state.CORENUM4GC = Integer.parseInt(args[i]);
} else if (option.equals("-mainclass"))
state.main=args[++i];
else if (option.equals("-trueprob")) {
else if (option.equals("-printschedulesim"))
State.PRINTSCHEDULESIM=true;
else if (option.equals("-printcriticalpath"))
- State.PRINTCRITICALPATH=true;
+ State.PRINTCRITICALPATH=true;
else if (option.equals("-struct"))
state.structfile=args[++i];
else if (option.equals("-conscheck"))
state.FLATIRGRAPH=true;
state.FLATIRGRAPHLIBMETHODS=true;
} else if (option.equals("-bamboocompiletime")) {
- state.BAMBOOCOMPILETIME = true;
+ state.BAMBOOCOMPILETIME = true;
} else if (option.equals("-multicore"))
state.MULTICORE=true;
else if (option.equals("-multicoregc"))
- state.MULTICOREGC=true;
+ state.MULTICOREGC=true;
else if (option.equals("-mgc")) {
- state.MGC = true;
+ state.MGC = true;
} else if (option.equals("-objectlockdebug")) {
- state.OBJECTLOCKDEBUG = true;
+ state.OBJECTLOCKDEBUG = true;
} else if (option.equals("-ownership"))
state.OWNERSHIP=true;
else if (option.equals("-ownallocdepth")) {
state.OWNERSHIPALIASFILE=args[++i];
} else if (option.equals("-ownaliasfiletab")) {
state.OWNERSHIPALIASFILE=args[++i];
- state.OWNERSHIPALIASTAB=true;
+ state.OWNERSHIPALIASTAB=true;
} else if (option.equals("-owndebugcallee")) {
state.OWNERSHIPDEBUGCALLEE=args[++i];
} else if (option.equals("-owndebugcaller")) {
} else if (option.equals("-disjoint-write-dots")) {
state.DISJOINTWRITEDOTS = true;
- String arg = args[++i];
+ String arg = args[++i];
if( arg.equals("all") ) {
state.DISJOINTWRITEALL = true;
} else if( arg.equals("final") ) {
- state.DISJOINTWRITEALL = false;
- } else {
- throw new Error("disjoint-write-dots requires argument <all/final>");
- }
+ state.DISJOINTWRITEALL = false;
+ } else {
+ throw new Error("disjoint-write-dots requires argument <all/final>");
+ }
} else if (option.equals("-disjoint-write-initial-contexts")) {
state.DISJOINTWRITEINITCONTEXTS = true;
} else if (option.equals("-disjoint-alias-file")) {
state.DISJOINTALIASFILE = args[++i];
- String arg = args[++i];
+ String arg = args[++i];
if( arg.equals("normal") ) {
state.DISJOINTALIASTAB = false;
} else if( arg.equals("tabbed") ) {
- state.DISJOINTALIASTAB = true;
- } else {
- throw new Error("disjoint-alias-file requires arguments: <filename> <normal/tabbed>");
- }
+ state.DISJOINTALIASTAB = true;
+ } else {
+ throw new Error("disjoint-alias-file requires arguments: <filename> <normal/tabbed>");
+ }
} else if (option.equals("-disjoint-debug-callsite")) {
state.DISJOINTDEBUGCALLEE=args[++i];
state.DISJOINTDEBUGCALLER=args[++i];
state.DISJOINTDEBUGCALLVISITTOSTART=Integer.parseInt(args[++i]);
state.DISJOINTDEBUGCALLNUMVISITS=Integer.parseInt(args[++i]);
- String arg = args[++i];
+ String arg = args[++i];
if( arg.equals("true") ) {
state.DISJOINTDEBUGCALLSTOPAFTER = true;
} else if( arg.equals("false") ) {
- state.DISJOINTDEBUGCALLSTOPAFTER = false;
- } else {
- throw new Error("disjoint-debug-callsite requires arguments:\n"+
- " <callee symbol> <caller symbol> <# visit to start> <# visits to capture> <T/F stop after>");
- }
-
+ state.DISJOINTDEBUGCALLSTOPAFTER = false;
+ } else {
+ throw new Error("disjoint-debug-callsite requires arguments:\n"+
+ " <callee symbol> <caller symbol> <# visit to start> <# visits to capture> <T/F stop after>");
+ }
+
} else if (option.equals("-disjoint-debug-snap-method")) {
state.DISJOINTSNAPSYMBOL=args[++i];
- state.DISJOINTSNAPVISITTOSTART=Integer.parseInt(args[++i]);
+ state.DISJOINTSNAPVISITTOSTART=Integer.parseInt(args[++i]);
state.DISJOINTSNAPNUMVISITS=Integer.parseInt(args[++i]);
- String arg = args[++i];
+ String arg = args[++i];
if( arg.equals("true") ) {
state.DISJOINTSNAPSTOPAFTER = true;
} else if( arg.equals("false") ) {
- state.DISJOINTSNAPSTOPAFTER = false;
- } else {
- throw new Error("disjoint-debug-snap-method requires arguments:\n"+
- " <method symbol> <# visit to start> <# visits to snap> <T/F stop after>");
- }
-
- } else if( option.equals( "-disjoint-release-mode" ) ) {
- state.DISJOINTRELEASEMODE = true;
-
- } else if( option.equals( "-disjoint-dvisit-stack" ) ) {
- state.DISJOINTDVISITSTACK = true;
- state.DISJOINTDVISITPQUE = false;
- state.DISJOINTDVISITSTACKEESONTOP = false;
-
- } else if( option.equals( "-disjoint-dvisit-pqueue" ) ) {
- state.DISJOINTDVISITPQUE = true;
- state.DISJOINTDVISITSTACK = false;
- state.DISJOINTDVISITSTACKEESONTOP = false;
-
- } else if( option.equals( "-disjoint-dvisit-stack-callees-on-top" ) ) {
- state.DISJOINTDVISITSTACKEESONTOP = true;
- state.DISJOINTDVISITPQUE = false;
- state.DISJOINTDVISITSTACK = false;
-
- } else if( option.equals( "-disjoint-desire-determinism" ) ) {
- state.DISJOINTDETERMINISM = true;
-
- // when asking analysis for a deterministic result, force
- // a stack-based visiting scheme, because the priority queue
- // requires a non-deterministic topological sort
- state.DISJOINTDVISITSTACKEESONTOP = true;
- state.DISJOINTDVISITPQUE = false;
- state.DISJOINTDVISITSTACK = false;
-
-
- } else if( option.equals( "-disjoint-debug-scheduling" ) ) {
- state.DISJOINTDEBUGSCHEDULING = true;
- }
-
+ state.DISJOINTSNAPSTOPAFTER = false;
+ } else {
+ throw new Error("disjoint-debug-snap-method requires arguments:\n"+
+ " <method symbol> <# visit to start> <# visits to snap> <T/F stop after>");
+ }
+
+ } else if( option.equals("-disjoint-release-mode") ) {
+ state.DISJOINTRELEASEMODE = true;
+
+ } else if( option.equals("-disjoint-dvisit-stack") ) {
+ state.DISJOINTDVISITSTACK = true;
+ state.DISJOINTDVISITPQUE = false;
+ state.DISJOINTDVISITSTACKEESONTOP = false;
+
+ } else if( option.equals("-disjoint-dvisit-pqueue") ) {
+ state.DISJOINTDVISITPQUE = true;
+ state.DISJOINTDVISITSTACK = false;
+ state.DISJOINTDVISITSTACKEESONTOP = false;
- else if (option.equals("-optional"))
+ } else if( option.equals("-disjoint-dvisit-stack-callees-on-top") ) {
+ state.DISJOINTDVISITSTACKEESONTOP = true;
+ state.DISJOINTDVISITPQUE = false;
+ state.DISJOINTDVISITSTACK = false;
+
+ } else if( option.equals("-disjoint-desire-determinism") ) {
+ state.DISJOINTDETERMINISM = true;
+
+ // when asking analysis for a deterministic result, force
+ // a stack-based visiting scheme, because the priority queue
+ // requires a non-deterministic topological sort
+ state.DISJOINTDVISITSTACKEESONTOP = true;
+ state.DISJOINTDVISITPQUE = false;
+ state.DISJOINTDVISITSTACK = false;
+
+
+ } else if( option.equals("-disjoint-debug-scheduling") ) {
+ state.DISJOINTDEBUGSCHEDULING = true;
+ } else if (option.equals("-optional"))
state.OPTIONAL=true;
else if (option.equals("-optimize"))
state.OPTIMIZE=true;
else if (option.equals("-distributioninfo"))
isDistributeInfo=true;
else if (option.equals("-disall"))
- isDisAll=true;
+ isDisAll=true;
else if (option.equals("-disstart"))
- startnum = Integer.parseInt(args[++i]);
+ startnum = Integer.parseInt(args[++i]);
else if (option.equals("-useprofile")) {
state.USEPROFILE=true;
state.profilename = args[++i];
- }
- else if (option.equals("-thread"))
+ } else if (option.equals("-thread"))
state.THREAD=true;
else if (option.equals("-dsm"))
state.DSM=true;
else if (option.equals("-methodeffects")) {
state.METHODEFFECTS=true;
-
+
} else if (option.equals("-coreprof")) {
state.COREPROF=true;
} else if (option.equals("-ooojava")) {
state.OOOJAVA = true;
state.DISJOINT = true;
- state.OOO_NUMCORES = Integer.parseInt( args[++i] );
- state.OOO_MAXSESEAGE = Integer.parseInt( args[++i] );
+ state.OOO_NUMCORES = Integer.parseInt(args[++i]);
+ state.OOO_MAXSESEAGE = Integer.parseInt(args[++i]);
- } else if (option.equals("-ooodebug") ){
+ } else if (option.equals("-ooodebug") ) {
state.OOODEBUG = true;
- } else if (option.equals("-rcr")){
+ } else if (option.equals("-rcr")) {
state.RCR = true;
state.KEEP_RG_FOR_ALL_PROGRAM_POINTS=true;
- } else if (option.equals("-rcr_debug")){
+ } else if (option.equals("-rcr_debug")) {
state.RCR_DEBUG = true;
state.KEEP_RG_FOR_ALL_PROGRAM_POINTS=true;
- } else if (option.equals("-rcr_debug_verbose")){
+ } else if (option.equals("-rcr_debug_verbose")) {
state.RCR_DEBUG_VERBOSE = true;
state.KEEP_RG_FOR_ALL_PROGRAM_POINTS=true;
- } else if (option.equals("-nostalltr")){
- state.NOSTALLTR = true;
- } else if (option.equals("-ssjava")){
- state.SSJAVA = true;
- } else if (option.equals("-printlinenum")){
+ } else if (option.equals("-nostalltr")) {
+ state.NOSTALLTR = true;
+ } else if (option.equals("-ssjava")) {
+ state.SSJAVA = true;
+ } else if (option.equals("-printlinenum")) {
state.LINENUM=true;
- }else if (option.equals("-help")) {
+ } else if (option.equals("-help")) {
System.out.println("-classlibrary classlibrarydirectory -- directory where classlibrary is located");
System.out.println("-selfloop task -- this task doesn't self loop its parameters forever");
System.out.println("-dir outputdirectory -- output code in outputdirectory");
sourcefiles.add(args[i]);
}
}
-
+
//add default classpath
if (state.classpath.size()==1)
state.classpath.add(ClassLibraryPrefix);
BuildIR bir=new BuildIR(state);
tu=new TypeUtil(state, bir);
SemanticCheck sc=new SemanticCheck(state,tu);
-
- for(int i=0;i<sourcefiles.size();i++)
+
+ for(int i=0; i<sourcefiles.size(); i++)
loadClass(state, bir, sourcefiles.get(i));
-
+
//Stuff the runtime wants to see
-
+
if (state.TASK) {
sc.getClass(null, "TagDescriptor");
}
-
+
sc.semanticCheck();
State.logEvent("Done Semantic Checking");
-
+
tu.createFullTable();
State.logEvent("Done Creating TypeUtil");
-
+
bf=new BuildFlat(state,tu);
bf.buildFlat();
State.logEvent("Done Building Flat");
tu=jb.getTypeUtil();
bf=jb.getBuildFlat();
}
-
+
SafetyAnalysis sa=null;
PrefetchAnalysis pa=null;
- OoOJavaAnalysis oooa=null;
+ OoOJavaAnalysis oooa=null;
if (state.INLINEATOMIC) {
Iterator classit=state.getClassSymbolTable().getDescriptorsIterator();
while(classit.hasNext()) {
- ClassDescriptor cn=(ClassDescriptor)classit.next();
- Iterator methodit=cn.getMethods();
- while(methodit.hasNext()) {
+ ClassDescriptor cn=(ClassDescriptor)classit.next();
+ Iterator methodit=cn.getMethods();
+ while(methodit.hasNext()) {
// do inlining
- MethodDescriptor md=(MethodDescriptor)methodit.next();
- FlatMethod fm=state.getMethodFlat(md);
+ MethodDescriptor md=(MethodDescriptor)methodit.next();
+ FlatMethod fm=state.getMethodFlat(md);
Inliner.inlineAtomic(state, tu, fm, state.inlineatomicdepth);
}
}
}
CallGraph callgraph=jb!=null?jb:(state.TASK?new BaseCallGraph(state, tu):new JavaCallGraph(state, tu));
-
+
// SSJava
- if(state.SSJAVA){
+ if(state.SSJAVA) {
ssjava.doCheck();
State.logEvent("Done SSJava Checking");
}
localCSE lcse=new localCSE(gft, tu);
LoopOptimize lo=null;
if (!state.NOLOOP)
- lo=new LoopOptimize(gft, tu);
+ lo=new LoopOptimize(gft, tu);
Iterator classit=state.getClassSymbolTable().getDescriptorsIterator();
while(classit.hasNext()) {
- ClassDescriptor cn=(ClassDescriptor)classit.next();
- Iterator methodit=cn.getMethods();
- while(methodit.hasNext()) {
- /* Classify parameters */
- MethodDescriptor md=(MethodDescriptor)methodit.next();
- FlatMethod fm=state.getMethodFlat(md);
+ ClassDescriptor cn=(ClassDescriptor)classit.next();
+ Iterator methodit=cn.getMethods();
+ while(methodit.hasNext()) {
+ /* Classify parameters */
+ MethodDescriptor md=(MethodDescriptor)methodit.next();
+ FlatMethod fm=state.getMethodFlat(md);
if (fm==null)
continue;
cp.optimize(fm);
dc.optimize(fm);
if (!state.NOLOOP)
- lo.optimize(fm);
+ lo.optimize(fm);
cp.optimize(fm);
dc.optimize(fm);
lcse.doAnalysis(fm);
dc.optimize(fm);
cp.optimize(fm);
dc.optimize(fm);
- }
+ }
}
State.logEvent("Done Optimizing");
}
state.FLATIRGRAPHUSERMETHODS,
state.FLATIRGRAPHLIBMETHODS);
}
-
+
if (state.OWNERSHIP) {
Liveness liveness = new Liveness();
ArrayReferencees ar = new ArrayReferencees(state, tu, callgraph);
OwnershipAnalysis oa = new OwnershipAnalysis(state,
tu,
callgraph,
- liveness,
+ liveness,
ar,
state.OWNERSHIPALLOCDEPTH,
state.OWNERSHIPWRITEDOTS,
}
if (state.DISJOINT && !state.OOOJAVA) {
- Liveness l = new Liveness();
+ Liveness l = new Liveness();
ArrayReferencees ar = new ArrayReferencees(state, tu, callgraph);
DisjointAnalysis da = new DisjointAnalysis(state, tu, callgraph, l, ar, null, null);
}
if (state.OOOJAVA) {
- Liveness l = new Liveness();
+ Liveness l = new Liveness();
ArrayReferencees ar = new ArrayReferencees(state, tu, callgraph);
oooa = new OoOJavaAnalysis(state, tu, callgraph, l, ar);
}
if (state.SCHEDULING) {
// Use ownership analysis to get alias information
Liveness liveness = new Liveness();
- ArrayReferencees ar = new ArrayReferencees(state, tu, callgraph);
- OwnershipAnalysis oa = null;/*new OwnershipAnalysis(state,
- tu,
- callGraph,
- liveness,
- ar,
- state.OWNERSHIPALLOCDEPTH,
- state.OWNERSHIPWRITEDOTS,
- state.OWNERSHIPWRITEALL,
- state.OWNERSHIPALIASFILE);*/
-
+ ArrayReferencees ar = new ArrayReferencees(state, tu, callgraph);
+ OwnershipAnalysis oa = null; /*new OwnershipAnalysis(state,
+ tu,
+ callGraph,
+ liveness,
+ ar,
+ state.OWNERSHIPALLOCDEPTH,
+ state.OWNERSHIPWRITEDOTS,
+ state.OWNERSHIPWRITEALL,
+ state.OWNERSHIPALIASFILE);*/
+
// synthesis a layout according to target multicore processor
MCImplSynthesis mcImplSynthesis = new MCImplSynthesis(state,
- ta,
- oa);
+ ta,
+ oa);
if(isDistributeInfo) {
- mcImplSynthesis.distribution(isDisAll, startnum);
+ mcImplSynthesis.distribution(isDisAll, startnum);
} else {
- double timeStartAnalysis = (double) System.nanoTime();
- mcImplSynthesis.setScheduleThreshold(20);
- mcImplSynthesis.setProbThreshold(0);
- mcImplSynthesis.setGenerateThreshold(30);
- Vector<Schedule> scheduling = mcImplSynthesis.synthesis();
-
- double timeEndAnalysis = (double) System.nanoTime();
- if(state.BAMBOOCOMPILETIME) {
- double dt = (timeEndAnalysis - timeStartAnalysis)/(Math.pow( 10.0, 9.0 ) );
- System.err.println("The analysis took" + dt + "sec.");
- System.exit(0);
- }
-
- // generate multicore codes
- if(state.MULTICORE) {
- BuildCodeMultiCore bcm=new BuildCodeMultiCore(state,
- bf.getMap(),
- tu,
- sa,
- scheduling,
- mcImplSynthesis.getCoreNum(),
- state.CORENUM4GC, callgraph);
- bcm.setOwnershipAnalysis(oa);
- bcm.buildCode();
- }
- scheduling.clear();
- scheduling = null;
+ double timeStartAnalysis = (double) System.nanoTime();
+ mcImplSynthesis.setScheduleThreshold(20);
+ mcImplSynthesis.setProbThreshold(0);
+ mcImplSynthesis.setGenerateThreshold(30);
+ Vector<Schedule> scheduling = mcImplSynthesis.synthesis();
+
+ double timeEndAnalysis = (double) System.nanoTime();
+ if(state.BAMBOOCOMPILETIME) {
+ double dt = (timeEndAnalysis - timeStartAnalysis)/(Math.pow(10.0, 9.0) );
+ System.err.println("The analysis took" + dt + "sec.");
+ System.exit(0);
+ }
+
+ // generate multicore codes
+ if(state.MULTICORE) {
+ BuildCodeMultiCore bcm=new BuildCodeMultiCore(state,
+ bf.getMap(),
+ tu,
+ sa,
+ scheduling,
+ mcImplSynthesis.getCoreNum(),
+ state.CORENUM4GC, callgraph);
+ bcm.setOwnershipAnalysis(oa);
+ bcm.buildCode();
+ }
+ scheduling.clear();
+ scheduling = null;
}
}
}
-
+
if (state.MGC) {
// generate multicore codes
if(state.MULTICORE) {
- BuildCodeMGC bcmgc=new BuildCodeMGC(state,
- bf.getMap(),
- tu,
- sa,
- state.CORENUM,
- state.CORENUM,
- state.CORENUM4GC, callgraph);
- bcmgc.buildCode();
+ BuildCodeMGC bcmgc=new BuildCodeMGC(state,
+ bf.getMap(),
+ tu,
+ sa,
+ state.CORENUM,
+ state.CORENUM,
+ state.CORENUM4GC, callgraph);
+ bcmgc.buildCode();
}
}
-
+
if(!state.MULTICORE) {
BuildCode bc;
GenerateConversions gc=new GenerateConversions(la, state);
bc=new BuildCodeTran(state, bf.getMap(), tu, la, pa, callgraph);
} else {
- if( state.OOOJAVA ) {
- bc=new BuildOoOJavaCode(state, bf.getMap(), tu, sa, oooa, callgraph);
- } else {
- bc=new BuildCode(state, bf.getMap(), tu, sa, callgraph, jb);
- }
+ if( state.OOOJAVA ) {
+ bc=new BuildOoOJavaCode(state, bf.getMap(), tu, sa, oooa, callgraph);
+ } else {
+ bc=new BuildCode(state, bf.getMap(), tu, sa, callgraph, jb);
+ }
}
bc.buildCode();
State.logEvent("Done With BuildCode");
-
+
}
System.out.println("Lines="+state.lines);
g = new Parse.Parser(l);
ParseNode p=null;
try {
- p=(ParseNode) g./*debug_*/parse().value;
+ p=(ParseNode) g./*debug_*/ parse().value;
} catch (Exception e) {
System.err.println("Error parsing file:"+sourcefile);
e.printStackTrace();
}
}
rl->numreaders++;
- for(i=0;i<READERSIZE;i++) {
+ for(i=0; i<READERSIZE; i++) {
if (rl->array[i]==NULL) {
rl->array[i]=&t_abort;
pthread_mutex_unlock(&aborttablelock);
void removetransaction(unsigned int oidarray[], unsigned int numoids) {
int i,j;
pthread_mutex_lock(&aborttablelock);
- for(i=0;i<numoids;i++) {
+ for(i=0; i<numoids; i++) {
unsigned int oid=oidarray[i];
struct readerlist *rl=chashRemove2(aborttable, oid);
struct readerlist *tmp;
do {
int count=rl->numreaders;
int j;
- for(j=0;count;j++) {
+ for(j=0; count; j++) {
int *t_abort=rl->array[j];
if (t_abort!=NULL) {
- *t_abort=1;//It's okay to set our own abort flag...it is
- //too late to abort us
+ *t_abort=1; //It's okay to set our own abort flag...it is
+ //too late to abort us
count--;
}
}
void removethisreadtransaction(unsigned char* oidverread, unsigned int numoids) {
int i,j;
pthread_mutex_lock(&aborttablelock);
- for(i=0;i<numoids;i++) {
+ for(i=0; i<numoids; i++) {
unsigned int oid=*((unsigned int *)oidverread);
struct readerlist * rl=chashSearch(aborttable, oid);
struct readerlist *first=rl;
oidverread+=(sizeof(unsigned int)+sizeof(unsigned short));
while(rl!=NULL) {
- for(j=0;j<READERSIZE;j++) {
+ for(j=0; j<READERSIZE; j++) {
if (rl->array[j]==&t_abort) {
rl->array[j]=NULL;
if ((--rl->numreaders)==0) {
if (first==rl) {
chashRemove2(aborttable, oid);
- if (rl->next!=NULL)
+ if (rl->next!=NULL)
chashInsert(aborttable, oid, rl->next);
rl->next=freelist;
freelist=rl;
first=rl;
rl=rl->next;
}
- nextitem:
+nextitem:
;
}
pthread_mutex_unlock(&aborttablelock);
chashlistnode_t *ptr=c_table;
int i,j;
pthread_mutex_lock(&aborttablelock);
- for(i=0;i<c_size;i++) {
+ for(i=0; i<c_size; i++) {
chashlistnode_t *curr=&ptr[i];
do {
unsigned int oid=curr->key;
struct readerlist * rl=chashSearch(aborttable, oid);
struct readerlist *first=rl;
while(rl!=NULL) {
- for(j=0;j<READERSIZE;j++) {
+ for(j=0; j<READERSIZE; j++) {
if (rl->array[j]==&t_abort) {
rl->array[j]=NULL;
if ((--rl->numreaders)==0) {
if (first==rl) {
chashRemove2(aborttable, oid);
- if (rl->next!=NULL)
+ if (rl->next!=NULL)
chashInsert(aborttable, oid, rl->next);
rl->next=freelist;
freelist=rl;
first=rl;
rl=rl->next;
}
- nextitem:
+nextitem:
curr=curr->next;
} while(curr!=NULL);
}
void removethistransaction(unsigned int oidarray[], unsigned int numoids) {
int i,j;
pthread_mutex_lock(&aborttablelock);
- for(i=0;i<numoids;i++) {
+ for(i=0; i<numoids; i++) {
unsigned int oid=oidarray[i];
struct readerlist * rl=chashSearch(aborttable, oid);
-
+
struct readerlist *first=rl;
while(rl!=NULL) {
- for(j=0;j<READERSIZE;j++) {
+ for(j=0; j<READERSIZE; j++) {
if (rl->array[j]==&t_abort) {
rl->array[j]=NULL;
if ((--rl->numreaders)==0) {
if (first==rl) {
chashRemove2(aborttable, oid);
- if (rl->next!=NULL)
+ if (rl->next!=NULL)
chashInsert(aborttable, oid, rl->next);
rl->next=freelist;
freelist=rl;
first=rl;
rl=rl->next;
}
- nextitem:
+nextitem:
;
}
pthread_mutex_unlock(&aborttablelock);
extern char bigarray[16*1024*1024];
extern int bigindex;
#define LOGEVENT(x) { \
- int tmp=bigindex++; \
- bigarray[tmp]=x; \
- }
+ int tmp=bigindex ++; \
+ bigarray[tmp]=x; \
+}
#else
#define LOGEVENT(x)
#endif
int retval;
char oidType;
/*//TODO comment it for now because objects read are already in the prefetch cache
- oidType = 'R';
- if(tdata->f.numread > 0) {
- if((retval = copyToCache(tdata->f.numread, (unsigned int *)(tdata->objread), oidType)) != 0) {
+ oidType = 'R';
+ if(tdata->f.numread > 0) {
+ if((retval = copyToCache(tdata->f.numread, (unsigned int *)(tdata->objread), oidType)) != 0) {
printf("%s(): Error in copying objects read at %s, %d\n", __func__, __FILE__, __LINE__);
return -1;
- }
- }
- */
+ }
+ }
+ */
if(tdata->f.nummod > 0) {
oidType = 'M';
if((retval = copyToCache(tdata->f.nummod, tdata->oidmod, oidType)) != 0) {
// oid = *((unsigned int *)(objread+(sizeof(unsigned int)+
// sizeof(unsigned short))*i));
//} else {
- oid = oidarray[i];
+ oid = oidarray[i];
//}
pthread_mutex_lock(&prefetchcache_mutex);
objheader_t * header;
int retval;
int i;
int nummod=0;
- for(i=0;i<pilecount;i++) {
+ for(i=0; i<pilecount; i++) {
nummod+=tdata[i].f.nummod;
}
bzero(&clientaddr, sizeof(clientaddr));
offset += sizeof(unsigned int);
while(nummod>0) {
- int numtosend=nummod>maxObjsPerMsg?maxObjsPerMsg:nummod;
+ int numtosend=nummod>maxObjsPerMsg ? maxObjsPerMsg : nummod;
int localoffset=offset;
int sentmsgs=0;
*((short *)(writeBuffer+offset)) = (short) (sizeof(unsigned int) * numtosend);
for(; j < pilecount; j++) {
for(; i < tdata[j].f.nummod; i++) {
- *((unsigned int *) (writeBuffer+localoffset)) = tdata[j].oidmod[i]; //copy objects
- localoffset += sizeof(unsigned int);
- if ((++sentmsgs)==numtosend) {
- i++;
- goto send;
- }
+ *((unsigned int *) (writeBuffer+localoffset)) = tdata[j].oidmod[i]; //copy objects
+ localoffset += sizeof(unsigned int);
+ if ((++sentmsgs)==numtosend) {
+ i++;
+ goto send;
+ }
}
i=0;
}
objheader_t *header;
/* Lookup Objects in prefetch cache and remove them */
if(((header = prehashSearch(oid)) != NULL)) {
- //Keep invalid objects
- STATUS(header)=DIRTY;
- //prehashRemove(oid);
+ //Keep invalid objects
+ STATUS(header)=DIRTY;
+ //prehashRemove(oid);
}
offset += sizeof(unsigned int);
}
mlookup.numelements = 0; // Initial number of elements in the hash
mlookup.loadfactor = loadfactor;
int i;
- for(i=0;i<NUMLOCKS;i++)
+ for(i=0; i<NUMLOCKS; i++)
mlookup.larray[i].lock=RW_LOCK_BIAS;
//Initialize the pthread_mutex variable
return 0;
// Assign to keys to bins inside hash table
unsigned int mhashFunction(unsigned int key) {
- return( key & mlookup.mask) >>1;
+ return ( key & mlookup.mask) >>1;
}
// Insert value and key mapping into the hash table
unsigned int i,index;
unsigned int mask;
- for(i=0;i<NUMLOCKS;i++) {
+ for(i=0; i<NUMLOCKS; i++) {
volatile unsigned int * lockptr=&mlookup.larray[i].lock;
-
+
while(!write_trylock(lockptr)) {
sched_yield();
}
}
-
+
if (mlookup.numelements < mlookup.threshold) {
//release lock and return
- for(i=0;i<NUMLOCKS;i++) {
+ for(i=0; i<NUMLOCKS; i++) {
volatile unsigned int * lockptr=&mlookup.larray[i].lock;
write_unlock(lockptr);
}
mhashlistnode_t * ptr = mlookup.table;
unsigned int oldsize = mlookup.size;
-
+
if((node = calloc(newsize, sizeof(mhashlistnode_t))) == NULL) {
printf("Calloc error %s %d\n", __FILE__, __LINE__);
return;
free(curr);
} /*
- NOTE: Add this case if you change this...
- This case currently never happens because of the way things rehash....
-else if (isfirst) {
- mhashlistnode_t *newnode = calloc(1, sizeof(mhashlistnode_t));
- newnode->key = curr->key;
- newnode->val = curr->val;
- newnode->next = tmp->next;
- tmp->next=newnode;
- } */
+ NOTE: Add this case if you change this...
+ This case currently never happens because of the way things rehash....
+ else if (isfirst) {
+ mhashlistnode_t *newnode = calloc(1, sizeof(mhashlistnode_t));
+ newnode->key = curr->key;
+ newnode->val = curr->val;
+ newnode->next = tmp->next;
+ tmp->next=newnode;
+ } */
else {
curr->next=tmp->next;
tmp->next=curr;
}
free(ptr);
- for(i=0;i<NUMLOCKS;i++) {
+ for(i=0; i<NUMLOCKS; i++) {
volatile unsigned int * lockptr=&mlookup.larray[i].lock;
write_unlock(lockptr);
}
return;
}
/*
-unsigned int *mhashGetKeys(unsigned int *numKeys) {
- unsigned int *keys;
- int i, keyindex;
- mhashlistnode_t *curr;
+ unsigned int *mhashGetKeys(unsigned int *numKeys) {
+ unsigned int *keys;
+ int i, keyindex;
+ mhashlistnode_t *curr;
- pthread_mutex_lock(&mlookup.locktable);
+ pthread_mutex_lock(&mlookup.locktable);
- *numKeys = mlookup.numelements;
- keys = calloc(*numKeys, sizeof(unsigned int));
+ *numKeys = mlookup.numelements;
+ keys = calloc(*numKeys, sizeof(unsigned int));
- keyindex = 0;
- for (i = 0; i < mlookup.size; i++) {
+ keyindex = 0;
+ for (i = 0; i < mlookup.size; i++) {
if (mlookup.table[i].key != 0) {
curr = &mlookup.table[i];
while (curr != NULL) {
- keys[keyindex++] = curr->key;
- curr = curr->next;
+ keys[keyindex++] = curr->key;
+ curr = curr->next;
}
}
- }
+ }
- if (keyindex != *numKeys)
+ if (keyindex != *numKeys)
printf("mhashGetKeys(): WARNING: incorrect mlookup.numelements value!\n");
- pthread_mutex_unlock(&mlookup.locktable);
- return keys;
- }*/
+ pthread_mutex_unlock(&mlookup.locktable);
+ return keys;
+ }*/
pflookup.numelements = 0; // Initial number of elements in the hash
pflookup.loadfactor = loadfactor;
pflookup.threshold=loadfactor*size;
-
- //Initilize
- for(i=0;i<PRENUMLOCKS;i++){
+
+ //Initilize
+ for(i=0; i<PRENUMLOCKS; i++) {
pflookup.larray[i].lock=RW_LOCK_BIAS;
}
/*
- //Intiliaze and set prefetch table mutex attribute
- pthread_mutexattr_init(&pflookup.prefetchmutexattr);
- //NOTE:PTHREAD_MUTEX_RECURSIVE is currently inside a #if_def UNIX98 in the pthread.h file
- //Therefore use PTHREAD_MUTEX_RECURSIVE_NP instead
- pthread_mutexattr_settype(&pflookup.prefetchmutexattr, PTHREAD_MUTEX_RECURSIVE_NP);
-
- //Initialize mutex var
- pthread_mutex_init(&pflookup.lock, &pflookup.prefetchmutexattr);
- //pthread_mutex_init(&pflookup.lock, NULL);
- pthread_cond_init(&pflookup.cond, NULL);
- */
+ //Intiliaze and set prefetch table mutex attribute
+ pthread_mutexattr_init(&pflookup.prefetchmutexattr);
+ //NOTE:PTHREAD_MUTEX_RECURSIVE is currently inside a #if_def UNIX98 in the pthread.h file
+ //Therefore use PTHREAD_MUTEX_RECURSIVE_NP instead
+ pthread_mutexattr_settype(&pflookup.prefetchmutexattr, PTHREAD_MUTEX_RECURSIVE_NP);
+
+ //Initialize mutex var
+ pthread_mutex_init(&pflookup.lock, &pflookup.prefetchmutexattr);
+ //pthread_mutex_init(&pflookup.lock, NULL);
+ pthread_cond_init(&pflookup.cond, NULL);
+ */
return 0;
}
//Store oids and their pointers into hash
void prehashInsert(unsigned int key, void *val) {
-
+
int isFound=0;
prehashlistnode_t *ptr, *tmp, *node;
atomic_inc(&pflookup.numelements);
} else {
tmp = ptr;
- while(tmp != NULL) {
+ while(tmp != NULL) {
if(tmp->key == key) {
- isFound=1;
- tmp->val = val;//Replace value for an exsisting key
- write_unlock(lockptr);
- return;
+ isFound=1;
+ tmp->val = val; //Replace value for an exsisting key
+ write_unlock(lockptr);
+ return;
}
tmp=tmp->next;
}
if(!isFound) { //Insert new key and value into the chain of linked list for the given bin
node = calloc(1, sizeof(prehashlistnode_t));
node->key = key;
- node->val = val ;
+ node->val = val;
node->next = ptr->next;
ptr->next=node;
atomic_inc(&pflookup.numelements);
// Search for an address for a given oid
void *prehashSearch(unsigned int key) {
int index;
-
+
unsigned int keyindex=key>>1;
volatile unsigned int * lockptr=&pflookup.larray[keyindex&PRELOCKMASK].lock;
while(!read_trylock(lockptr)) {
sched_yield();
}
prehashlistnode_t *node = &pflookup.table[keyindex&pflookup.mask];
-
+
do {
if(node->key == key) {
void * tmp=node->val;
write_unlock(lockptr);
return 1;
}
-
+
unsigned int prehashResize(unsigned int newsize) {
prehashlistnode_t *node, *ptr; // curr and next keep track of the current and the next chashlistnodes in a linked list
unsigned int oldsize;
int i,index;
unsigned int mask;
- for(i=0;i<PRENUMLOCKS;i++) {
+ for(i=0; i<PRENUMLOCKS; i++) {
volatile unsigned int * lockptr=&pflookup.larray[i].lock;
-
+
while(!write_trylock(lockptr)) {
sched_yield();
}
}
-
+
if (pflookup.numelements < pflookup.threshold) {
//release lock and return
- for(i=0;i<PRENUMLOCKS;i++) {
+ for(i=0; i<PRENUMLOCKS; i++) {
volatile unsigned int * lockptr=&pflookup.larray[i].lock;
write_unlock(lockptr);
}
if (!isfirst)
free(curr);
} /*
- NOTE: Add this case if you change this...
- This case currently never happens because of the way things rehash....
-else if (isfirst) {
- prehashlistnode_t * newnode = calloc(1, sizeof(prehashlistnode_t));
- newnode->key = curr->key;
- newnode->val = curr->val;
- newnode->next = tmp->next;
- tmp->next=newnode;
- } */
+ NOTE: Add this case if you change this...
+ This case currently never happens because of the way things rehash....
+ else if (isfirst) {
+ prehashlistnode_t * newnode = calloc(1, sizeof(prehashlistnode_t));
+ newnode->key = curr->key;
+ newnode->val = curr->val;
+ newnode->next = tmp->next;
+ tmp->next=newnode;
+ } */
else {
curr->next=tmp->next;
tmp->next=curr;
}
free(ptr); //Free the memory of the old hash table
- for(i=0;i<PRENUMLOCKS;i++) {
+ for(i=0; i<PRENUMLOCKS; i++) {
volatile unsigned int * lockptr=&pflookup.larray[i].lock;
write_unlock(lockptr);
}
//Note: This is based on the implementation of the inserting a key in the first position of the hashtable
void prehashClear() {
/*
-#ifdef CACHE
- int i, isFirstBin;
- prehashlistnode_t *ptr, *prev, *curr;
+ #ifdef CACHE
+ int i, isFirstBin;
+ prehashlistnode_t *ptr, *prev, *curr;
- pthread_mutex_lock(&pflookup.lock);
+ pthread_mutex_lock(&pflookup.lock);
- ptr = pflookup.table;
- for(i = 0; i < pflookup.size; i++) {
- prev = &ptr[i];
- isFirstBin = 1;
- while(prev->next != NULL) {
+ ptr = pflookup.table;
+ for(i = 0; i < pflookup.size; i++) {
+ prev = &ptr[i];
+ isFirstBin = 1;
+ while(prev->next != NULL) {
isFirstBin = 0;
curr = prev->next;
prev->next = curr->next;
free(curr);
- }
- if(isFirstBin == 1) {
+ }
+ if(isFirstBin == 1) {
prev->key = 0;
prev->next = NULL;
- }
- }
- {
- int stale;
- pthread_mutex_unlock(&pflookup.lock);
- pthread_mutex_lock(&prefetchcache_mutex);
- if (pNodeInfo.newstale==NULL) {
+ }
+ }
+ {
+ int stale;
+ pthread_mutex_unlock(&pflookup.lock);
+ pthread_mutex_lock(&prefetchcache_mutex);
+ if (pNodeInfo.newstale==NULL) {
//transfer the list wholesale;
pNodeInfo.oldstale=pNodeInfo.oldptr;
pNodeInfo.newstale=pNodeInfo.newptr;
- } else {
+ } else {
//merge the two lists
pNodeInfo.newstale->prev=pNodeInfo.oldptr;
pNodeInfo.newstale=pNodeInfo.newptr;
- }
- stale=STALL_THRESHOLD-pNodeInfo.stale_count;
-
- if (stale>0&&stale>pNodeInfo.stall)
+ }
+ stale=STALL_THRESHOLD-pNodeInfo.stale_count;
+
+ if (stale>0&&stale>pNodeInfo.stall)
pNodeInfo.stall=stale;
- pNodeInfo.stale_count+=pNodeInfo.os_count;
- pNodeInfo.oldptr=getObjStr(DEFAULT_OBJ_STORE_SIZE);
- pNodeInfo.newptr=pNodeInfo.oldptr;
- pNodeInfo.os_count=1;
- pthread_mutex_unlock(&prefetchcache_mutex);
- }
-#endif
- */
+ pNodeInfo.stale_count+=pNodeInfo.os_count;
+ pNodeInfo.oldptr=getObjStr(DEFAULT_OBJ_STORE_SIZE);
+ pNodeInfo.newptr=pNodeInfo.oldptr;
+ pNodeInfo.os_count=1;
+ pthread_mutex_unlock(&prefetchcache_mutex);
+ }
+ #endif
+ */
}
struct prelockarray {
- volatile unsigned int lock;
- int buf[15];
+ volatile unsigned int lock;
+ int buf[15];
};
#define PRENUMLOCKS 16
/** This program runs the client for clock synchronization on all machines
- Client on all machines **/
+ Client on all machines **/
// One clock tick = (1 / CPU processor speed in Hz) secs
//compile:
// gcc -Wall -o server clocksyncclient.c
#include <math.h>
#define PORT 8500
- /* REPLACE with your server machine name*/
+/* REPLACE with your server machine name*/
#define DIRSIZE 64
#define NUMITER 10000
-static __inline__ unsigned long long rdtsc(void)
-{
- unsigned hi, lo;
- __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
- return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
+static __inline__ unsigned long long rdtsc(void) {
+ unsigned hi, lo;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
}
int main(int argc, char **argv) {
printf("%lld\n", array2[i]);
}
- for(i=0;i<(NUMITER-1);i++) {
+ for(i=0; i<(NUMITER-1); i++) {
norm += array2[i];
}
long long average=(norm/(NUMITER-1));
printf("average= %lld",(norm/(NUMITER-1)));
long long stddev, avg1=0;
- for(i=0;i<(NUMITER-1);i++) {
+ for(i=0; i<(NUMITER-1); i++) {
avg1 += ((array2[i] - average) * (array2[i] - average));
}
float ans = (avg1/(NUMITER-1));
float squareroot= sqrt(ans);
float squareroot2= sqrt(avg1);
- printf("stddev= %f\n", squareroot);
+ printf("stddev= %f\n", squareroot);
printf("error= %f\n", squareroot2/(NUMITER-1));
fprintf(f1,"%lld",(norm/(NUMITER-1)));
#include <unistd.h>
#include <math.h>
-#define PORT 8500
+#define PORT 8500
#define NUMITER 10000
-#define DIRSIZE 1
+#define DIRSIZE 1
-static __inline__ unsigned long long rdtsc(void)
-{
- unsigned hi, lo;
- __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
- return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
+static __inline__ unsigned long long rdtsc(void) {
+ unsigned hi, lo;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
}
int main() {
unsigned long long dir[DIRSIZE]; /* used for incomming dir name, and
- outgoing data */
- int sd, sd_current;
- socklen_t addrlen;
+ outgoing data */
+ int sd, sd_current;
+ socklen_t addrlen;
struct sockaddr_in sin;
struct sockaddr_in pin;
exit(1);
}
/* wait for a client to talk to us */
- addrlen = sizeof(pin);
+ addrlen = sizeof(pin);
if ((sd_current = accept(sd, (struct sockaddr *)&pin, &addrlen)) == -1) {
perror("accept");
exit(1);
}
- /* if you want to see the ip address and port of the client, uncomment the
+ /* if you want to see the ip address and port of the client, uncomment the
next two lines */
/*
//array2[i]=rdtsc();
}
- for(i=0;i<(NUMITER-1);i++) {
+ for(i=0; i<(NUMITER-1); i++) {
norm += array1[i];
}
printf("average= %lld",(norm/(NUMITER-1)));
long long stddev, avg1=0;
- for(i=0;i<(NUMITER-1);i++) {
+ for(i=0; i<(NUMITER-1); i++) {
avg1 += ((array1[i] - average) * (array1[i] - average));
}
float ans = (avg1/(NUMITER-1));
float squareroot= sqrt(ans);
float squareroot2= sqrt(avg1);
- printf("stddev= %f\n", squareroot);
+ printf("stddev= %f\n", squareroot);
printf("error= %f\n", squareroot2/(NUMITER-1));
fprintf(f1,"%lld\n",(norm/(NUMITER-1)));
sleep(1);
/* close up both sockets */
- close(sd_current);
+ close(sd_current);
close(sd);
int i;
// Allocate space for the hash table
-
+
c_table = calloc(size, sizeof(chashlistnode_t));
c_loadfactor = loadfactor;
node=&c_structs->array[c_structs->num];
c_structs->num++;
} else {
- //get new list
+ //get new list
cliststruct_t *tcl=calloc(1,sizeof(cliststruct_t));
tcl->next=c_structs;
c_structs=tcl;
int isfirst; // Keeps track of the first element in the chashlistnode_t for each bin in hashtable
unsigned int i,index;
unsigned int mask;
-
+
ptr = table->table;
oldsize = table->size;
do { //Inner loop to go through linked lists
unsigned int key;
chashlistnode_t *tmp,*next;
-
+
if ((key=curr->key) == 0) { //Exit inner loop if there the first element is 0
break; //key = val =0 for element if not present within the hash table
}
if (!isfirst) {
free(curr);
}
- }/*
- NOTE: Add this case if you change this...
- This case currently never happens because of the way things rehash....
- else if (isfirst) {
- chashlistnode_t *newnode= calloc(1, sizeof(chashlistnode_t));
- newnode->key = curr->key;
- newnode->val = curr->val;
- newnode->next = tmp->next;
- tmp->next=newnode;
- } */
+ } /*
+ NOTE: Add this case if you change this...
+ This case currently never happens because of the way things rehash....
+ else if (isfirst) {
+ chashlistnode_t *newnode= calloc(1, sizeof(chashlistnode_t));
+ newnode->key = curr->key;
+ newnode->val = curr->val;
+ newnode->next = tmp->next;
+ tmp->next=newnode;
+ } */
else {
curr->next=tmp->next;
tmp->next=curr;
int isfirst; // Keeps track of the first element in the chashlistnode_t for each bin in hashtable
unsigned int i,index;
unsigned int mask;
-
+
ptr = c_table;
oldsize = c_size;
do { //Inner loop to go through linked lists
unsigned int key;
chashlistnode_t *tmp,*next;
-
+
if ((key=curr->key) == 0) { //Exit inner loop if there the first element is 0
break; //key = val =0 for element if not present within the hash table
}
if(tmp->key == 0) {
tmp->key = key;
tmp->val = curr->val;
- }/*
- NOTE: Add this case if you change this...
- This case currently never happens because of the way things rehash....
- else if (isfirst) {
- chashlistnode_t *newnode= calloc(1, sizeof(chashlistnode_t));
- newnode->key = curr->key;
- newnode->val = curr->val;
- newnode->next = tmp->next;
- tmp->next=newnode;
- } */
+ } /*
+ NOTE: Add this case if you change this...
+ This case currently never happens because of the way things rehash....
+ else if (isfirst) {
+ chashlistnode_t *newnode= calloc(1, sizeof(chashlistnode_t));
+ newnode->key = curr->key;
+ newnode->val = curr->val;
+ newnode->next = tmp->next;
+ tmp->next=newnode;
+ } */
else {
curr->next=tmp->next;
tmp->next=curr;
int i;
chashlistnode_t *ptr = ctable->table;
- for(i=0 ; i<ctable->size ; i++) {
+ for(i=0; i<ctable->size; i++) {
chashlistnode_t * curr = ptr[i].next;
while(curr!=NULL) {
chashlistnode_t * next = curr->next;
}
- for(i=1;1;i++) {
+ for(i=1; 1; i++) {
node = &table->table[((key+i*331) & table->mask)>>1];
if (node->key==0) {
node->ptr=val;
if (node->key==0)
return NULL;
- for(i=1;1;i++) {
+ for(i=1; 1; i++) {
node = &table->table[((key+i*331) & table->mask)>>1];
ckey=node->key;
if (ckey==key)
continue;
}
- for(bin=1;1;bin++) {
+ for(bin=1; 1; bin++) {
newnode = &table->table[((key+bin*331) & mask)>>1];
if (newnode->key==0) {
newnode->key=key;
/** Macro to print prefetch site id **/
//#define LOGPREFETCHSITES
-#ifdef LOGPREFETCHSITES
+#ifdef LOGPREFETCHSITES
#define LOGPREFETCHSITE(PTR) printf("[siteid= %u] ", PTR->siteid);
#else
#define LOGPREFETCHSITE(PTR)
/*
-#define LOGEVENTS //turn on Logging events
-#ifdef LOGEVENTS
-char bigarray[16*1024*1024];
-int bigindex=0;
-#define LOGEVENT(x) { \
+ #define LOGEVENTS //turn on Logging events
+ #ifdef LOGEVENTS
+ char bigarray[16*1024*1024];
+ int bigindex=0;
+ #define LOGEVENT(x) { \
int tmp=bigindex++; \
bigarray[tmp]=x; \
- }
-#else
-#define LOGEVENT(x)
-#endif
-*/
+ }
+ #else
+ #define LOGEVENT(x)
+ #endif
+ */
/**
- * Record Time after clock synchronization
+ * Record Time after clock synchronization
**/
/*
-#define LOGTIMES
-#ifdef LOGTIMES
-char bigarray1[8*1024*1024];
-unsigned int bigarray2[8*1024*1024];
-unsigned int bigarray3[8*1024*1024];
-long long bigarray4[8*1024*1024];
-int bigindex1=0;
-#define LOGTIME(x,y,z,a) {\
- int tmp=bigindex1++; \
- bigarray1[tmp]=x; \
- bigarray2[tmp]=y; \
- bigarray3[tmp]=z; \
- bigarray4[tmp]=a; \
-}
-#else
-#define LOGTIME(x,y,z,a)
-#endif
-*/
+ #define LOGTIMES
+ #ifdef LOGTIMES
+ char bigarray1[8*1024*1024];
+ unsigned int bigarray2[8*1024*1024];
+ unsigned int bigarray3[8*1024*1024];
+ long long bigarray4[8*1024*1024];
+ int bigindex1=0;
+ #define LOGTIME(x,y,z,a) {\
+ int tmp=bigindex1++; \
+ bigarray1[tmp]=x; \
+ bigarray2[tmp]=y; \
+ bigarray3[tmp]=z; \
+ bigarray4[tmp]=a; \
+ }
+ #else
+ #define LOGTIME(x,y,z,a)
+ #endif
+ */
#endif
#include <sys/time.h>
-#define TABORT1(s) {printf("%s\n", s); fflush(stdout);}
-#define TABORT2(s, msg) {printf("%s(): %s\n", s, msg); fflush(stdout);}
-#define TABORT3(func, s, msg, d) {printf("%s(): %s: for %s = %d\n", func, s, msg, d); fflush(stdout);}
-#define TABORT4(s, d) {printf("%s = %d\n", s, d); fflush(stdout);}
-#define TABORT5(func, msg1, msg2, val1, val2) {printf("%s(): %s = %x, %s = %d\n", func, msg1, val1, msg2, val2); fflush(stdout);}
-#define TABORT6(a, b, c, val1, val2) {printf("%s = %x, %s for %s = %x\n", a, val1, b, c, val2); fflush(stdout);}
-#define TABORT7(func, a, b, c, val1, val2) {printf("%s(): %s for %s =%d, %s = %x\n", func, a, b, val1, c, val2); fflush(stdout);}
-#define TABORT8(func, s, d) {printf("%s(): %s = %d\n", func, s, d); fflush(stdout);}
-#define TABORT9(func, a, b, c, d, val1, val2, val3) {printf("%s(): %s for %s =%x, %s = %d, %s = %x\n", func, a, b, val1, c, val2, d, val3); fflush(stdout);}
+#define TABORT1(s) {printf("%s\n", s); fflush(stdout); }
+#define TABORT2(s, msg) {printf("%s(): %s\n", s, msg); fflush(stdout); }
+#define TABORT3(func, s, msg, d) {printf("%s(): %s: for %s = %d\n", func, s, msg, d); fflush(stdout); }
+#define TABORT4(s, d) {printf("%s = %d\n", s, d); fflush(stdout); }
+#define TABORT5(func, msg1, msg2, val1, val2) {printf("%s(): %s = %x, %s = %d\n", func, msg1, val1, msg2, val2); fflush(stdout); }
+#define TABORT6(a, b, c, val1, val2) {printf("%s = %x, %s for %s = %x\n", a, val1, b, c, val2); fflush(stdout); }
+#define TABORT7(func, a, b, c, val1, val2) {printf("%s(): %s for %s =%d, %s = %x\n", func, a, b, val1, c, val2); fflush(stdout); }
+#define TABORT8(func, s, d) {printf("%s(): %s = %d\n", func, s, d); fflush(stdout); }
+#define TABORT9(func, a, b, c, d, val1, val2, val3) {printf("%s(): %s for %s =%x, %s = %d, %s = %x\n", func, a, b, val1, c, val2, d, val3); fflush(stdout); }
#define ARRAY_SIZE 10100
#define GETSTARTDELAY(start, count) { \
inline int read_trylock(volatile int *lock) {
atomic_dec(lock);
if (atomic_read(lock) >= 0)
- return 1; //can aquire a new read lock
+ return 1; //can aquire a new read lock
atomic_inc(lock);
return 0; //failure
}
#ifndef _DSMLOCK_H_
#define _DSMLOCK_H_
-#define CFENCE asm volatile("":::"memory");
+#define CFENCE asm volatile ("" ::: "memory");
#define RW_LOCK_BIAS 0x01000000
#define atomic_read(v) (*v)
#define RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \
" .align 4\n" \
- " .long 661f\n" /* address */\
+ " .long 661f\n" /* address */ \
".previous\n" \
"661:\n\tlock; "
#define MSG_NOSIGNAL 0
#endif
-#define CFENCE asm volatile("":::"memory");
+#define CFENCE asm volatile ("" ::: "memory");
/***********************************************************
* Macros
**********************************************************/
#define GET_PTR_EOFF(x,n) ((short *)(x + 2*sizeof(int) + (n*sizeof(unsigned int))))
#define GET_PTR_ARRYFLD(x,n) ((short *)(x + 2*sizeof(int) + (n*sizeof(unsigned int)) + (n*sizeof(short))))
-#define ENDEBUG(s) { printf("Inside %s()\n", s); fflush(stdout);}
-#define EXDEBUG(s) {printf("Outside %s()\n", s); fflush(stdout);}
+#define ENDEBUG(s) { printf("Inside %s()\n", s); fflush(stdout); }
+#define EXDEBUG(s) {printf("Outside %s()\n", s); fflush(stdout); }
/*****************************************
* Coordinator Messages
***************************************/
#define RETRYINTERVAL 1 //N (For MatrixMultiply, 2DFFT, 2DConv benchmarks)
#define SHUTDOWNINTERVAL 75 //M
#define NUM_TRY_TO_COMMIT 2
-#define MEM_ALLOC_THRESHOLD 20485760//20MB
+#define MEM_ALLOC_THRESHOLD 20485760 //20MB
#include <stdlib.h>
#include <stdio.h>
char getCommitCountForObjRead(unsigned int *, unsigned int *, unsigned int *, int *, int *, int *, int *, int *,
int *, int *, char *, unsigned int, unsigned short);
-void procRestObjs(char *, char *, int , int, int, unsigned int *, unsigned int *, int *, int *, int *, int *);
+void procRestObjs(char *, char *, int, int, int, unsigned int *, unsigned int *, int *, int *, int *, int *);
void processVerNoMatch(unsigned int *, unsigned int *, int *, int *, int *, int *, unsigned int, unsigned short);
-
+
/* end server portion */
/* Prototypes for transactions */
void transStart();
//#define TRANSREAD(x,y,z) /* Use "z" which is program point at which TRANSREAD is generated, use this as transRead2(inputvalue,z) */
#define TRANSREAD(x,y) { \
- unsigned int inputvalue;\
-if ((inputvalue=(unsigned int)y)==0) x=NULL;\
-else { \
-chashlistnode_t * cnodetmp=&c_table[(inputvalue&c_mask)>>1]; \
-do { \
- if (cnodetmp->key==inputvalue) {x=(void *)&((objheader_t*)cnodetmp->val)[1];break;} \
-cnodetmp=cnodetmp->next;\
- if (cnodetmp==NULL) {x=(void *)transRead2(inputvalue); asm volatile("":"=m"(c_table),"=m"(c_mask));break;} \
-} while(1);\
-}}
+ unsigned int inputvalue; \
+ if ((inputvalue=(unsigned int)y)==0) x=NULL; \
+ else { \
+ chashlistnode_t * cnodetmp=&c_table[(inputvalue&c_mask)>>1]; \
+ do { \
+ if (cnodetmp->key==inputvalue) {x=(void *)& ((objheader_t*)cnodetmp->val)[1]; break; } \
+ cnodetmp=cnodetmp->next; \
+ if (cnodetmp==NULL) {x=(void *)transRead2(inputvalue); asm volatile ("" : "=m" (c_table),"=m" (c_mask)); break; } \
+ } while(1); \
+ }}
__attribute__((pure)) objheader_t *transRead(unsigned int);
__attribute__((pure)) objheader_t *transRead2(unsigned int);
extern long long bigarray4[6*1024*1024];
extern int bigarray5[6*1024*1024];
extern int bigindex1;
-#define LOGTIME(x,y,z,a,b) {\
- int tmp=bigindex1; \
- bigarray1[tmp]=x; \
- bigarray2[tmp]=y; \
- bigarray3[tmp]=z; \
- bigarray4[tmp]=a; \
- bigarray5[tmp]=b; \
- bigindex1++; \
+#define LOGTIME(x,y,z,a,b) { \
+ int tmp=bigindex1; \
+ bigarray1[tmp]=x; \
+ bigarray2[tmp]=y; \
+ bigarray3[tmp]=z; \
+ bigarray4[tmp]=a; \
+ bigarray5[tmp]=b; \
+ bigindex1++; \
}
#else
#define LOGTIME(x,y,z,a,b)
#endif
-long long myrdtsc(void)
-{
- unsigned hi, lo;
- __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
+long long myrdtsc(void) {
+ unsigned hi, lo;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
}
/* If object is write locked, just wait */
/* May want to remove at some point */
while((*(volatile int *)STATUSPTR(h))<=0)
- sched_yield();
+ sched_yield();
GETSIZE(size, h);
size += sizeof(objheader_t);
sockid = (int) acceptfd;
return 1;
}
ptr = (char *) modptr;
- for(i = 0 ; i < fixed.nummod; i++) {
+ for(i = 0; i < fixed.nummod; i++) {
headaddr = (objheader_t *) ptr;
oid = OID(headaddr);
oidmod[i] = oid;
incr += sizeof(unsigned int);
version = *((unsigned short *)(objread + incr));
retval=getCommitCountForObjRead(oidnotfound, oidlocked, oidvernotmatch, &objnotfound, &objlocked, &objvernotmatch,
- &v_matchnolock, &v_matchlock, &v_nomatch, &numBytes, &control, oid, version);
+ &v_matchnolock, &v_matchlock, &v_nomatch, &numBytes, &control, oid, version);
} else { //Objs modified
if(i == fixed->numread) {
- oidlocked[objlocked++] = -1;
+ oidlocked[objlocked++] = -1;
}
int tmpsize;
headptr = (objheader_t *) ptr;
GETSIZE(tmpsize, headptr);
ptr += sizeof(objheader_t) + tmpsize;
retval=getCommitCountForObjMod(oidnotfound, oidlocked, oidvernotmatch, &objnotfound,
- &objlocked, &objvernotmatch, &v_matchnolock, &v_matchlock, &v_nomatch,
- &numBytes, &control, oid, version);
+ &objlocked, &objvernotmatch, &v_matchnolock, &v_matchlock, &v_nomatch,
+ &numBytes, &control, oid, version);
}
if(retval==TRANS_DISAGREE || retval==TRANS_SOFT_ABORT) {
//unlock objects as soon versions mismatch or locks cannot be acquired)
if (objlocked > 0) {
- int useWriteUnlock = 0;
- for(j = 0; j < objlocked; j++) {
- if(oidlocked[j] == -1) {
- useWriteUnlock = 1;
- continue;
- }
- if((headptr = mhashSearch(oidlocked[j])) == NULL) {
- printf("mhashSearch returns NULL at %s, %d\n", __FILE__, __LINE__);
- return 0;
- }
- if(useWriteUnlock) {
- write_unlock(STATUSPTR(headptr));
- } else {
- read_unlock(STATUSPTR(headptr));
- }
- }
- if(v_nomatch > 0)
- free(oidlocked);
+ int useWriteUnlock = 0;
+ for(j = 0; j < objlocked; j++) {
+ if(oidlocked[j] == -1) {
+ useWriteUnlock = 1;
+ continue;
+ }
+ if((headptr = mhashSearch(oidlocked[j])) == NULL) {
+ printf("mhashSearch returns NULL at %s, %d\n", __FILE__, __LINE__);
+ return 0;
+ }
+ if(useWriteUnlock) {
+ write_unlock(STATUSPTR(headptr));
+ } else {
+ read_unlock(STATUSPTR(headptr));
+ }
+ }
+ if(v_nomatch > 0)
+ free(oidlocked);
}
objlocked=0;
break;
}
#endif
/*
- if (objlocked > 0) {
- int useWriteUnlock = 0;
- for(j = 0; j < objlocked; j++) {
+ if (objlocked > 0) {
+ int useWriteUnlock = 0;
+ for(j = 0; j < objlocked; j++) {
if(oidlocked[j] == -1) {
useWriteUnlock = 1;
continue;
} else {
read_unlock(STATUSPTR(headptr));
}
- }
- free(oidlocked);
- }
- */
+ }
+ free(oidlocked);
+ }
+ */
control=TRANS_DISAGREE;
send_data(acceptfd, &control, sizeof(char));
#ifdef CACHE
if (write_trylock(STATUSPTR(mobj))) { // Can acquire write lock
if (version == ((objheader_t *)mobj)->version) { /* match versions */
(*v_matchnolock)++;
- *control = TRANS_AGREE;
+ *control = TRANS_AGREE;
} else { /* If versions don't match ...HARD ABORT */
(*v_nomatch)++;
oidvernotmatch[*objvernotmatch] = oid;
} else { //we are locked
if (version == ((objheader_t *)mobj)->version) { /* Check if versions match */
(*v_matchlock)++;
- *control=TRANS_SOFT_ABORT;
+ *control=TRANS_SOFT_ABORT;
} else { /* If versions don't match ...HARD ABORT */
(*v_nomatch)++;
oidvernotmatch[*objvernotmatch] = oid;
/* Save the oids not found and number of oids not found for later use */
oidnotfound[*objnotfound] = oid;
(*objnotfound)++;
- *control = TRANS_DISAGREE;
+ *control = TRANS_DISAGREE;
} else { /* If Obj found in machine (i.e. has not moved) */
/* Check if Obj is locked by any previous transaction */
if (read_trylock(STATUSPTR(mobj))) { //Can further acquire read locks
if (version == ((objheader_t *)mobj)->version) { /* match versions */
(*v_matchnolock)++;
- *control=TRANS_AGREE;
+ *control=TRANS_AGREE;
} else { /* If versions don't match ...HARD ABORT */
(*v_nomatch)++;
oidvernotmatch[(*objvernotmatch)++] = oid;
} else { /* Some other transaction has aquired a write lock on this object */
if (version == ((objheader_t *)mobj)->version) { /* Check if versions match */
(*v_matchlock)++;
- *control=TRANS_SOFT_ABORT;
+ *control=TRANS_SOFT_ABORT;
} else { /* If versions don't match ...HARD ABORT */
(*v_nomatch)++;
oidvernotmatch[*objvernotmatch] = oid;
return *control;
}
-void procRestObjs(char *objread,
- char *objmod,
- int index,
- int numread,
- int nummod,
- unsigned int *oidnotfound,
+void procRestObjs(char *objread,
+ char *objmod,
+ int index,
+ int numread,
+ int nummod,
+ unsigned int *oidnotfound,
unsigned int *oidvernotmatch,
- int *objnotfound,
- int *objvernotmatch,
- int *v_nomatch,
+ int *objnotfound,
+ int *objvernotmatch,
+ int *v_nomatch,
int *numBytes) {
int i;
unsigned int oid;
objmod += sizeof(objheader_t) + tmpsize;
}
processVerNoMatch(oidnotfound,
- oidvernotmatch,
- objnotfound,
- objvernotmatch,
- v_nomatch,
- numBytes,
- oid,
- version);
+ oidvernotmatch,
+ objnotfound,
+ objvernotmatch,
+ v_nomatch,
+ numBytes,
+ oid,
+ version);
}
return;
}
-void processVerNoMatch(unsigned int *oidnotfound,
- unsigned int *oidvernotmatch,
- int *objnotfound,
- int *objvernotmatch,
- int *v_nomatch,
- int *numBytes,
- unsigned int oid,
- unsigned short version) {
+void processVerNoMatch(unsigned int *oidnotfound,
+ unsigned int *oidvernotmatch,
+ int *objnotfound,
+ int *objvernotmatch,
+ int *v_nomatch,
+ int *numBytes,
+ unsigned int oid,
+ unsigned short version) {
void *mobj;
/* Check if object is still present in the machine since the beginning of TRANS_REQUEST */
if (version != ((objheader_t *)mobj)->version) { /* match versions */
(*v_nomatch)++;
oidvernotmatch[*objvernotmatch] = oid;
- (*objvernotmatch)++;
- int size;
+ (*objvernotmatch)++;
+ int size;
GETSIZE(size, mobj);
size += sizeof(objheader_t);
*numBytes += size;
}
}
- /* This function decides what control message such as TRANS_AGREE, TRANS_DISAGREE or TRANS_SOFT_ABORT
- * to send to Coordinator based on the votes of oids involved in the transaction */
- char decideCtrlMessage(fixed_data_t *fixed, trans_commit_data_t *transinfo, int *v_matchnolock, int *v_matchlock,
- int *v_nomatch, int *objnotfound, int *objlocked, void *modptr,
- unsigned int *oidnotfound, unsigned int *oidlocked, int acceptfd) {
- int val;
- char control = 0;
-
- /* Condition to send TRANS_AGREE */
- if(*(v_matchnolock) == fixed->numread + fixed->nummod) {
- control = TRANS_AGREE;
- /* Send control message */
- send_data(acceptfd, &control, sizeof(char));
- }
- /* Condition to send TRANS_SOFT_ABORT */
- if((*(v_matchlock) > 0 && *(v_nomatch) == 0) || (*(objnotfound) > 0 && *(v_nomatch) == 0)) {
- control = TRANS_SOFT_ABORT;
-
- /* Send control message */
- send_data(acceptfd, &control, sizeof(char));
-
- /* FIXME how to send objs Send number of oids not found and the missing oids if objects are missing in the machine */
- if(*(objnotfound) != 0) {
- int msg[1];
- msg[0] = *(objnotfound);
- send_data(acceptfd, &msg, sizeof(int));
- int size = sizeof(unsigned int)* *(objnotfound);
- send_data(acceptfd, oidnotfound, size);
- }
- }
+/* This function decides what control message such as TRANS_AGREE, TRANS_DISAGREE or TRANS_SOFT_ABORT
+ * to send to Coordinator based on the votes of oids involved in the transaction */
+char decideCtrlMessage(fixed_data_t *fixed, trans_commit_data_t *transinfo, int *v_matchnolock, int *v_matchlock,
+ int *v_nomatch, int *objnotfound, int *objlocked, void *modptr,
+ unsigned int *oidnotfound, unsigned int *oidlocked, int acceptfd) {
+ int val;
+ char control = 0;
+
+ /* Condition to send TRANS_AGREE */
+ if(*(v_matchnolock) == fixed->numread + fixed->nummod) {
+ control = TRANS_AGREE;
+ /* Send control message */
+ send_data(acceptfd, &control, sizeof(char));
+ }
+ /* Condition to send TRANS_SOFT_ABORT */
+ if((*(v_matchlock) > 0 && *(v_nomatch) == 0) || (*(objnotfound) > 0 && *(v_nomatch) == 0)) {
+ control = TRANS_SOFT_ABORT;
- /* Fill out the trans_commit_data_t data structure. This is required for a trans commit process
- * if Participant receives a TRANS_COMMIT */
- transinfo->objlocked = oidlocked;
- transinfo->objnotfound = oidnotfound;
- transinfo->modptr = modptr;
- transinfo->numlocked = *(objlocked);
- transinfo->numnotfound = *(objnotfound);
- return control;
+ /* Send control message */
+ send_data(acceptfd, &control, sizeof(char));
+
+ /* FIXME how to send objs Send number of oids not found and the missing oids if objects are missing in the machine */
+ if(*(objnotfound) != 0) {
+ int msg[1];
+ msg[0] = *(objnotfound);
+ send_data(acceptfd, &msg, sizeof(int));
+ int size = sizeof(unsigned int)* *(objnotfound);
+ send_data(acceptfd, oidnotfound, size);
+ }
}
- /* This function processes all modified objects involved in a TRANS_COMMIT and updates pointer
- * addresses in lookup table and also changes version number
- * Sends an ACK back to Coordinator */
- int transCommitProcess(void *modptr, unsigned int *oidmod, unsigned int *oidlocked, int nummod, int numlocked, int acceptfd) {
- objheader_t *header;
- objheader_t *newheader;
- int i = 0, offset = 0;
- char control;
- int tmpsize;
-
- /* Process each modified object saved in the mainobject store */
- for(i = 0; i < nummod; i++) {
- if((header = (objheader_t *) mhashSearch(oidmod[i])) == NULL) {
- printf("Error: mhashsearch returns NULL at %s, %d\n", __FILE__, __LINE__);
- return 1;
- }
- GETSIZE(tmpsize,header);
-
- {
- struct ___Object___ *dst=(struct ___Object___*)((char*)header+sizeof(objheader_t));
- struct ___Object___ *src=(struct ___Object___*)((char*)modptr+sizeof(objheader_t)+offset);
- dst->type=src->type;
- dst->___cachedCode___=src->___cachedCode___;
- dst->___cachedHash___=src->___cachedHash___;
- memcpy(&dst[1], &src[1], tmpsize-sizeof(struct ___Object___));
- }
+ /* Fill out the trans_commit_data_t data structure. This is required for a trans commit process
+ * if Participant receives a TRANS_COMMIT */
+ transinfo->objlocked = oidlocked;
+ transinfo->objnotfound = oidnotfound;
+ transinfo->modptr = modptr;
+ transinfo->numlocked = *(objlocked);
+ transinfo->numnotfound = *(objnotfound);
+ return control;
+}
+
+/* This function processes all modified objects involved in a TRANS_COMMIT and updates pointer
+ * addresses in lookup table and also changes version number
+ * Sends an ACK back to Coordinator */
+int transCommitProcess(void *modptr, unsigned int *oidmod, unsigned int *oidlocked, int nummod, int numlocked, int acceptfd) {
+ objheader_t *header;
+ objheader_t *newheader;
+ int i = 0, offset = 0;
+ char control;
+ int tmpsize;
+
+ /* Process each modified object saved in the mainobject store */
+ for(i = 0; i < nummod; i++) {
+ if((header = (objheader_t *) mhashSearch(oidmod[i])) == NULL) {
+ printf("Error: mhashsearch returns NULL at %s, %d\n", __FILE__, __LINE__);
+ return 1;
+ }
+ GETSIZE(tmpsize,header);
+
+ {
+ struct ___Object___ *dst=(struct ___Object___*)((char*)header+sizeof(objheader_t));
+ struct ___Object___ *src=(struct ___Object___*)((char*)modptr+sizeof(objheader_t)+offset);
+ dst->type=src->type;
+ dst->___cachedCode___=src->___cachedCode___;
+ dst->___cachedHash___=src->___cachedHash___;
+ memcpy(&dst[1], &src[1], tmpsize-sizeof(struct ___Object___));
+ }
- //memory barrier
- CFENCE;
+ //memory barrier
+ CFENCE;
- header->version += 1;
- /* If threads are waiting on this object to be updated, notify them */
- if(header->notifylist != NULL) {
- notifyAll(&header->notifylist, OID(header), header->version);
- }
- offset += sizeof(objheader_t) + tmpsize;
+ header->version += 1;
+ /* If threads are waiting on this object to be updated, notify them */
+ if(header->notifylist != NULL) {
+ notifyAll(&header->notifylist, OID(header), header->version);
}
+ offset += sizeof(objheader_t) + tmpsize;
+ }
if (nummod > 0)
free(modptr);
short offsetarry[numoffset];
recv_data_buf((int)acceptfd, readbuffer, &gid, sizeof(int));
recv_data_buf((int) acceptfd, readbuffer, offsetarry, numoffset*sizeof(short));
- LOGTIME('A',oid ,0,myrdtsc(),gid); //after recv the entire prefetch request
+ LOGTIME('A',oid,0,myrdtsc(),gid); //after recv the entire prefetch request
/*Process each oid */
if ((header = mhashSearch(oid)) == NULL) { /* Obj not found */
/* Save the oids not found in buffer for later use */
- size = sizeof(int)+sizeof(int) + sizeof(char) + sizeof(unsigned int) ;
+ size = sizeof(int)+sizeof(int) + sizeof(char) + sizeof(unsigned int);
char sendbuffer[size+1];
sendbuffer[0]=TRANS_PREFETCH_RESPONSE;
*((int *) (sendbuffer+sizeof(char))) = size;
LOGTIME('C',oid,TYPE(header),myrdtsc(), gid); //send first oid found from prefetch request
/* Calculate the oid corresponding to the offset value */
- for(i = 0 ; i< numoffset ; i++) {
+ for(i = 0; i< numoffset; i++) {
/* Check for arrays */
if(TYPE(header) >= NUMCLASSES) {
int elementsize = classsize[TYPE(header)];
if (oid==0)
break;
- LOGTIME('B',oid,0,myrdtsc(),gid); //send next oid found from prefetch request
+ LOGTIME('B',oid,0,myrdtsc(),gid); //send next oid found from prefetch request
if((header = mhashSearch(oid)) == NULL) {
- size = sizeof(int)+sizeof(int) + sizeof(char) + sizeof(unsigned int) ;
+ size = sizeof(int)+sizeof(int) + sizeof(char) + sizeof(unsigned int);
char sendbuffer[size+1];
sendbuffer[0]=TRANS_PREFETCH_RESPONSE;
*((int *) (sendbuffer+1)) = size;
*((char *)(sendbuffer + sizeof(char)+sizeof(int))) = OBJECT_NOT_FOUND;
*((unsigned int *)(sendbuffer + sizeof(char)+sizeof(int) + sizeof(char))) = oid;
- *((int *)(sendbuffer+sizeof(int) + sizeof(char)+sizeof(char)+sizeof(unsigned int))) = gid;
+ *((int *)(sendbuffer+sizeof(int) + sizeof(char)+sizeof(char)+sizeof(unsigned int))) = gid;
send_buf(sd, &writebuffer, sendbuffer, size+1);
- LOGTIME('J',oid, 0,myrdtsc(), gid); //send first oid not found prefetch request
+ LOGTIME('J',oid, 0,myrdtsc(), gid); //send first oid not found prefetch request
break;
} else { /* Obj Found */
int incr = 1;
incr += sizeof(char);
*((unsigned int *)(sendbuffer+incr)) = oid;
incr += sizeof(unsigned int);
- *((int *)(sendbuffer+incr)) = gid;
- incr += sizeof(int);
+ *((int *)(sendbuffer+incr)) = gid;
+ incr += sizeof(int);
memcpy(sendbuffer + incr, header, objsize + sizeof(objheader_t));
send_buf(sd, &writebuffer, sendbuffer, size+1);
- LOGOIDTYPE("SRES", oid, TYPE(header), (myrdtsc()-clockoffset));
- LOGTIME('C',oid,TYPE(header),myrdtsc(), gid); //send first oid found from prefetch request
+ LOGOIDTYPE("SRES", oid, TYPE(header), (myrdtsc()-clockoffset));
+ LOGTIME('C',oid,TYPE(header),myrdtsc(), gid); //send first oid found from prefetch request
}
} //end of for
}
} //end of while
- //Release socket
+ //Release socket
if (mid!=-1) {
forcesend_buf(sd, &writebuffer, NULL, 0);
freeSockWithLock(transPResponseSocketPool, mid, sd);
close(sd);
return;
} else {
-
+
//Send Update notification
msg[0] = THREAD_NOTIFY_RESPONSE;
*((unsigned int *)&msg[1]) = oid;
tmp->prev=NULL;
return tmp;
} else {
- int allocsize=(size>DEFAULT_OBJ_STORE_SIZE)?size:DEFAULT_OBJ_STORE_SIZE;
+ int allocsize=(size>DEFAULT_OBJ_STORE_SIZE) ? size : DEFAULT_OBJ_STORE_SIZE;
return objstrCreate(allocsize);
}
}
}
}
- if(((i+1)&(pflookup.mask>>4))==0 && (i+1)<pflookup.size){
+ if(((i+1)&(pflookup.mask>>4))==0 && (i+1)<pflookup.size) {
// try to grab new lock
lockindex++;
volatile unsigned int * lockptr_new=&pflookup.larray[lockindex].lock;
- while(!write_trylock(lockptr_new)){
- sched_yield();
+ while(!write_trylock(lockptr_new)) {
+ sched_yield();
}
write_unlock(lockptr_current);
- lockptr_current=lockptr_new;
+ lockptr_current=lockptr_new;
}
-
- }// end of for (pflokup)
-
+
+ } // end of for (pflokup)
+
write_unlock(lockptr_current);
}
objstr_t *newstale;
int stale_count;
int stall;
-
+
} prefetchNodeInfo_t;
/********************************
/********** for DEBUGGING **************/
char ip[16]; // for debugging purpose
-char* midtoIPString(unsigned int mid){
+char* midtoIPString(unsigned int mid) {
midtoIP(mid, ip);
return ip;
}
objnode->offset = offset;
objnode->oid = oid;
objnode->numoffset = numoffset;
- objnode->siteid = siteid;
+ objnode->siteid = siteid;
objnode->next = *tmp;
*tmp = objnode;
return;
objnode->offset = offset;
objnode->oid = oid;
objnode->numoffset = numoffset;
- objnode->siteid = siteid;
+ objnode->siteid = siteid;
objnode->next = *tmp;
*tmp = objnode;
return;
create_objstr(size);
createHash(&mlut, HASH_SIZE, 0.75);
- for(i=0; i< 4 ; i++) {
+ for(i=0; i< 4; i++) {
createObject(i);
}
// Assign to keys to bins inside hash table
unsigned int mhashFunction(unsigned int key) {
- return( key & mlookup.mask) >>1;
+ return ( key & mlookup.mask) >>1;
}
// Insert value and key mapping into the hash table
free(curr);
} /*
- NOTE: Add this case if you change this...
- This case currently never happens because of the way things rehash....
-else if (isfirst) {
- mhashlistnode_t *newnode = calloc(1, sizeof(mhashlistnode_t));
- newnode->key = curr->key;
- newnode->val = curr->val;
- newnode->next = tmp->next;
- tmp->next=newnode;
- } */
+ NOTE: Add this case if you change this...
+ This case currently never happens because of the way things rehash....
+ else if (isfirst) {
+ mhashlistnode_t *newnode = calloc(1, sizeof(mhashlistnode_t));
+ newnode->key = curr->key;
+ newnode->val = curr->val;
+ newnode->next = tmp->next;
+ tmp->next=newnode;
+ } */
else {
curr->next=tmp->next;
tmp->next=curr;
size+=(8-(size&7));
}
- for(;i<3;i++) {
+ for(; i<3; i++) {
if (OSFREE(store)>=size) {
tmp=store->top;
store->top +=size;
}
{
- unsigned int newsize=size>DEFAULT_OBJ_STORE_SIZE?size:DEFAULT_OBJ_STORE_SIZE;
+ unsigned int newsize=size>DEFAULT_OBJ_STORE_SIZE ? size : DEFAULT_OBJ_STORE_SIZE;
objstr_t *os=(objstr_t *)calloc(1,(sizeof(objstr_t) + newsize));
void *ptr=&os[1];
os->next=*osptr;
extern long long bigarray4[6*1024*1024];
extern int bigarray5[6*1024*1024];
extern int bigindex1;
-#define LOGTIME(x,y,z,a,b) {\
- int tmp=bigindex1; \
- bigarray1[tmp]=x; \
- bigarray2[tmp]=y; \
- bigarray3[tmp]=z; \
- bigarray4[tmp]=a; \
- bigarray5[tmp]=b; \
- bigindex1++; \
+#define LOGTIME(x,y,z,a,b) { \
+ int tmp=bigindex1; \
+ bigarray1[tmp]=x; \
+ bigarray2[tmp]=y; \
+ bigarray3[tmp]=z; \
+ bigarray4[tmp]=a; \
+ bigarray5[tmp]=b; \
+ bigindex1++; \
}
#else
#define LOGTIME(x,y,z,a,b)
/* Initialize */
perMcPrefetchList_t *head = NULL;
- for(j=0;j<numprefetches; j++) {
+ for(j=0; j<numprefetches; j++) {
unsigned int oid = *(GET_OID(ptr));
short numoffset = *(GET_NUM_OFFSETS(ptr));
short *offsetarray = GET_OFFSETS(ptr);
int countInvalidObj=-1;
int isLastOffset=0;
- if(offstop==0) { //if no offsets
+ if(offstop==0) { //if no offsets
isLastOffset=1;
}
LOGTIME('b',oid,0,0,countInvalidObj);
//forward prefetch
if(oid!=0) {
- int machinenum = lhashSearch(oid);
- if(machinenum != myIpAddr) {
- insertPrefetch(machinenum, oid, numoffset, offsetarray, &head);
- }
+ int machinenum = lhashSearch(oid);
+ if(machinenum != myIpAddr) {
+ insertPrefetch(machinenum, oid, numoffset, offsetarray, &head);
+ }
}
//update ptr
ptr=((char *)&offsetarray[numoffset])+sizeof(int);
LOGTIME('B',oid,TYPE(header),0,countInvalidObj);
//Start searching the dfsList
- for(top=0; top>=0;) {
+ for(top=0; top>=0; ) {
if(top == offstop) {
- isLastOffset=1;
+ isLastOffset=1;
}
oid=getNextOid(header, offsetarray, dfsList, top, &countInvalidObj, &isLastOffset);
LOGTIME('O',oid,0,0,countInvalidObj);
if (oid&1) {
- int oldisField=TYPE(header) < NUMCLASSES;
- top+=2;
- dfsList[top]=oid;
- dfsList[top+1]=0;
- header=searchObjInv(oid, top, &countInvalidObj, &isLastOffset);
- if (header==NULL) {
- LOGTIME('c',oid,top,0,countInvalidObj);
- //forward prefetch
- int machinenum = lhashSearch(oid);
- if(machinenum != myIpAddr) {
- if (oldisField&&(dfsList[top-1]!=GET_RANGE(offsetarray[top+1]))) {
- insertPrefetch(machinenum, oid, 2+numoffset-top, &offsetarray[top-2], &head);
- } else {
- insertPrefetch(machinenum, oid, numoffset-top, &offsetarray[top], &head);
- }
- }
- } else if (top<offstop) {
- LOGTIME('C',oid,TYPE(header),0,top);
- //okay to continue going down
- continue;
- }
+ int oldisField=TYPE(header) < NUMCLASSES;
+ top+=2;
+ dfsList[top]=oid;
+ dfsList[top+1]=0;
+ header=searchObjInv(oid, top, &countInvalidObj, &isLastOffset);
+ if (header==NULL) {
+ LOGTIME('c',oid,top,0,countInvalidObj);
+ //forward prefetch
+ int machinenum = lhashSearch(oid);
+ if(machinenum != myIpAddr) {
+ if (oldisField&&(dfsList[top-1]!=GET_RANGE(offsetarray[top+1]))) {
+ insertPrefetch(machinenum, oid, 2+numoffset-top, &offsetarray[top-2], &head);
+ } else {
+ insertPrefetch(machinenum, oid, numoffset-top, &offsetarray[top], &head);
+ }
+ }
+ } else if (top<offstop) {
+ LOGTIME('C',oid,TYPE(header),0,top);
+ //okay to continue going down
+ continue;
+ }
} else if (oid==2) {
- LOGTIME('D',oid,0,0,top);
- //send prefetch first
- int objindex=top+2;
- int machinenum = lhashSearch(dfsList[objindex]);
- if(machinenum != myIpAddr) {
- insertPrefetch(machinenum, dfsList[objindex], numoffset-top, &offsetarray[top], &head);
- }
+ LOGTIME('D',oid,0,0,top);
+ //send prefetch first
+ int objindex=top+2;
+ int machinenum = lhashSearch(dfsList[objindex]);
+ if(machinenum != myIpAddr) {
+ insertPrefetch(machinenum, dfsList[objindex], numoffset-top, &offsetarray[top], &head);
+ }
}
//oid is 0
//go backwards until we can increment
do {
- do {
- top-=2;
- if (top<0) {
- goto tuple;
- //return head;
- }
- } while(dfsList[top+1] == GET_RANGE(offsetarray[top + 3]));
-
- //we backtracked past the invalid obj...set out countInvalidObj=-1
- if (top<countInvalidObj)
- countInvalidObj=-1;
-
- header=searchObjInv(dfsList[top], top, &countInvalidObj, NULL);
- //header shouldn't be null unless the object moves away, but allow
- //ourselves the option to just continue on if we lose the object
+ do {
+ top-=2;
+ if (top<0) {
+ goto tuple;
+ //return head;
+ }
+ } while(dfsList[top+1] == GET_RANGE(offsetarray[top + 3]));
+
+ //we backtracked past the invalid obj...set out countInvalidObj=-1
+ if (top<countInvalidObj)
+ countInvalidObj=-1;
+
+ header=searchObjInv(dfsList[top], top, &countInvalidObj, NULL);
+ //header shouldn't be null unless the object moves away, but allow
+ //ourselves the option to just continue on if we lose the object
} while(header==NULL);
LOGTIME('F',OID(header),TYPE(header),0,top);
//increment
LOGTIME('G',OID(header),TYPE(header),0, 0);
//Start searching the dfsList
- for(top=0; top>=0;) {
+ for(top=0; top>=0; ) {
oid=getNextOid(header, offsetarray, dfsList, top, NULL, NULL);
if (oid&1) {
int oldisField=TYPE(header) < NUMCLASSES;
dfsList[top+1]=0;
header=searchObj(oid);
if (header==NULL) {
- LOGTIME('h',oid,top,0,0);
- //forward prefetch
- /*
- int machinenum = lhashSearch(oid);
- if (oldisField&&(dfsList[top-1]!=GET_RANGE(offsetarray[top+1]))) {
- insertPrefetch(machinenum, oid, 2+numoffset-top, &offsetarray[top-2], &head);
- } else {
- insertPrefetch(machinenum, oid, numoffset-top, &offsetarray[top], &head);
- }
- */
+ LOGTIME('h',oid,top,0,0);
+ //forward prefetch
+ /*
+ int machinenum = lhashSearch(oid);
+ if (oldisField&&(dfsList[top-1]!=GET_RANGE(offsetarray[top+1]))) {
+ insertPrefetch(machinenum, oid, 2+numoffset-top, &offsetarray[top-2], &head);
+ } else {
+ insertPrefetch(machinenum, oid, numoffset-top, &offsetarray[top], &head);
+ }
+ */
} else {
- sendOidFound(header, oid, sd,writebuffer);
- LOGTIME('H',oid,TYPE(header),0,top);
+ sendOidFound(header, oid, sd,writebuffer);
+ LOGTIME('H',oid,TYPE(header),0,top);
if (top<offstop)
//okay to continue going down
continue;
int objindex=top+2;
//forward prefetch
/*
- int machinenum = lhashSearch(dfsList[objindex]);
- insertPrefetch(machinenum, dfsList[objindex], numoffset-top, &offsetarray[top], &head);
- */
+ int machinenum = lhashSearch(dfsList[objindex]);
+ insertPrefetch(machinenum, dfsList[objindex], numoffset-top, &offsetarray[top], &head);
+ */
}
//oid is 0
//go backwards until we can increment
header = prehashSearch(oid);
if(header != NULL) {
if((STATUS(header) & DIRTY) && (countInvalidObj!= NULL)) {
- if ((*countInvalidObj)==-1) {
- *countInvalidObj=top;
- } else {
- return NULL;
- }
+ if ((*countInvalidObj)==-1) {
+ *countInvalidObj=top;
+ } else {
+ return NULL;
+ }
}
if((STATUS(header) & DIRTY) && isLastOffset)
- return NULL;
+ return NULL;
}
return header;
}
char *buf=oidnoffset;
if (first) {
*buf=TRANS_PREFETCH;
- buf++;len++;
+ buf++; len++;
first=0;
}
*((int*)buf) = tmp->numoffset;
send_buf(sd, &writebuffer, oidnoffset, len);
else
forcesend_buf(sd, &writebuffer, oidnoffset, len);
- //send_data(sd, oidnoffset, len);
+ //send_data(sd, oidnoffset, len);
//tmp = tmp->next;
}
STATUS(ptr)=0;
oid=OID(ptr);
objsize+=sizeof(objheader_t);
-
+
/* Insert into prefetch hash lookup table */
void * oldptr;
if((oldptr = prehashSearch(oid)) != NULL) {
- if(((objheader_t *)oldptr)->version < ((objheader_t *)ptr)->version) {
- prehashInsert(oid, ptr);
- }
+ if(((objheader_t *)oldptr)->version < ((objheader_t *)ptr)->version) {
+ prehashInsert(oid, ptr);
+ }
} else {
prehashInsert(oid, ptr);
}
baseoid = oidmid.oid;
if(mid != oidmid.mid) {
if(mid!= -1) {
- forcesend_buf(sd, &writebuffer, NULL, 0);
- freeSockWithLock(transPResponseSocketPool, mid, sd);
+ forcesend_buf(sd, &writebuffer, NULL, 0);
+ freeSockWithLock(transPResponseSocketPool, mid, sd);
}
mid = oidmid.mid;
sd = getSockWithLock(transPResponseSocketPool, mid);
//Release socket
if(mid!=-1)
forcesend_buf(sd,&writebuffer, NULL, 0);
- freeSockWithLock(transPResponseSocketPool, mid, sd);
+ freeSockWithLock(transPResponseSocketPool, mid, sd);
return 0;
}
-unsigned int getNextOid(objheader_t * header, short * offsetarray, unsigned int *dfsList, int top, int *countInvalidObj, int *isLastOffset)
-{
+unsigned int getNextOid(objheader_t * header, short * offsetarray, unsigned int *dfsList, int top, int *countInvalidObj, int *isLastOffset) {
int startindex= offsetarray[top+2];
int currcount = dfsList[top+1];
int range = GET_RANGE(offsetarray[top + 3]);
send_buf(sd, writebuffer, sendbuffer, size+1);
/*
- //TODO: dead code --- stick it around for sometime
- if ((incr=(*bufoffset))==0) {
- buffer[incr] = TRANS_PREFETCH_RESPONSE;
- incr+=sizeof(char);
- *((int *)(buffer + incr)) = size+sizeof(int)+sizeof(char)+sizeof(unsigned int);
- incr += sizeof(int);
- *((char *)(buffer + incr)) = OBJECT_FOUND;
- incr += sizeof(char);
- *((unsigned int *)(buffer + incr)) = oid;
- incr += sizeof(unsigned int);
- } else
- *((int *)(buffer+sizeof(char)))+=size;
-
- if ((incr+size)<PBUFFERSIZE) {
- //don't need to allocate, just copy
- sendbuffer=buffer;
- (*bufoffset)=incr+size;
- } else {
- sendbuffer=alloca(size+incr);
- memcpy(sendbuffer, buffer, incr);
- *bufoffset=0;
- }
-
- memcpy(sendbuffer + incr, header, size);
- if ((*bufoffset)==0) {
- send_data(sd, sendbuffer, size+incr);
- }
- */
+ //TODO: dead code --- stick it around for sometime
+ if ((incr=(*bufoffset))==0) {
+ buffer[incr] = TRANS_PREFETCH_RESPONSE;
+ incr+=sizeof(char);
+ *((int *)(buffer + incr)) = size+sizeof(int)+sizeof(char)+sizeof(unsigned int);
+ incr += sizeof(int);
+ *((char *)(buffer + incr)) = OBJECT_FOUND;
+ incr += sizeof(char);
+ *((unsigned int *)(buffer + incr)) = oid;
+ incr += sizeof(unsigned int);
+ } else
+ *((int *)(buffer+sizeof(char)))+=size;
+
+ if ((incr+size)<PBUFFERSIZE) {
+ //don't need to allocate, just copy
+ sendbuffer=buffer;
+ (*bufoffset)=incr+size;
+ } else {
+ sendbuffer=alloca(size+incr);
+ memcpy(sendbuffer, buffer, incr);
+ *bufoffset=0;
+ }
+
+ memcpy(sendbuffer + incr, header, size);
+ if ((*bufoffset)==0) {
+ send_data(sd, sendbuffer, size+incr);
+ }
+ */
return 0;
}
pflookup.numelements++;
} else {
tmp = ptr;
- while(tmp != NULL) {
+ while(tmp != NULL) {
if(tmp->key == key) {
- isFound=1;
- tmp->val = val;//Replace value for an exsisting key
- pthread_mutex_unlock(&pflookup.lock);
- return;
+ isFound=1;
+ tmp->val = val; //Replace value for an exsisting key
+ pthread_mutex_unlock(&pflookup.lock);
+ return;
}
tmp=tmp->next;
}
if(!isFound) { //Insert new key and value into the chain of linked list for the given bin
node = calloc(1, sizeof(prehashlistnode_t));
node->key = key;
- node->val = val ;
+ node->val = val;
node->next = ptr->next;
ptr->next=node;
pflookup.numelements++;
}
/*
-void prehashInsert(unsigned int key, void *val) {
- prehashlistnode_t *ptr,*node;
- pthread_mutex_lock(&pflookup.lock);
+ void prehashInsert(unsigned int key, void *val) {
+ prehashlistnode_t *ptr,*node;
+ pthread_mutex_lock(&pflookup.lock);
- if(pflookup.numelements > (pflookup.threshold)) {
+ if(pflookup.numelements > (pflookup.threshold)) {
//Resize
unsigned int newsize = pflookup.size << 1;
prehashResize(newsize);
- }
+ }
- ptr = &pflookup.table[(key & pflookup.mask)>>1];
- pflookup.numelements++;
+ ptr = &pflookup.table[(key & pflookup.mask)>>1];
+ pflookup.numelements++;
- if(ptr->key==0) {
+ if(ptr->key==0) {
ptr->key = key;
ptr->val = val;
- } else { // Insert in the beginning of linked list
+ } else { // Insert in the beginning of linked list
node = calloc(1, sizeof(prehashlistnode_t));
node->key = key;
node->val = val ;
node->next = ptr->next;
ptr->next=node;
- }
- pthread_mutex_unlock(&pflookup.lock);
-}
-*/
+ }
+ pthread_mutex_unlock(&pflookup.lock);
+ }
+ */
// Search for an address for a given oid
INLINE void *prehashSearch(unsigned int key) {
prehashlistnode_t *ptr, *node;
pthread_mutex_lock(&pflookup.lock);
- node = & pflookup.table[(key & pflookup.mask)>>1];
+ node = &pflookup.table[(key & pflookup.mask)>>1];
do {
if(node->key == key) {
void * tmp=node->val;
if (!isfirst)
free(curr);
} /*
- NOTE: Add this case if you change this...
- This case currently never happens because of the way things rehash....
-else if (isfirst) {
- prehashlistnode_t * newnode = calloc(1, sizeof(prehashlistnode_t));
- newnode->key = curr->key;
- newnode->val = curr->val;
- newnode->next = tmp->next;
- tmp->next=newnode;
- } */
+ NOTE: Add this case if you change this...
+ This case currently never happens because of the way things rehash....
+ else if (isfirst) {
+ prehashlistnode_t * newnode = calloc(1, sizeof(prehashlistnode_t));
+ newnode->key = curr->key;
+ newnode->val = curr->val;
+ newnode->next = tmp->next;
+ tmp->next=newnode;
+ } */
else {
curr->next=tmp->next;
tmp->next=curr;
pNodeInfo.newstale=pNodeInfo.newptr;
}
stale=STALL_THRESHOLD-pNodeInfo.stale_count;
-
+
if (stale>0&&stale>pNodeInfo.stall)
pNodeInfo.stall=stale;
extern char bigarray[16*1024*1024];
extern int bigindex;
#define LOGEVENT(x) { \
- int tmp=bigindex++; \
- bigarray[tmp]=x; \
- }
+ int tmp=bigindex++; \
+ bigarray[tmp]=x; \
+}
#else
#define LOGEVENT(x)
#endif
void incmulttail(int num) {
int i;
- for(i=0;i<num;i++) {
+ for(i=0; i<num; i++) {
int tmpoffset=tailoffset+*((int *)(memory+tailoffset));
if (tmpoffset>QSIZE)
tailoffset=0;
sigset_t toclear;
sigemptyset(&toclear);
sigaddset(&toclear, sig);
- sigprocmask(SIG_UNBLOCK, &toclear,NULL);
+ sigprocmask(SIG_UNBLOCK, &toclear,NULL);
#ifdef TRANSSTATS
numTransAbort++;
#endif
}
-/*
+/*
* returns 0 when read set objects are consistent
* returns 1 when objects are inconsistent
*/
int checktrans() {
- /* Create info to keep track of numelements */
+ /* Create info to keep track of numelements */
unsigned int size = c_size;
chashlistnode_t *ptr = c_table;
int i;
/* Inner loop to traverse the linked list of the cache lookupTable */
while(curr != NULL) {
if (curr->key == 0)
- break;
+ break;
objheader_t *headeraddr=(objheader_t*) curr->val;
unsigned int machinenum;
objheader_t *tmp;
-
+
if (STATUS(headeraddr) & NEW) {
//new objects cannot be stale
} else if ((tmp=mhashSearch(curr->key)) != NULL) {
- //memory barrier
- CFENCE;
+ //memory barrier
+ CFENCE;
if (tmp->version!=headeraddr->version) {
//version mismatch
deletehead(head);
}
} else {
machinenum = lhashSearch(curr->key);
- head = createList(head, headeraddr, machinenum, c_numelements);
+ head = createList(head, headeraddr, machinenum, c_numelements);
}
curr = curr->next;
}
}
/* Send oid and versions for checking */
- if(head == NULL)
+ if(head == NULL)
return 0;
-
+
int retval = verify(head);
deletehead(head);
return retval==0;
}
nodeElem_t * createList(nodeElem_t *head, objheader_t *headeraddr, unsigned int mid,
- unsigned int c_numelements) {
+ unsigned int c_numelements) {
nodeElem_t *ptr, *tmp;
int found = 0, offset = 0;
while(tmp != NULL) {
if(tmp->mid == mid) {
if (STATUS(headeraddr) & DIRTY) {
- offset = (sizeof(unsigned int) + sizeof(short)) * tmp->nummod;
- *((unsigned int *)(((char *)tmp->objmod) + offset))=OID(headeraddr);
- offset += sizeof(unsigned int);
- *((unsigned short *)(((char *)tmp->objmod) + offset)) = headeraddr->version;
- tmp->nummod++;
+ offset = (sizeof(unsigned int) + sizeof(short)) * tmp->nummod;
+ *((unsigned int *)(((char *)tmp->objmod) + offset))=OID(headeraddr);
+ offset += sizeof(unsigned int);
+ *((unsigned short *)(((char *)tmp->objmod) + offset)) = headeraddr->version;
+ tmp->nummod++;
} else {
- offset = (sizeof(unsigned int) + sizeof(short)) * tmp->numread;
- *((unsigned int *)(((char *)tmp->objread) + offset))=OID(headeraddr);
- offset += sizeof(unsigned int);
- *((unsigned short *)(((char *)tmp->objread) + offset)) = headeraddr->version;
- tmp->numread++;
+ offset = (sizeof(unsigned int) + sizeof(short)) * tmp->numread;
+ *((unsigned int *)(((char *)tmp->objread) + offset))=OID(headeraddr);
+ offset += sizeof(unsigned int);
+ *((unsigned short *)(((char *)tmp->objread) + offset)) = headeraddr->version;
+ tmp->numread++;
}
found = 1;
break;
nodeElem_t * makehead(unsigned int numelements) {
nodeElem_t *head;
- //Create the first element
+ //Create the first element
if((head = calloc(1, sizeof(nodeElem_t))) == NULL) {
printf("Calloc error %s %d\n", __func__, __LINE__);
return NULL;
}
-
+
if ((head->objmod = calloc(numelements,sizeof(unsigned int) + sizeof(unsigned short))) == NULL) {
printf("Calloc error %s %d\n", __func__, __LINE__);
free(head);
int pilecount = 0;
while(pile != NULL) {
/* send total bytes */
- tosend[pilecount].control = CHECK_OBJECTS;
- tosend[pilecount].numread = pile->numread;
- tosend[pilecount].nummod = pile->nummod;
+ tosend[pilecount].control = CHECK_OBJECTS;
+ tosend[pilecount].numread = pile->numread;
+ tosend[pilecount].nummod = pile->nummod;
int sd = 0;
if((sd = getSock2WithLock(transRequestSockPool, pile->mid)) < 0) {
printf("Error: Getting a socket descriptor at %s(), %s(), %d\n", __FILE__, __func__, __LINE__);
}
pilecount++;
pile = pile->next;
- }// end of pile processing
+ } // end of pile processing
int checkObj = 0;
int countConsistent = 0;
recv_data(sd, &control, sizeof(char));
getReplyCtrl[i] = control;
if(control == OBJ_INCONSISTENT) { /* Inconsistent */
- checkObj = 1;
- break;
+ checkObj = 1;
+ break;
}
countConsistent++;
}
size = backtrace(array, 100);
strings = backtrace_symbols(array, size);
- printf ("Obtained %zd stack frames.\n", size);
+ printf("Obtained %zd stack frames.\n", size);
for (i = 0; i < size; i++)
- printf ("%s\n", strings[i]);
- free (strings);
+ printf("%s\n", strings[i]);
+ free(strings);
}
void checkObjVersion(struct readstruct * readbuffer, int sd, unsigned int numread, unsigned int nummod) {
return;
} else {
if(is_write_locked(STATUSPTR(header))) { //object write locked
- control = OBJ_INCONSISTENT;
- send_data(sd, &control, sizeof(char));
- return;
+ control = OBJ_INCONSISTENT;
+ send_data(sd, &control, sizeof(char));
+ return;
}
CFENCE;
//compare versions
if(version == header->version)
- v_match++;
+ v_match++;
else {
- control = OBJ_INCONSISTENT;
- send_data(sd, &control, sizeof(char));
- return;
+ control = OBJ_INCONSISTENT;
+ send_data(sd, &control, sizeof(char));
+ return;
}
}
- } // end of objects read
+ } // end of objects read
for(i=0; i<nummod; i++) {
//unsigned int oid = objmod[i].oid;
return;
} else {
if(is_write_locked(STATUSPTR(header))) { //object write locked
- control = OBJ_INCONSISTENT;
- send_data(sd, &control, sizeof(char));
- return;
+ control = OBJ_INCONSISTENT;
+ send_data(sd, &control, sizeof(char));
+ return;
}
//compare versions
if(version == header->version)
- v_match++;
+ v_match++;
else {
- control = OBJ_INCONSISTENT;
- send_data(sd, &control, sizeof(char));
- return;
+ control = OBJ_INCONSISTENT;
+ send_data(sd, &control, sizeof(char));
+ return;
}
}
} // end of objects modified
void transStatsHandler(int sig, siginfo_t* info, void *context) {
#ifdef TRANSSTATS
- char filepath[200], exectime[10];
+ char filepath[200], exectime[10];
struct utsname buf;
FILE *fp, *envfp;
/*
-void transStatsHandler(int sig, siginfo_t* info, void *context) {
-#ifdef TRANSSTATS
- printf("****** Transaction Stats ******\n");
- printf("myIpAddr = %x\n", myIpAddr);
- printf("numTransCommit = %d\n", numTransCommit);
- printf("numTransAbort = %d\n", numTransAbort);
- printf("nchashSearch = %d\n", nchashSearch);
- printf("nmhashSearch = %d\n", nmhashSearch);
- printf("nprehashSearch = %d\n", nprehashSearch);
- printf("ndirtyCacheObj = %d\n", ndirtyCacheObj);
- printf("nRemoteReadSend = %d\n", nRemoteSend);
- printf("nSoftAbort = %d\n", nSoftAbort);
- printf("bytesSent = %d\n", bytesSent);
- printf("bytesRecv = %d\n", bytesRecv);
- printf("totalObjSize= %d\n", totalObjSize);
- printf("sendRemoteReq= %d\n", sendRemoteReq);
- printf("getResponse= %d\n", getResponse);
- printf("**********************************\n");
- exit(0);
-#endif
-}
-*/
+ void transStatsHandler(int sig, siginfo_t* info, void *context) {
+ #ifdef TRANSSTATS
+ printf("****** Transaction Stats ******\n");
+ printf("myIpAddr = %x\n", myIpAddr);
+ printf("numTransCommit = %d\n", numTransCommit);
+ printf("numTransAbort = %d\n", numTransAbort);
+ printf("nchashSearch = %d\n", nchashSearch);
+ printf("nmhashSearch = %d\n", nmhashSearch);
+ printf("nprehashSearch = %d\n", nprehashSearch);
+ printf("ndirtyCacheObj = %d\n", ndirtyCacheObj);
+ printf("nRemoteReadSend = %d\n", nRemoteSend);
+ printf("nSoftAbort = %d\n", nSoftAbort);
+ printf("bytesSent = %d\n", bytesSent);
+ printf("bytesRecv = %d\n", bytesRecv);
+ printf("totalObjSize= %d\n", totalObjSize);
+ printf("sendRemoteReq= %d\n", sendRemoteReq);
+ printf("getResponse= %d\n", getResponse);
+ printf("**********************************\n");
+ exit(0);
+ #endif
+ }
+ */
void handle() {
#ifdef TRANSSTATS
if(tmp->threadid == tid) {
isFound = 1;
tmp->ndata = ndata;
- pthread_mutex_unlock(&nlookup.locktable);
- return 0;
+ pthread_mutex_unlock(&nlookup.locktable);
+ return 0;
}
tmp = tmp->next;
}
char bigarray[16*1024*1024];
int bigindex=0;
#define LOGEVENT(x) { \
- int tmp=bigindex++; \
- bigarray[tmp]=x; \
- }
+ int tmp=bigindex++; \
+ bigarray[tmp]=x; \
+}
#else
#define LOGEVENT(x)
#endif
long long bigarray4[6*1024*1024];
int bigarray5[6*1024*1024];
int bigindex1=0;
-#define LOGTIME(x,y,z,a,b) {\
- int tmp=bigindex1; \
- bigarray1[tmp]=x; \
- bigarray2[tmp]=y; \
- bigarray3[tmp]=z; \
- bigarray4[tmp]=a; \
- bigarray5[tmp]=b; \
- bigindex1++; \
+#define LOGTIME(x,y,z,a,b) { \
+ int tmp=bigindex1; \
+ bigarray1[tmp]=x; \
+ bigarray2[tmp]=y; \
+ bigarray3[tmp]=z; \
+ bigarray4[tmp]=a; \
+ bigarray5[tmp]=b; \
+ bigindex1++; \
}
#else
#define LOGTIME(x,y,z,a,b)
int recvw(int fd, void *buf, int len, int flags) {
return recv(fd, buf, len, flags);
}
-
+
void recv_data_buf(int fd, struct readstruct * readbuffer, void *buffer, int buflen) {
char *buf=(char *)buffer;
int numbytes=readbuffer->head-readbuffer->tail;
recv_data(fd, buf, buflen);
return;
}
-
+
int maxbuf=MAXBUF;
int obufflen=buflen;
readbuffer->head=0;
-
+
while (buflen > 0) {
int numbytes = recvw(fd, &readbuffer->buf[readbuffer->head], maxbuf, 0);
if (numbytes == -1) {
int maxbuf=MAXBUF;
int obufflen=buflen;
readbuffer->head=0;
-
+
while (buflen > 0) {
int numbytes = recvw(fd, &readbuffer->buf[readbuffer->head], maxbuf, 0);
if (numbytes ==0) {
inline int arrayLength(int *array) {
int i;
- for(i=0 ; array[i] != -1; i++)
+ for(i=0; array[i] != -1; i++)
;
return i;
}
int attempted=0;
char *node;
do {
- node=getmemory(qnodesize);
- if (node==NULL&&attempted)
- break;
- if (node!=NULL) {
+ node=getmemory(qnodesize);
+ if (node==NULL&&attempted)
+ break;
+ if (node!=NULL) {
#else
char *node=getmemory(qnodesize);
#endif
- int top=endoffsets[ntuples-1];
+ int top=endoffsets[ntuples-1];
- if (node==NULL) {
- LOGEVENT('D');
- return;
- }
- /* Set queue node values */
+ if (node==NULL) {
+ LOGEVENT('D');
+ return;
+ }
+ /* Set queue node values */
- /* TODO: Remove this after testing */
- evalPrefetch[siteid].callcount++;
+ /* TODO: Remove this after testing */
+ evalPrefetch[siteid].callcount++;
- *((int *)(node))=siteid;
- *((int *)(node + sizeof(int))) = ntuples;
- len = 2*sizeof(int);
- memcpy(node+len, oids, ntuples*sizeof(unsigned int));
- memcpy(node+len+ntuples*sizeof(unsigned int), endoffsets, ntuples*sizeof(unsigned short));
- memcpy(node+len+ntuples*(sizeof(unsigned int)+sizeof(short)), arrayfields, top*sizeof(short));
+ *((int *)(node))=siteid;
+ *((int *)(node + sizeof(int))) = ntuples;
+ len = 2*sizeof(int);
+ memcpy(node+len, oids, ntuples*sizeof(unsigned int));
+ memcpy(node+len+ntuples*sizeof(unsigned int), endoffsets, ntuples*sizeof(unsigned short));
+ memcpy(node+len+ntuples*(sizeof(unsigned int)+sizeof(short)), arrayfields, top*sizeof(short));
#ifdef INLINEPREFETCH
- movehead(qnodesize);
- }
- int numpref=numavailable();
- attempted=1;
+ movehead(qnodesize);
+ }
+ int numpref=numavailable();
+ attempted=1;
+
+ if (node==NULL && numpref!=0 || numpref>=PREFTHRESHOLD) {
+ node=gettail();
+ prefetchpile_t *pilehead = foundLocal(node,numpref,siteid);
+ if (pilehead!=NULL) {
+ // Get sock from shared pool
+
+ /* Send Prefetch Request */
+ prefetchpile_t *ptr = pilehead;
+ while(ptr != NULL) {
+ globalid++;
+ int sd = getSock2(transPrefetchSockPool, ptr->mid);
+ sendPrefetchReq(ptr, sd, globalid);
+ ptr = ptr->next;
+ }
- if (node==NULL && numpref!=0 || numpref>=PREFTHRESHOLD) {
- node=gettail();
- prefetchpile_t *pilehead = foundLocal(node,numpref,siteid);
- if (pilehead!=NULL) {
- // Get sock from shared pool
-
- /* Send Prefetch Request */
- prefetchpile_t *ptr = pilehead;
- while(ptr != NULL) {
- globalid++;
- int sd = getSock2(transPrefetchSockPool, ptr->mid);
- sendPrefetchReq(ptr, sd, globalid);
- ptr = ptr->next;
+ mcdealloc(pilehead);
}
-
- mcdealloc(pilehead);
- }
- resetqueue();
- }//end do prefetch if condition
+ resetqueue();
+ } //end do prefetch if condition
} while(node==NULL);
#else
- /* Lock and insert into primary prefetch queue */
- movehead(qnodesize);
+ /* Lock and insert into primary prefetch queue */
+ movehead(qnodesize);
#endif
}
int udpfd;
if (processConfigFile() != 0)
- return 0; //TODO: return error value, cause main program to exit
+ return 0; //TODO: return error value, cause main program to exit
#ifdef COMPILER
if (!master)
threadcount--;
#endif
}
-// Search for an address for a given oid
+// Search for an address for a given oid
/*#define INLINE inline __attribute__((always_inline))
-INLINE void * chashSearchI(chashtable_t *table, unsigned int key) {
- //REMOVE HASH FUNCTION CALL TO MAKE SURE IT IS INLINED HERE
- chashlistnode_t *node = &table->table[(key & table->mask)>>1];
+ INLINE void * chashSearchI(chashtable_t *table, unsigned int key) {
+ //REMOVE HASH FUNCTION CALL TO MAKE SURE IT IS INLINED HERE
+ chashlistnode_t *node = &table->table[(key & table->mask)>>1];
- do {
+ do {
if(node->key == key) {
return node->val;
}
node = node->next;
- } while(node != NULL);
+ } while(node != NULL);
- return NULL;
- }*/
+ return NULL;
+ }*/
if(oid == 0) {
return NULL;
}
-
+
node= &c_table[(oid & c_mask)>>1];
do {
if(node->key == oid) {
#ifdef TRANSSTATS
- nchashSearch++;
+ nchashSearch++;
#endif
#ifdef COMPILER
- return &((objheader_t*)node->val)[1];
+ return &((objheader_t*)node->val)[1];
#else
- return node->val;
+ return node->val;
#endif
}
node = node->next;
} while(node != NULL);
-
- /*
- if((objheader = chashSearchI(record->lookupTable, oid)) != NULL) {
-#ifdef TRANSSTATS
- nchashSearch++;
-#endif
-#ifdef COMPILER
- return &objheader[1];
-#else
- return objheader;
-#endif
- } else
- */
+
+ /*
+ if((objheader = chashSearchI(record->lookupTable, oid)) != NULL) {
+ #ifdef TRANSSTATS
+ nchashSearch++;
+ #endif
+ #ifdef COMPILER
+ return &objheader[1];
+ #else
+ return objheader;
+ #endif
+ } else
+ */
#ifdef ABORTREADERS
if (t_abort) {
if((tmp = (objheader_t *) prehashSearch(oid)) != NULL) {
if(STATUS(tmp) & DIRTY) {
#ifdef TRANSSTATS
- ndirtyCacheObj++;
+ ndirtyCacheObj++;
#endif
- goto remoteread;
+ goto remoteread;
}
#ifdef TRANSSTATS
nprehashSearch++;
int size;
GETSIZE(size, objcopy);
if((headerObj = prefetchobjstrAlloc(size + sizeof(objheader_t))) == NULL) {
- printf("%s(): Error in getting memory from prefetch cache at %s, %d\n", __func__,
- __FILE__, __LINE__);
- pthread_mutex_unlock(&prefetchcache_mutex);
- return NULL;
+ printf("%s(): Error in getting memory from prefetch cache at %s, %d\n", __func__,
+ __FILE__, __LINE__);
+ pthread_mutex_unlock(&prefetchcache_mutex);
+ return NULL;
}
pthread_mutex_unlock(&prefetchcache_mutex);
memcpy(headerObj, objcopy, size+sizeof(objheader_t));
if((tmp = (objheader_t *) prehashSearch(oid)) != NULL) {
if(STATUS(tmp) & DIRTY) {
#ifdef TRANSSTATS
- ndirtyCacheObj++;
+ ndirtyCacheObj++;
#endif
- goto remoteread;
+ goto remoteread;
}
#ifdef TRANSSTATS
LOGEVENT('P')
}
objcopy = getRemoteObj(machinenumber, oid);
#ifdef TRANSSTATS
- LOGEVENT('R');
- nRemoteSend++;
+ LOGEVENT('R');
+ nRemoteSend++;
#endif
if(objcopy == NULL) {
int size;
GETSIZE(size, objcopy);
if((headerObj = prefetchobjstrAlloc(size+sizeof(objheader_t))) == NULL) {
- printf("%s(): Error in getting memory from prefetch cache at %s, %d\n", __func__,
- __FILE__, __LINE__);
- pthread_mutex_unlock(&prefetchcache_mutex);
- return NULL;
+ printf("%s(): Error in getting memory from prefetch cache at %s, %d\n", __func__,
+ __FILE__, __LINE__);
+ pthread_mutex_unlock(&prefetchcache_mutex);
+ return NULL;
}
pthread_mutex_unlock(&prefetchcache_mutex);
memcpy(headerObj, objcopy, size+sizeof(objheader_t));
/* Represents number of bins in the chash table */
unsigned int size = c_size;
- for(i = 0; i < size ; i++) {
+ for(i = 0; i < size; i++) {
chashlistnode_t * curr = &ptr[i];
/* Inner loop to traverse the linked list of the cache lookupTable */
while(curr != NULL) {
/* Represents number of bins in the chash table */
unsigned int size = c_size;
- for(i = 0; i < size ; i++) {
- struct chashentry * curr = & ptr[i];
+ for(i = 0; i < size; i++) {
+ struct chashentry * curr = &ptr[i];
/* Inner loop to traverse the linked list of the cache lookupTable */
//if the first bin in hash table is empty
if(curr->key == 0)
#ifdef LOGEVENTS
int iii;
- for(iii=0;iii<bigindex;iii++) {
+ for(iii=0; iii<bigindex; iii++) {
printf("%c", bigarray[iii]);
}
#endif
int treplyretryCount = 0;
/* Initialize timeout for exponential delay */
exponential_backoff.tv_sec = 0;
- exponential_backoff.tv_nsec = (long)(10000);//10 microsec
+ exponential_backoff.tv_nsec = (long)(10000); //10 microsec
count_exponential_backoff = 0;
do {
treplyretry = 0;
int socklist[pilecount];
char getReplyCtrl[pilecount];
int loopcount;
- for(loopcount = 0 ; loopcount < pilecount; loopcount++){
+ for(loopcount = 0; loopcount < pilecount; loopcount++) {
socklist[loopcount] = 0;
getReplyCtrl[loopcount] = 0;
}
}
int offset = 0;
int i;
- for(i = 0; i < tosend[sockindex].f.nummod ; i++) {
+ for(i = 0; i < tosend[sockindex].f.nummod; i++) {
int size;
objheader_t *headeraddr;
if((headeraddr = t_chashSearch(tosend[sockindex].oidmod[i])) == NULL) {
free(tosend);
return 1;
}
- GETSIZE(size,headeraddr);
+ GETSIZE(size,headeraddr);
size+=sizeof(objheader_t);
memcpy(modptr+offset, headeraddr, size);
offset+=size;
pile = pile->next;
} //end of pile processing
- /* Recv Ctrl msgs from all machines */
+ /* Recv Ctrl msgs from all machines */
int i;
for(i = 0; i < pilecount; i++) {
int sd = socklist[i];
GETSIZE(size, header);
size += sizeof(objheader_t);
//make an entry in prefetch hash table
- prehashInsert(oidToPrefetch, header);
- LOGEVENT('E');
+ prehashInsert(oidToPrefetch, header);
+ LOGEVENT('E');
length = length - size;
offset += size;
}
/* wait a random amount of time before retrying to commit transaction*/
if(treplyretry) {
treplyretryCount++;
- // if(treplyretryCount >= NUM_TRY_TO_COMMIT)
- // exponentialdelay();
- // else
+ // if(treplyretryCount >= NUM_TRY_TO_COMMIT)
+ // exponentialdelay();
+ // else
randomdelay();
#ifdef TRANSSTATS
nSoftAbort++;
objstrDelete(t_cache);
t_chashDelete();
#ifdef SANDBOX
- abortenabled=1;
+ abortenabled=1;
#endif
return TRANS_ABORT;
} else if(finalResponse == TRANS_COMMIT) {
transinfo->modptr = NULL;
transinfo->numlocked = numoidlocked;
transinfo->numnotfound = numoidnotfound;
-
+
/* Condition to send TRANS_AGREE */
if(v_matchnolock == tdata->f.numread + tdata->f.nummod) {
*getReplyCtrl = TRANS_AGREE;
char decideResponse(char *getReplyCtrl, char *treplyretry, int pilecount) {
int i, transagree = 0, transdisagree = 0, transsoftabort = 0; /* Counters to formulate decision of what
message to send */
- for (i = 0 ; i < pilecount; i++) {
+ for (i = 0; i < pilecount; i++) {
char control;
control = getReplyCtrl[i];
switch(control) {
default:
printf("Participant sent unknown message %d in %s, %d\n", control, __FILE__, __LINE__);
- /* treat as disagree, pass thru */
+ /* treat as disagree, pass thru */
case TRANS_DISAGREE:
transdisagree++;
break;
int j;
prefetchpile_t * head=NULL;
- for(j=0;j<numprefetches;j++) {
+ for(j=0; j<numprefetches; j++) {
int siteid = *(GET_SITEID(ptr));
int ntuples = *(GET_NTUPLES(ptr));
unsigned int * oidarray = GET_PTR_OID(ptr);
unsigned short * endoffsets = GET_PTR_EOFF(ptr, ntuples);
short * arryfields = GET_PTR_ARRYFLD(ptr, ntuples);
int numLocal = 0;
-
+
for(i=0; i<ntuples; i++) {
unsigned short baseindex=(i==0) ? 0 : endoffsets[i-1];
unsigned short endindex=endoffsets[i];
//Look up fields locally
int isLastOffset=0;
if(endindex==0)
- isLastOffset=1;
+ isLastOffset=1;
for(newbase=baseindex; newbase<endindex; newbase++) {
- if(newbase==(endindex-1))
- isLastOffset=1;
+ if(newbase==(endindex-1))
+ isLastOffset=1;
if (!lookupObject(&oid,arryfields[newbase],&countInvalidObj)) {
break;
}
//Add to remote requests
machinenum=lhashSearch(oid);
insertPile(machinenum, oid, siteid,endindex-newbase, &arryfields[newbase], &head);
- tuple:
+tuple:
;
}
-
+
/* handle dynamic prefetching */
handleDynPrefetching(numLocal, ntuples, siteid);
ptr=((char *)&arryfields[endoffsets[ntuples-1]])+sizeof(int);
;
} else if ((header=prehashSearch(*oid))!=NULL) {
//Found in cache
- if(STATUS(header) & DIRTY) {//Read an oid that is an old entry in the cache;
+ if(STATUS(header) & DIRTY) { //Read an oid that is an old entry in the cache;
//only once because later old entries may still cause unnecessary roundtrips during prefetching
(*countInvalidObj)+=1;
if(*countInvalidObj > 1) {
- return 0;
+ return 0;
}
}
} else {
/* Send Prefetch Request */
prefetchpile_t *ptr = pilehead;
while(ptr != NULL) {
- globalid++;
- int sd = getSock2(transPrefetchSockPool, ptr->mid);
- sendPrefetchReq(ptr, sd,globalid);
- ptr = ptr->next;
+ globalid++;
+ int sd = getSock2(transPrefetchSockPool, ptr->mid);
+ sendPrefetchReq(ptr, sd,globalid);
+ ptr = ptr->next;
}
/* Release socket */
char *buf=oidnoffset;
if (first) {
*buf=TRANS_PREFETCH;
- buf++;len++;
+ buf++; len++;
first=0;
}
*((int*)buf) = tmp->numoffset;
if((oldptr = prehashSearch(oid)) != NULL) {
/* If older version then update with new object ptr */
if(((objheader_t *)oldptr)->version < ((objheader_t *)modptr)->version) {
- prehashInsert(oid, modptr);
+ prehashInsert(oid, modptr);
}
} else { /* Else add the object ptr to hash table*/
prehashInsert(oid, modptr);
/* Send array of oids */
size = sizeof(unsigned int);
- for(i = 0;i < numoid; i++) {
+ for(i = 0; i < numoid; i++) {
oid = oidarry[i];
*((unsigned int *)(&msg[1] + size)) = oid;
size += sizeof(unsigned int);
}
/* Send array of version */
- for(i = 0;i < numoid; i++) {
+ for(i = 0; i < numoid; i++) {
version = versionarry[i];
*((unsigned short *)(&msg[1] + size)) = version;
size += sizeof(unsigned short);
// relocate the position of myIp pile to end of list
plistnode_t *sortPiles(plistnode_t *pileptr) {
- plistnode_t *ptr, *tail;
- tail = pileptr;
+ plistnode_t *ptr, *tail;
+ tail = pileptr;
ptr = NULL;
- /* Get tail pointer and myIp pile ptr */
+ /* Get tail pointer and myIp pile ptr */
if(pileptr == NULL)
return pileptr;
- while(tail->next != NULL) {
+ while(tail->next != NULL) {
if(tail->mid == myIpAddr)
ptr = tail;
- tail = tail->next;
- }
+ tail = tail->next;
+ }
// if ptr is null, then myIp pile is already at tail
if(ptr != NULL) {
- /* Arrange local machine processing at the end of the pile list */
+ /* Arrange local machine processing at the end of the pile list */
tail->next = pileptr;
pileptr = ptr->next;
ptr->next = NULL;
if(curr->inext!=NULL)
it->ptr=curr->inext;
else
- it->finished=1; /* change offsetting scheme */
+ it->finished=1; /* change offsetting scheme */
return curr->src;
}
int i;
for(i=thisvar->size-1; i>=0; i--) {
struct ObjectNode *ptr;
- for(ptr=thisvar->bucket[i]; ptr!=NULL;) {
+ for(ptr=thisvar->bucket[i]; ptr!=NULL; ) {
struct ObjectNode * nextptr=ptr->next;
unsigned int newhashkey=(unsigned int)ptr->key % newsize;
ptr->next=newbucket[newhashkey];
int i;
for(i=thisvar->size-1; i>=0; i--) {
struct ObjectNode *ptr;
- for(ptr=thisvar->bucket[i]; ptr!=NULL;) {
+ for(ptr=thisvar->bucket[i]; ptr!=NULL; ) {
struct ObjectNode * nextptr=ptr->next;
unsigned int newhashkey=(unsigned int)ptr->key % newsize;
ptr->next=newbucket[newhashkey];
int i;
for(i=thisvar->size-1; i>=0; i--) {
struct ObjectNode *ptr;
- for(ptr=thisvar->bucket[i]; ptr!=NULL;) {
+ for(ptr=thisvar->bucket[i]; ptr!=NULL; ) {
struct ObjectNode * nextptr=ptr->next;
unsigned int newhashkey=(unsigned int)ptr->key % newsize;
ptr->next=newbucket[newhashkey];
}
queue->head=q->next;
if(queue->tail == q) {
- queue->tail = NULL;
+ queue->tail = NULL;
}
RUNFREE(q);
return ptr;
}
void clearQueue(struct Queue * queue) {
- struct QueueItem * item=queue->head;
+ struct QueueItem * item=queue->head;
while(item!=NULL) {
- struct QueueItem * next=item->next;
- RUNFREE(item);
+ struct QueueItem * next=item->next;
+ RUNFREE(item);
item=next;
}
- queue->head=queue->tail=NULL;
+ queue->head=queue->tail=NULL;
return;
}
if( i->prev == NULL ) {
if( queue->head != i ) {
- return 0;
+ return 0;
}
- // i->prev != NULL
+ // i->prev != NULL
} else {
if( i->prev->next == NULL ) {
- return 0;
+ return 0;
} else if( i->prev->next != i ) {
- return 0;
+ return 0;
}
}
if( i->next == NULL ) {
if( queue->tail != i ) {
- return 0;
+ return 0;
}
- // i->next != NULL
+ // i->next != NULL
} else {
if( i->next->prev == NULL ) {
- return 0;
+ return 0;
} else if( i->next->prev != i ) {
- return 0;
+ return 0;
}
}
struct QueueItem* i;
printf("Queue empty? %d\n", isEmpty(queue));
-
- printf("head ");
+
+ printf("head ");
i = queue->head;
while( i != NULL ) {
printf("item ");
int i;
for(i=thisvar->size-1; i>=0; i--) {
struct RuntimeNode *ptr;
- for(ptr=thisvar->bucket[i]; ptr!=NULL;) {
+ for(ptr=thisvar->bucket[i]; ptr!=NULL; ) {
struct RuntimeNode * nextptr=ptr->next;
unsigned int newhashkey=(unsigned int)ptr->key % newsize;
ptr->next=newbucket[newhashkey];
int i;
for(i=thisvar->size-1; i>=0; i--) {
struct RuntimeNode *ptr;
- for(ptr=thisvar->bucket[i]; ptr!=NULL;) {
+ for(ptr=thisvar->bucket[i]; ptr!=NULL; ) {
struct RuntimeNode * nextptr=ptr->next;
unsigned int newhashkey=(unsigned int)ptr->key % newsize;
ptr->next=newbucket[newhashkey];
return 1;
}
-#ifdef MULTICORE
+#ifdef MULTICORE
struct RuntimeHash * allocateRuntimeHash_I(int size) {
struct RuntimeHash *thisvar; //=(struct RuntimeHash *)RUNMALLOC(sizeof(struct RuntimeHash));
if (size <= 0) {
int i;
for(i=thisvar->size-1; i>=0; i--) {
struct RuntimeNode *ptr;
- for(ptr=thisvar->bucket[i]; ptr!=NULL;) {
+ for(ptr=thisvar->bucket[i]; ptr!=NULL; ) {
struct RuntimeNode * nextptr=ptr->next;
unsigned int newhashkey=(unsigned int)ptr->key % newsize;
ptr->next=newbucket[newhashkey];
CPU_ZERO(&cpumask);
CPU_SET(cpu, &cpumask);
err = sched_setaffinity(syscall(SYS_gettid),
- sizeof(cpu_set_t), &cpumask);
+ sizeof(cpu_set_t), &cpumask);
if (err == -1)
printf("set_affinity: %s\n", strerror(errno));
cnode_t *ptr, *curr, *next;
ptr = ctable->table;
- for(i=0 ; i<ctable->size ; i++) {
+ for(i=0; i<ctable->size; i++) {
curr = &ptr[i];
- isFirst = 1 ;
+ isFirst = 1;
while(curr != NULL) {
next = curr->next;
if(isFirst != 1) {
// This is an implementation of the structure described in
// A Dynamic-Sized Nonblocking Work Stealing Deque
// Hendler, Lev, Moir, and Shavit
-//
+//
// The bottom and top values for the deque must be CAS-able
// and fit into 64 bits. Our strategy for this is:
-//
+//
// 19-bit Tag 36-bit Node Pointer 9-bit Index
// +-----------+-------------------------+------------+
// | 63 ... 45 | 44 ... 9 | 8 ... 0 |
// +-----------+-------------------------+------------+
//
-// Let's call the encoded info E. To retrieve the values:
+// Let's call the encoded info E. To retrieve the values:
// tag = (0xffffe00000000000 & E) >> 45;
// ptr = (0x00001ffffffffe00 & E) << 3;
// idx = (0x00000000000001ff & E);
// http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
// And 64-bit addresses are 2^3=8 byte aligned, so the lower 3 bits
// of a 64-bit pointer are always zero. This means if we are only
-// alloted 36 bits to store a pointer to a Node we have
+// alloted 36 bits to store a pointer to a Node we have
// 48 - 3 - 36 = 9 bits that could be lost. Instead of aligning Node
// pointers to 8 bytes we can align them to 2^(3+9)=4096 bytes and be
// sure the lower 12 bits of the address are zero. THEREFORE:
// Nodes must be 4096-byte aligned so the lower 12 bits are zeroes and
-// we can ecnode the rest in 36 bits without a loss of information.
+// we can ecnode the rest in 36 bits without a loss of information.
//
////////////////////////////////////////////////////////////////
-// the dequeNode struct must be 4096-byte aligned,
+// the dequeNode struct must be 4096-byte aligned,
// see above, so use the following magic to ask
// the allocator for a space that wastes 4095 bytes
// but gaurantees the address of the struct within
// that space is 4096-aligned
const INTPTR DQNODE_SIZETOREQUEST = sizeof( dequeNode ) + 4095;
-static inline dequeNode* dqGet4096aligned( void* fromAllocator ) {
+static inline dequeNode* dqGet4096aligned(void* fromAllocator) {
INTPTR aligned = ((INTPTR)fromAllocator + 4095) & (~4095);
#ifdef DEBUG_DEQUE
}
-static inline INTPTR dqEncode( int tag, dequeNode* ptr, int idx ) {
+static inline INTPTR dqEncode(int tag, dequeNode* ptr, int idx) {
INTPTR ptrE = (0x00001ffffffffe00 & // second, mask off the addr's high-order 1's
(((INTPTR)ptr) >> 3)); // first, shift down 8-byte alignment bits
(ptrE) |
((INTPTR)idx);
#ifdef DEBUG_DEQUE
- int tagOut = dqDecodeTag( E );
- if( tag != tagOut ) { printf( "Lost tag information.\n" ); exit( -1 ); }
+ int tagOut = dqDecodeTag(E);
+ if( tag != tagOut ) {
+ printf("Lost tag information.\n"); exit(-1);
+ }
- dequeNode* ptrOut = dqDecodePtr( E );
- if( ptr != ptrOut ) { printf( "Lost ptr information.\n" ); exit( -1 ); }
+ dequeNode* ptrOut = dqDecodePtr(E);
+ if( ptr != ptrOut ) {
+ printf("Lost ptr information.\n"); exit(-1);
+ }
- int idxOut = dqDecodeIdx( E );
- if( idx != idxOut ) { printf( "Lost idx information.\n" ); exit( -1 ); }
+ int idxOut = dqDecodeIdx(E);
+ if( idx != idxOut ) {
+ printf("Lost idx information.\n"); exit(-1);
+ }
#endif
return E;
}
-static inline int dqIndicateEmpty( INTPTR bottom, INTPTR top ) {
- dequeNode* botNode = dqDecodePtr( bottom );
- int botIndx = dqDecodeIdx( bottom );
- dequeNode* topNode = dqDecodePtr( top );
- int topIndx = dqDecodeIdx( top );
+static inline int dqIndicateEmpty(INTPTR bottom, INTPTR top) {
+ dequeNode* botNode = dqDecodePtr(bottom);
+ int botIndx = dqDecodeIdx(bottom);
+ dequeNode* topNode = dqDecodePtr(top);
+ int topIndx = dqDecodeIdx(top);
if( (botNode == topNode) &&
(botIndx == topIndx || botIndx == (topIndx+1))
-void dqInit( deque* dq ) {
+void dqInit(deque* dq) {
+
+ dq->memPool = poolcreate(DQNODE_SIZETOREQUEST, NULL);
- dq->memPool = poolcreate( DQNODE_SIZETOREQUEST, NULL );
+ dequeNode* a = dqGet4096aligned(poolalloc(dq->memPool) );
+ dequeNode* b = dqGet4096aligned(poolalloc(dq->memPool) );
- dequeNode* a = dqGet4096aligned( poolalloc( dq->memPool ) );
- dequeNode* b = dqGet4096aligned( poolalloc( dq->memPool ) );
-
a->next = b;
b->prev = a;
- dq->bottom = dqEncode( BOTTOM_NULL_TAG, a, DQNODE_ARRAYSIZE - 1 );
- dq->top = dqEncode( 0, a, DQNODE_ARRAYSIZE - 1 );
+ dq->bottom = dqEncode(BOTTOM_NULL_TAG, a, DQNODE_ARRAYSIZE - 1);
+ dq->top = dqEncode(0, a, DQNODE_ARRAYSIZE - 1);
}
-void dqPushBottom( deque* dq, void* item ) {
+void dqPushBottom(deque* dq, void* item) {
#ifdef DEBUG_DEQUE
if( item == 0x0 ) {
- printf( "Pushing invalid work into the deque.\n" );
+ printf("Pushing invalid work into the deque.\n");
}
#endif
- dequeNode* currNode = dqDecodePtr( dq->bottom );
- int currIndx = dqDecodeIdx( dq->bottom );
+ dequeNode* currNode = dqDecodePtr(dq->bottom);
+ int currIndx = dqDecodeIdx(dq->bottom);
currNode->itsDataArr[currIndx] = item;
dequeNode* newNode;
- int newIndx;
+ int newIndx;
if( currIndx != 0 ) {
newNode = currNode;
newIndx = currIndx - 1;
} else {
- newNode = dqGet4096aligned( poolalloc( dq->memPool ) );
+ newNode = dqGet4096aligned(poolalloc(dq->memPool) );
newNode->next = currNode;
currNode->prev = newNode;
newIndx = DQNODE_ARRAYSIZE - 1;
}
- dq->bottom = dqEncode( BOTTOM_NULL_TAG, newNode, newIndx );
+ dq->bottom = dqEncode(BOTTOM_NULL_TAG, newNode, newIndx);
}
-void* dqPopTop( deque* dq ) {
+void* dqPopTop(deque* dq) {
INTPTR currTop = dq->top;
- int currTopTag = dqDecodeTag( currTop );
- dequeNode* currTopNode = dqDecodePtr( currTop );
- int currTopIndx = dqDecodeIdx( currTop );
+ int currTopTag = dqDecodeTag(currTop);
+ dequeNode* currTopNode = dqDecodePtr(currTop);
+ int currTopIndx = dqDecodeIdx(currTop);
// read of top followed by read of bottom, algorithm
// says specifically must be in this order
BARRIER();
-
+
INTPTR currBottom = dq->bottom;
- if( dqIndicateEmpty( currBottom, currTop ) ) {
+ if( dqIndicateEmpty(currBottom, currTop) ) {
if( currTop == dq->top ) {
return DQ_POP_EMPTY;
} else {
}
dequeNode* nodeToFree;
- int newTopTag;
+ int newTopTag;
dequeNode* newTopNode;
- int newTopIndx;
+ int newTopIndx;
if( currTopIndx != 0 ) {
nodeToFree = NULL;
void* retVal = currTopNode->itsDataArr[currTopIndx];
- INTPTR newTop = dqEncode( newTopTag, newTopNode, newTopIndx );
+ INTPTR newTop = dqEncode(newTopTag, newTopNode, newTopIndx);
// algorithm states above should happen
// before attempting the CAS
BARRIER();
INTPTR actualTop = (INTPTR)
- CAS( &(dq->top), // location
- currTop, // expected value
- newTop ); // desired value
+ CAS(&(dq->top), // location
+ currTop, // expected value
+ newTop); // desired value
if( actualTop == currTop ) {
// CAS succeeded
if( nodeToFree != NULL ) {
- poolfreeinto( dq->memPool, nodeToFree );
+ poolfreeinto(dq->memPool, nodeToFree);
}
return retVal;
}
-void* dqPopBottom ( deque* dq ) {
+void* dqPopBottom(deque* dq) {
INTPTR oldBot = dq->bottom;
- dequeNode* oldBotNode = dqDecodePtr( oldBot );
- int oldBotIndx = dqDecodeIdx( oldBot );
-
+ dequeNode* oldBotNode = dqDecodePtr(oldBot);
+ int oldBotIndx = dqDecodeIdx(oldBot);
+
dequeNode* newBotNode;
- int newBotIndx;
+ int newBotIndx;
if( oldBotIndx != DQNODE_ARRAYSIZE - 1 ) {
newBotNode = oldBotNode;
void* retVal = newBotNode->itsDataArr[newBotIndx];
- dq->bottom = dqEncode( BOTTOM_NULL_TAG, newBotNode, newBotIndx );
+ dq->bottom = dqEncode(BOTTOM_NULL_TAG, newBotNode, newBotIndx);
// algorithm states above should happen
// before attempting the CAS
INTPTR currTop = dq->top;
- int currTopTag = dqDecodeTag( currTop );
- dequeNode* currTopNode = dqDecodePtr( currTop );
- int currTopIndx = dqDecodeIdx( currTop );
+ int currTopTag = dqDecodeTag(currTop);
+ dequeNode* currTopNode = dqDecodePtr(currTop);
+ int currTopIndx = dqDecodeIdx(currTop);
if( oldBotNode == currTopNode &&
oldBotIndx == currTopIndx ) {
- dq->bottom = dqEncode( BOTTOM_NULL_TAG, oldBotNode, oldBotIndx );
+ dq->bottom = dqEncode(BOTTOM_NULL_TAG, oldBotNode, oldBotIndx);
return DQ_POP_EMPTY;
} else if( newBotNode == currTopNode &&
newBotIndx == currTopIndx ) {
- INTPTR newTop = dqEncode( currTopTag + 1, currTopNode, currTopIndx );
+ INTPTR newTop = dqEncode(currTopTag + 1, currTopNode, currTopIndx);
INTPTR actualTop = (INTPTR)
- CAS( &(dq->top), // location
- currTop, // expected value
- newTop ); // desired value
+ CAS(&(dq->top), // location
+ currTop, // expected value
+ newTop); // desired value
if( actualTop == currTop ) {
// CAS succeeded
if( oldBotNode != newBotNode ) {
- poolfreeinto( dq->memPool, oldBotNode );
+ poolfreeinto(dq->memPool, oldBotNode);
}
return retVal;
-
+
} else {
- dq->bottom = dqEncode( BOTTOM_NULL_TAG, oldBotNode, oldBotIndx );
+ dq->bottom = dqEncode(BOTTOM_NULL_TAG, oldBotNode, oldBotIndx);
return DQ_POP_EMPTY;
}
-
+
} else {
if( oldBotNode != newBotNode ) {
- poolfreeinto( dq->memPool, oldBotNode );
+ poolfreeinto(dq->memPool, oldBotNode);
}
- return retVal;
+ return retVal;
}
}
} deque;
-void dqInit ( deque* dq );
-void dqPushBottom( deque* dq, void* item );
-void* dqPopTop ( deque* dq );
-void* dqPopBottom ( deque* dq );
+void dqInit(deque* dq);
+void dqPushBottom(deque* dq, void* item);
+void* dqPopTop(deque* dq);
+void* dqPopBottom(deque* dq);
// pop operations may return these values
} dequeNode;
-static inline int dqDecodeTag( INTPTR E ) { return (int) ((0xffffe00000000000 & E) >> 45); }
-static inline dequeNode* dqDecodePtr( INTPTR E ) { return (dequeNode*) ((0x00001ffffffffe00 & E) << 3); }
-static inline int dqDecodeIdx( INTPTR E ) { return (int) ((0x00000000000001ff & E) ); }
+static inline int dqDecodeTag(INTPTR E) {
+ return (int) ((0xffffe00000000000 & E) >> 45);
+}
+static inline dequeNode* dqDecodePtr(INTPTR E) {
+ return (dequeNode*) ((0x00001ffffffffe00 & E) << 3);
+}
+static inline int dqDecodeIdx(INTPTR E) {
+ return (int) ((0x00000000000001ff & E) );
+}
#endif // ___DEQUE_H__
int CALL01(___FileOutputStream______nativeAppend_____AR_B, struct ArrayObject * ___filename___) {
#ifdef MULTICORE
return 0;
-#else
+#else
int length=VAR(___filename___)->___length___;
char* filename= (((char *)&VAR(___filename___)->___length___)+sizeof(int));
int fd=open(filename, O_WRONLY|O_CREAT|O_APPEND, S_IRWXU);
if (spare!=NULL) {
tmp=spare;
spare=NULL;
- } else tmp=malloc(sizeof(struct pointerblock));
+ } else tmp=malloc(sizeof(struct pointerblock));
head->next=tmp;
head=tmp;
headindex=0;
void fixobjlist(struct objlist * ptr) {
while(ptr!=NULL) {
int i;
- for(i=0;i<ptr->offset;i++) {
+ for(i=0; i<ptr->offset; i++) {
SENQUEUE(ptr->objs[i], ptr->objs[i]);
}
ptr=ptr->next;
unsigned int index;
int isfirst;
chashlistnode_t *newlist=NULL;
- for(i=0;i<tc_size;i++) {
+ for(i=0; i<tc_size; i++) {
curr=&ptr[i];
isfirst=1;
do { //Inner loop to go through linked lists
void * key;
chashlistnode_t *tmp,*next;
-
+
if ((key=(void *)curr->key) == 0) { //Exit inner loop if there the first element is 0
break; //key = val =0 for element if not present within the hash table
}
if (lockval!=STMNONE) {
int lowi=(j<<INDEXSHIFT)/sizeof(void *);
int highi=lowi+(INDEXLENGTH/sizeof(void *));
- for(i=lowi; i<highi;i++) {
+ for(i=lowi; i<highi; i++) {
#else
- for(i=0; i<length; i++) {
+ for(i=0; i<length; i++) {
#endif
void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
SENQUEUE(objptr, ((void **)(((char *)&ao->___length___)+sizeof(int)))[i]);
}
}
#endif
- } else {
- INTPTR size=pointer[0];
- int i;
- for(i=1; i<=size; i++) {
- unsigned int offset=pointer[i];
- void * objptr=*((void **)(((char *)vptr)+offset));
- SENQUEUE(objptr, *((void **)(((char *)vptr)+offset)));
+ } else {
+ INTPTR size=pointer[0];
+ int i;
+ for(i=1; i<=size; i++) {
+ unsigned int offset=pointer[i];
+ void * objptr=*((void **)(((char *)vptr)+offset));
+ SENQUEUE(objptr, *((void **)(((char *)vptr)+offset)));
+ }
+ }
}
- }
- }
- next = curr->next;
- index = (((unsigned INTPTR)key) & mask) >>4;
-
- curr->key=key;
- tmp=&node[index];
- // Insert into the new table
- if(tmp->key == 0) {
- tmp->key = curr->key;
- tmp->val = curr->val;
- tmp->lnext=newlist;
- newlist=tmp;
- } else if (isfirst) {
- chashlistnode_t *newnode;
- if ((*cstr)->num<NUMCLIST) {
- newnode=&(*cstr)->array[(*cstr)->num];
- (*cstr)->num++;
- } else {
- //get new list
- cliststruct_t *tcl=calloc(1,sizeof(cliststruct_t));
- tcl->next=*cstr;
- *cstr=tcl;
- newnode=&tcl->array[0];
- tcl->num=1;
+ next = curr->next;
+ index = (((unsigned INTPTR)key) & mask) >>4;
+
+ curr->key=key;
+ tmp=&node[index];
+ // Insert into the new table
+ if(tmp->key == 0) {
+ tmp->key = curr->key;
+ tmp->val = curr->val;
+ tmp->lnext=newlist;
+ newlist=tmp;
+ } else if (isfirst) {
+ chashlistnode_t *newnode;
+ if ((*cstr)->num<NUMCLIST) {
+ newnode=&(*cstr)->array[(*cstr)->num];
+ (*cstr)->num++;
+ } else {
+ //get new list
+ cliststruct_t *tcl=calloc(1,sizeof(cliststruct_t));
+ tcl->next=*cstr;
+ *cstr=tcl;
+ newnode=&tcl->array[0];
+ tcl->num=1;
+ }
+ newnode->key = curr->key;
+ newnode->val = curr->val;
+ newnode->next = tmp->next;
+ newnode->lnext=newlist;
+ newlist=newnode;
+ tmp->next=newnode;
+ } else {
+ curr->lnext=newlist;
+ newlist=curr;
+ curr->next=tmp->next;
+ tmp->next=curr;
+ }
+ isfirst = 0;
+ curr = next;
}
- newnode->key = curr->key;
- newnode->val = curr->val;
- newnode->next = tmp->next;
- newnode->lnext=newlist;
- newlist=newnode;
- tmp->next=newnode;
- } else {
- curr->lnext=newlist;
- newlist=curr;
- curr->next=tmp->next;
- tmp->next=curr;
+ while(curr!=NULL) ;
}
- isfirst = 0;
- curr = next;
- } while(curr!=NULL);
- }
- free(ptr);
- (*tc_table)=node;
- (*tc_list)=newlist;
-}
+ free(ptr);
+ (*tc_table)=node;
+ (*tc_list)=newlist;
+ }
#endif
int moreItems() {
#ifdef GARBAGESTATS
{
int i;
- for(i=0;i<MAXSTATS;i++)
+ for(i=0; i<MAXSTATS; i++)
garbagearray[i]=0;
}
#endif
while(jniptr!=NULL) {
int i;
//update table
- for(i=0;i<jniptr->index;i++) {
+ for(i=0; i<jniptr->index; i++) {
ENQUEUE((struct ___Object___ *)jniptr->array[i].ref, *((struct ___Object___**)&jniptr->array[i].ref));
}
//go to next table
jniptr=jniptr->next;
- }
+ }
}
#endif
#else
litem.stackptr=stackptr;
#endif
-
+
while(listptr!=NULL) {
searchstack(listptr->stackptr);
#ifdef THREADS
struct lockvector * lvector=listptr->lvector;
int i;
- for(i=0;i<lvector->index;i++) {
+ for(i=0; i<lvector->index; i++) {
struct ___Object___ *orig=lvector->locks[i].object;
ENQUEUE(orig, lvector->locks[i].object);
}
stackptr=(struct garbagelist *) &arraystack;
#endif
#endif
-
+
searchroots(stackptr);
while(moreItems()) {
char * memorytop=*(char **)pthread_getspecific(memorytopkey);
#endif
if (memorybase==NULL||size>(memorytop-memorybase)) {
- int toallocate=(size>MEMORYBLOCK)?size:MEMORYBLOCK;
+ int toallocate=(size>MEMORYBLOCK) ? size : MEMORYBLOCK;
memorybase=helper(stackptr, toallocate);
bzero(memorybase, toallocate);
memorytop=memorybase+toallocate;
curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
curr_heapptr=curr_heapbase+size;
-
+
to_heapbase=malloc(INITIALHEAPSIZE);
if (to_heapbase==NULL) {
printf("malloc failed. Garbage collector couldn't get enough memory. Try changing heap size.\n");
exit(-1);
}
-
+
to_heaptop=to_heapbase+INITIALHEAPSIZE;
to_heapptr=to_heapbase;
ptr=curr_heapbase;
printf("Total space: %u\n", to_heaptop-to_heapbase);
{
int i;
- for(i=0;i<MAXSTATS;i++) {
+ for(i=0; i<MAXSTATS; i++) {
if (garbagearray[i]!=0)
printf("Type=%d Size=%u\n", i, garbagearray[i]);
}
}
}
-int within(void *ptr){ //debug function
- if(ptr>curr_heapptr || ptr<curr_heapbase){
+int within(void *ptr) { //debug function
+ if(ptr>curr_heapptr || ptr<curr_heapbase) {
__asm__ __volatile__ ("int $3"); // breakpoint
}
}
if ((!(((unsigned int)orig)&0x1))) { \
if (orig>=curr_heapbase&&orig<curr_heaptop) { \
void *copy; \
-      if (gc_createcopy(orig,&copy)) \
-	enqueue(copy);\
+      if (gc_createcopy(orig,&copy)) \
+	enqueue(copy); \
dst=copy; \
} \
}
#define ENQUEUE(orig, dst) \
if (orig>=curr_heapbase&&orig<curr_heaptop) { \
void *copy; \
-      if (gc_createcopy(orig,&copy)) \
-	enqueue(copy);\
+      if (gc_createcopy(orig,&copy)) \
+	enqueue(copy); \
dst=copy; \
}
#define SENQUEUE(orig, dst) \
{ \
void *copy; \
-      if (gc_createcopy(orig,&copy)) \
-	enqueue(copy);\
+      if (gc_createcopy(orig,&copy)) \
+	enqueue(copy); \
dst=copy; \
}
#elif defined(FASTCHECK)
#define ENQUEUE(orig, dst) \
if (((unsigned int)orig)!=1) { \
void *copy; \
-      if (gc_createcopy(orig,&copy)) \
-	enqueue(copy);\
+      if (gc_createcopy(orig,&copy)) \
+	enqueue(copy); \
dst=copy; }
#else
#define ENQUEUE(orig, dst) \
if (orig!=NULL) { \
void *copy; \
-      if (gc_createcopy(orig,&copy)) \
-	enqueue(copy); \
+      if (gc_createcopy(orig,&copy)) \
+	enqueue(copy); \
dst=copy; \
}
#endif
-struct garbagelist {
+struct garbagelist {
int size;
- struct garbagelist *next;
- void * array[];
+ struct garbagelist *next;
+ void * array[];
};
extern void * curr_heapbase;
struct listitem {
struct listitem * prev;
struct listitem * next;
- struct garbagelist * stackptr;
+ struct garbagelist * stackptr;
#ifdef THREADS
struct lockvector * lvector;
#endif
#ifdef MULTICORE_GC
extern volatile bool gcflag;
-void * mycalloc_share(struct garbagelist * stackptr,
- int m,
- int size) {
+void * mycalloc_share(struct garbagelist * stackptr,
+ int m,
+ int size) {
void * p = NULL;
//int isize = 2*BAMBOO_CACHE_LINE_SIZE-4+(size-1)&(~BAMBOO_CACHE_LINE_MASK);
int isize = (size & (~(BAMBOO_CACHE_LINE_MASK))) + (BAMBOO_CACHE_LINE_SIZE);
memalloc:
BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
while(gcflag) {
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- gc(stackptr);
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
+ BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
+ gc(stackptr);
+ BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
}
p = BAMBOO_SHARE_MEM_CALLOC_I(m, isize); // calloc(m, isize);
if(p == NULL) {
- // no more global shared memory
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- if(hasgc < 5) {
- // start gc
- while(gcflag) {
- gc(stackptr);
- }
- hasgc++;
- } else {
- // no more global shared memory
- BAMBOO_EXIT(0xc001);
- }
+ // no more global shared memory
+ BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
+ if(hasgc < 5) {
+ // start gc
+ while(gcflag) {
+ gc(stackptr);
+ }
+ hasgc++;
+ } else {
+ // no more global shared memory
+ BAMBOO_EXIT(0xc001);
+ }
- // try to malloc again
- goto memalloc;
+ // try to malloc again
+ goto memalloc;
}
BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- void * alignedp =
- (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK));
- BAMBOO_MEMSET_WH(p, -2, (alignedp - p));
+ void * alignedp =
+ (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK));
+ BAMBOO_MEMSET_WH(p, -2, (alignedp - p));
BAMBOO_MEMSET_WH(alignedp + size, -2, p + isize - alignedp - size);
- return alignedp;
+ return alignedp;
}
#else
-void * mycalloc_share(int m,
- int size) {
+void * mycalloc_share(int m,
+ int size) {
void * p = NULL;
//int isize = 2*BAMBOO_CACHE_LINE_SIZE-4+(size-1)&(~BAMBOO_CACHE_LINE_MASK);
int isize = (size & (~(BAMBOO_CACHE_LINE_MASK))) + (BAMBOO_CACHE_LINE_SIZE);
BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
p = BAMBOO_SHARE_MEM_CALLOC_I(m, isize); // calloc(m, isize);
if(p == NULL) {
- // no more global shared memory
- BAMBOO_EXIT(0xc002);
+ // no more global shared memory
+ BAMBOO_EXIT(0xc002);
}
BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- return
- (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK));
+ return
+ (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK));
}
#endif
-void * mycalloc(int m,
- int size,
- char * file,
- int line) {
+void * mycalloc(int m,
+ int size,
+ char * file,
+ int line) {
void * p = NULL;
- int isize = size;
+ int isize = size;
BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
#ifdef MULTICORE_GC
extern bool gc_localheap_s;
inermycalloc_i:
- p = gc_localheap_s ? BAMBOO_LOCAL_MEM_CALLOC_S(m, isize) :
- BAMBOO_LOCAL_MEM_CALLOC(m, isize);
+ p = gc_localheap_s ? BAMBOO_LOCAL_MEM_CALLOC_S(m, isize) :
+ BAMBOO_LOCAL_MEM_CALLOC(m, isize);
#else
p = BAMBOO_LOCAL_MEM_CALLOC(m, isize); // calloc(m, isize);
#endif
if(p == NULL) {
#ifdef MULTICORE_GC
- if(!gc_localheap_s) {
- gc_localheap_s = true;
- goto inermycalloc_i;
- }
+ if(!gc_localheap_s) {
+ gc_localheap_s = true;
+ goto inermycalloc_i;
+ }
#endif
- printf("mycalloc %s %d \n", file, line);
- BAMBOO_EXIT(0xc003);
+ printf("mycalloc %s %d \n", file, line);
+ BAMBOO_EXIT(0xc003);
}
BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
return p;
}
-void * mycalloc_i(int m,
- int size,
- char * file,
- int line) {
+void * mycalloc_i(int m,
+ int size,
+ char * file,
+ int line) {
void * p = NULL;
- int isize = size;
+ int isize = size;
#ifdef MULTICORE_GC
extern bool gc_localheap_s;
inermycalloc_i:
- p = gc_localheap_s ? BAMBOO_LOCAL_MEM_CALLOC_S(m, isize) :
- BAMBOO_LOCAL_MEM_CALLOC(m, isize);
+ p = gc_localheap_s ? BAMBOO_LOCAL_MEM_CALLOC_S(m, isize) :
+ BAMBOO_LOCAL_MEM_CALLOC(m, isize);
#else
p = BAMBOO_LOCAL_MEM_CALLOC(m, isize); // calloc(m, isize);
#endif
if(p == NULL) {
#ifdef MULTICORE_GC
- if(!gc_localheap_s) {
- gc_localheap_s = true;
- goto inermycalloc_i;
- }
+ if(!gc_localheap_s) {
+ gc_localheap_s = true;
+ goto inermycalloc_i;
+ }
#endif
- tprintf("macalloc_i %s %d \n", file, line);
- BAMBOO_EXIT(0xc004);
+ tprintf("macalloc_i %s %d \n", file, line);
+ BAMBOO_EXIT(0xc004);
}
return p;
}
#ifdef MULTICORE_GC
if(ptr >= BAMBOO_LOCAL_HEAP_START_VA ) {
#endif
- BAMBOO_LOCAL_MEM_FREE(ptr);
+ BAMBOO_LOCAL_MEM_FREE(ptr);
#ifdef MULTICORE_GC
- } else if(ptr >= BAMBOO_LOCAL_HEAP_START_VA_S) {
- BAMBOO_LOCAL_MEM_FREE_S(ptr);
- }
+} else if(ptr >= BAMBOO_LOCAL_HEAP_START_VA_S) {
+ BAMBOO_LOCAL_MEM_FREE_S(ptr);
+}
#endif
return;
}
//////////////////////////////////////////////////////////
//
-// A memory pool implements POOLCREATE, POOLALLOC and
+// A memory pool implements POOLCREATE, POOLALLOC and
// POOLFREE to improve memory allocation by reusing records.
//
// This implementation uses a lock-free singly-linked list
// only invoke this on items that are
// actually new, saves time for reused
// items
- void(*initFreshlyAllocated)(void*);
+ void (*initFreshlyAllocated)(void*);
#ifdef MEMPOOL_DETECT_MISUSE
int allocSize;
// the memory pool must always have at least one
// item in it
-static MemPool* poolcreate( int itemSize,
- void(*initializer)(void*)
- ) {
+static MemPool* poolcreate(int itemSize,
+ void (*initializer)(void*)
+ ) {
- MemPool* p = RUNMALLOC( sizeof( MemPool ) );
+ MemPool* p = RUNMALLOC(sizeof( MemPool ) );
p->itemSize = itemSize;
-
+
p->initFreshlyAllocated = initializer;
#ifdef MEMPOOL_DETECT_MISUSE
// when detecting misuse, round the item size
// up to a page and add a page, so whatever
// allocated memory you get, you can use a
- // page-aligned subset as the record
- pageSize = sysconf( _SC_PAGESIZE );
+ // page-aligned subset as the record
+ pageSize = sysconf(_SC_PAGESIZE);
if( itemSize % pageSize == 0 ) {
// if the item size is already an exact multiple
#else
// normal version
- p->head = RUNMALLOC( p->itemSize );
+ p->head = RUNMALLOC(p->itemSize);
if( p->initFreshlyAllocated != NULL ) {
- p->initFreshlyAllocated( p->head );
+ p->initFreshlyAllocated(p->head);
}
p->head->next = NULL;
#ifdef MEMPOOL_DETECT_MISUSE
-static inline void poolfreeinto( MemPool* p, void* ptr ) {
+static inline void poolfreeinto(MemPool* p, void* ptr) {
// don't actually return memory to the pool, just lock
// it up tight so first code to touch it badly gets caught
// also, mprotect automatically protects full pages
- if( mprotect( ptr, p->protectSize, PROT_NONE ) != 0 ) {
+ if( mprotect(ptr, p->protectSize, PROT_NONE) != 0 ) {
switch( errno ) {
-
+
case ENOMEM: {
- printf( "mprotect failed, ENOMEM.\n" );
+ printf("mprotect failed, ENOMEM.\n");
} break;
default:
- printf( "mprotect failed, errno=%d.\n", errno );
- }
+ printf("mprotect failed, errno=%d.\n", errno);
+ }
- printf( "itemSize is 0x%x, allocSize is 0x%x, protectSize is 0x%x.\n", (INTPTR)p->itemSize, (INTPTR)p->allocSize, (INTPTR)p->protectSize );
- printf( "Intended to protect 0x%x to 0x%x,\n\n", (INTPTR)ptr, (INTPTR)ptr + (INTPTR)(p->protectSize) );
+ printf("itemSize is 0x%x, allocSize is 0x%x, protectSize is 0x%x.\n", (INTPTR)p->itemSize, (INTPTR)p->allocSize, (INTPTR)p->protectSize);
+ printf("Intended to protect 0x%x to 0x%x,\n\n", (INTPTR)ptr, (INTPTR)ptr + (INTPTR)(p->protectSize) );
- exit( -1 );
+ exit(-1);
}
}
// normal version
-static inline void poolfreeinto( MemPool* p, void* ptr ) {
+static inline void poolfreeinto(MemPool* p, void* ptr) {
MemPoolItem* tailNew = (MemPoolItem*) ptr;
tailNew->next = NULL;
CFENCE;
#ifdef MEMPOOL_DETECT_MISUSE
-static inline void* poolalloc( MemPool* p ) {
+static inline void* poolalloc(MemPool* p) {
// put the memory we intend to expose to client
// on a page-aligned boundary, always return
// new memory
- INTPTR nonAligned = (INTPTR) RUNMALLOC( p->allocSize );
+ INTPTR nonAligned = (INTPTR) RUNMALLOC(p->allocSize);
void* newRec = (void*)((nonAligned + pageSize-1) & ~(pageSize-1));
//printf( "itemSize is 0x%x, allocSize is 0x%x, protectSize is 0x%x.\n", (INTPTR)p->itemSize, (INTPTR)p->allocSize, (INTPTR)p->protectSize );
//printf( "Allocation returned 0x%x to 0x%x,\n", (INTPTR)nonAligned, (INTPTR)nonAligned + (INTPTR)(p->allocSize) );
//printf( "Intend to use 0x%x to 0x%x,\n\n", (INTPTR)newRec, (INTPTR)newRec + (INTPTR)(p->itemSize) );
-
+
// intentionally touch the top of the new, aligned record in terms of the
// pages that will be locked when it eventually is free'd
INTPTR topOfRec = (INTPTR)newRec;
((char*)topOfRec)[0] = 0x1;
if( p->initFreshlyAllocated != NULL ) {
- p->initFreshlyAllocated( newRec );
+ p->initFreshlyAllocated(newRec);
}
return newRec;
#else
// normal version
-static inline void* poolalloc( MemPool* p ) {
+static inline void* poolalloc(MemPool* p) {
// to protect CAS in poolfree from dereferencing
// null, treat the queue as empty when there is
// only one item, so don't take from pool
void *newRec=RUNMALLOC(p->itemSize);
if( p->initFreshlyAllocated != NULL ) {
- p->initFreshlyAllocated( newRec );
+ p->initFreshlyAllocated(newRec);
}
return newRec;
}
-
+
p->head = next;
- asm volatile( "prefetcht0 (%0)" :: "r" (next));
+ asm volatile ( "prefetcht0 (%0)" :: "r" (next));
next=(MemPoolItem*)(((char *)next)+CACHELINESIZE);
- asm volatile( "prefetcht0 (%0)" :: "r" (next));
+ asm volatile ( "prefetcht0 (%0)" :: "r" (next));
return (void*)headCurrent;
}
-static void pooldestroy( MemPool* p ) {
+static void pooldestroy(MemPool* p) {
#ifndef MEMPOOL_DETECT_MISUSE
MemPoolItem* i = p->head;
while( i != NULL ) {
n = i->next;
- free( i );
+ free(i);
i = n;
}
#endif
- free( p );
+ free(p);
}
#define __xg(x) ((volatile INTPTR *)(x))
-#define CFENCE asm volatile("":::"memory");
-#define MFENCE asm volatile("mfence":::"memory");
+#define CFENCE asm volatile ("" ::: "memory");
+#define MFENCE asm volatile ("mfence" ::: "memory");
#define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \
" .align 4\n" \
- " .long 661f\n" /* address */\
+ " .long 661f\n" /* address */ \
".previous\n" \
"661:\n\tlock; "
static inline int atomicincandread(volatile unsigned int *lock) {
int retval=1;
- __asm__ __volatile__("lock; xadd %0,%1"
- : "=r"(retval)
- : "m"(*lock), "0"(retval)
- : "memory");
+ __asm__ __volatile__ ("lock; xadd %0,%1"
+ : "=r" (retval)
+ : "m" (*lock), "0" (retval)
+ : "memory");
return retval;
}
static inline void atomic_dec(volatile int *v) {
__asm__ __volatile__ (LOCK_PREFIX "decl %0"
- : "+m" (*v));
+ : "+m" (*v));
}
static inline void atomic_inc(volatile int *v) {
__asm__ __volatile__ (LOCK_PREFIX "incl %0"
- : "+m" (*v));
+ : "+m" (*v));
}
// this returns TRUE if the atomic subtraction results in
unsigned char c;
__asm__ __volatile__ (LOCK_PREFIX "subl %2,%0; sete %1"
- : "+m" (*v), "=qm" (c)
- : "ir" (i) : "memory");
+ : "+m" (*v), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}
static inline void atomic_add(int i, volatile int *v) {
__asm__ __volatile__ (LOCK_PREFIX "addl %1,%0"
- : "+m" (*v)
- : "ir" (i));
+ : "+m" (*v)
+ : "ir" (i));
}
-static inline int LOCKXCHG32(volatile int* ptr, int val){
+static inline int LOCKXCHG32(volatile int* ptr, int val) {
int retval;
- //note: xchgl always implies lock
- __asm__ __volatile__("xchgl %0,%1"
- : "=r"(retval)
- : "m"(*ptr), "0"(val)
- : "memory");
+ //note: xchgl always implies lock
+ __asm__ __volatile__ ("xchgl %0,%1"
+ : "=r" (retval)
+ : "m" (*ptr), "0" (val)
+ : "memory");
return retval;
-
+
}
// LOCKXCH atomically does the following:
-// INTPTR retval=*ptr;
-// *ptr=val;
+// INTPTR retval=*ptr;
+// *ptr=val;
// return retval
#ifdef BIT64
-static inline INTPTR LOCKXCHG(volatile INTPTR * ptr, INTPTR val){
+static inline INTPTR LOCKXCHG(volatile INTPTR * ptr, INTPTR val) {
INTPTR retval;
- //note: xchgl always implies lock
- __asm__ __volatile__("xchgq %0,%1"
- : "=r"(retval)
- : "m"(*ptr), "0"(val)
- : "memory");
+ //note: xchgl always implies lock
+ __asm__ __volatile__ ("xchgq %0,%1"
+ : "=r" (retval)
+ : "m" (*ptr), "0" (val)
+ : "memory");
return retval;
-
+
}
#else
#define LOCKXCHG LOCKXCHG32
#endif
/*
-static inline int write_trylock(volatile int *lock) {
- int retval=0;
- __asm__ __volatile__("xchgl %0,%1"
- : "=r"(retval)
- : "m"(*lock), "0"(retval)
- : "memory");
- return retval;
-}
-*/
+ static inline int write_trylock(volatile int *lock) {
+ int retval=0;
+ __asm__ __volatile__("xchgl %0,%1"
+ : "=r"(retval)
+ : "m"(*lock), "0"(retval)
+ : "memory");
+ return retval;
+ }
+ */
#ifdef BIT64
-static inline INTPTR CAS(volatile void *ptr, unsigned INTPTR old, unsigned INTPTR new){
+static inline INTPTR CAS(volatile void *ptr, unsigned INTPTR old, unsigned INTPTR new) {
unsigned INTPTR prev;
- __asm__ __volatile__("lock; cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ __asm__ __volatile__ ("lock; cmpxchgq %1,%2"
+ : "=a" (prev)
+ : "r" (new), "m" (*__xg(ptr)), "0" (old)
+ : "memory");
return prev;
}
-static inline long CAS32(volatile void *ptr, unsigned long old, unsigned long new){
+static inline long CAS32(volatile void *ptr, unsigned long old, unsigned long new) {
unsigned long prev;
- __asm__ __volatile__("lock; cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ __asm__ __volatile__ ("lock; cmpxchgl %k1,%2"
+ : "=a" (prev)
+ : "r" (new), "m" (*__xg(ptr)), "0" (old)
+ : "memory");
return prev;
}
#else
-static inline long CAS(volatile void *ptr, unsigned long old, unsigned long new){
+static inline long CAS(volatile void *ptr, unsigned long old, unsigned long new) {
unsigned long prev;
- __asm__ __volatile__("lock; cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ __asm__ __volatile__ ("lock; cmpxchgl %k1,%2"
+ : "=a" (prev)
+ : "r" (new), "m" (*__xg(ptr)), "0" (old)
+ : "memory");
return prev;
}
#define CAS32 CAS
#endif
-static inline int BARRIER(){
+static inline int BARRIER() {
CFENCE;
return 1;
}
-static inline int MBARRIER(){
+static inline int MBARRIER() {
MFENCE;
return 1;
}
// pass this into the poolcreate so it will run your
// custom init code ONLY for fresh records, reused records
// can be returned as is
-void freshTaskRecordInitializer( void* seseRecord ) {
+void freshTaskRecordInitializer(void* seseRecord) {
SESEcommon* c = (SESEcommon*) seseRecord;
- pthread_cond_init( &(c->runningChildrenCond), NULL );
- pthread_mutex_init( &(c->lock), NULL );
+ pthread_cond_init(&(c->runningChildrenCond), NULL);
+ pthread_mutex_init(&(c->lock), NULL);
c->refCount = 0;
//c->fresh = 1;
}
-void* mlpAllocSESErecord( int size ) {
- void* newrec = RUNMALLOC( size );
+void* mlpAllocSESErecord(int size) {
+ void* newrec = RUNMALLOC(size);
if( newrec == 0 ) {
- printf( "mlpAllocSESErecord did not obtain memory!\n" );
- exit( -1 );
+ printf("mlpAllocSESErecord did not obtain memory!\n");
+ exit(-1);
}
return newrec;
}
-void mlpFreeSESErecord( SESEcommon* seseRecord ) {
- RUNFREE( seseRecord );
+void mlpFreeSESErecord(SESEcommon* seseRecord) {
+ RUNFREE(seseRecord);
}
-MemoryQueue** mlpCreateMemoryQueueArray(int numMemoryQueue){
+MemoryQueue** mlpCreateMemoryQueueArray(int numMemoryQueue) {
int i;
- MemoryQueue** newMemoryQueue=(MemoryQueue**)RUNMALLOC( sizeof( MemoryQueue* ) * numMemoryQueue );
- for(i=0; i<numMemoryQueue; i++){
+ MemoryQueue** newMemoryQueue=(MemoryQueue**)RUNMALLOC(sizeof( MemoryQueue* ) * numMemoryQueue);
+ for(i=0; i<numMemoryQueue; i++) {
newMemoryQueue[i]=createMemoryQueue();
}
return newMemoryQueue;
}
-REntry* mlpCreateFineREntry(MemoryQueue* q, int type, SESEcommon* seseToIssue, void* dynID){
+REntry* mlpCreateFineREntry(MemoryQueue* q, int type, SESEcommon* seseToIssue, void* dynID) {
#ifdef OOO_DISABLE_TASKMEMPOOL
REntry* newREntry=(REntry*)RUNMALLOC(sizeof(REntry));
#else
}
}
-int isParentCoarse(REntry *r){
- if (r->type==PARENTCOARSE){
+int isParentCoarse(REntry *r) {
+ if (r->type==PARENTCOARSE) {
return TRUE;
- }else{
+ } else {
return FALSE;
}
}
}
}
-int isCoarse(REntry *r){
- if(r->type==COARSE || r->type==PARENTCOARSE){
+int isCoarse(REntry *r) {
+ if(r->type==COARSE || r->type==PARENTCOARSE) {
return TRUE;
} else {
return FALSE;
}
}
-int isSCC(REntry *r){
- if(r->type==SCCITEM){
+int isSCC(REntry *r) {
+ if(r->type==SCCITEM) {
return TRUE;
} else {
return FALSE;
}
}
-int isSingleItem(MemoryQueueItem *qItem){
- if(qItem->type==SINGLEITEM){
+int isSingleItem(MemoryQueueItem *qItem) {
+ if(qItem->type==SINGLEITEM) {
return TRUE;
} else {
return FALSE;
}
}
-int isHashtable(MemoryQueueItem *qItem){
- if(qItem->type==HASHTABLE){
+int isHashtable(MemoryQueueItem *qItem) {
+ if(qItem->type==HASHTABLE) {
return TRUE;
} else {
return FALSE;
}
}
-int isVector(MemoryQueueItem *qItem){
- if(qItem->type==VECTOR){
+int isVector(MemoryQueueItem *qItem) {
+ if(qItem->type==VECTOR) {
return TRUE;
} else {
return FALSE;
}
}
-int isReadBinItem(BinItem* b){
- if(b->type==READBIN){
+int isReadBinItem(BinItem* b) {
+ if(b->type==READBIN) {
return TRUE;
- }else{
+ } else {
return FALSE;
}
}
-int isWriteBinItem(BinItem* b){
- if(b->type==WRITEBIN){
+int isWriteBinItem(BinItem* b) {
+ if(b->type==WRITEBIN) {
return TRUE;
- }else{
+ } else {
return FALSE;
}
}
-int generateKey(unsigned int data){
+int generateKey(unsigned int data) {
return (data&H_MASK);
}
-Hashtable* createHashtable(){
+Hashtable* createHashtable() {
int i=0;
Hashtable* newTable=(Hashtable*)RUNMALLOC(sizeof(Hashtable));
newTable->item.type=HASHTABLE;
- for(i=0;i<NUMBINS;i++){
+ for(i=0; i<NUMBINS; i++) {
newTable->array[i]=(BinElement*)RUNMALLOC(sizeof(BinElement));
newTable->array[i]->head=NULL;
newTable->array[i]->tail=NULL;
return newTable;
}
-WriteBinItem* createWriteBinItem(){
+WriteBinItem* createWriteBinItem() {
WriteBinItem* binitem=(WriteBinItem*)RUNMALLOC(sizeof(WriteBinItem));
binitem->item.type=WRITEBIN;
return binitem;
}
-ReadBinItem* createReadBinItem(){
+ReadBinItem* createReadBinItem() {
ReadBinItem* binitem=(ReadBinItem*)RUNMALLOC(sizeof(ReadBinItem));
binitem->index=0;
binitem->item.type=READBIN;
return binitem;
}
-Vector* createVector(){
+Vector* createVector() {
Vector* vector=(Vector*)RUNMALLOC(sizeof(Vector));
vector->index=0;
vector->item.type=VECTOR;
return vector;
}
-SCC* createSCC(){
+SCC* createSCC() {
SCC* scc=(SCC*)RUNMALLOC(sizeof(SCC));
scc->item.type=SINGLEITEM;
return scc;
}
-MemoryQueue* createMemoryQueue(){
+MemoryQueue* createMemoryQueue() {
MemoryQueue* queue = (MemoryQueue*)RUNMALLOC(sizeof(MemoryQueue));
MemoryQueueItem* dummy=(MemoryQueueItem*)RUNMALLOC(sizeof(MemoryQueueItem));
dummy->type=3; // dummy type
queue->head = dummy;
queue->tail = dummy;
#ifndef OOO_DISABLE_TASKMEMPOOL
- queue->rentrypool = poolcreate( sizeof(REntry), NULL );
+ queue->rentrypool = poolcreate(sizeof(REntry), NULL);
#endif
return queue;
}
int ADDRENTRY(MemoryQueue * q, REntry * r) {
- if (isFineRead(r) || isFineWrite(r)) {
+ if (isFineRead(r) || isFineWrite(r)) {
return ADDTABLE(q, r);
} else if (isCoarse(r)) {
return ADDVECTOR(q, r);
tail->next=(MemoryQueueItem*)h;
//************NEED memory barrier here to ensure compiler does not cache Q.tail.status********
MBARRIER();
- if (tail->status==READY && tail->total==0 && q->tail==q->head) {
+ if (tail->status==READY && tail->total==0 && q->tail==q->head) {
//previous Q item is finished
h->item.status=READY;
}
q->tail=(MemoryQueueItem*)h;
// handle the the queue item case
- if(q->head->type==3){
+ if(q->head->type==3) {
q->head=(MemoryQueueItem*)h;
}
}
//at this point, have table
Hashtable* table=(Hashtable*)q->tail;
r->qitem=(MemoryQueueItem *) table; // set rentry's hashtable
- if( *(r->pointer)==0 ||
- ( *(r->pointer)!=0 &&
- BARRIER() &&
+ if( *(r->pointer)==0 ||
+ ( *(r->pointer)!=0 &&
+ BARRIER() &&
table->unresolvedQueue!=NULL
- )
- ){
+ )
+ ) {
struct Queue* val;
- // grab lock on the queue
- do {
- val=(struct Queue*)0x1;
+ // grab lock on the queue
+ do {
+ val=(struct Queue*)0x1;
val=(struct Queue*)LOCKXCHG((unsigned INTPTR*)&(table->unresolvedQueue), (unsigned INTPTR)val);
- } while(val==(struct Queue*)0x1);
- if(val==NULL){
+ } while(val==(struct Queue*)0x1);
+ if(val==NULL) {
//queue is null, first case
- if(*(r->pointer)!=0){
+ if(*(r->pointer)!=0) {
// check whether pointer is already resolved, or not.
table->unresolvedQueue=NULL; //released lock;
return ADDTABLEITEM(table,r,TRUE);
}
struct Queue* queue=createQueue();
addNewItemBack(queue,r);
- atomic_inc(&table->item.total);
- table->unresolvedQueue=queue; // expose new queue
- }else{
+ atomic_inc(&table->item.total);
+ table->unresolvedQueue=queue; // expose new queue
+ } else {
// add unresolved rentry at the end of the queue.
addNewItemBack(val,r);
- atomic_inc(&table->item.total);
+ atomic_inc(&table->item.total);
table->unresolvedQueue=val; // released lock
- }
+ }
return NOTREADY;
}
BinItem * val;
// OBJPTRPTR_2_OBJTYPE( r->pointer ) );
// exit( -1 );
//}
- int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
- do {
- val=(BinItem*)0x1;
+ int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer));
+ do {
+ val=(BinItem*)0x1;
BinElement* bin=table->array[key];
- val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val);//note...talk to me about optimizations here.
+ val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val); //note...talk to me about optimizations here.
} while(val==(BinItem*)0x1);
//at this point have locked bin
if (val==NULL) {
}
}
-int ADDTABLEITEM(Hashtable* table, REntry* r, int inc){
-
+int ADDTABLEITEM(Hashtable* table, REntry* r, int inc) {
+
BinItem * val;
- int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
- do {
- val=(BinItem*)0x1;
+ int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer));
+ do {
+ val=(BinItem*)0x1;
BinElement* bin=table->array[key];
- val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val);
+ val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val);
} while(val==(BinItem*)0x1);
//at this point have locked bin
if (val==NULL) {
BinItem* b;
if (isFineWrite(r)) {
b=(BinItem*)createWriteBinItem();
- ((WriteBinItem*)b)->val=r;//<-only different statement
+ ((WriteBinItem*)b)->val=r; //<-only different statement
} else if (isFineRead(r)) {
b=(BinItem*)createReadBinItem();
ReadBinItem* readbin=(ReadBinItem*)b;
}
b->total=1;
- if (T->item.status==READY) {
+ if (T->item.status==READY) {
//current entry is ready
b->status=READY;
retval=READY;
retval=NOTREADY;
}
- if(inc){
+ if(inc) {
atomic_inc(&T->item.total);
}
r->qitem=(MemoryQueueItem *)T;
r->binitem=b;
be->tail=b;
- be->head=b;//released lock
+ be->head=b; //released lock
return retval;
}
b->item.total=1;
// note: If current table clears all dependencies, then write bin is ready
-
-
- if(inc){
+
+
+ if(inc) {
atomic_inc(&T->item.total);
}
//need to check if we can go...
MBARRIER();
if (T->item.status==READY) {
- for(;val!=NULL;val=val->next) {
+ for(; val!=NULL; val=val->next) {
if (val==((BinItem *)b)) {
//ready to retire
retval=READY;
if (isParent(r)) {
- b->item.status=retval;//unsure if really needed at this point..
+ b->item.status=retval; //unsure if really needed at this point..
be->head=NULL; // released lock
return retval;
}
}
}
}
-
+
b->item.status=retval;
be->tail=(BinItem*)b;
be->head=val;
int TAILREADCASE(Hashtable *T, REntry *r, BinItem *val, BinItem *bintail, int key, int inc) {
ReadBinItem * readbintail=(ReadBinItem*)T->array[key]->tail;
int status, retval;
- if (readbintail->item.status==READY) {
+ if (readbintail->item.status==READY) {
status=READY;
retval=READY;
if (isParent(r)) {
- T->array[key]->head=val;//released lock
+ T->array[key]->head=val; //released lock
return READY;
}
} else {
if (readbintail->index==NUMREAD) { // create new read group
ReadBinItem* rb=createReadBinItem();
rb->array[rb->index++]=r;
- rb->item.total=1;//safe only because item could not have started
+ rb->item.total=1; //safe only because item could not have started
rb->item.status=status;
T->array[key]->tail->next=(BinItem*)rb;
T->array[key]->tail=(BinItem*)rb;
atomic_inc(&readbintail->item.total);
r->binitem=(BinItem*)readbintail;
}
- if(inc){
+ if(inc) {
atomic_inc(&T->item.total);
}
r->qitem=(MemoryQueueItem *)T;
- T->array[key]->head=val;//released lock
+ T->array[key]->head=val; //released lock
return retval;
}
//wb->item.status=NOTREADY;
ReadBinItem* rb=createReadBinItem();
rb->array[rb->index++]=r;
- rb->item.total=1;//safe because item could not have started
+ rb->item.total=1; //safe because item could not have started
rb->item.status=NOTREADY;
- if(inc){
+ if(inc) {
atomic_inc(&T->item.total);
}
r->qitem=(MemoryQueueItem *)T;
r->binitem=(BinItem*)rb;
T->array[key]->tail->next=(BinItem*)rb;
T->array[key]->tail=(BinItem*)rb;
- T->array[key]->head=val;//released lock
+ T->array[key]->head=val; //released lock
}
int ADDVECTOR(MemoryQueue *Q, REntry *r) {
}
Q->tail=(MemoryQueueItem*)V;
// handle the the queue item case
- if(Q->head->type==3){
+ if(Q->head->type==3) {
Q->head=(MemoryQueueItem*)V;
}
}
Q->tail->next=(MemoryQueueItem*)V;
//***NEED memory barrier here to ensure compiler does not cache Q.tail.status******
MBARRIER();
- if (Q->tail->status==READY) {
+ if (Q->tail->status==READY) {
V->item.status=READY;
}
if (Q->tail->total==0&&Q->head==Q->tail) {
flag=(void*)LOCKXCHG((unsigned INTPTR*)&(V->array[index]), (unsigned INTPTR)flag);
if (flag!=NULL) {
if (isParentCoarse(r)) { //parent's retire immediately
- atomic_dec(&V->item.total);
- V->index--;
+ atomic_dec(&V->item.total);
+ V->index--;
} else {
#if defined(RCR)&&!defined(OOO_DISABLE_TASKMEMPOOL)
if (atomic_sub_and_test(1, &r->count))
}
return READY;
} else {
- return NOTREADY;//<- means that some other dispatcher got this one...so need to do accounting correctly
+ return NOTREADY; //<- means that some other dispatcher got this one...so need to do accounting correctly
}
} else {
return NOTREADY;
int ADDSCC(MemoryQueue *Q, REntry *r) {
//added SCC
SCC* S=createSCC();
- S->item.total=1;
+ S->item.total=1;
S->val=r;
S->item.status=NOTREADY;
r->qitem=(MemoryQueueItem *)S;
S->item.status=READY;
Q->tail=(MemoryQueueItem*)S;
// handle the the queue item case
- if(Q->head->type==3){
+ if(Q->head->type==3) {
Q->head=(MemoryQueueItem*)S;
}
void* flag=NULL;
#endif
return READY;
} else {
- return NOTREADY;//<- means that some other dispatcher got this one...so need to do accounting correctly
+ return NOTREADY; //<- means that some other dispatcher got this one...so need to do accounting correctly
}
} else {
Q->tail=(MemoryQueueItem*)S;
#ifdef RCR
if (atomic_sub_and_test(1, &r->count))
#endif
- poolfreeinto(Q->rentrypool, r);
+ poolfreeinto(Q->rentrypool, r);
#endif
}
void RETIRESCC(MemoryQueue *Q, REntry *r) {
SCC* s=(SCC *)r->qitem;
- s->item.total=0;//don't need atomicdec
+ s->item.total=0; //don't need atomicdec
#ifdef RCR
void *flag=NULL;
- flag=(void*)LOCKXCHG((unsigned INTPTR*)&(s->val), (unsigned INTPTR)flag);
+ flag=(void*)LOCKXCHG((unsigned INTPTR*)&(s->val), (unsigned INTPTR)flag);
if (flag!=NULL) {
#ifndef OOO_DISABLE_TASKMEMPOOL
RELEASE_REFERENCE_TO(((REntry*)flag)->seseRec);
RETIREBIN(T,r,b);
atomic_dec(&T->item.total);
BARRIER();
- if (T->item.next!=NULL && T->item.total==0) {
+ if (T->item.next!=NULL && T->item.total==0) {
RESOLVECHAIN(q);
}
}
void RETIREBIN(Hashtable *T, REntry *r, BinItem *b) {
- int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
+ int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer));
if(isFineRead(r)) {
atomic_dec(&b->total);
}
if (isFineWrite(r) || (isFineRead(r) && b->next!=NULL && b->total==0)) {
- // CHECK FIRST IF next is nonnull to guarantee that b.total cannot change
+ // CHECK FIRST IF next is nonnull to guarantee that b.total cannot change
BinItem * val;
- do {
+ do {
val=(BinItem*)0x1;
val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(T->array[key]->head), (unsigned INTPTR)val);
} while(val==(BinItem*)0x1);
int haveread=FALSE;
int i;
while (ptr!=NULL) {
- if (isReadBinItem(ptr)) {
+ if (isReadBinItem(ptr)) {
ReadBinItem* rptr=(ReadBinItem*)ptr;
- if (rptr->item.status==NOTREADY) {
- for (i=0;i<rptr->index;i++) {
+ if (rptr->item.status==NOTREADY) {
+ for (i=0; i<rptr->index; i++) {
resolveDependencies(rptr->array[i]);
- if (isParent(rptr->array[i])) {
- //parents go immediately
- atomic_dec(&rptr->item.total);
- atomic_dec(&T->item.total);
- }
- }
- }
- rptr->item.status=READY;
- if (rptr->item.next==NULL) {
- break;
- }
- if (rptr->item.total!=0) {
- haveread=TRUE;
- } else if ((BinItem*)rptr==val) {
- val=val->next;
- }
+ if (isParent(rptr->array[i])) {
+ //parents go immediately
+ atomic_dec(&rptr->item.total);
+ atomic_dec(&T->item.total);
+ }
+ }
+ }
+ rptr->item.status=READY;
+ if (rptr->item.next==NULL) {
+ break;
+ }
+ if (rptr->item.total!=0) {
+ haveread=TRUE;
+ } else if ((BinItem*)rptr==val) {
+ val=val->next;
+ }
} else if(isWriteBinItem(ptr)) {
- if (haveread)
- break;
- if(ptr->status==NOTREADY){
+ if (haveread)
+ break;
+ if(ptr->status==NOTREADY) {
resolveDependencies(((WriteBinItem*)ptr)->val);
ptr->status=READY;
- if(isParent(((WriteBinItem*)ptr)->val)){
+ if(isParent(((WriteBinItem*)ptr)->val)) {
atomic_dec(&T->item.total);
- val=val->next;
- }else
+ val=val->next;
+ } else
break;
- }else{ // write bin is already resolved
+ } else { // write bin is already resolved
val=val->next;
}
/*
- if(ptr->status==NOTREADY) {
- resolveDependencies(((WriteBinItem*)ptr)->val);
- }
- ptr->status=READY;
- if (isParent(((WriteBinItem*)ptr)->val)) {
+ if(ptr->status==NOTREADY) {
+ resolveDependencies(((WriteBinItem*)ptr)->val);
+ }
+ ptr->status=READY;
+ if (isParent(((WriteBinItem*)ptr)->val)) {
atomic_dec(&T->item.total);
//val=val->next;
val=ptr->next;
- } else
+ } else
break;
- }
- */
- }
+ }
+ */
+ }
ptr=ptr->next;
}
T->array[key]->head=val; // release lock
atomic_dec(&V->item.total);
#ifdef RCR
REntry* val=NULL;
- val=(REntry*)LOCKXCHG((unsigned INTPTR*)&(V->array[r->index]), (unsigned INTPTR)val);
- if (val!=NULL) {
+ val=(REntry*)LOCKXCHG((unsigned INTPTR*)&(V->array[r->index]), (unsigned INTPTR)val);
+ if (val!=NULL) {
//release reference if we haven't cleared this one
#if !defined(OOO_DISABLE_TASKMEMPOOL)&&defined(RCR)
if (atomic_sub_and_test(1, &r->count))
}
-void RESOLVEHASHTABLE(MemoryQueue *Q, Hashtable *T) {
+void RESOLVEHASHTABLE(MemoryQueue *Q, Hashtable *T) {
int binidx;
- for (binidx=0;binidx<NUMBINS;binidx++) {
+ for (binidx=0; binidx<NUMBINS; binidx++) {
BinElement* bin=T->array[binidx];
BinItem* val;
do {
val=(BinItem*)1;
val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val);
} while (val==(BinItem*)1);
- //at this point have locked bin
- int haveread=FALSE;
+ //at this point have locked bin
+ int haveread=FALSE;
BinItem* ptr=val;
if(ptr!=NULL&&ptr->status==NOTREADY) {
do {
- if (isWriteBinItem(ptr)) {
- if (haveread)
- break;
+ if (isWriteBinItem(ptr)) {
+ if (haveread)
+ break;
resolveDependencies(((WriteBinItem*)ptr)->val);
- ptr->status=READY;
- if (isParent(((WriteBinItem*)ptr)->val)) {
- atomic_dec(&T->item.total);
- val=val->next;
- } else
+ ptr->status=READY;
+ if (isParent(((WriteBinItem*)ptr)->val)) {
+ atomic_dec(&T->item.total);
+ val=val->next;
+ } else
break;
- } else if (isReadBinItem(ptr)) {
+ } else if (isReadBinItem(ptr)) {
int i;
ReadBinItem* rptr=(ReadBinItem*)ptr;
- for(i=0;i<rptr->index;i++) {
+ for(i=0; i<rptr->index; i++) {
resolveDependencies(rptr->array[i]);
if (isParent(rptr->array[i])) {
- atomic_dec(&rptr->item.total);
- atomic_dec(&T->item.total);
- }
- }
- if (rptr->item.next==NULL||rptr->item.total!=0) {
- haveread=TRUE;
- } else if((BinItem*)rptr==val) {
- val=val->next;
- }
- rptr->item.status=READY;
- }
- ptr=ptr->next;
- }while(ptr!=NULL);
+ atomic_dec(&rptr->item.total);
+ atomic_dec(&T->item.total);
+ }
+ }
+ if (rptr->item.next==NULL||rptr->item.total!=0) {
+ haveread=TRUE;
+ } else if((BinItem*)rptr==val) {
+ val=val->next;
+ }
+ rptr->item.status=READY;
+ }
+ ptr=ptr->next;
+ } while(ptr!=NULL);
}
bin->head=val; // released lock;
}
//handle ready cases
while(TRUE) {
//enqueue everything
- for (i=0;i<NUMITEMS;i++) {
+ for (i=0; i<NUMITEMS; i++) {
REntry* val=NULL;
- val=(REntry*)LOCKXCHG((unsigned INTPTR*)&(tmp->array[i]), (unsigned INTPTR)val);
- if (val!=NULL) {
- SESEcommon *seseCommon=val->seseRec;
+ val=(REntry*)LOCKXCHG((unsigned INTPTR*)&(tmp->array[i]), (unsigned INTPTR)val);
+ if (val!=NULL) {
+ SESEcommon *seseCommon=val->seseRec;
resolveDependencies(val);
if (isParent(val)) {
atomic_dec(&tmp->item.total);
#if defined(RCR)&&!defined(OOO_DISABLE_TASKMEMPOOL)
else if (atomic_sub_and_test(1, &((REntry *)val)->count))
poolfreeinto(q->rentrypool,val);
- RELEASE_REFERENCE_TO(seseCommon);
+ RELEASE_REFERENCE_TO(seseCommon);
#endif
}
}
}
}
- void RESOLVESCC(MemoryQueue *q, SCC *S) {
+void RESOLVESCC(MemoryQueue *q, SCC *S) {
//precondition: SCC's state is READY
void* flag=NULL;
- flag=(void*)LOCKXCHG((unsigned INTPTR*)&(S->val), (unsigned INTPTR)flag);
+ flag=(void*)LOCKXCHG((unsigned INTPTR*)&(S->val), (unsigned INTPTR)flag);
if (flag!=NULL) {
SESEcommon *seseCommon=((REntry *)flag)->seseRec;
resolveDependencies(flag);
}
-void resolveDependencies(REntry* rentry){
+void resolveDependencies(REntry* rentry) {
SESEcommon* seseCommon=(SESEcommon*)rentry->seseRec;
int type=rentry->type;
#ifdef RCR
mask=mask>>shift;
index+=shift;
if(atomic_sub_and_test(1, &array[index].flag)) {
- if(atomic_sub_and_test(1, &(seseCommon->unresolvedDependencies)))
+ if(atomic_sub_and_test(1, &(seseCommon->unresolvedDependencies)))
workScheduleSubmit((void *)seseCommon);
}
}
printf("ERROR: REntry type %d should never be generated in RCR..\n", rentry->type);
}
#else
- if(type==READ || type==WRITE || type==COARSE || type==SCCITEM){
- if( atomic_sub_and_test(1, &(seseCommon->unresolvedDependencies)) ){
+ if(type==READ || type==WRITE || type==COARSE || type==SCCITEM) {
+ if( atomic_sub_and_test(1, &(seseCommon->unresolvedDependencies)) ) {
workScheduleSubmit(seseCommon);
- }
- }else if(type==PARENTREAD || type==PARENTWRITE || type==PARENTCOARSE){
+ }
+ } else if(type==PARENTREAD || type==PARENTWRITE || type==PARENTCOARSE) {
psem_give_tag(rentry->parentStallSem, rentry->tag);
}
#endif
}
-void INITIALIZEBUF(MemoryQueue * q){
+void INITIALIZEBUF(MemoryQueue * q) {
int i=0;
- for(i=0; i<NUMBINS; i++){
+ for(i=0; i<NUMBINS; i++) {
q->binbuf[i]=NULL;
}
q->bufcount=0;
}
-void ADDRENTRYTOBUF(MemoryQueue * q, REntry * r){
+void ADDRENTRYTOBUF(MemoryQueue * q, REntry * r) {
q->buf[q->bufcount]=r;
q->bufcount++;
}
-int RESOLVEBUFFORHASHTABLE(MemoryQueue * q, Hashtable* table, SESEcommon *seseCommon){
+int RESOLVEBUFFORHASHTABLE(MemoryQueue * q, Hashtable* table, SESEcommon *seseCommon) {
int i;
- // first phase: only consider write rentry
- for(i=0; i<q->bufcount;i++){
+ // first phase: only consider write rentry
+ for(i=0; i<q->bufcount; i++) {
REntry *r=q->buf[i];
- if(r->type==WRITE){
- int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
- if(q->binbuf[key]==NULL){
+ if(r->type==WRITE) {
+ int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer));
+ if(q->binbuf[key]==NULL) {
// for multiple writes, add only the first write that hashes to the same bin
- q->binbuf[key]=r;
- }else{
+ q->binbuf[key]=r;
+ } else {
q->buf[i]=NULL;
}
}
}
// second phase: enqueue read items if it is eligible
- for(i=0; i<q->bufcount;i++){
- REntry *r=q->buf[i];
- if(r!=NULL && r->type==READ){
- int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
- if(q->binbuf[key]==NULL){
+ for(i=0; i<q->bufcount; i++) {
+ REntry *r=q->buf[i];
+ if(r!=NULL && r->type==READ) {
+ int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer));
+ if(q->binbuf[key]==NULL) {
// read item that hashes to the bin which doen't contain any write
seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
- if(ADDTABLEITEM(table, r, FALSE)==READY){
+ if(ADDTABLEITEM(table, r, FALSE)==READY) {
resolveDependencies(r);
}
}
q->buf[i]=NULL;
}
}
-
+
// then, add only one of write items that hashes to the same bin
- for(i=0; i<q->bufcount;i++){
- REntry *r=q->buf[i];
- if(r!=NULL){
+ for(i=0; i<q->bufcount; i++) {
+ REntry *r=q->buf[i];
+ if(r!=NULL) {
seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
- if(ADDTABLEITEM(table, r, FALSE)==READY){
+ if(ADDTABLEITEM(table, r, FALSE)==READY) {
resolveDependencies(r);
- }
+ }
}
}
}
#ifndef RCR
-int RESOLVEBUF(MemoryQueue * q, SESEcommon *seseCommon){
+int RESOLVEBUF(MemoryQueue * q, SESEcommon *seseCommon) {
int localCount=0;
int i;
// check if every waiting entry is resolved
// if not, defer every items for hashtable until it is resolved.
int unresolved=FALSE;
- for(i=0; i<q->bufcount;i++){
- REntry *r=q->buf[i];
- if(*(r->pointer)==0){
- unresolved=TRUE;
- }
+ for(i=0; i<q->bufcount; i++) {
+ REntry *r=q->buf[i];
+ if(*(r->pointer)==0) {
+ unresolved=TRUE;
+ }
}
- if(unresolved==TRUE){
- for(i=0; i<q->bufcount;i++){
+ if(unresolved==TRUE) {
+ for(i=0; i<q->bufcount; i++) {
REntry *r=q->buf[i];
r->queue=q;
r->isBufMode=TRUE;
- if(ADDRENTRY(q,r)==NOTREADY){
+ if(ADDRENTRY(q,r)==NOTREADY) {
localCount++;
}
}
}
// first phase: only consider write rentry
- for(i=0; i<q->bufcount;i++){
+ for(i=0; i<q->bufcount; i++) {
REntry *r=q->buf[i];
- if(r->type==WRITE){
- int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
- if(q->binbuf[key]==NULL){
+ if(r->type==WRITE) {
+ int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer));
+ if(q->binbuf[key]==NULL) {
// for multiple writes, add only the first write that hashes to the same bin
- q->binbuf[key]=r;
- }else{
+ q->binbuf[key]=r;
+ } else {
q->buf[i]=NULL;
}
}
}
// second phase: enqueue read items if it is eligible
- for(i=0; i<q->bufcount;i++){
- REntry *r=q->buf[i];
- if(r!=NULL && r->type==READ){
- int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
- if(q->binbuf[key]==NULL){
+ for(i=0; i<q->bufcount; i++) {
+ REntry *r=q->buf[i];
+ if(r!=NULL && r->type==READ) {
+ int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer));
+ if(q->binbuf[key]==NULL) {
// read item that hashes to the bin which doen't contain any write
seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
- if(ADDRENTRY(q,r)==NOTREADY){
+ if(ADDRENTRY(q,r)==NOTREADY) {
localCount++;
}
}
q->buf[i]=NULL;
}
}
-
+
// then, add only one of write items that hashes to the same bin
- for(i=0; i<q->bufcount;i++){
- REntry *r=q->buf[i];
- if(r!=NULL){
+ for(i=0; i<q->bufcount; i++) {
+ REntry *r=q->buf[i];
+ if(r!=NULL) {
seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
- if(ADDRENTRY(q,r)==NOTREADY){
+ if(ADDRENTRY(q,r)==NOTREADY) {
localCount++;
}
}
}
-void resolvePointer(REntry* rentry){
+void resolvePointer(REntry* rentry) {
Hashtable* table=(Hashtable *)rentry->qitem;
MemoryQueue* queue;
// we don't need to consider unresolved cases for coarse rentries.
// or if resolved already before related rentry is enqueued to the waiting queue
- if(rentry->type==COARSE ||
+ if(rentry->type==COARSE ||
rentry->type==PARENTCOARSE ||
- rentry->type==SCCITEM ||
+ rentry->type==SCCITEM ||
table==NULL ||
- table->unresolvedQueue==NULL){
+ table->unresolvedQueue==NULL) {
return;
}
struct Queue* val;
- do {
+ do {
val=(struct Queue*)0x1;
val=(struct Queue*)LOCKXCHG((unsigned INTPTR*)&(table->unresolvedQueue), (unsigned INTPTR)val);
- } while(val==(struct Queue*)0x1);
- if(val!=NULL &&
+ } while(val==(struct Queue*)0x1);
+ if(val!=NULL &&
getHead(val)!=NULL &&
- getHead(val)->objectptr==rentry){
+ getHead(val)->objectptr==rentry) {
// handling pointer is the first item of the queue
// start to resolve until it reaches unresolved pointer or end of queue
INTPTR currentSESE=0;
- do{
+ do {
struct QueueItem* head=getHead(val);
- if(head!=NULL){
- REntry* rentry=(REntry*)head->objectptr;
- if(*(rentry->pointer)==0){
+ if(head!=NULL) {
+ REntry* rentry=(REntry*)head->objectptr;
+ if(*(rentry->pointer)==0) {
// encounters following unresolved pointer
- table->unresolvedQueue=val;//released lock
+ table->unresolvedQueue=val; //released lock
break;
}
removeItem(val,head);
//now, address is resolved
-
+
//check if rentry is buffer mode
- if(rentry->isBufMode==TRUE){
- if(currentSESE==0){
+ if(rentry->isBufMode==TRUE) {
+ if(currentSESE==0) {
queue=rentry->queue;
INITIALIZEBUF(queue);
currentSESE=(INTPTR)rentry;
ADDRENTRYTOBUF(queue,rentry);
- } else if(currentSESE==(INTPTR)rentry){
+ } else if(currentSESE==(INTPTR)rentry) {
ADDRENTRYTOBUF(queue,rentry);
- } else if(currentSESE!=(INTPTR)rentry){
+ } else if(currentSESE!=(INTPTR)rentry) {
RESOLVEBUFFORHASHTABLE(queue,table,(SESEcommon*)rentry->seseRec);
currentSESE=(INTPTR)rentry;
INITIALIZEBUF(queue);
ADDRENTRYTOBUF(rentry->queue,rentry);
}
- }else{
- if(currentSESE!=0){
+ } else {
+ if(currentSESE!=0) {
//previous SESE has buf mode, need to invoke resolve buffer
RESOLVEBUFFORHASHTABLE(queue,table,(SESEcommon*)rentry->seseRec);
currentSESE=0;
}
//normal mode
- if(ADDTABLEITEM(table, rentry, FALSE)==READY){
+ if(ADDTABLEITEM(table, rentry, FALSE)==READY) {
resolveDependencies(rentry);
}
}
- }else{
+ } else {
table->unresolvedQueue=NULL; // set hashtable as normal-mode.
break;
}
- }while(TRUE);
- }else{
+ } while(TRUE);
+ } else {
// resolved rentry is not head of queue
- table->unresolvedQueue=val;//released lock;
- }
+ table->unresolvedQueue=val; //released lock;
+ }
}
#endif
// these are useful for interpreting an INTPTR to an
// Object at runtime to retrieve the object's type
// or object id (OID)
-#define OBJPTRPTR_2_OBJTYPE( opp ) ((struct ___Object___*)*(opp))->type
-#define OBJPTRPTR_2_OBJOID( opp ) ((struct ___Object___*)*(opp))->oid
+#define OBJPTRPTR_2_OBJTYPE(opp) ((struct ___Object___*)*(opp))->type
+#define OBJPTRPTR_2_OBJOID(opp) ((struct ___Object___*)*(opp))->oid
// forwarding list elements is a linked
// a lot of items are on the list
#define FLIST_ITEMS_PER_ELEMENT 30
typedef struct ForwardingListElement_t {
- int numItems;
+ int numItems;
struct ForwardingListElement_t* nextElement;
- INTPTR items[FLIST_ITEMS_PER_ELEMENT];
+ INTPTR items[FLIST_ITEMS_PER_ELEMENT];
} ForwardingListElement;
struct MemPool_t;
// generated SESE record to this can be used, because
// the common structure is always the first item in a
// customized SESE record
-typedef struct SESEcommon_t {
+typedef struct SESEcommon_t {
// the identifier for the class of sese's that
// are instances of one particular static code block
// IMPORTANT: the class ID must be the first field of
// the task record so task dispatch works correctly!
int classID;
- volatile int unresolvedDependencies;
+ volatile int unresolvedDependencies;
// a parent waits on this semaphore when stalling on
// this child, the child gives it at its SESE exit
psemaphore* parentsStallSem;
-
+
// NOTE: first element is embedded in the task
// record, so don't free it!
//ForwardingListElement forwardList;
struct Queue forwardList;
- volatile int doneExecuting;
- volatile int numRunningChildren;
+ volatile int doneExecuting;
+ volatile int numRunningChildren;
struct SESEcommon_t* parent;
-
+
int numMemoryQueue;
int rentryIdx;
int unresolvedRentryIdx;
// the lock guards the following data SESE's
// use to coordinate with one another
pthread_mutex_t lock;
- pthread_cond_t runningChildrenCond;
+ pthread_cond_t runningChildrenCond;
} SESEcommon;
// a thread-local var refers to the currently
-typedef struct REntry_t{
- // fine read:0, fine write:1, parent read:2,
+typedef struct REntry_t {
+ // fine read:0, fine write:1, parent read:2,
// parent write:3 coarse: 4, parent coarse:5, scc: 6
int type;
#ifdef RCR
struct rcrRecord *next;
};
-typedef struct SESEstall_t {
+typedef struct SESEstall_t {
SESEcommon common;
int size;
void * next;
int total; //total non-retired
int status; //NOTREADY, READY
struct MemoryQueueItem_t *next;
-
+
} MemoryQueueItem;
typedef struct MemoryQueue_t {
MemoryQueueItem * head;
- MemoryQueueItem * tail;
+ MemoryQueueItem * tail;
REntry * binbuf[NUMBINS];
REntry * buf[NUMRENTRY];
int bufcount;
-static inline void ADD_FORWARD_ITEM( ForwardingListElement* e,
- SESEcommon* s ) {
+static inline void ADD_FORWARD_ITEM(ForwardingListElement* e,
+ SESEcommon* s) {
//atomic_inc( &(s->refCount) );
}
-// simple mechanical allocation and
+// simple mechanical allocation and
// deallocation of SESE records
-void* mlpAllocSESErecord( int size );
-void mlpFreeSESErecord( SESEcommon* seseRecord );
+void* mlpAllocSESErecord(int size);
+void mlpFreeSESErecord(SESEcommon* seseRecord);
MemoryQueue** mlpCreateMemoryQueueArray(int numMemoryQueue);
REntry* mlpCreateFineREntry(MemoryQueue *q, int type, SESEcommon* seseToIssue, void* dynID);
void resolvePointer(REntry* rentry);
#endif
-static inline void ADD_REFERENCE_TO( SESEcommon* seseRec ) {
- atomic_inc( &(seseRec->refCount) );
+static inline void ADD_REFERENCE_TO(SESEcommon* seseRec) {
+ atomic_inc(&(seseRec->refCount));
}
-static inline int RELEASE_REFERENCE_TO( SESEcommon* seseRec ) {
- if( atomic_sub_and_test( 1, &(seseRec->refCount) ) ) {
- poolfreeinto( seseRec->parent->taskRecordMemPool, seseRec );
+static inline int RELEASE_REFERENCE_TO(SESEcommon* seseRec) {
+ if( atomic_sub_and_test(1, &(seseRec->refCount)) ) {
+ poolfreeinto(seseRec->parent->taskRecordMemPool, seseRec);
return 1;
}
return 0;
}
-static inline int RELEASE_REFERENCES_TO( SESEcommon* seseRec, int refCount) {
- if( atomic_sub_and_test( refCount, &(seseRec->refCount) ) ) {
- poolfreeinto( seseRec->parent->taskRecordMemPool, seseRec );
+static inline int RELEASE_REFERENCES_TO(SESEcommon* seseRec, int refCount) {
+ if( atomic_sub_and_test(refCount, &(seseRec->refCount)) ) {
+ poolfreeinto(seseRec->parent->taskRecordMemPool, seseRec);
return 1;
}
return 0;
////////////////////////////////////////////////
-//
+//
// Some available debug versions of the above
// pool allocation-related helpers. The lower
// 'x' appended to names means they are not hooked
// use them for debugging
//
////////////////////////////////////////////////
-#define ADD_REFERENCE_TOx(x) atomic_inc( &((x)->refCount) ); printf("0x%x ADD 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__);
+#define ADD_REFERENCE_TOx(x) atomic_inc(&((x)->refCount)); printf("0x%x ADD 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__);
-#define RELEASE_REFERENCE_TOx(x) if (atomic_sub_and_test(1, &((x)->refCount))) {poolfreeinto(x->parent->taskRecordMemPool, x);printf("0x%x REL 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__);}
+#define RELEASE_REFERENCE_TOx(x) if (atomic_sub_and_test(1, &((x)->refCount))) {poolfreeinto(x->parent->taskRecordMemPool, x); printf("0x%x REL 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__); }
#define CHECK_RECORDx(x) { \
if( ((SESEcommon*)(x))->refCount < 0 || \
((SESEcommon*)(x))->refCount < 0 ) { \
- printf( "Acquired 0x%x from poolalloc, with refCount=%d\n", (INTPTR)(x), ((SESEcommon*)(x))->refCount ); } \
- }
+ printf("Acquired 0x%x from poolalloc, with refCount=%d\n", (INTPTR)(x), ((SESEcommon*)(x))->refCount); } \
+}
// pass this into the poolcreate so it will run your
// custom init code ONLY for fresh records, reused records
// can be returned as is
-void freshTaskRecordInitializer( void* seseRecord );
-
+void freshTaskRecordInitializer(void* seseRecord);
+
#endif /* __MLP_RUNTIME__ */
BARRIER();
VAR(___this___)->tid=0;
BARRIER();
-
+
while(notifycount==VAR(___this___)->notifycount) {
#ifdef PRECISE_GC
if (unlikely(needtocollect))
struct lockvector *lptr=&lvector;
#endif
struct lockpair *lpair=&lptr->locks[--lptr->index];
-
+
if (lpair->islastlock) {
MBARRIER();
lpair->object->tid=0;
#ifdef D___Object______nativehashCode____
int CALL01(___Object______nativehashCode____, struct ___Object___ * ___this___);
-#endif
+#endif
#ifdef D___Object______hashCode____
int CALL01(___Object______hashCode____, struct ___Object___ * ___this___);
#endif
#endif
#include "mlp_runtime.h"
#include "workschedule.h"
-extern volatile int numWorkSchedWorkers;
+extern volatile int numWorkSchedWorkers;
extern deque* deques;
__thread SESEcommon* seseCommon;
void searchoojroots() {
#ifdef SQUEUE
{
- int i;
+ int i;
deque* dq;
dequeItem *di;
- int j;
+ int j;
// goes over ready-to-run SESEs
for( i = 0; i < numWorkSchedWorkers; ++i ) {
di=dq->head;
do {
- // check all the relevant indices of this
- // node in the deque, noting if we are in
- // the top/bottom node which can be partially
- // full
+ // check all the relevant indices of this
+ // node in the deque, noting if we are in
+ // the top/bottom node which can be partially
+ // full
- // WHAT?
- //SESEcommon* common = (SESEcommon*) n->itsDataArr[j];
- //if(common==seseCommon){
- // skip the current running SESE
- // continue;
- //}
+ // WHAT?
+ //SESEcommon* common = (SESEcommon*) n->itsDataArr[j];
+ //if(common==seseCommon){
+ // skip the current running SESE
+ // continue;
+ //}
di=(dequeItem *) EXTRACTPTR((INTPTR)di);
SESEcommon* seseRec = (SESEcommon*) di->work;
if (seseRec!=NULL) {
- struct garbagelist* gl = (struct garbagelist*) &(seseRec[1]);
- struct garbagelist* glroot = gl;
-
- updateAscendantSESE( seseRec );
-
- while( gl != NULL ) {
- int k;
- for( k = 0; k < gl->size; k++ ) {
- void* orig = gl->array[k];
- ENQUEUE( orig, gl->array[k] );
- }
- gl = gl->next;
- }
- }
- // we only have to move across the nodes
- // of the deque if the top and bottom are
- // not the same already
- di=di->next;
- } while( di !=NULL) ;
+ struct garbagelist* gl = (struct garbagelist*) &(seseRec[1]);
+ struct garbagelist* glroot = gl;
+
+ updateAscendantSESE(seseRec);
+
+ while( gl != NULL ) {
+ int k;
+ for( k = 0; k < gl->size; k++ ) {
+ void* orig = gl->array[k];
+ ENQUEUE(orig, gl->array[k]);
+ }
+ gl = gl->next;
+ }
+ }
+ // we only have to move across the nodes
+ // of the deque if the top and bottom are
+ // not the same already
+ di=di->next;
+ } while( di !=NULL);
}
- }
+ }
#else
{
- int i;
+ int i;
deque* dq;
dequeNode* botNode;
- int botIndx;
+ int botIndx;
dequeNode* topNode;
- int topIndx;
+ int topIndx;
dequeNode* n;
- int j;
- int jLo;
- int jHi;
-
+ int j;
+ int jLo;
+ int jHi;
+
// goes over ready-to-run SESEs
for( i = 0; i < numWorkSchedWorkers; ++i ) {
dq = &(deques[i]);
-
- botNode = dqDecodePtr( dq->bottom );
- botIndx = dqDecodeIdx( dq->bottom );
-
- topNode = dqDecodePtr( dq->top );
- topIndx = dqDecodeIdx( dq->top );
-
-
+
+ botNode = dqDecodePtr(dq->bottom);
+ botIndx = dqDecodeIdx(dq->bottom);
+
+ topNode = dqDecodePtr(dq->top);
+ topIndx = dqDecodeIdx(dq->top);
+
+
n = botNode;
do {
// check all the relevant indices of this
// node in the deque, noting if we are in
// the top/bottom node which can be partially
// full
- if( n == botNode ) { jLo = botIndx; } else { jLo = 0; }
- if( n == topNode ) { jHi = topIndx; } else { jHi = DQNODE_ARRAYSIZE; }
-
+ if( n == botNode ) {
+ jLo = botIndx;
+ } else { jLo = 0; }
+ if( n == topNode ) {
+ jHi = topIndx;
+ } else { jHi = DQNODE_ARRAYSIZE; }
+
for( j = jLo; j < jHi; ++j ) {
-
+
// WHAT?
//SESEcommon* common = (SESEcommon*) n->itsDataArr[j];
//if(common==seseCommon){
// continue;
//}
-
- SESEcommon* seseRec = (SESEcommon*) n->itsDataArr[j];
-
+
+ SESEcommon* seseRec = (SESEcommon*) n->itsDataArr[j];
+
struct garbagelist* gl = (struct garbagelist*) &(seseRec[1]);
- struct garbagelist* glroot = gl;
-
- updateAscendantSESE( seseRec );
-
- while( gl != NULL ) {
- int k;
- for( k = 0; k < gl->size; k++ ) {
- void* orig = gl->array[k];
- ENQUEUE( orig, gl->array[k] );
+ struct garbagelist* glroot = gl;
+
+ updateAscendantSESE(seseRec);
+
+ while( gl != NULL ) {
+ int k;
+ for( k = 0; k < gl->size; k++ ) {
+ void* orig = gl->array[k];
+ ENQUEUE(orig, gl->array[k]);
}
gl = gl->next;
}
}
-
+
// we only have to move across the nodes
// of the deque if the top and bottom are
// not the same already
- if( botNode != topNode ) {
- n = n->next;
- }
+ if( botNode != topNode ) {
+ n = n->next;
+ }
} while( n != topNode );
}
}
updateForwardList(struct Queue *forwardList, int prevUpdate) {
struct QueueItem * fqItem=getHead(forwardList);
- while(fqItem!=NULL){
+ while(fqItem!=NULL) {
SESEcommon* seseRec = (SESEcommon*)(fqItem->objectptr);
struct garbagelist * gl=(struct garbagelist *)&(seseRec[1]);
- if(prevUpdate==TRUE){
- updateAscendantSESE(seseRec);
+ if(prevUpdate==TRUE) {
+ updateAscendantSESE(seseRec);
}
// do something here
while(gl!=NULL) {
int i;
for(i=0; i<gl->size; i++) {
- void * orig=gl->array[i];
- ENQUEUE(orig, gl->array[i]);
+ void * orig=gl->array[i];
+ ENQUEUE(orig, gl->array[i]);
}
gl=gl->next;
- }
+ }
// iterate forwarding list of seseRec
struct Queue* fList=&seseRec->forwardList;
- updateForwardList(fList,prevUpdate);
+ updateForwardList(fList,prevUpdate);
fqItem=getNextQueueItem(fqItem);
- }
+ }
}
-updateMemoryQueue(SESEcommon* seseParent){
+updateMemoryQueue(SESEcommon* seseParent) {
// update memory queue
int i,binidx;
- for(i=0; i<seseParent->numMemoryQueue; i++){
+ for(i=0; i<seseParent->numMemoryQueue; i++) {
MemoryQueue *memoryQueue=seseParent->memoryQueueArray[i];
MemoryQueueItem *memoryItem=memoryQueue->head;
- while(memoryItem!=NULL){
- if(memoryItem->type==HASHTABLE){
+ while(memoryItem!=NULL) {
+ if(memoryItem->type==HASHTABLE) {
Hashtable *ht=(Hashtable*)memoryItem;
- for(binidx=0; binidx<NUMBINS; binidx++){
+ for(binidx=0; binidx<NUMBINS; binidx++) {
BinElement *bin=ht->array[binidx];
BinItem *binItem=bin->head;
- while(binItem!=NULL){
- if(binItem->type==READBIN){
+ while(binItem!=NULL) {
+ if(binItem->type==READBIN) {
ReadBinItem* readBinItem=(ReadBinItem*)binItem;
int ridx;
- for(ridx=0; ridx<readBinItem->index; ridx++){
+ for(ridx=0; ridx<readBinItem->index; ridx++) {
REntry *rentry=readBinItem->array[ridx];
- SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
+ SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
updateAscendantSESE(seseRec);
while(gl!=NULL) {
ENQUEUE(orig, gl->array[i]);
}
gl=gl->next;
- }
- }
- }else{ //writebin
+ }
+ }
+ } else { //writebin
REntry *rentry=((WriteBinItem*)binItem)->val;
- SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
- struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
+ SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
+ struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
updateAscendantSESE(seseRec);
while(gl!=NULL) {
int i;
ENQUEUE(orig, gl->array[i]);
}
gl=gl->next;
- }
+ }
}
binItem=binItem->next;
}
}
- }else if(memoryItem->type==VECTOR){
+ } else if(memoryItem->type==VECTOR) {
Vector *vt=(Vector*)memoryItem;
int idx;
- for(idx=0; idx<vt->index; idx++){
+ for(idx=0; idx<vt->index; idx++) {
REntry *rentry=vt->array[idx];
- if(rentry!=NULL){
- SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
+ if(rentry!=NULL) {
+ SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
updateAscendantSESE(seseRec);
while(gl!=NULL) {
ENQUEUE(orig, gl->array[i]);
}
gl=gl->next;
- }
+ }
}
}
- }else if(memoryItem->type==SINGLEITEM){
+ } else if(memoryItem->type==SINGLEITEM) {
SCC *scc=(SCC*)memoryItem;
REntry *rentry=scc->val;
- if(rentry!=NULL){
- SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
+ if(rentry!=NULL) {
+ SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
updateAscendantSESE(seseRec);
while(gl!=NULL) {
ENQUEUE(orig, gl->array[i]);
}
gl=gl->next;
- }
+ }
}
}
memoryItem=memoryItem->next;
}
- }
- }
-
- updateAscendantSESE(SESEcommon* seseRec){
+ }
+}
+
+updateAscendantSESE(SESEcommon* seseRec) {
int prevIdx;
- for(prevIdx=0; prevIdx<(seseRec->numDependentSESErecords); prevIdx++){
- SESEcommon* prevSESE = (SESEcommon*)
- (
- ((INTPTR)seseRec) +
- seseRec->offsetToDepSESErecords +
- (sizeof(INTPTR)*prevIdx)
- );
-
- if(prevSESE!=NULL){
+ for(prevIdx=0; prevIdx<(seseRec->numDependentSESErecords); prevIdx++) {
+ SESEcommon* prevSESE = (SESEcommon*)
+ (
+ ((INTPTR)seseRec) +
+ seseRec->offsetToDepSESErecords +
+ (sizeof(INTPTR)*prevIdx)
+ );
+
+ if(prevSESE!=NULL) {
struct garbagelist * prevgl=(struct garbagelist *)&(((SESEcommon*)(prevSESE))[1]);
while(prevgl!=NULL) {
int i;
ENQUEUE(orig, prevgl->array[i]);
}
prevgl=prevgl->next;
- }
+ }
}
}
- }
+}
#endif
#include "psemaphore.h"
-void psem_init( psemaphore* sem ) {
- pthread_mutex_init( &(sem->lock), NULL );
- pthread_cond_init ( &(sem->cond), NULL );
+void psem_init(psemaphore* sem) {
+ pthread_mutex_init(&(sem->lock), NULL);
+ pthread_cond_init(&(sem->cond), NULL);
sem->signaled = 0;
sem->tag = 0;
}
-void psem_take( psemaphore* sem, struct garbagelist* gl ) {
- pthread_mutex_lock( &(sem->lock) );
+void psem_take(psemaphore* sem, struct garbagelist* gl) {
+ pthread_mutex_lock(&(sem->lock) );
if( !sem->signaled ) {
- stopforgc( gl );
+ stopforgc(gl);
do {
- pthread_cond_wait( &(sem->cond), &(sem->lock) );
+ pthread_cond_wait(&(sem->cond), &(sem->lock) );
} while( !sem->signaled );
restartaftergc();
}
- pthread_mutex_unlock( &(sem->lock) );
+ pthread_mutex_unlock(&(sem->lock) );
}
-void psem_give( psemaphore* sem) {
- pthread_mutex_lock ( &(sem->lock) );
+void psem_give(psemaphore* sem) {
+ pthread_mutex_lock(&(sem->lock) );
sem->signaled = 1;
- pthread_cond_signal ( &(sem->cond) );
- pthread_mutex_unlock( &(sem->lock) );
+ pthread_cond_signal(&(sem->cond) );
+ pthread_mutex_unlock(&(sem->lock) );
}
-void psem_give_tag( psemaphore* sem, int tag) {
- pthread_mutex_lock ( &(sem->lock) );
+void psem_give_tag(psemaphore* sem, int tag) {
+ pthread_mutex_lock(&(sem->lock) );
if (sem->tag==tag) {
sem->signaled = 1;
- pthread_cond_signal ( &(sem->cond) );
+ pthread_cond_signal(&(sem->cond) );
}
- pthread_mutex_unlock( &(sem->lock) );
+ pthread_mutex_unlock(&(sem->lock) );
}
-void psem_reset( psemaphore* sem ) {
- pthread_mutex_lock ( &(sem->lock) );
+void psem_reset(psemaphore* sem) {
+ pthread_mutex_lock(&(sem->lock) );
sem->tag++;
sem->signaled = 0;
- pthread_mutex_unlock( &(sem->lock) );
+ pthread_mutex_unlock(&(sem->lock) );
}
typedef struct psemaphore_t {
pthread_mutex_t lock;
- pthread_cond_t cond;
- int signaled;
- int tag;
+ pthread_cond_t cond;
+ int signaled;
+ int tag;
} psemaphore;
-void psem_init ( psemaphore* sem );
-void psem_take ( psemaphore* sem, struct garbagelist* gl );
-void psem_give_tag ( psemaphore* sem, int tag );
-void psem_give ( psemaphore* sem);
-void psem_reset( psemaphore* sem );
+void psem_init(psemaphore* sem);
+void psem_take(psemaphore* sem, struct garbagelist* gl);
+void psem_give_tag(psemaphore* sem, int tag);
+void psem_give(psemaphore* sem);
+void psem_reset(psemaphore* sem);
#endif // ___PSEMAPHORE_H__
#include "DSTM/interface_recovery/altprelookup.h"
#ifdef RECOVERYSTATS
- extern int numRecovery;
- extern unsigned int deadMachine[8];
- extern unsigned int sizeOfRedupedData[8];
- extern double elapsedTime[8];
+extern int numRecovery;
+extern unsigned int deadMachine[8];
+extern unsigned int sizeOfRedupedData[8];
+extern double elapsedTime[8];
#endif
-
+
#else
#include "DSTM/interface/dstm.h"
#include "DSTM/interface/altprelookup.h"
#ifdef EVENTMONITOR
#include "monitor.h"
__thread int objcount=0;
-#define ASSIGNUID(x) { \
- int number=((objcount++)<<EVTHREADSHIFT)|threadnum; \
- x->objuid=number; \
- }
+#define ASSIGNUID(x) { \
+ int number=((objcount++)<<EVTHREADSHIFT)|threadnum; \
+ x->objuid=number; \
+}
#else
#define ASSIGNUID(x)
#endif
#ifdef D___Double______nativeparsedouble____L___String___
double CALL01(___Double______nativeparsedouble____L___String___,struct ___String___ * ___str___) {
int length=VAR(___str___)->___count___;
- int maxlength=(length>60)?60:length;
+ int maxlength=(length>60) ? 60 : length;
char str[maxlength+1];
struct ArrayObject * chararray=VAR(___str___)->___value___;
int i;
}
#endif
-#ifdef D___Double______nativeparsedouble_____AR_B_I_I
+#ifdef D___Double______nativeparsedouble_____AR_B_I_I
double CALL23(___Double______nativeparsedouble_____AR_B_I_I, int start, int length,int start,int length,struct ArrayObject * ___str___) {
- int maxlength=(length>60)?60:length;
+ int maxlength=(length>60) ? 60 : length;
char str[maxlength+1];
struct ArrayObject * bytearray=VAR(___str___);
int i;
}
#endif
-#ifdef D___Double______doubleToRawLongBits____D
-typedef union jvalue
-{
+#ifdef D___Double______doubleToRawLongBits____D
+typedef union jvalue {
bool z;
- char c;
- short s;
- int i;
- long long j;
- float f;
- double d;
+ char c;
+ short s;
+ int i;
+ long long j;
+ float f;
+ double d;
} jvalue;
long long CALL11(___Double______doubleToRawLongBits____D, double dval, double dval) {
}
#endif
-#ifdef D___Double______longBitsToDouble____J
+#ifdef D___Double______longBitsToDouble____J
double CALL11(___Double______longBitsToDouble____J, long long lval, long long lval) {
jvalue val;
val.j = lval;
int elementsize=classsize[srctype];
int size=srclength*elementsize;
//primitives
- memcpy(((char *)&aodst->___length___)+sizeof(int) , ((char *)&aosrc->___length___)+sizeof(int), size);
+ memcpy(((char *)&aodst->___length___)+sizeof(int), ((char *)&aosrc->___length___)+sizeof(int), size);
} else {
//objects
int i;
- for(i=0;i<srclength;i++) {
+ for(i=0; i<srclength; i++) {
struct ___Object___ * ptr=((struct ___Object___**)(((char*) &aosrc->___length___)+sizeof(int)))[i];
int ptrtype=((int *)ptr)[0];
if (ptrtype>=NUMCLASSES) {
} else {
//objects
int i;
- for(i=0;i<length;i++) {
+ for(i=0; i<length; i++) {
struct ___Object___ * ptr=((struct ___Object___**)(((char*) &aosrc->___length___)+sizeof(int)))[i+srcPos];
//hit an object
((struct ___Object___ **)(((char*) &aodst->___length___)+sizeof(int)))[i+destPos]=ptr;
}
}
-void CALL35(___System______arraycopy____L___Object____I_L___Object____I_I, int ___srcPos___, int ___destPos___, int ___length___, struct ___Object___ * ___src___, int ___srcPos___, struct ___Object___ * ___dst___, int ___destPos___, int ___length___) {
+void CALL35(___System______arraycopy____L___Object____I_L___Object____I_I, int ___srcPos___, int ___destPos___, int ___length___, struct ___Object___ * ___src___, int ___srcPos___, struct ___Object___ * ___dst___, int ___destPos___, int ___length___) {
arraycopy(VAR(___src___), ___srcPos___, VAR(___dst___), ___destPos___, ___length___);
}
#endif
collect((struct garbagelist *)___params___);
{
- void * tmp=to_heapbase;
- to_heapbase=curr_heapbase;
- curr_heapbase=tmp;
+ void * tmp=to_heapbase;
+ to_heapbase=curr_heapbase;
+ curr_heapbase=tmp;
- tmp=to_heaptop;
- to_heaptop=curr_heaptop;
- curr_heaptop=tmp;
+ tmp=to_heaptop;
+ to_heaptop=curr_heaptop;
+ curr_heaptop=tmp;
- tmp=to_heapptr;
- curr_heapptr=to_heapptr;
- curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
- to_heapptr=to_heapbase;
- bzero(tmp, curr_heaptop-tmp);
+ tmp=to_heapptr;
+ curr_heapptr=to_heapptr;
+ curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
+ to_heapptr=to_heapbase;
+ bzero(tmp, curr_heaptop-tmp);
}
#ifdef D___System______microTimes____
long long CALL00(___System______microTimes____) {
- struct timeval tv;
+ struct timeval tv;
long long retval;
gettimeofday(&tv, NULL);
retval = tv.tv_sec; /* seconds */
#ifdef D___System______getticks____
long long CALL00(___System______getticks____) {
unsigned a, d;
- asm("cpuid");
- asm volatile("rdtsc" : "=a" (a), "=d" (d));
+ asm ("cpuid");
+ asm volatile ("rdtsc" : "=a" (a), "=d" (d));
return (((ticks)a) | (((ticks)d) << 32));
}
#endif
}
#endif
-#ifdef D___RecoveryStat______printRecoveryStat____
+#ifdef D___RecoveryStat______printRecoveryStat____
#ifdef RECOVERYSTATS
void CALL00(___RecoveryStat______printRecoveryStat____) {
printRecoveryStat();
#endif
#endif
-#ifdef D___Task______execution____
+#ifdef D___Task______execution____
extern void* virtualtable[];
// associated with Task.execution(). finds proper execute method and call it
-void CALL01(___Task______execution____,struct ___Task___ * ___this___)
-{
+void CALL01(___Task______execution____,struct ___Task___ * ___this___) {
unsigned int oid;
oid = (unsigned int) VAR(___this___); // object id
int type = getObjType(oid); // object type
#ifdef PRECISE_GC
- int p[] = {1,0 , oid};
- ((void(*) (void *))virtualtable[type*MAXCOUNT + EXECUTEMETHOD])(p);
+ int p[] = {1,0, oid};
+ ((void (*)(void *))virtualtable[type*MAXCOUNT + EXECUTEMETHOD])(p);
#else
// call the proper execute method
- ((void(*) (void *))virtualtable[type*MAXCOUNT + EXECUTEMETHOD])(oid);
+ ((void (*)(void *))virtualtable[type*MAXCOUNT + EXECUTEMETHOD])(oid);
#endif
}
#endif
int bookkeepsize=numlocks*2*sizeof(int);
struct ArrayObject * v=(struct ArrayObject *)transCreateObj(ptr, sizeof(struct ArrayObject)+basesize+bookkeepsize, bookkeepsize);
unsigned int *intptr=(unsigned int *)(((char *)v)-sizeof(objheader_t));
- for(;numlocks>0;numlocks--) {
+ for(; numlocks>0; numlocks--) {
intptr-=2;
intptr[0]=1;
}
int numlocks=basesize>>INDEXSHIFT;
int bookkeepsize=(numlocks)*2*sizeof(int);
int *tmpint=mygcmalloc((struct garbagelist *) ptr, sizeof(struct ArrayObject)+basesize+sizeof(objheader_t)+bookkeepsize);
- for(;numlocks>0;numlocks--) {
+ for(; numlocks>0; numlocks--) {
tmpint[0]=1;
tmpint+=2;
}
__attribute__((malloc)) struct ArrayObject * allocate_newarray(void * ptr, int type, int length) {
return allocate_newarray_mlp(ptr, type, length, 0, 0);
}
- __attribute__((malloc)) struct ArrayObject * allocate_newarray_mlp(void * ptr, int type, int length, int oid, int allocsite) {
+__attribute__((malloc)) struct ArrayObject * allocate_newarray_mlp(void * ptr, int type, int length, int oid, int allocsite) {
#else
__attribute__((malloc)) struct ArrayObject * allocate_newarray(void * ptr, int type, int length) {
#endif
#ifndef SANDBOX
#ifdef D___System______Assert____Z
- void CALL11(___System______Assert____Z, int ___status___, int ___status___) {
- if (!___status___) {
- printf("Assertion violation\n");
- *((int *)(NULL)); //force stack trace error
- }
- }
+void CALL11(___System______Assert____Z, int ___status___, int ___status___) {
+ if (!___status___) {
+ printf("Assertion violation\n");
+ *((int *)(NULL)); //force stack trace error
+ }
+}
#endif
#endif
#define OBJECTARRAYINTERVAL 10
#define ARRAYSET(array, type, index, value) \
- ((type *)(& (& array->___length___)[1]))[index]=value
+ ((type *)(&(&array->___length___)[1]))[index]=value
#define ARRAYGET(array, type, index) \
- ((type *)(& (& array->___length___)[1]))[index]
+ ((type *)(&(&array->___length___)[1]))[index]
#ifdef OPTIONAL
#define OPTARG(x) , x
#if defined(__i386__)
-static __inline__ unsigned long long rdtsc(void)
-{
+static __inline__ unsigned long long rdtsc(void) {
unsigned long long int x;
__asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
return x;
}
#elif defined(__x86_64__)
-static __inline__ unsigned long long rdtsc(void)
-{
+static __inline__ unsigned long long rdtsc(void) {
unsigned hi, lo;
- __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
}
typedef unsigned long long int ticks;
-static __inline__ unsigned long long rdtsc(void)
-{
+static __inline__ unsigned long long rdtsc(void) {
unsigned long long int result=0;
unsigned long int upper, lower,tmp;
- __asm__ volatile(
- "0: \n"
- "\tmftbu %0 \n"
- "\tmftb %1 \n"
- "\tmftbu %2 \n"
- "\tcmpw %2,%0 \n"
- "\tbne 0b \n"
- : "=r"(upper),"=r"(lower),"=r"(tmp)
- );
+ __asm__ volatile (
+ "0: \n"
+ "\tmftbu %0 \n"
+ "\tmftb %1 \n"
+ "\tmftbu %2 \n"
+ "\tcmpw %2,%0 \n"
+ "\tbne 0b \n"
+ : "=r" (upper),"=r" (lower),"=r" (tmp)
+ );
result = upper;
result = result<<32;
result = result|lower;
#endif
{
- int flag = 1;
- setsockopt(___fd___, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(flag));
+ int flag = 1;
+ setsockopt(___fd___, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(flag));
}
if (rc<0) goto error;
h=gethostbyname(str);
free(str);
- if (h != NULL) {
- for (n=0; h->h_addr_list[n]; n++) /* do nothing */ ;
+ if (h != NULL) {
+ for (n=0; h->h_addr_list[n]; n++) /* do nothing */;
#ifdef PRECISE_GC
- arraybytearray=allocate_newarray(___params___,BYTEARRAYARRAYTYPE,n);
+ arraybytearray=allocate_newarray(___params___,BYTEARRAYARRAYTYPE,n);
#else
- arraybytearray=allocate_newarray(BYTEARRAYARRAYTYPE,n);
+ arraybytearray=allocate_newarray(BYTEARRAYARRAYTYPE,n);
#endif
- for(i=0; i<n; i++) {
- struct ArrayObject *bytearray;
+ for(i=0; i<n; i++) {
+ struct ArrayObject *bytearray;
#ifdef PRECISE_GC
- {
- INTPTR ptrarray[]={1, (INTPTR) ___params___, (INTPTR)arraybytearray};
- bytearray=allocate_newarray(&ptrarray,BYTEARRAYTYPE,h->h_length);
- arraybytearray=(struct ArrayObject *) ptrarray[2];
- }
+ {
+ INTPTR ptrarray[]={1, (INTPTR) ___params___, (INTPTR)arraybytearray};
+ bytearray=allocate_newarray(&ptrarray,BYTEARRAYTYPE,h->h_length);
+ arraybytearray=(struct ArrayObject *) ptrarray[2];
+ }
#else
- bytearray=allocate_newarray(BYTEARRAYTYPE,h->h_length);
+ bytearray=allocate_newarray(BYTEARRAYTYPE,h->h_length);
#endif
- ((void **)&((&arraybytearray->___length___)[1]))[i]=bytearray;
- {
- int ha=ntohl(*(int *)h->h_addr_list[i]);
- (&bytearray->___length___)[1]=ha;
+ ((void **)&((&arraybytearray->___length___)[1]))[i]=bytearray;
+ {
+ int ha=ntohl(*(int *)h->h_addr_list[i]);
+ (&bytearray->___length___)[1]=ha;
+ }
}
- }
- return arraybytearray;
- } else {
- return NULL;
- }
+ return arraybytearray;
+ } else {
+ return NULL;
+ }
#endif
}
#endif
//////////////////////////////////////////////////////////
//
-// A memory pool implements POOLCREATE, POOLALLOC and
+// A memory pool implements POOLCREATE, POOLALLOC and
// POOLFREE to improve memory allocation by reusing records.
//
// This implementation uses a lock-free singly-linked list
// the memory pool must always have at least one
// item in it
static void dqInit(deque *q) {
- q->head = calloc( 1, sizeof(dequeItem) );
+ q->head = calloc(1, sizeof(dequeItem) );
q->head->next = NULL;
q->tail = q->head;
q->objret.itemSize=sizeof(dequeItem);
q->objret.tail=q->objret.head;
}
-static inline void tagpoolfreeinto( sqMemPool* p, void* ptr, void *realptr ) {
+static inline void tagpoolfreeinto(sqMemPool* p, void* ptr, void *realptr) {
// set up the now unneeded record to as the tail of the
// free list by treating its first bytes as next pointer,
sqMemPoolItem* tailNew = (sqMemPoolItem*) realptr;
tailCurrent->next=(sqMemPoolItem *) ptr;
}
-static inline void* tagpoolalloc( sqMemPool* p ) {
+static inline void* tagpoolalloc(sqMemPool* p) {
// to protect CAS in poolfree from dereferencing
// null, treat the queue as empty when there is
// only one item. The dequeue operation is only
int i;
if(next == NULL) {
// only one item, so don't take from pool
- sqMemPoolItem * newitem=(sqMemPoolItem *) RUNMALLOC( p->itemSize );
+ sqMemPoolItem * newitem=(sqMemPoolItem *) RUNMALLOC(p->itemSize);
((dequeItem *)newitem)->next=NULL;
return newitem;
}
p->head = next;
sqMemPoolItem* realNext=(sqMemPoolItem *) EXTRACTPTR((INTPTR)next);
- asm volatile( "prefetcht0 (%0)" :: "r" (realNext));
+ asm volatile ( "prefetcht0 (%0)" :: "r" (realNext));
realNext=(sqMemPoolItem*)(((char *)realNext)+CACHELINESIZE);
- asm volatile( "prefetcht0 (%0)" :: "r" (realNext));
+ asm volatile ( "prefetcht0 (%0)" :: "r" (realNext));
return (void*)headCurrent;
}
// otherwise someone did CAS before you, so try again (the return
// value is the old value you will pass next time.)
-static inline void dqPushBottom( deque* p, void* work ) {
+static inline void dqPushBottom(deque* p, void* work) {
dequeItem *ptr=(dequeItem *) tagpoolalloc(&p->objret);
dequeItem *realptr=(dequeItem *) EXTRACTPTR((unsigned INTPTR)ptr);
ptr=(dequeItem *) (((unsigned INTPTR)ptr)+INCREMENTTAG);
static inline void* dqPopTopSelf(deque *p) {
int tryagain=1;
while(1) {
- dequeItem *ptr=p->head;
- dequeItem *realptr=(dequeItem *) EXTRACTPTR((INTPTR)ptr);
- dequeItem *next=realptr->next;
- //remove if we can..steal work no matter what
- if (likely(next!=NULL)) {
- if (((dequeItem *)CAS(&(p->head),(INTPTR)ptr, (INTPTR)next))!=ptr)
- return DQ_POP_EMPTY;
- void * item=NULL;
- item=(void *)LOCKXCHG((unsigned INTPTR*) &(realptr->work), (unsigned INTPTR) item);
- realptr->next=NULL;
- BARRIER();
- tagpoolfreeinto(&p->objret,ptr, realptr);
- if (item==NULL&&tryagain) {
- tryagain=0;
- continue;
+ dequeItem *ptr=p->head;
+ dequeItem *realptr=(dequeItem *) EXTRACTPTR((INTPTR)ptr);
+ dequeItem *next=realptr->next;
+ //remove if we can..steal work no matter what
+ if (likely(next!=NULL)) {
+ if (((dequeItem *)CAS(&(p->head),(INTPTR)ptr, (INTPTR)next))!=ptr)
+ return DQ_POP_EMPTY;
+ void * item=NULL;
+ item=(void *)LOCKXCHG((unsigned INTPTR*) &(realptr->work), (unsigned INTPTR) item);
+ realptr->next=NULL;
+ BARRIER();
+ tagpoolfreeinto(&p->objret,ptr, realptr);
+ if (item==NULL&&tryagain) {
+ tryagain=0;
+ continue;
+ }
+ return item;
+ } else {
+ void * item=NULL;
+ if (realptr->work!=NULL)
+ item=(void *) LOCKXCHG((unsigned INTPTR*) &(realptr->work), (unsigned INTPTR) item);
+ return item;
}
- return item;
- } else {
- void * item=NULL;
- if (realptr->work!=NULL)
- item=(void *) LOCKXCHG((unsigned INTPTR*) &(realptr->work), (unsigned INTPTR) item);
- return item;
- }
}
}
processOptions();
initializeexithandler();
/* Create table for failed tasks */
- failedtasks=genallocatehashtable((unsigned int(*) (void *)) &hashCodetpd,
- (int(*) (void *,void *)) &comparetpd);
+ failedtasks=genallocatehashtable((unsigned int (*)(void *)) &hashCodetpd,
+ (int (*)(void *,void *)) &comparetpd);
/* Create queue of active tasks */
- activetasks=genallocatehashtable((unsigned int(*) (void *)) &hashCodetpd,
- (int(*) (void *,void *)) &comparetpd);
+ activetasks=genallocatehashtable((unsigned int (*)(void *)) &hashCodetpd,
+ (int (*)(void *,void *)) &comparetpd);
/* Process task information */
processtasks();
flag=obj->fses[offset];
obj->fses[offset++]=(flag|ormask)&andmask;
}
- qsort(&obj->fses[oldoffset], sizeof(int), counter, (int(*) (const void *, const void *)) &flagcomp);
+  qsort(&obj->fses[oldoffset], counter, sizeof(int), (int (*)(const void *, const void *)) &flagcomp);
}
enqueueoptional(obj, 0, NULL, NULL, 0);
} else
flag=obj->fses[offset];
obj->fses[offset++]=(flag|ormask)&andmask;
}
- qsort(&obj->fses[oldoffset], sizeof(int), counter, (int(*) (const void *, const void *)) &flagcomp);
+  qsort(&obj->fses[oldoffset], counter, sizeof(int), (int (*)(const void *, const void *)) &flagcomp);
}
enqueueoptional(obj, 0, NULL, NULL, 0);
} else
/* Check tags */
if (parameter->numbertags>0) {
if (tagptr==NULL)
- goto nextloop; //that means the object has no tag but that param needs tag
+ goto nextloop; //that means the object has no tag but that param needs tag
else if(tagptr->type==TAGTYPE) { //one tag
struct ___TagDescriptor___ * tag=(struct ___TagDescriptor___*) tagptr;
for(i=0; i<parameter->numbertags; i++) {
//slotid is parameter->tagarray[2*i];
int tagid=parameter->tagarray[2*i+1];
if (tagid!=tagptr->flag)
- goto nextloop; /*We don't have this tag */
+ goto nextloop; /*We don't have this tag */
}
} else { //multiple tags
struct ArrayObject * ao=(struct ArrayObject *) tagptr;
struct ___Object___ * tagptr = currobj->___tags___;
if(fswrapper->numtags>0) {
if (tagptr==NULL)
- return 0; //that means the object has no tag but that param
+ return 0; //that means the object has no tag but that param
//needs tag
else if(tagptr->type==TAGTYPE) { //one tag
if(fswrapper->numtags!=1)
- return 0; //we don't have the right number of tags
+ return 0; //we don't have the right number of tags
struct ___TagDescriptor___ * tag=(struct ___TagDescriptor___*) tagptr;
if (fswrapper->tags[0]!=tagptr->flag)
return 0;
int foundtag=0;
if(ao->___length___!=fswrapper->numtags)
- return 0; //we don't have the right number of tags
+ return 0; //we don't have the right number of tags
for(tag_counter=0; tag_counter<fswrapper->numtags; tag_counter++) {
int tagid=fswrapper->tags[tag_counter];
int j;
}
/* Actually call task */
#ifdef PRECISE_GC
- ((int *)taskpointerarray)[0]=currtpd->numParameters;
+ ((int *)taskpointerarray)[0]=currtpd->numParameters;
taskpointerarray[1]=NULL;
#endif
#ifdef OPTIONAL
#endif
if(debugtask) {
printf("ENTER %s count=%d\n",currtpd->task->name, (instaccum-instructioncount));
- ((void(*) (void **))currtpd->task->taskptr)(taskpointerarray);
+ ((void (*)(void **))currtpd->task->taskptr)(taskpointerarray);
printf("EXIT %s count=%d\n",currtpd->task->name, (instaccum-instructioncount));
} else
- ((void(*) (void **))currtpd->task->taskptr)(taskpointerarray);
+ ((void (*)(void **))currtpd->task->taskptr)(taskpointerarray);
#ifdef OPTIONAL
for(i=0; i<numparams; i++) {
#include "structdefs.h"
#include "SimpleHash.h"
#include "GenericHashtable.h"
-#include<string.h>
+#include <string.h>
#ifdef TASK
#else
struct lockvector *lptr=&lvector;
#endif
- for(lptr->index--;lptr->index>=0;lptr->index--) {
+ for(lptr->index--; lptr->index>=0; lptr->index--) {
if (lptr->locks[lptr->index].islastlock) {
struct ___Object___ *ll=lptr->locks[lptr->index].object;
ll->tid=0;
printf("We just took sig=%d\n",sig);
printf("signal\n");
printf("To get stack trace, set breakpoint in threadhandler in gdb\n");
-
+
threadexit();
}
#else
perror("backtrace_symbols");
exit(EXIT_FAILURE);
}
-
+
for (j = 0; j < nptrs; j++)
printf("%s\n", strings[j]);
-
+
threadexit();
}
#endif
lockedobjs=calloc(1, sizeof(struct objlist));
objlockscope = calloc(1, sizeof(objlockstate_t));
pthread_mutex_init(&lockedobjstore, NULL);
- {
+ {
int i;
for(i=0; i<TOTALNUMCLASSANDARRAY; i++) {
typesCausingAbort[i].numaccess = 0;
perror("arraystack");
#endif
#endif
- ___Thread____NNR____staticStart____L___Thread___((struct ___Thread____NNR____staticStart____L___Thread____params *)p);
- objstrDelete(t_cache);
- objstrDelete(t_reserve);
- t_chashDelete();
- free(newobjs);
+ ___Thread____NNR____staticStart____L___Thread___((struct ___Thread____NNR____staticStart____L___Thread____params *)p);
+ objstrDelete(t_cache);
+ objstrDelete(t_reserve);
+ t_chashDelete();
+ free(newobjs);
#ifdef STMSTATS
- free(lockedobjs);
+ free(lockedobjs);
#endif
#endif
___this___=(struct ___Thread___ *) p[2];
return getStatus(___mid___);
}
#endif
-#else
+#else
#ifdef D___Thread______nativeGetStatus____I
int CALL12(___Thread______nativeGetStatus____I, int ___mid___, struct ___Thread___ * ___this___, int ___mid___) {
return 0;
pthread_mutex_lock(&joinlock);
while(!VAR(___this___)->___finished___) {
#ifdef PRECISE_GC
- stopforgc((struct garbagelist *)___params___);
+ stopforgc((struct garbagelist *)___params___);
#endif
pthread_cond_wait(&joincond, &joinlock);
#ifdef PRECISE_GC
list=&litem;
pthread_mutex_unlock(&gclistlock);
- ((void(*) (void *))virtualtable[type*MAXCOUNT+RUNMETHOD])(p);
+ ((void (*)(void *))virtualtable[type*MAXCOUNT+RUNMETHOD])(p);
#else
- ((void(*) (void *))virtualtable[type*MAXCOUNT+RUNMETHOD])(oid);
+ ((void (*)(void *))virtualtable[type*MAXCOUNT+RUNMETHOD])(oid);
#endif
threadData = calloc(1, sizeof(unsigned int));
*((unsigned int *) threadData) = oid;
ptr[0]=oid;
ptr[1]=objType;
pthread_key_create(&oidval, globalDestructor);
-
+
do {
retval=pthread_create(&thread, &nattr, (void * (*)(void *)) &initDSMthread, ptr);
if (retval!=0)
#endif
#ifdef PRECISE_GC
-#define ATOMICLOCK if (pthread_mutex_trylock(&atomiclock)!=0) { \
- stopforgc((struct garbagelist *) &___locals___); \
- pthread_mutex_lock(&atomiclock); \
- restartaftergc(); \
- }
+#define ATOMICLOCK if (pthread_mutex_trylock(&atomiclock)!=0) { \
+ stopforgc((struct garbagelist *) &___locals___); \
+ pthread_mutex_lock(&atomiclock); \
+ restartaftergc(); \
+}
#define ATOMICUNLOCK pthread_mutex_unlock(&atomiclock)
#else
typedef struct workerData_t {
pthread_t workerThread;
- int id;
+ int id;
} WorkerData;
// a thread should know its worker id in any
static WorkerData* workerDataArray;
static pthread_t* workerArray;
-static void(*workFunc)(void*);
+static void (*workFunc)(void*);
// each thread can create objects but should assign
// globally-unique object ID's (oid) so have threads
-void* workerMain( void* arg ) {
+void* workerMain(void* arg) {
void* workUnit;
WorkerData* myData = (WorkerData*) arg;
deque* myDeque = &(deques[myData->id]);
- int keepRunning = TRUE;
- int haveWork;
- int lastVictim = 0;
- int i;
+ int keepRunning = TRUE;
+ int haveWork;
+ int lastVictim = 0;
+ int i;
myWorkerID = myData->id;
// each thread has a single semaphore that a running
// task should hand off to children threads it is
// going to stall on
- psem_init( &runningSESEstallSem );
+ psem_init(&runningSESEstallSem);
// the worker threads really have no context relevant to the
// user program, so build an empty garbage list struct to
struct garbagelist emptygarbagelist = { 0, NULL };
// Add this worker to the gc list
- pthread_mutex_lock( &gclistlock );
+ pthread_mutex_lock(&gclistlock);
threadcount++;
litem.prev = NULL;
litem.next = list;
- if( list != NULL )
+ if( list != NULL )
list->prev = &litem;
list = &litem;
- pthread_mutex_unlock( &gclistlock );
+ pthread_mutex_unlock(&gclistlock);
// start timing events in this thread
// wait for work
#ifdef CP_EVENTID_WORKSCHEDGRAB
- CP_LOGEVENT( CP_EVENTID_WORKSCHEDGRAB, CP_EVENTTYPE_BEGIN );
+ CP_LOGEVENT(CP_EVENTID_WORKSCHEDGRAB, CP_EVENTTYPE_BEGIN);
#endif
haveWork = FALSE;
while( !haveWork ) {
- workUnit = dqPopBottom( myDeque );
+ workUnit = dqPopBottom(myDeque);
if( workUnit != DQ_POP_EMPTY ) {
- haveWork = TRUE;
- goto dowork;
+ haveWork = TRUE;
+ goto dowork;
} else {
- // try to steal from another queue, starting
- // with the last successful victim, don't check
- // your own deque
+ // try to steal from another queue, starting
+ // with the last successful victim, don't check
+ // your own deque
int mynumWorkSchedWorkers=numWorkSchedWorkers;
- for( i = 0; i < mynumWorkSchedWorkers - 1; ++i ) {
+ for( i = 0; i < mynumWorkSchedWorkers - 1; ++i ) {
+
+ workUnit = dqPopTop(&(deques[lastVictim]) );
- workUnit = dqPopTop( &(deques[lastVictim]) );
-
#ifdef SQUEUE
- if( workUnit != DQ_POP_EMPTY ) {
+ if( workUnit != DQ_POP_EMPTY ) {
#else
if( workUnit != DQ_POP_ABORT &&
workUnit != DQ_POP_EMPTY ) {
#endif
- // successful steal!
- haveWork = TRUE;
- goto dowork;
- }
-
- // choose next victim
- lastVictim++; if( lastVictim == mynumWorkSchedWorkers ) { lastVictim = 0; }
-
- if( lastVictim == myWorkerID ) {
- lastVictim++; if( lastVictim == mynumWorkSchedWorkers ) { lastVictim = 0; }
- }
- }
- // end steal attempts
-
-
- // if we successfully stole work, break out of the
- // while-not-have-work loop, otherwise we looked
- // everywhere, so drop down to "I'm idle" code below
- if( haveWork ) {
+ // successful steal!
+ haveWork = TRUE;
+ goto dowork;
+ }
+
+ // choose next victim
+ lastVictim++; if( lastVictim == mynumWorkSchedWorkers ) {
+ lastVictim = 0;
+ }
+
+ if( lastVictim == myWorkerID ) {
+ lastVictim++; if( lastVictim == mynumWorkSchedWorkers ) {
+ lastVictim = 0;
+ }
+ }
+ }
+ // end steal attempts
+
+
+ // if we successfully stole work, break out of the
+ // while-not-have-work loop, otherwise we looked
+ // everywhere, so drop down to "I'm idle" code below
+ if( haveWork ) {
goto dowork;
- }
+ }
}
// if we drop down this far, we didn't find any work,
// so do a garbage collection, yield the processor,
// then check if the entire system is out of work
- if( unlikely( needtocollect ) ) {
- checkcollect( &emptygarbagelist );
+ if( unlikely(needtocollect) ) {
+ checkcollect(&emptygarbagelist);
}
sched_yield();
if( mainTaskRetired ) {
- keepRunning = FALSE;
- break;
+ keepRunning = FALSE;
+ break;
}
} // end the while-not-have-work loop
- dowork:
+dowork:
#ifdef CP_EVENTID_WORKSCHEDGRAB
- CP_LOGEVENT( CP_EVENTID_WORKSCHEDGRAB, CP_EVENTTYPE_END );
+ CP_LOGEVENT(CP_EVENTID_WORKSCHEDGRAB, CP_EVENTTYPE_END);
#endif
// when is no work left we will pop out
#ifdef DEBUG_DEQUE
if( workUnit == NULL ) {
- printf( "About to execute a null work item\n" );
+ printf("About to execute a null work item\n");
}
#endif
- workFunc( workUnit );
+ workFunc(workUnit);
litem.seseCommon = NULL;
}
- }
+ }
CP_EXIT();
// remove from GC list
- pthread_mutex_lock( &gclistlock );
+ pthread_mutex_lock(&gclistlock);
threadcount--;
if( litem.prev == NULL ) {
list = litem.next;
if( litem.next != NULL ) {
litem.next->prev = litem.prev;
}
- pthread_mutex_unlock( &gclistlock );
+ pthread_mutex_unlock(&gclistlock);
return NULL;
}
-void workScheduleInit( int numProcessors,
- void(*func)(void*) ) {
+void workScheduleInit(int numProcessors,
+ void (*func)(void*) ) {
int i, status;
pthread_attr_t attr;
oid = 1;
#ifdef RCR
- pthread_mutex_init( &queuelock, NULL );
+ pthread_mutex_init(&queuelock, NULL);
#endif
- pthread_mutex_init( &gclock, NULL );
- pthread_mutex_init( &gclistlock, NULL );
- pthread_cond_init ( &gccond, NULL );
+ pthread_mutex_init(&gclock, NULL);
+ pthread_mutex_init(&gclistlock, NULL);
+ pthread_cond_init(&gccond, NULL);
numWorkSchedWorkers = numProcessors;
while(1) {
int x=2;
//check primality
- for(;x<oidIncrement;x++) {
+ for(; x<oidIncrement; x++) {
//not prime
if (oidIncrement%x==0) {
- oidIncrement++;
- break;
+ oidIncrement++;
+ break;
}
}
//have prime
workFunc = func;
#ifdef RCR
- deques = RUNMALLOC( sizeof( deque )*numWorkSchedWorkers*2);
+ deques = RUNMALLOC(sizeof( deque )*numWorkSchedWorkers*2);
#else
- deques = RUNMALLOC( sizeof( deque )*numWorkSchedWorkers );
+ deques = RUNMALLOC(sizeof( deque )*numWorkSchedWorkers);
#endif
- workerDataArray = RUNMALLOC( sizeof( WorkerData )*numWorkSchedWorkers );
+ workerDataArray = RUNMALLOC(sizeof( WorkerData )*numWorkSchedWorkers);
#ifdef RCR
for( i = 0; i < numWorkSchedWorkers*2; ++i ) {
#else
for( i = 0; i < numWorkSchedWorkers; ++i ) {
#endif
- dqInit( &(deques[i]) );
+ dqInit(&(deques[i]) );
}
#ifndef COREPIN
-
- pthread_attr_init( &attr );
- pthread_attr_setdetachstate( &attr,
- PTHREAD_CREATE_JOINABLE );
+
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr,
+ PTHREAD_CREATE_JOINABLE);
workerDataArray[0].id = 0;
workerDataArray[i].id = i;
- status = pthread_create( &(workerDataArray[i].workerThread),
- &attr,
- workerMain,
- (void*) &(workerDataArray[i])
- );
+ status = pthread_create(&(workerDataArray[i].workerThread),
+ &attr,
+ workerMain,
+ (void*) &(workerDataArray[i])
+ );
- if( status != 0 ) { printf( "Error\n" ); exit( -1 ); }
+ if( status != 0 ) {
+ printf("Error\n"); exit(-1);
+ }
}
#else
int numCore=24;
workerDataArray[0].id = 0;
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
- sched_setaffinity(syscall(SYS_gettid), sizeof(cpuset), &cpuset);
-
- for(idx=1;idx<numWorkSchedWorkers;idx++){
- int coreidx=idx%numCore;
+ sched_setaffinity(syscall(SYS_gettid), sizeof(cpuset), &cpuset);
+
+ for(idx=1; idx<numWorkSchedWorkers; idx++) {
+ int coreidx=idx%numCore;
pthread_attr_t* attr = &thread_attr[idx];
pthread_attr_init(attr);
pthread_attr_setdetachstate(attr, PTHREAD_CREATE_JOINABLE);
CPU_ZERO(&cpuset);
CPU_SET(coreidx, &cpuset);
pthread_attr_setaffinity_np(attr, sizeof(cpuset), &cpuset);
-
+
workerDataArray[idx].id = idx;
-
- status = pthread_create( &(workerDataArray[idx].workerThread),
- attr,
- workerMain,
- (void*) &(workerDataArray[idx])
- );
+
+ status = pthread_create(&(workerDataArray[idx].workerThread),
+ attr,
+ workerMain,
+ (void*) &(workerDataArray[idx])
+ );
}
#endif
}
-void workScheduleSubmit( void* workUnit ) {
+void workScheduleSubmit(void* workUnit) {
#ifdef CP_EVENTID_WORKSCHEDSUBMIT
- CP_LOGEVENT( CP_EVENTID_WORKSCHEDSUBMIT, CP_EVENTTYPE_BEGIN );
+ CP_LOGEVENT(CP_EVENTID_WORKSCHEDSUBMIT, CP_EVENTTYPE_BEGIN);
#endif
- dqPushBottom( &(deques[myWorkerID]), workUnit );
+ dqPushBottom(&(deques[myWorkerID]), workUnit);
#ifdef CP_EVENTID_WORKSCHEDSUBMIT
- CP_LOGEVENT( CP_EVENTID_WORKSCHEDSUBMIT, CP_EVENTTYPE_END );
+ CP_LOGEVENT(CP_EVENTID_WORKSCHEDSUBMIT, CP_EVENTTYPE_END);
#endif
}
// then wait for all other workers to exit gracefully
for( i = 1; i < realnumWorkSchedWorkers; ++i ) {
- pthread_join( workerDataArray[i].workerThread, NULL );
+ pthread_join(workerDataArray[i].workerThread, NULL);
}
// write all thread's events to disk
#define __WORK_SCHEDULE__
-// initialize the work schedule system, after
+// initialize the work schedule system, after
// which some preliminary work units can be
-// scheduled. Note the supplied work func
+// scheduled. Note the supplied work func
// should operate on a work unit of the type
// submitted in the function below
-void workScheduleInit( int numProcessors,
- void(*workFunc)(void*) );
+void workScheduleInit(int numProcessors,
+ void (*workFunc)(void*) );
// as the scheduler evolves, looks like this is
// a better way to shut down the system
// distribute work units among the worker pool
// threads. The workers will dynamically
// steal from one another to load balance
-void workScheduleSubmit( void* workUnit );
+void workScheduleSubmit(void* workUnit);
// once you call this begin function your main
// thread becomes a work thread, so programs
extern int threadcount;
extern pthread_mutex_t gclock;
extern pthread_mutex_t gclistlock;
-extern pthread_cond_t gccond;
+extern pthread_cond_t gccond;
#endif /* __WORK_SCHEDULE__ */
public CodePrinter(OutputStream w, boolean af) {
super(w,af);
}
-
+
StringBuffer genSpacing() {
StringBuffer sb=new StringBuffer();
- for(int i=0;i<braceCount;i++)
+ for(int i=0; i<braceCount; i++)
sb.append(" ");
return sb;
}
switch (c) {
case '\n': {
- // get the cr
- sb.append(string, lastcr, (i - lastcr) + 1);
- super.write(sb.toString());
- sb = genSpacing();
- lastcr = i + 1;// skip carriage return
- seenChar = false;
- break;
+ // get the cr
+ sb.append(string, lastcr, (i - lastcr) + 1);
+ super.write(sb.toString());
+ sb = genSpacing();
+ lastcr = i + 1; // skip carriage return
+ seenChar = false;
+ break;
}
+
case '{':
- braceCount++;
- seenChar = true;
- break;
+ braceCount++;
+ seenChar = true;
+ break;
+
case '}':
- braceCount--;
- // fix up close brace...
- if (!seenChar)
- sb = genSpacing();
- seenChar = true;
- break;
+ braceCount--;
+ // fix up close brace...
+ if (!seenChar)
+ sb = genSpacing();
+ seenChar = true;
+ break;
+
case ' ':
- // skip leading whitespace
- if (!seenChar)
- lastcr = i + 1;
- break;
+ // skip leading whitespace
+ if (!seenChar)
+ lastcr = i + 1;
+ break;
+
default:
- seenChar = true;
+ seenChar = true;
}
}
if (lastcr < string.length) {
tovisit.addAll(nodes);
while(!tovisit.isEmpty()) {
GraphNode gn=(GraphNode)tovisit.pop();
- for(Iterator it=gn.edges(); it.hasNext();) {
+ for(Iterator it=gn.edges(); it.hasNext(); ) {
Edge edge=(Edge)it.next();
GraphNode target=edge.getTarget();
if (!nodes.contains(target)) {
for(int i=0; i<depth&&!tovisit.isEmpty(); i++) {
while(!tovisit.isEmpty()) {
GraphNode gn=(GraphNode)tovisit.pop();
- for(Iterator it=gn.edges(); it.hasNext();) {
+ for(Iterator it=gn.edges(); it.hasNext(); ) {
Edge edge=(Edge)it.next();
GraphNode target=edge.getTarget();
if (!nodes.contains(target)) {
}
public void addEdge(Vector v) {
- for (Iterator it = v.iterator(); it.hasNext();)
+ for (Iterator it = v.iterator(); it.hasNext(); )
addEdge((Edge)it.next());
}
Edge edge = (Edge) edges.next();
GraphNode node = edge.getTarget();
if (nodes.contains(node)) {
- for(Iterator nodeit=nonmerge(node).iterator(); nodeit.hasNext();) {
+ for(Iterator nodeit=nonmerge(node).iterator(); nodeit.hasNext(); ) {
GraphNode node2=(GraphNode)nodeit.next();
String edgelabel = "";
String edgedotnodeparams="";
return true;
Object [] array=s.toArray();
GraphNode gn=(GraphNode)array[0];
- for(Iterator it=gn.edges(); it.hasNext();) {
+ for(Iterator it=gn.edges(); it.hasNext(); ) {
Edge e=(Edge)it.next();
if (e.getTarget()==gn)
return true; /* Self Cycle */
dfs.sccmaprev=new HashMap();
dfs.finishingorder=new Vector();
boolean acyclic=dfs.go();
- for (Iterator it = nodes.iterator(); it.hasNext();) {
+ for (Iterator it = nodes.iterator(); it.hasNext(); ) {
GraphNode gn = (GraphNode) it.next();
gn.resetscc();
}
sccmap.put(i,new HashSet());
((Set)sccmap.get(i)).add(gn);
sccmaprev.put(gn,i);
- for(Iterator edgeit=gn.inedges(); edgeit.hasNext();) {
+ for(Iterator edgeit=gn.inedges(); edgeit.hasNext(); ) {
Edge e=(Edge)edgeit.next();
GraphNode gn2=e.getSource();
dfsprev(gn2);
s.add(value);
if (!table.containsKey(value)) {
- Set<T> lowerNeighbor = new HashSet<T>();
- lowerNeighbor.add(bottom);
- table.put(value, lowerNeighbor);
+ Set<T> lowerNeighbor = new HashSet<T>();
+ lowerNeighbor.add(bottom);
+ table.put(value, lowerNeighbor);
}
// if value is already connected with top, it is no longer to be
do {
oldReachableSize = reachableSet.size();
Set<T> nextLevelNeighbors = new HashSet<T>();
- for (Iterator<T> iterator = neighborSet.iterator(); iterator.hasNext();) {
- T element = iterator.next();
- Set<T> neighbors = get(element);
- if (neighbors != null) {
- nextLevelNeighbors.addAll(neighbors);
- reachableSet.addAll(neighbors);
- }
-
- if (reachableSet.contains(key)) {
- // found cycle
- return true;
- }
+ for (Iterator<T> iterator = neighborSet.iterator(); iterator.hasNext(); ) {
+ T element = iterator.next();
+ Set<T> neighbors = get(element);
+ if (neighbors != null) {
+ nextLevelNeighbors.addAll(neighbors);
+ reachableSet.addAll(neighbors);
+ }
+
+ if (reachableSet.contains(key)) {
+ // found cycle
+ return true;
+ }
}
neighborSet = nextLevelNeighbors;
} while (oldReachableSize != reachableSet.size());
if (a.equals(top)) {
if (b.equals(top)) {
- return false;
+ return false;
}
return true;
}
return true;
} else {
boolean reachable = false;
- for (Iterator<T> iterator = neighborSet.iterator(); iterator.hasNext();) {
- T neighbor = iterator.next();
- reachable = reachable || isGreaterThan(neighbor, b);
+ for (Iterator<T> iterator = neighborSet.iterator(); iterator.hasNext(); ) {
+ T neighbor = iterator.next();
+ reachable = reachable || isGreaterThan(neighbor, b);
}
return reachable;
}
Set<T> lowerSet = new HashSet<T>();
// get lower set of input locations
- for (Iterator<T> iterator = inputSet.iterator(); iterator.hasNext();) {
+ for (Iterator<T> iterator = inputSet.iterator(); iterator.hasNext(); ) {
T element = iterator.next();
lowerSet.addAll(getLowerSet(element, new HashSet<T>()));
lowerSet.add(element);
// an element of lower bound should be lower than every input set
Set<T> toberemoved = new HashSet<T>();
- for (Iterator<T> inputIterator = inputSet.iterator(); inputIterator.hasNext();) {
+ for (Iterator<T> inputIterator = inputSet.iterator(); inputIterator.hasNext(); ) {
T inputElement = inputIterator.next();
- for (Iterator iterator = lowerSet.iterator(); iterator.hasNext();) {
- T lowerElement = (T) iterator.next();
- if (!inputElement.equals(lowerElement)) {
- if (!isGreaterThan(inputElement, lowerElement)) {
- toberemoved.add(lowerElement);
- }
- }
+ for (Iterator iterator = lowerSet.iterator(); iterator.hasNext(); ) {
+ T lowerElement = (T) iterator.next();
+ if (!inputElement.equals(lowerElement)) {
+ if (!isGreaterThan(inputElement, lowerElement)) {
+ toberemoved.add(lowerElement);
+ }
+ }
}
}
lowerSet.removeAll(toberemoved);
// calculate the greatest element of lower set
// find an element A, where every lower bound B of lowerSet, B<A
- for (Iterator<T> iterator = lowerSet.iterator(); iterator.hasNext();) {
+ for (Iterator<T> iterator = lowerSet.iterator(); iterator.hasNext(); ) {
T lowerElement = iterator.next();
boolean isGreaterThanAll = true;
- for (Iterator<T> iterator2 = lowerSet.iterator(); iterator2.hasNext();) {
- T e = iterator2.next();
- if (!lowerElement.equals(e)) {
- if (!isGreaterThan(lowerElement, e)) {
- isGreaterThanAll = false;
- break;
- }
- }
+ for (Iterator<T> iterator2 = lowerSet.iterator(); iterator2.hasNext(); ) {
+ T e = iterator2.next();
+ if (!lowerElement.equals(e)) {
+ if (!isGreaterThan(lowerElement, e)) {
+ isGreaterThanAll = false;
+ break;
+ }
+ }
}
if (isGreaterThanAll) {
- return lowerElement;
+ return lowerElement;
}
}
return null;
Set<T> neighborSet = get(element);
if (neighborSet != null) {
lowerSet.addAll(neighborSet);
- for (Iterator<T> iterator = neighborSet.iterator(); iterator.hasNext();) {
- T neighbor = iterator.next();
- lowerSet = getLowerSet(neighbor, lowerSet);
+ for (Iterator<T> iterator = neighborSet.iterator(); iterator.hasNext(); ) {
+ T neighbor = iterator.next();
+ lowerSet = getLowerSet(neighbor, lowerSet);
}
}
return lowerSet;
import java.util.Hashtable;\r
import java.util.Set;\r
\r
-public class MultiHash{\r
- private int[] views;\r
- private Hashtable viewTable = new Hashtable();\r
+public class MultiHash {\r
+ private int[] views;\r
+ private Hashtable viewTable = new Hashtable();\r
\r
- public MultiHash(){\r
- }\r
- \r
- // Pass in the look up map\r
- public MultiHash(int[] bitmapArray){\r
- this.views = bitmapArray;\r
- for(int i = 0; i < views.length; i++){\r
- Hashtable ht = new Hashtable();\r
- viewTable.put(views[i], ht);\r
- }\r
- }\r
+ public MultiHash() {\r
+ }\r
\r
- // For each view add it to its view hashtable\r
- public void put(Tuples o){\r
- // Tune the Tuple for each view and add it to its designated hashtable\r
- for(int i = 0; i < views.length; i++){\r
- int tupleKey = generateTupleKey(o, views[i]);\r
- Hashtable tuplesTable = (Hashtable) viewTable.get(views[i]);\r
- if(tuplesTable.containsKey(tupleKey)){\r
- Set tupleSet = (Set) tuplesTable.get(tupleKey);\r
- tupleSet.add(o);\r
- }else{\r
- Set tupleSet = new HashSet();\r
- tupleSet.add(o);\r
- tuplesTable.put(tupleKey, tupleSet);\r
- }\r
- }\r
- }\r
+ // Pass in the look up map\r
+ public MultiHash(int[] bitmapArray) {\r
+ this.views = bitmapArray;\r
+ for(int i = 0; i < views.length; i++) {\r
+ Hashtable ht = new Hashtable();\r
+ viewTable.put(views[i], ht);\r
+ }\r
+ }\r
\r
- public int generateTupleKey(Tuples o, int viewIndex){\r
- ArrayList<Integer> indices = findIndices(viewIndex);\r
- ArrayList obj = new ArrayList();\r
- for(int i = 0; i < indices.size(); i++){\r
- obj.add(o.get(indices.get(i)));\r
- }\r
- return obj.hashCode()^29;\r
- }\r
+ // For each view add it to its view hashtable\r
+ public void put(Tuples o) {\r
+ // Tune the Tuple for each view and add it to its designated hashtable\r
+ for(int i = 0; i < views.length; i++) {\r
+ int tupleKey = generateTupleKey(o, views[i]);\r
+ Hashtable tuplesTable = (Hashtable) viewTable.get(views[i]);\r
+ if(tuplesTable.containsKey(tupleKey)) {\r
+ Set tupleSet = (Set) tuplesTable.get(tupleKey);\r
+ tupleSet.add(o);\r
+ } else {\r
+ Set tupleSet = new HashSet();\r
+ tupleSet.add(o);\r
+ tuplesTable.put(tupleKey, tupleSet);\r
+ }\r
+ }\r
+ }\r
\r
- private ArrayList<Integer> findIndices(int viewIndex){\r
- int mask = 1;\r
- ArrayList<Integer> indices = new ArrayList<Integer>();\r
- for(int i = 0; i < 31; i++){\r
- if((mask & viewIndex) != 0){\r
- indices.add(i);\r
- }\r
- mask = mask << 1;\r
- }\r
- return indices;\r
- }\r
+ public int generateTupleKey(Tuples o, int viewIndex) {\r
+ ArrayList<Integer> indices = findIndices(viewIndex);\r
+ ArrayList obj = new ArrayList();\r
+ for(int i = 0; i < indices.size(); i++) {\r
+ obj.add(o.get(indices.get(i)));\r
+ }\r
+ return obj.hashCode()^29;\r
+ }\r
\r
- public Tuples get(int bitmap, Tuple o){\r
- Tuples tuple = new Tuple(); //\r
- int tupleKey = generateTupleKey(o, bitmap);\r
- Hashtable tuplesTable = (Hashtable) viewTable.get(bitmap);\r
- if(tuplesTable.containsKey(tupleKey)){\r
- Set tupleSet = (Set) tuplesTable.get(tupleKey);\r
- tuple = convertToTuple(tupleSet);\r
- return tuple;\r
- }\r
- return null;\r
- }\r
+ private ArrayList<Integer> findIndices(int viewIndex) {\r
+ int mask = 1;\r
+ ArrayList<Integer> indices = new ArrayList<Integer>();\r
+ for(int i = 0; i < 31; i++) {\r
+ if((mask & viewIndex) != 0) {\r
+ indices.add(i);\r
+ }\r
+ mask = mask << 1;\r
+ }\r
+ return indices;\r
+ }\r
\r
- private Tuples convertToTuple(Set tupleSet){\r
- Object[] tuples = tupleSet.toArray();\r
- ArrayList o = new ArrayList();\r
- for(int i = 0; i < tuples.length; i++){\r
- o.add(tuples[i]);\r
- }\r
- Tuples tuple = new Tuple(o);\r
- return tuple;\r
- }\r
+ public Tuples get(int bitmap, Tuple o) {\r
+    Tuples tuple = new Tuple();\r
+ int tupleKey = generateTupleKey(o, bitmap);\r
+ Hashtable tuplesTable = (Hashtable) viewTable.get(bitmap);\r
+ if(tuplesTable.containsKey(tupleKey)) {\r
+ Set tupleSet = (Set) tuplesTable.get(tupleKey);\r
+ tuple = convertToTuple(tupleSet);\r
+ return tuple;\r
+ }\r
+ return null;\r
+ }\r
\r
- public void remove(Tuples o){\r
+ private Tuples convertToTuple(Set tupleSet) {\r
+ Object[] tuples = tupleSet.toArray();\r
+ ArrayList o = new ArrayList();\r
+ for(int i = 0; i < tuples.length; i++) {\r
+ o.add(tuples[i]);\r
+ }\r
+ Tuples tuple = new Tuple(o);\r
+ return tuple;\r
+ }\r
+\r
+ public void remove(Tuples o) {\r
// System.out.println("removed called"+viewTable.toString());\r
- for(int i = 0; i < views.length; i++){\r
- int tupleKey = generateTupleKey(o, views[i]);\r
- Hashtable tuplesTable = (Hashtable) viewTable.get(views[i]);\r
- if(tuplesTable.containsKey(tupleKey)){\r
- tuplesTable.remove(tupleKey);\r
- }else{\r
- System.out.println("Cannot find such key");\r
- }\r
- }\r
- }\r
+ for(int i = 0; i < views.length; i++) {\r
+ int tupleKey = generateTupleKey(o, views[i]);\r
+ Hashtable tuplesTable = (Hashtable) viewTable.get(views[i]);\r
+ if(tuplesTable.containsKey(tupleKey)) {\r
+ tuplesTable.remove(tupleKey);\r
+ } else {\r
+ System.out.println("Cannot find such key");\r
+ }\r
+ }\r
+ }\r
+\r
+ public OptimizedView getOptimizedView(int bitMapView) {\r
+ Hashtable tmp = (Hashtable) viewTable.get(bitMapView);\r
+ OptimizedView ov = new OptimizedView(bitMapView, tmp, this);\r
+ return ov;\r
+ }\r
+\r
+ /* Debug visualizations */\r
+ public void drawTierTwoTable() {\r
+ for(int i = 0; i < views.length; i++) {\r
+ Hashtable tmp = (Hashtable) viewTable.get(views[i]);\r
+ System.out.println("Hashtable "+i+":\t"+tmp.keySet().toString());\r
+ Object[] keySets = tmp.keySet().toArray();\r
+ for(int j = 0; j < keySets.length; j++) {\r
+ System.out.println(tmp.get(keySets[j]));\r
+ }\r
+ }\r
+ }\r
\r
- public OptimizedView getOptimizedView(int bitMapView){\r
- Hashtable tmp = (Hashtable) viewTable.get(bitMapView);\r
- OptimizedView ov = new OptimizedView(bitMapView, tmp, this);\r
- return ov;\r
- }\r
+ public int[] getViews() {\r
+ return views;\r
+ }\r
\r
- /* Debug visualizations */\r
- public void drawTierTwoTable(){\r
- for(int i = 0; i < views.length; i++){\r
- Hashtable tmp = (Hashtable) viewTable.get(views[i]);\r
- System.out.println("Hashtable "+i+":\t"+tmp.keySet().toString());\r
- Object[] keySets = tmp.keySet().toArray();\r
- for(int j = 0; j < keySets.length; j++){\r
- System.out.println(tmp.get(keySets[j]));\r
- }\r
- }\r
- }\r
- \r
- public int[] getViews(){\r
- return views;\r
- }\r
- \r
- public Hashtable getTable(){\r
- return viewTable;\r
- }\r
+ public Hashtable getTable() {\r
+ return viewTable;\r
+ }\r
}\r
\r
public class OptimizedView extends MultiHash\r
{\r
- private int bitMapView;\r
- private Hashtable table;\r
- private MultiHash parent;\r
+ private int bitMapView;\r
+ private Hashtable table;\r
+ private MultiHash parent;\r
\r
- public OptimizedView(int bitMapView, Hashtable table, MultiHash parent) {\r
- this.bitMapView = bitMapView;\r
- this.table = table;\r
- this.parent = parent;\r
- }\r
+ public OptimizedView(int bitMapView, Hashtable table, MultiHash parent) {\r
+ this.bitMapView = bitMapView;\r
+ this.table = table;\r
+ this.parent = parent;\r
+ }\r
\r
- public void remove(Tuple o){\r
- parent.remove(o);\r
- }\r
- public Tuples get(Tuples o){\r
- Tuples tuple = new Tuple();\r
+ public void remove(Tuple o) {\r
+ parent.remove(o);\r
+ }\r
+ public Tuples get(Tuples o) {\r
+ Tuples tuple = new Tuple();\r
\r
- int tupleKey = generateTupleKey(o);\r
- if(table.containsKey(tupleKey)){\r
- Set tupleSet = (Set) table.get(tupleKey);\r
- tuple = convertToTuple(tupleSet);\r
- return tuple;\r
- }\r
- return null;\r
- }\r
+ int tupleKey = generateTupleKey(o);\r
+ if(table.containsKey(tupleKey)) {\r
+ Set tupleSet = (Set) table.get(tupleKey);\r
+ tuple = convertToTuple(tupleSet);\r
+ return tuple;\r
+ }\r
+ return null;\r
+ }\r
\r
- private Tuples convertToTuple(Set tupleSet){\r
- Object[] tuples = tupleSet.toArray();\r
- ArrayList o = new ArrayList();\r
- for(int i = 0; i < tuples.length; i++){\r
- o.add(tuples[i]);\r
- }\r
- Tuples tuple = new Tuple(o);\r
- return tuple;\r
- }\r
+ private Tuples convertToTuple(Set tupleSet) {\r
+ Object[] tuples = tupleSet.toArray();\r
+ ArrayList o = new ArrayList();\r
+ for(int i = 0; i < tuples.length; i++) {\r
+ o.add(tuples[i]);\r
+ }\r
+ Tuples tuple = new Tuple(o);\r
+ return tuple;\r
+ }\r
\r
- public int generateTupleKey(Tuples o){\r
- ArrayList<Integer> indices = findIndices(bitMapView);\r
- ArrayList obj = new ArrayList();\r
- for(int i = 0; i < indices.size(); i++){\r
- obj.add(o.get(indices.get(i)));\r
- }\r
- return obj.hashCode()^29;\r
- }\r
+ public int generateTupleKey(Tuples o) {\r
+ ArrayList<Integer> indices = findIndices(bitMapView);\r
+ ArrayList obj = new ArrayList();\r
+ for(int i = 0; i < indices.size(); i++) {\r
+ obj.add(o.get(indices.get(i)));\r
+ }\r
+ return obj.hashCode()^29;\r
+ }\r
\r
- private ArrayList<Integer> findIndices(int viewIndex){\r
- int mask = 1;\r
- ArrayList<Integer> indices = new ArrayList<Integer>();\r
- for(int i = 0; i < 31; i++){\r
- if((mask & viewIndex) != 0){\r
- indices.add(i);\r
- }\r
- mask = mask << 1;\r
- }\r
- return indices;\r
- }\r
+ private ArrayList<Integer> findIndices(int viewIndex) {\r
+ int mask = 1;\r
+ ArrayList<Integer> indices = new ArrayList<Integer>();\r
+ for(int i = 0; i < 31; i++) {\r
+ if((mask & viewIndex) != 0) {\r
+ indices.add(i);\r
+ }\r
+ mask = mask << 1;\r
+ }\r
+ return indices;\r
+ }\r
\r
- public String toString(){\r
- return table.toString();\r
- }\r
+ public String toString() {\r
+ return table.toString();\r
+ }\r
}\r
return false;
Pair t=(Pair)o;
return a.equals(t.a)&&(((b!=null)&&(t.b!=null)&&b.equals(t.b))||
- ((b==null)&&(t.b==null)));
+ ((b==null)&&(t.b==null)));
}
}
\ No newline at end of file
import java.util.ArrayList;\r
\r
-public class Tuple implements Tuples{\r
- ArrayList o;\r
-\r
- public Tuple(){\r
- o = new ArrayList();\r
- }\r
-\r
- public Tuple(ArrayList o){\r
- this.o = o;\r
- }\r
-\r
- public Object get(int i){\r
- return o.get(i);\r
- }\r
-\r
- public void remove(int i){\r
- o.remove(i);\r
- }\r
-\r
- public ArrayList getList(){\r
- return o;\r
- }\r
-\r
- public int size(){\r
- return o.size();\r
- }\r
-\r
- public int hashCode(){\r
- return o.hashCode();\r
- }\r
-\r
- public String toString(){\r
- String tmp="";\r
- for(int i = 0; i < o.size(); i++){\r
- tmp += o.get(i)+" ";\r
- }\r
- return tmp;\r
- }\r
+public class Tuple implements Tuples {\r
+ ArrayList o;\r
+\r
+ public Tuple() {\r
+ o = new ArrayList();\r
+ }\r
+\r
+ public Tuple(ArrayList o) {\r
+ this.o = o;\r
+ }\r
+\r
+ public Object get(int i) {\r
+ return o.get(i);\r
+ }\r
+\r
+ public void remove(int i) {\r
+ o.remove(i);\r
+ }\r
+\r
+ public ArrayList getList() {\r
+ return o;\r
+ }\r
+\r
+ public int size() {\r
+ return o.size();\r
+ }\r
+\r
+ public int hashCode() {\r
+ return o.hashCode();\r
+ }\r
+\r
+ public String toString() {\r
+ String tmp="";\r
+ for(int i = 0; i < o.size(); i++) {\r
+ tmp += o.get(i)+" ";\r
+ }\r
+ return tmp;\r
+ }\r
}\r
import java.util.ArrayList;\r
\r
public interface Tuples {\r
- int size();\r
- int hashCode();\r
- void remove(int i);\r
- ArrayList getList();\r
- Object get(int i);\r
- String toString();\r
+ int size();\r
+ int hashCode();\r
+ void remove(int i);\r
+ ArrayList getList();\r
+ Object get(int i);\r
+ String toString();\r
}
\ No newline at end of file
// Note: the reason it must be a HashSet instead of
// a Set is that we want to clone sets of table b, so
// only a is modified. Set does not provide clone().
- static public void mergeHashtablesWithHashSetValues( Hashtable a,
- Hashtable b ) {
+ static public void mergeHashtablesWithHashSetValues(Hashtable a,
+ Hashtable b) {
Iterator itr = b.entrySet().iterator();
while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- Object key = (Object) me.getKey();
- HashSet s1 = (HashSet) me.getValue();
- HashSet s2 = (HashSet) a.get( key );
+ Map.Entry me = (Map.Entry)itr.next();
+ Object key = (Object) me.getKey();
+ HashSet s1 = (HashSet) me.getValue();
+ HashSet s2 = (HashSet) a.get(key);
if( s2 == null ) {
- a.put( key, s1.clone() );
+ a.put(key, s1.clone() );
} else {
- s2.addAll( s1 );
+ s2.addAll(s1);
}
}
}
-
+
// This method makes hashtable a the intersection of
// itself and hashtable b, where the new key set is the
// intersection. The values are sets, so if a key is
// common its new value should be the intersection of
// the existing values in a and b. If a new value is
// the empty set, then also remove that key.
- static public void intersectHashtablesWithSetValues( Hashtable a,
- Hashtable b ) {
+ static public void intersectHashtablesWithSetValues(Hashtable a,
+ Hashtable b) {
Set keysToRemove = new HashSet();
Iterator mapItr = a.entrySet().iterator();
while( mapItr.hasNext() ) {
- Map.Entry me = (Map.Entry) mapItr.next();
- Object akey = (Object) me.getKey();
- Set avals = (Set) me.getValue();
- Set bvals = (Set) b.get( akey );
-
+ Map.Entry me = (Map.Entry)mapItr.next();
+ Object akey = (Object) me.getKey();
+ Set avals = (Set) me.getValue();
+ Set bvals = (Set) b.get(akey);
+
if( bvals == null ) {
- // if b doesn't have the key, mark it for
- // safe removal after we traverse the map
- keysToRemove.add( akey );
+ // if b doesn't have the key, mark it for
+ // safe removal after we traverse the map
+ keysToRemove.add(akey);
} else {
- // otherwise we have the key, but pare
- // down the value set, if needed, and if
- // nothing is left, remove the key, too
- avals.retainAll( bvals );
- if( avals.isEmpty() ) {
- keysToRemove.add( akey );
- }
+ // otherwise we have the key, but pare
+ // down the value set, if needed, and if
+ // nothing is left, remove the key, too
+ avals.retainAll(bvals);
+ if( avals.isEmpty() ) {
+ keysToRemove.add(akey);
+ }
}
}
Iterator keyItr = keysToRemove.iterator();
while( keyItr.hasNext() ) {
Object key = keyItr.next();
- a.remove( key );
- }
+ a.remove(key);
+ }
}
-
+
}
sp_catch_brace = Add
sp_brace_catch = Add
sp_try_brace = Add
-sp_after_sparen = Add
\ No newline at end of file
+sp_after_sparen = Add
+sp_cond_colon = Remove
+sp_cond_question = Remove