FlatNode fn=toanalyze.iterator().next();
toanalyze.remove(fn);
Hashtable<TempDescriptor, HashSet<FlatNode>> tmptofn=new Hashtable<TempDescriptor, HashSet<FlatNode>>();
-
+
+ //Don't process non-atomic nodes
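+ //(they still get an empty map entry and their successors are enqueued, so the
+ //traversal continues past them)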
+ if (locality.getAtomic(lb).get(fn).intValue()==0) {
+ if (!map.containsKey(fn)) {
+ map.put(fn, new Hashtable<TempDescriptor, HashSet<FlatNode>>());
+ //enqueue next nodes
+ for(int i=0;i<fn.numNext();i++)
+ toanalyze.add(fn.getNext(i));
+ }
+ continue;
+ }
+
//Do merge on incoming edges
for(int i=0;i<fn.numPrev();i++) {
FlatNode fnprev=fn.getPrev(i);
for(int i=0;i<writeset.length;i++) {
TempDescriptor tmp=writeset[i];
HashSet<FlatNode> set=new HashSet<FlatNode>();
- set.add(fn);
tmptofn.put(tmp,set);
+ set.add(fn);
}
if (fn.numNext()>1) {
//We have a conditional branch...need to handle this carefully
if (!set0.equals(set1)||set0.size()>1) {
//This branch is important--need to remember how it goes
livenodes.add(fn);
+ } else {
+ System.out.println("Removing branch:"+fn);
+ System.out.println("set0="+set0+" set1="+set1);
}
}
}
switch(fn.kind()) {
//We might need to translate arguments to pointer comparison
-
+
case FKind.FlatOpNode: {
FlatOpNode fon=(FlatOpNode)fn;
if (fon.getOp().getOp()==Operation.EQUAL||
break;
}
+ case FKind.FlatGlobalConvNode: {
+ //need to translate these if the value we read from may be a
+ //shadow... check this by seeing if any of the values we
+ //may read are in the transread set or came from our caller
+ //or a method we called
+
+ FlatGlobalConvNode fgcn=(FlatGlobalConvNode)fn;
+ if (fgcn.getLocality()!=lb)
+ break;
+
+ Set<TempFlatPair> tfpset=tmap.get(fgcn.getSrc());
+ if (tfpset!=null) {
+ for(Iterator<TempFlatPair> tfpit=tfpset.iterator();tfpit.hasNext();) {
+ TempFlatPair tfp=tfpit.next();
+ if (tfset.contains(tfp)||outofscope(tfp)) {
+ srctrans.add(fgcn);
+ break;
+ }
+ }
+ }
+ break;
+ }
+
case FKind.FlatSetFieldNode: {
//need to translate these if the value we read from may be a
//shadow... check this by seeing if any of the values we
TempNodePair tnp2=new TempNodePair(tnp.getTemp());
tnp2.setNode(fn);
tempset.add(tnp2);
- nodetoconvs2.get(fn).add(tnp.getTemp()); //have to hide cached copies from gc
+ nodetoconvs2.get(fn).add(tnp.getTemp()); //have to hide cached copies from gc -- also record converted temps even if otherwise unused
} else
tempset.add(tnp);
} else
for(Iterator<TempDescriptor> tempit=tempset.iterator(); tempit.hasNext();) {
TempDescriptor tmpd=tempit.next();
FlatGlobalConvNode fgcn=new FlatGlobalConvNode(tmpd, lb, false, nodetoconvs.get(fn).contains(tmpd));
+ fgcn.setAtomicEnter(((FlatAtomicExitNode)fn).getAtomicEnter());
atomictab.put(fgcn, atomictab.get(fn));
temptab.put(fgcn, (Hashtable<TempDescriptor, Integer>)temptab.get(fn).clone());
Set<TempDescriptor> tempset=nodetotranstemps.get(fn);
for(Iterator<TempDescriptor> tempit=tempset.iterator(); tempit.hasNext();) {
FlatGlobalConvNode fgcn=new FlatGlobalConvNode(tempit.next(), lb, true);
+ fgcn.setAtomicEnter((FlatAtomicEnterNode)fn);
atomictab.put(fgcn, atomictab.get(fn));
temptab.put(fgcn, (Hashtable<TempDescriptor, Integer>)temptab.get(fn).clone());
fgcn.addNext(fn.getNext(0));
public class AtomicRecord {
String name;
Set<TempDescriptor> livein;
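+ //initialized as a copy of livein; code generation converts a FlatGlobalConvNode
+ //source back to an OID when that source appears in this set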
+ Set<TempDescriptor> reallivein;
Set<TempDescriptor> liveout;
Set<TempDescriptor> liveoutvirtualread;
ClassDescriptor cn=md!=null ? md.getClassDesc() : null;
ParamsObject objectparams=(ParamsObject)paramstable.get(lb!=null ? lb : md!=null ? md : task);
+ HashSet<AtomicRecord> arset=null;
if (state.DELAYCOMP&&!lb.isAtomic()&&lb.getHasAtomic()) {
//create map
//fix these so we get right strings for local variables
localsprefixaddr=localsprefix;
localsprefixderef=localsprefix+"->";
-
+ arset=new HashSet<AtomicRecord>();
+
//Generate commit methods here
for(Iterator<FlatNode> fnit=fm.getNodeSet().iterator();fnit.hasNext();) {
FlatNode fn=fnit.next();
String methodname=md.getSymbol()+(atomicmethodcount++);
AtomicRecord ar=new AtomicRecord();
ar.name=methodname;
+ arset.add(ar);
atomicmethodmap.put(faen, ar);
Set<TempDescriptor> liveout=delaycomp.liveout(lb, faen);
Set<TempDescriptor> liveoutvirtualread=delaycomp.liveoutvirtualread(lb, faen);
ar.livein=liveinto;
+ ar.reallivein=new HashSet<TempDescriptor>(liveinto);
ar.liveout=liveout;
ar.liveoutvirtualread=liveoutvirtualread;
generateHeader(fm, lb, md!=null ? md : task,output);
TempObject objecttemp=(TempObject) tempstable.get(lb!=null ? lb : md!=null ? md : task);
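+ //one primitives struct per commit method, declared at method scope; it is
+ //filled in at the atomic enter and read back after transCommit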
+ if (state.DELAYCOMP&&!lb.isAtomic()&&lb.getHasAtomic()) {
+ for(Iterator<AtomicRecord> arit=arset.iterator();arit.hasNext();) {
+ AtomicRecord ar=arit.next();
+ output.println("struct atomicprimitives_"+ar.name+" primitives_"+ar.name+";");
+ }
+ }
+
if (GENERATEPRECISEGC) {
if (md!=null&&(state.DSM||state.SINGLETM))
output.print(" struct "+cn.getSafeSymbol()+lb.getSignature()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"_locals "+localsprefix+"={");
if (state.DELAYCOMP) {
if (firstpass) {
//need to record which way it should go
- output.print(" ");
+ output.print(" ");
if (storeset!=null&&storeset.contains(current_node)) {
//need to store which way branch goes
generateStoreFlatCondBranch(fm, lb, (FlatCondBranch)current_node, "L"+nodetolabel.get(current_node.getNext(1)), output);
}
} else {
/* Need to convert to OID */
- if (fgcn.doConvert()) {
- output.println(generateTemp(fm, fgcn.getSrc(),lb)+"=(void *)COMPOID("+generateTemp(fm, fgcn.getSrc(),lb)+");");
- } else {
- output.println(generateTemp(fm, fgcn.getSrc(),lb)+"=NULL;");
+ if ((dc==null)||dc.getNeedSrcTrans(lb,fgcn)) {
+ if (fgcn.doConvert()||(delaycomp!=null&&atomicmethodmap.get(fgcn.getAtomicEnter()).reallivein.contains(fgcn.getSrc()))) {
+ output.println(generateTemp(fm, fgcn.getSrc(),lb)+"=(void *)COMPOID("+generateTemp(fm, fgcn.getSrc(),lb)+");");
+ } else {
+ output.println(generateTemp(fm, fgcn.getSrc(),lb)+"=NULL;");
+ }
}
}
}
if (locality.getAtomic(lb).get(faen.getPrev(0)).intValue()>0)
return;
+
+
+ if (state.DELAYCOMP) {
+ AtomicRecord ar=atomicmethodmap.get(faen);
+ //copy in
+ for(Iterator<TempDescriptor> tmpit=ar.livein.iterator();tmpit.hasNext();) {
+ TempDescriptor tmp=tmpit.next();
+ output.println("primitives_"+ar.name+"."+tmp.getSafeSymbol()+"="+tmp.getSafeSymbol()+";");
+ }
+
+ //copy outs that depend on path
+ for(Iterator<TempDescriptor> tmpit=ar.liveoutvirtualread.iterator();tmpit.hasNext();) {
+ TempDescriptor tmp=tmpit.next();
+ if (!ar.livein.contains(tmp))
+ output.println("primitives_"+ar.name+"."+tmp.getSafeSymbol()+"="+tmp.getSafeSymbol()+";");
+ }
+ }
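+ //live-ins (and path-dependent virtual-read live-outs) are now captured here
+ //at the atomic enter instead of immediately before transCommit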
+
/* Backup the temps. */
for(Iterator<TempDescriptor> tmpit=locality.getTemps(lb).get(faen).iterator(); tmpit.hasNext();) {
TempDescriptor tmp=tmpit.next();
}
if (state.DELAYCOMP) {
AtomicRecord ar=atomicmethodmap.get(faen.getAtomicEnter());
- output.println("{");
- output.println("struct atomicprimitives_"+ar.name+" primitives;");
- //copy in
- for(Iterator<TempDescriptor> tmpit=ar.livein.iterator();tmpit.hasNext();) {
- TempDescriptor tmp=tmpit.next();
- output.println("primitives."+tmp.getSafeSymbol()+"="+tmp.getSafeSymbol()+";");
- }
-
- //copy outs that depend on path
- for(Iterator<TempDescriptor> tmpit=ar.liveoutvirtualread.iterator();tmpit.hasNext();) {
- TempDescriptor tmp=tmpit.next();
- if (!ar.livein.contains(tmp))
- output.println("primitives."+tmp.getSafeSymbol()+"="+tmp.getSafeSymbol()+";");
- }
//do call
- output.println("if (transCommit((void (*)(void *, void *, void *))&"+ar.name+", &primitives, &"+localsprefix+", "+paramsprefix+")) {");
+ output.println("if (transCommit((void (*)(void *, void *, void *))&"+ar.name+", &primitives_"+ar.name+", &"+localsprefix+", "+paramsprefix+")) {");
} else
output.println("if (transCommit()) {");
/* Transaction aborts if it returns true */
output.println("else {");
for(Iterator<TempDescriptor> tmpit=ar.liveout.iterator();tmpit.hasNext();) {
TempDescriptor tmp=tmpit.next();
- output.println(tmp.getSafeSymbol()+"=primitives."+tmp.getSafeSymbol()+";");
+ output.println(tmp.getSafeSymbol()+"=primitives_"+ar.name+"."+tmp.getSafeSymbol()+";");
}
output.println("}");
- output.println("}");
}
}
LocalityBinding lb;
boolean makePtr;
boolean convert=true;
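+ //enter node of the enclosing atomic block, so code generation can look up the
+ //matching AtomicRecord for this conversion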
+ FlatAtomicEnterNode faen;
public FlatGlobalConvNode(TempDescriptor src, LocalityBinding lb, boolean makePtr) {
this.src=src;
this.convert=doactualconvert;
}
+ public FlatAtomicEnterNode getAtomicEnter() {
+ return faen;
+ }
+
+ public void setAtomicEnter(FlatAtomicEnterNode faen) {
+ this.faen=faen;
+ }
+
boolean doConvert() {
return convert;
}
//them for now..probably a real implementation would page protect the
//page after...then default to something simpler
-#define MAXPOINTERS 1024*1024*16
-#define MAXVALUES 1024*1024*16
+#define MAXPOINTERS 1024*1024*4
+#define MAXVALUES 1024*1024*4
struct pointerlist {
int count;
//Pointers
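+//the RESTORE* macros now walk the stacks forward (count++) instead of popping:
+//both counts are reset to zero before commitmethod runs, so stored values are
+//replayed in the order they were stored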
-#define RESTOREPTR(x) x=ptrstack.array[--ptrstack.count]
+#define RESTOREPTR(x) x=ptrstack.array[ptrstack.count++]
-#define STOREPTR(x) ptrstack.array[ptrstack.count++]=x; dc_t_chashInsertOnce(x,x);
+#define STOREPTR(x) {void * y=COMPOID(x); ptrstack.array[ptrstack.count++]=y; dc_t_chashInsertOnce(y,y);}
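+//STOREPTR now converts the pointer to an OID (COMPOID) before recording it, so
+//shadow copies are never stored; the converted value is what gets hashed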
//Branches
-#define RESTOREANDBRANCH(loc) if (primstack.array[--primstack.count]) goto loc
+#define RESTOREANDBRANCH(loc) if (primstack.array[primstack.count++]) goto loc
#define STOREANDBRANCH(cond, loc) if (primstack.array[primstack.count++]=cond) goto loc
//Integers
-#define RESTOREI(x) x=primstack.array[--primstack.count]
+#define RESTOREI(x) x=primstack.array[primstack.count++]
#define STOREI(x) primstack.array[primstack.count++]=x
//Floats
-#define RESTOREF(x) x=*((float *)&primstack.array[--primstack.count])
+#define RESTOREF(x) x=*((float *)&primstack.array[primstack.count++])
#define STOREF(x) *((float *)&primstack.array[primstack.count++])=x
//Doubles
-#define RESTORED(x) x=*((double *)&primstack.array[primstack.count-=2])
+#define RESTORED(x) x=*((double *)&primstack.array[primstack.count]); primstack.count+=2
#define STORED(x) *((double *)&primstack.array[primstack.count])=x; primstack.count+=2
//Bytes
-#define RESTOREB(x) x=*((char *)&primstack.array[--primstack.count])
+#define RESTOREB(x) x=*((char *)&primstack.array[primstack.count++])
#define STOREB(x) *((char *)&primstack.array[primstack.count++])=x
//Characters
-#define RESTOREC(x) x=*((short *)&primstack.array[--primstack.count])
+#define RESTOREC(x) x=*((short *)&primstack.array[primstack.count++])
#define STOREC(x) *((short *)&primstack.array[primstack.count++])=x
-//Doubles
+//Longs
-#define RESTOREJ(x) x=*((long long *)&primstack.array[primstack.count-=2])
+#define RESTOREJ(x) x=*((long long *)&primstack.array[primstack.count]); primstack.count+=2
#define STOREJ(x) *((long long *)&primstack.array[primstack.count])=x; primstack.count+=2
//Booleans
-#define RESTOREZ(x) x=primstack.array[--primstack.count]
+#define RESTOREZ(x) x=primstack.array[primstack.count++]
#define STOREZ(x) primstack.array[primstack.count++]=x
t_chashreset();
#ifdef DELAYCOMP
dc_t_chashreset();
+ ptrstack.count=0;
+ primstack.count=0;
#endif
return TRANS_ABORT;
}
t_chashreset();
#ifdef DELAYCOMP
dc_t_chashreset();
+ ptrstack.count=0;
+ primstack.count=0;
#endif
return 0;
}
t_chashreset();
#ifdef DELAYCOMP
dc_t_chashreset();
+ ptrstack.count=0;
+ primstack.count=0;
#endif
return TRANS_ABORT;
}
#ifdef DELAYCOMP
// call commit method
+ ptrstack.count=0;
+ primstack.count=0;
commitmethod(params, locals, primitives);
#endif
threadexit();
}
+struct primitivelist *pl;
+
void initializethreads() {
struct sigaction sig;
threadcount=1;
t_chashCreate(CHASH_SIZE, CLOADFACTOR);
#ifdef DELAYCOMP
dc_t_chashCreate(CHASH_SIZE, CLOADFACTOR);
+ ptrstack.count=0;
+ primstack.count=0;
+ pl=&primstack;
#endif
#ifdef STMSTATS
trec=calloc(1, sizeof(threadrec_t));
t_chashCreate(CHASH_SIZE, CLOADFACTOR);
#ifdef DELAYCOMP
dc_t_chashCreate(CHASH_SIZE, CLOADFACTOR);
+ ptrstack.count=0;
+ primstack.count=0;
#endif
___Thread____NNR____staticStart____L___Thread___((struct ___Thread____NNR____staticStart____L___Thread____params *)p);
objstrDelete(t_cache);