//See if flatnode is definitely no delay
if (fn.kind()==FKind.FlatCall) {
- isnodelay=true;
- //Have to deal with fields/arrays
FlatCall fcall=(FlatCall)fn;
MethodDescriptor mdcall=fcall.getMethod();
- nodelayfieldwrset.addAll(gft.getFieldsAll(mdcall));
- nodelayarraywrset.addAll(typeanalysis.expandSet(gft.getArraysAll(mdcall)));
- //Have to deal with field/array reads
- nodelayfieldrdset.addAll(gft.getFieldsRdAll(mdcall));
- nodelayarrayrdset.addAll(typeanalysis.expandSet(gft.getArraysRdAll(mdcall)));
+ if (!mdcall.getClassDesc().getSymbol().equals("System")||
+ (!mdcall.getSymbol().equals("println")&&!mdcall.getSymbol().equals("printString")))
+ isnodelay=true;
}
//Delay branches if possible
//have to do expansion
nodelayarrayrdset.addAll(typeanalysis.expand(((FlatElementNode)fn).getSrc().getType()));
}
+
+ //See if flatnode is definitely no delay
+ if (fn.kind()==FKind.FlatCall) {
+ //Have to deal with fields/arrays
+ FlatCall fcall=(FlatCall)fn;
+ MethodDescriptor mdcall=fcall.getMethod();
+ nodelayfieldwrset.addAll(gft.getFieldsAll(mdcall));
+ nodelayarraywrset.addAll(typeanalysis.expandSet(gft.getArraysAll(mdcall)));
+ //Have to deal with field/array reads
+ nodelayfieldrdset.addAll(gft.getFieldsRdAll(mdcall));
+ nodelayarrayrdset.addAll(typeanalysis.expandSet(gft.getArraysRdAll(mdcall)));
+ }
} else {
//Need to know which objects to lock on
switch(fn.kind()) {
}
} else {
Integer thistype=currtable.get(fc.getThis());
- if (!thistype.equals(NORMAL)) {
- throw new Error("Called start on possible scratch object");
+ if (!thistype.equals(NORMAL)&&!thistype.equals(STMEITHER)) {
+ throw new Error("Called start on possible scratch object"+thistype);
}
lb.setGlobal(0,currtable.get(fc.getThis()));
}
private void generateFlatCall(FlatMethod fm, LocalityBinding lb, FlatCall fc, PrintWriter output) {
MethodDescriptor md=fc.getMethod();
- ParamsObject objectparams=(ParamsObject)paramstable.get(state.DSM||state.SINGLETM ? locality.getBinding(lb, fc) : md);
+ ParamsObject objectparams=(ParamsObject)paramstable.get(lb!=null ? locality.getBinding(lb, fc) : md);
ClassDescriptor cn=md.getClassDesc();
output.println("{");
if (GENERATEPRECISEGC) {
- if (state.DSM||state.SINGLETM) {
+ if (lb!=null) {
LocalityBinding fclb=locality.getBinding(lb, fc);
output.print(" struct "+cn.getSafeSymbol()+fclb.getSignature()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"_params __parameterlist__={");
} else
/* Do we need to do virtual dispatch? */
if (md.isStatic()||md.getReturnType()==null||singleCall(fc.getThis().getType().getClassDesc(),md)) {
//no
- if (state.DSM||state.SINGLETM) {
+ if (lb!=null) {
LocalityBinding fclb=locality.getBinding(lb, fc);
output.print(cn.getSafeSymbol()+fclb.getSignature()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor());
} else {
boolean printcomma=false;
if (GENERATEPRECISEGC) {
- if (state.DSM||state.SINGLETM) {
+ if (lb!=null) {
LocalityBinding fclb=locality.getBinding(lb, fc);
output.print("struct "+cn.getSafeSymbol()+fclb.getSignature()+md.getSafeSymbol()+"_"+md.getSafeMethodDescriptor()+"_params * ");
} else
}
- if (state.DSM||state.SINGLETM) {
+ if (lb!=null) {
LocalityBinding fclb=locality.getBinding(lb, fc);
output.print("))virtualtable["+generateTemp(fm,fc.getThis(),lb)+"->type*"+maxcount+"+"+virtualcalls.getLocalityNumber(fclb)+"])");
} else
boolean srcptr=fsfn.getSrc().getType().isPtr();
String src=generateTemp(fm,fsfn.getSrc(),lb);
String dst=generateTemp(fm,fsfn.getDst(),lb);
+ output.println("//"+srcptr+" "+fsfn.getSrc().getType().isNull());
if (srcptr&&!fsfn.getSrc().getType().isNull()) {
output.println("{");
if ((dc==null)||dc.getNeedSrcTrans(lb, fsfn)&&
#define DEBUGSTM(x...)
#endif
-#ifdef FASTMEMCPY
-void * A_memcpy (void * dest, const void * src, size_t count);
-#else
-#define A_memcpy memcpy
-#endif
+//#ifdef FASTMEMCPY
+//void * A_memcpy (void * dest, const void * src, size_t count);
+//#else
+//#define A_memcpy memcpy
+//#endif
+
+/* Word-at-a-time copy with a byte tail; intended as a drop-in
+   replacement for memcpy (see the commented-out FASTMEMCPY block
+   above).  NOTE(review): no alignment check on dest/src — assumes
+   INTPTR-aligned pointers are safe on the target; confirm for
+   platforms that fault on unaligned word access. */
+void * A_memcpy (void * dest, const void * src, size_t count) {
+  int off=0;
+  INTPTR *desti=(INTPTR *)dest;
+  INTPTR *srci=(INTPTR *)src;
+
+  //word copy
+  while(count>=sizeof(INTPTR)) {
+    desti[off]=srci[off];
+    off+=1;
+    count-=sizeof(INTPTR);
+  }
+  off*=sizeof(INTPTR);
+  //byte copy
+  while(count>0) {
+    ((char *)dest)[off]=((char *)src)[off];
+    off++;
+    count--;
+  }
+  //bug fix: function is declared void* (memcpy contract) but fell off
+  //the end without returning — UB for any caller using the result
+  return dest;
+}
+
extern void * curr_heapbase;
extern void * curr_heapptr;
* -copies the object into the transaction cache
* =============================================================
*/
-__attribute__ ((pure)) void *transRead(void * oid, void *gl) {
+//__attribute__ ((pure))
+void *transRead(void * oid, void *gl) {
objheader_t *tmp, *objheader;
objheader_t *objcopy;
int size;
}
#endif
A_memcpy(objcopy, header, size);
+
/* Insert into cache's lookup table */
STATUS(objcopy)=0;
if (((unsigned INTPTR)oid)<((unsigned INTPTR ) curr_heapbase)|| ((unsigned INTPTR)oid) >((unsigned INTPTR) curr_heapptr))
objheader_t *header=oidrdlocked[i];
unsigned int version=oidrdversion[i];
if(header->lock>0) { //not write locked
+ CFENCE;
if(version != header->version) { /* versions do not match */
#ifdef DELAYCOMP
transAbortProcess(oidwrlocked, numoidwrtotal);
for(i=0; i<numoidrdlocked; i++) {
objheader_t * header=oidrdlocked[i];
unsigned int version=oidrdversion[i];
- if(header->lock>=0) {
+ if(header->lock>0) {
+ CFENCE;
if(version != header->version) {
#ifdef DELAYCOMP
transAbortProcess(oidwrlocked, numoidwrtotal);
dst->___cachedCode___=src->___cachedCode___;
dst->___cachedHash___=src->___cachedHash___;
A_memcpy(&dst[1], &src[1], tmpsize-sizeof(struct ___Object___));
- __asm__ __volatile__("": : :"memory");
-#ifndef DELAYCOMP
- header->version++;
-#endif
}
- __asm__ __volatile__("": : :"memory");
+ CFENCE;
#ifdef DELAYCOMP
// call commit method
for(i=numoidwrlocked-1; i>=0; i--) {
#endif
header = (objheader_t *)oidwrlocked[i];
-#ifdef DELAYCOMP
header->version++;
-#endif
write_unlock(&header->lock);
}
} else { //failed to get lock
trec->blocked=1;
//memory barrier
- __asm__ __volatile__("":::"memory");
+ CFENCE;
//see if other thread is blocked
if(ptr->blocked == 1) {
      //it might be blocked, so ignore the lock and clear our blocked flag
*/
void write_unlock(volatile unsigned int *lock) {
+  /* Release the write lock by storing RW_LOCK_BIAS (1) with a compiler
+     memory barrier; the lock pointer is already volatile unsigned int*,
+     so the __xg() cast wrapper adds nothing for a plain 32-bit store. */
-  __asm __volatile__("movl $1, %0" : "+m" (*__xg(lock))::"memory");
+  __asm __volatile__("movl $1, %0" : "+m" (*lock)::"memory");
}
#define RW_LOCK_BIAS 1
#define LOCK_UNLOCKED { LOCK_BIAS }
+#define CFENCE asm volatile("":::"memory");
struct __xchg_dummy {
unsigned long a[100];
int retval=0;
__asm__ __volatile__("xchgl %0,%1"
: "=r"(retval)
- : "m"(*__xg(lock)), "0"(retval)
+ : "m"(*lock), "0"(retval)
: "memory");
return retval;
}