}
if (state.THREAD) {
outclassdefs.println(" pthread_t tid;");
- outclassdefs.println(" void * lockentry;");
- outclassdefs.println(" int lockcount;");
+ outclassdefs.println(" volatile int lockcount;");
+ outclassdefs.println(" volatile int notifycount;");
}
if(state.MGC) {
outclassdefs.println(" int mutex;");
outclassdefs.println(" int version;");
outclassdefs.println(" int * lock;"); // lock entry for this obj
outclassdefs.println(" int mutex;");
- outclassdefs.println(" int lockcount;");
+ outclassdefs.println(" volatile int lockcount;");
+ outclassdefs.println(" volatile int notifycount;");
if(state.MULTICOREGC) {
outclassdefs.println(" int marked;");
}
}
if (state.THREAD) {
classdefout.println(" pthread_t tid;");
- classdefout.println(" void * lockentry;");
- classdefout.println(" int lockcount;");
+ classdefout.println(" volatile int lockcount;");
+ classdefout.println(" volatile int notifycount;");
}
if (state.MGC) {
classdefout.println(" int mutex;");
// update forward list & memory queue for the current SESE
updateForwardList(&((SESEcommon*)listptr->seseCommon)->forwardList,FALSE);
updateMemoryQueue((SESEcommon*)(listptr->seseCommon));
+#endif
#ifdef THREADS
  {
    struct lockvector * lvector=listptr->lvector;
    int i;
    for(i=0;i<lvector->index;i++) {
      struct ___Object___ *orig=lvector->locks[i].object;
      ENQUEUE(orig, lvector->locks[i].object);
    }
  }
}
-#endif
#endif
listptr=listptr->next;
}
{
struct listitem *litem=pthread_getspecific(litemkey);
if (listptr==litem) {
+#ifdef THREADS
+ {
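+      // every object recorded in this thread's lockvector is still reachable: enqueue it as a GC root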
+ struct lockvector * lvector=listptr->lvector;
+ int i;
+ for(i=0;i<lvector->index;i++) {
+ struct ___Object___ *orig=lvector->locks[i].object;
+ ENQUEUE(orig, lvector->locks[i].object);
+ }
+ }
+#endif
listptr=listptr->next;
}
}
#else
struct lockvector *lptr=&lvector;
#endif
- struct lockpair *lpair=&lptr->locks[lptr->index];
+ struct lockpair *lpair=&lptr->locks[lptr->index++];
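+  // lptr->locks acts as this thread's stack of held monitors; the post-increment reserves the slot for this entry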
pthread_t self=pthread_self();
lpair->object=VAR(___this___);
- lptr->index++;
-
if (self==VAR(___this___)->tid) {
lpair->islastlock=0;
  } else {
    lpair->islastlock=1;
  }
{
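+    // rather than unconditionally stopping for GC and restarting, only call into the collector when a collection is pending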
#ifdef PRECISE_GC
- stopforgc((struct garbagelist *)___params___);
-#endif
-#ifdef PRECISE_GC
- restartaftergc();
+ if (unlikely(needtocollect))
+ checkcollect((struct garbagelist *)___params___);
#endif
}
}
#ifdef D___Object______notify____
void CALL01(___Object______notify____, struct ___Object___ * ___this___) {
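+  // record the notification by bumping the per-object counter; threads spinning in wait() will observe the change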
+ VAR(___this___)->notifycount++;
}
#endif
#ifdef D___Object______notifyAll____
void CALL01(___Object______notifyAll____, struct ___Object___ * ___this___) {
+ VAR(___this___)->notifycount++;
}
#endif
#ifdef D___Object______wait____
void CALL01(___Object______wait____, struct ___Object___ * ___this___) {
pthread_t self=pthread_self();
-
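+  // snapshot the notification counter before releasing the monitor; wait() proceeds once notify()/notifyAll() has changed it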
+ int notifycount=VAR(___this___)->notifycount;
+ BARRIER();
VAR(___this___)->tid=0;
BARRIER();
VAR(___this___)->lockcount=0;
- //allow gc
-#ifdef PRECISE_GC
- stopforgc((struct garbagelist *)___params___);
-#endif
- sched_yield();
+ while(notifycount==VAR(___this___)->notifycount) {
#ifdef PRECISE_GC
- restartaftergc();
+ if (unlikely(needtocollect))
+ checkcollect((struct garbagelist *)___params___);
#endif
+ }
  while(1) {
    if (VAR(___this___)->lockcount==0) {
      return;
    }
-    {
-#ifdef PRECISE_GC
-      stopforgc((struct garbagelist *)___params___);
-#endif
#ifdef PRECISE_GC
-      restartaftergc();
+      if (unlikely(needtocollect))
+        checkcollect((struct garbagelist *)___params___);
#endif
-    }
  }
}
#endif
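Taken together, notify/notifyAll and wait above form a counter-based handshake: wait snapshots notifycount, gives up the monitor fields, and spins until some notifier bumps the counter, then spins again until the monitor is observed free. A minimal standalone sketch of that idea, using plain C with GCC builtins rather than the runtime's BARRIER/checkcollect machinery (all names here are hypothetical):

#include <sched.h>

/* Hypothetical stand-in for the generated object header. */
struct obj {
  unsigned long tid;          /* owner id; the real header uses pthread_t */
  volatile int lockcount;     /* 0 = unlocked */
  volatile int notifycount;   /* incremented by notify()/notifyAll() */
};

/* notify()/notifyAll(): record the notification by bumping the counter. */
static void obj_notify(struct obj *o) {
  o->notifycount++;
}

/* wait(): release the monitor fields, spin until a notify arrives,
 * then spin until the notifier has released the monitor. */
static void obj_wait(struct obj *o) {
  int seen = o->notifycount;  /* snapshot before releasing */
  __sync_synchronize();       /* ordering fence, standing in for BARRIER() */
  o->tid = 0;
  __sync_synchronize();
  o->lockcount = 0;

  while (seen == o->notifycount)  /* wait for notify()/notifyAll() */
    sched_yield();

  while (o->lockcount != 0)       /* wait for the notifier to unlock */
    sched_yield();
}

In the runtime version above, the bodies of those spin loops call checkcollect under PRECISE_GC, so a busy-waiting thread never blocks a pending collection.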
struct lockvector *lptr=&lvector;
#endif
struct lockpair *lpair=&lptr->locks[--lptr->index];
- pthread_t self=pthread_self();
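+  // only the entry that actually took the monitor (islastlock) clears the owner and drops lockcount; nested entries are simply popped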
if (lpair->islastlock) {
lpair->object->tid=0;
- BARRIER();
+ MBARRIER();
lpair->object->lockcount=0;
}
}