From 4d947d6f3ca240a96a335c8f8c7ac0787b029af7 Mon Sep 17 00:00:00 2001 From: jzhou Date: Thu, 25 Jun 2009 23:12:09 +0000 Subject: [PATCH] bug fix for multicore tilera version --- .../Analysis/Scheduling/MCImplSynthesis.java | 64 +++++++++++++------ Robust/src/Analysis/Scheduling/Schedule.java | 7 +- Robust/src/IR/Flat/BuildCode.java | 2 - Robust/src/IR/Flat/BuildCodeMultiCore.java | 37 ++++------- Robust/src/Runtime/ThreadSim/task_arch.c | 9 ++- Robust/src/Runtime/mem.c | 9 +++ Robust/src/Runtime/multicoreruntime.c | 4 -- Robust/src/Runtime/multicoreruntime.h | 9 ++- Robust/src/Runtime/multicoretask.c | 54 +++++++++------- 9 files changed, 116 insertions(+), 79 deletions(-) diff --git a/Robust/src/Analysis/Scheduling/MCImplSynthesis.java b/Robust/src/Analysis/Scheduling/MCImplSynthesis.java index 19f1900a..13488bf7 100644 --- a/Robust/src/Analysis/Scheduling/MCImplSynthesis.java +++ b/Robust/src/Analysis/Scheduling/MCImplSynthesis.java @@ -27,11 +27,11 @@ public class MCImplSynthesis { ScheduleSimulator scheduleSimulator; int coreNum; - int scheduleThreshold; // how many starting points generated by schedule analysis - int probThreshold; // the probability to stop when no accelaration achieved in the - // directed simulated annealing - int generateThreshold; // how many optimized implementation generated in each iteration - // of the directed simulated annealing + int scheduleThreshold; // # of starting points generated by schedule analysis + int probThreshold; // the probability to stop when no accelaration achieved + // in the directed simulated annealing + int generateThreshold; // how many optimized implementation generated in + // each iteration of the directed simulated annealing public MCImplSynthesis(State state, TaskAnalysis ta, @@ -136,7 +136,8 @@ public class MCImplSynthesis { // check all multi-parameter tasks Vector multiparamtds = new Vector(); - Iterator it_tasks = this.state.getTaskSymbolTable().getDescriptorsIterator(); + Iterator it_tasks = + this.state.getTaskSymbolTable().getDescriptorsIterator(); while(it_tasks.hasNext()) { TaskDescriptor td = (TaskDescriptor)it_tasks.next(); if(td.numParameters() > 1) { @@ -203,12 +204,15 @@ public class MCImplSynthesis { schedulinggraph.clear(); } scheduling = schedulings.elementAt(selectedSchedulings.elementAt(0)); - schedulinggraph = scheduleGraphs.elementAt(selectedSchedulings.elementAt(0)); - System.out.print("end of: #" + tryindex + " (bestexetime: " + bestexetime + ")\n"); + schedulinggraph = scheduleGraphs.elementAt( + selectedSchedulings.elementAt(0)); + System.out.print("end of: #" + tryindex + " (bestexetime: " + + bestexetime + ")\n"); System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n"); tryindex++; } else if(tmpexetime == bestexetime) { - System.out.print("end of: #" + tryindex + " (bestexetime: " + bestexetime + ")\n"); + System.out.print("end of: #" + tryindex + " (bestexetime: " + + bestexetime + ")\n"); System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n"); tryindex++; if((Math.abs(rand.nextInt()) % 100) < this.probThreshold) { @@ -243,6 +247,7 @@ public class MCImplSynthesis { } selectedSimExeGraphs.clear(); selectedSimExeGraphs = null; + multiparamtds.clear(); multiparamtds = null; @@ -1080,11 +1085,14 @@ public class MCImplSynthesis { Vector cNodes = sn.getClassNodes(); for(int k = 0; k < cNodes.size(); k++) { Iterator it_flags = cNodes.elementAt(k).getFlags(); + Vector rootnodes = this.taskAnalysis.getRootNodes( + cNodes.elementAt(k).getClassDescriptor()); 
while(it_flags.hasNext()) { FlagState fs = (FlagState)it_flags.next(); Iterator it_edges = fs.edges(); while(it_edges.hasNext()) { - TaskDescriptor td = ((FEdge)it_edges.next()).getTask(); + FEdge tmpfe = (FEdge)it_edges.next(); + TaskDescriptor td = (tmpfe).getTask(); tmpSchedule.addTask(td); if(!td2cores.containsKey(td)) { td2cores.put(td, new Vector()); @@ -1094,8 +1102,8 @@ public class MCImplSynthesis { tmpcores.add(tmpSchedule); } tmpcores = null; - // if the FlagState can be fed to some multi-param tasks, - // need to record corresponding ally cores later + // If the FlagState can be fed to some multi-param tasks, + // need to record corresponding ally cores later. if(td.numParameters() > 1) { tmpSchedule.addFState4TD(td, fs); } @@ -1113,7 +1121,8 @@ public class MCImplSynthesis { } cNodes = null; - // For each of the ScheduleEdge out of this ScheduleNode, add the target ScheduleNode into the queue inside sn + // For each of the ScheduleEdge out of this ScheduleNode, add the + // target ScheduleNode into the queue inside sn Iterator it_edges = sn.edges(); while(it_edges.hasNext()) { ScheduleEdge se = (ScheduleEdge)it_edges.next(); @@ -1122,7 +1131,18 @@ public class MCImplSynthesis { switch(se.getType()) { case ScheduleEdge.NEWEDGE: { for(int k = 0; k < se.getNewRate(); k++) { - tmpSchedule.addTargetCore(se.getFstate(), targetcore); + FlagState fs = se.getFstate(); + tmpSchedule.addTargetCore(fs, targetcore); + // Check if the new obj could be fed to some + // multi-parameter task, if so, add for ally cores + // checking + Iterator it = fs.edges(); + while(it.hasNext()) { + TaskDescriptor td = ((FEdge)it.next()).getTask(); + if(td.numParameters() > 1) { + tmpSchedule.addFState4TD(td, fs); + } + } } break; } @@ -1132,17 +1152,16 @@ public class MCImplSynthesis { tmpSchedule.addTargetCore(se.getFstate(), targetcore, se.getTargetFState()); - // check if missed some FlagState associated with some multi-parameter - // task, which has been cloned when splitting a ClassNode + // check if missed some FlagState associated with some + // multi-parameter task, which has been cloned when + // splitting a ClassNode FlagState fs = se.getSourceFState(); FlagState tfs = se.getTargetFState(); Iterator it = tfs.edges(); while(it.hasNext()) { TaskDescriptor td = ((FEdge)it.next()).getTask(); if(td.numParameters() > 1) { - if(tmpSchedule.getTasks().contains(td)) { - tmpSchedule.addFState4TD(td, fs); - } + tmpSchedule.addFState4TD(td, fs); } } break; @@ -1188,10 +1207,13 @@ public class MCImplSynthesis { for(int k = 0; k < cores.size(); ++k) { Schedule tmpSchedule = cores.elementAt(k); + // Make sure all the parameter objs of a multi-parameter + // task would be send to right place for(int h = 0; h < fes.size(); ++h) { FEdge tmpfe = fes.elementAt(h); FlagState tmpfs = (FlagState)tmpfe.getTarget(); - Vector tmptds = new Vector(); + Vector tmptds = + new Vector(); if((tmpSchedule.getTargetCoreTable() == null) || (!tmpSchedule.getTargetCoreTable().containsKey(tmpfs))) { // add up all possible cores' info @@ -1222,6 +1244,8 @@ public class MCImplSynthesis { tmptds = null; } + // Make sure all objs which could be feed to a multi-parameter + // task would be send to all the possible task instances if(cores.size() > 1) { Vector tmpfss = tmpSchedule.getFStates4TD(td); for(int h = 0; h < tmpfss.size(); ++h) { diff --git a/Robust/src/Analysis/Scheduling/Schedule.java b/Robust/src/Analysis/Scheduling/Schedule.java index 8f8241b2..7adfc80e 100644 --- a/Robust/src/Analysis/Scheduling/Schedule.java +++ 
b/Robust/src/Analysis/Scheduling/Schedule.java @@ -119,8 +119,7 @@ public class Schedule { this.allyCores.put(fstate, new Vector()); } if((this.coreNum != targetCore.intValue()) && (!this.allyCores.get(fstate).contains(targetCore))) { - this.allyCores.get(fstate).add(targetCore); // there may have some duplicate items, - // which reflects probabilities. + this.allyCores.get(fstate).add(targetCore); } } @@ -133,7 +132,7 @@ public class Schedule { this.td2fs.put(td, new Vector()); } if(!this.td2fs.get(td).contains(fstate)) { - this.td2fs.get(td).add(fstate); + this.td2fs.get(td).addElement(fstate); } } @@ -149,4 +148,4 @@ public class Schedule { this.tasks.add(task); } } -} \ No newline at end of file +} diff --git a/Robust/src/IR/Flat/BuildCode.java b/Robust/src/IR/Flat/BuildCode.java index 2379220f..28a05fe2 100644 --- a/Robust/src/IR/Flat/BuildCode.java +++ b/Robust/src/IR/Flat/BuildCode.java @@ -571,7 +571,6 @@ public class BuildCode { if(!state.MULTICORE) { outclassdefs.println(" void * flagptr;"); } else { - outclassdefs.println(" int isolate;"); // indicate if this object is shared or not outclassdefs.println(" int version;"); outclassdefs.println(" struct ___Object___ * original;"); //outclassdefs.println(" int numlocks;"); // array for locks @@ -1281,7 +1280,6 @@ public class BuildCode { if((!state.MULTICORE) || (cn.getSymbol().equals("TagDescriptor"))) { classdefout.println(" void * flagptr;"); } else if (state.MULTICORE) { - classdefout.println(" int isolate;"); // indicate if this object is shared or not classdefout.println(" int version;"); classdefout.println(" struct ___Object___ * original;"); //classdefout.println(" int numlocks;"); // array for locks diff --git a/Robust/src/IR/Flat/BuildCodeMultiCore.java b/Robust/src/IR/Flat/BuildCodeMultiCore.java index 4d334de3..e6e9da63 100644 --- a/Robust/src/IR/Flat/BuildCodeMultiCore.java +++ b/Robust/src/IR/Flat/BuildCodeMultiCore.java @@ -1042,6 +1042,7 @@ public class BuildCodeMultiCore extends BuildCode { isolate = (this.currentSchedule.getAllyCoreTable().get(tmpFState) == null) || (this.currentSchedule.getAllyCoreTable().get(tmpFState).size() == 0); } + /* no longler use the isolate flag in object structure if(!isolate) { // indentify this object as a shared object // isolate flag is initially set as 1, once this flag is set as 0, it is never reset to 1, i.e. 
once an object @@ -1051,8 +1052,9 @@ public class BuildCodeMultiCore extends BuildCode { output.println(" " + super.generateTemp(fm, temp, lb) + "->original = (struct ___Object___ *)" + super.generateTemp(fm, temp, lb) + ";"); output.println("}"); } + */ - //Vector sendto = new Vector(); + Vector sendto = new Vector(); Queue queue = null; if(targetCoreTbl != null) { queue = targetCoreTbl.get(tmpFState); @@ -1110,9 +1112,6 @@ public class BuildCodeMultiCore extends BuildCode { } else { tmpinfo.fs = tmpFState; } - // fixed 05/12/09, it's very likely to repeatedly send an object to the same core - // as sheduled - //if(!contains(sendto, tmpinfo)) { qinfo = outputtransqueues(tmpinfo.fs, targetcore, output); output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));"); output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";"); @@ -1120,8 +1119,6 @@ public class BuildCodeMultiCore extends BuildCode { output.println("tmpObjInfo->queues = " + qinfo.qname + ";"); output.println("tmpObjInfo->length = " + qinfo.length + ";"); output.println("addNewItem(totransobjqueue, (void*)tmpObjInfo);"); - //sendto.add(tmpinfo); - //} output.println("}"); } output.println("break;"); @@ -1148,9 +1145,6 @@ public class BuildCodeMultiCore extends BuildCode { } else { tmpinfo.fs = tmpFState; } - // fixed 05/12/09, it's very likely to repeatedly send an object to the same core - // as sheduled - //if(!contains(sendto, tmpinfo)) { qinfo = outputtransqueues(tmpinfo.fs, targetcore, output); output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));"); output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";"); @@ -1158,8 +1152,6 @@ public class BuildCodeMultiCore extends BuildCode { output.println("tmpObjInfo->queues = " + qinfo.qname + ";"); output.println("tmpObjInfo->length = " + qinfo.length + ";"); output.println("addNewItem(totransobjqueue, (void*)tmpObjInfo);"); - //sendto.add(tmpinfo); - //} output.println("}"); } output.println("/* increase index*/"); @@ -1186,10 +1178,13 @@ public class BuildCodeMultiCore extends BuildCode { // need to be send to other cores Vector targetcores = this.currentSchedule.getAllyCores(tmpFState); output.println("/* send the shared object to possible queues on other cores*/"); - for(int k = 0; k < targetcores.size(); ++k) { + // TODO, temporary solution, send to mostly the first two + int upperbound = targetcores.size() > 2? 
2: targetcores.size(); + for(int k = 0; k < upperbound; ++k) { // TODO // add the information of exactly which queue - //if(!sendto.contains(targetcores.elementAt(i))) { + int targetcore = targetcores.elementAt(k).intValue(); + if(!sendto.contains(targetcore)) { // previously not sended to this target core // enqueue this object and its destinations for later process output.println("{"); @@ -1197,27 +1192,23 @@ public class BuildCodeMultiCore extends BuildCode { QueueInfo qinfo = null; TranObjInfo tmpinfo = new TranObjInfo(); tmpinfo.name = super.generateTemp(fm, temp, lb); - tmpinfo.targetcore = targetcores.elementAt(i); + tmpinfo.targetcore = targetcore; FlagState targetFS = this.currentSchedule.getTargetFState(tmpFState); if(targetFS != null) { tmpinfo.fs = targetFS; } else { tmpinfo.fs = tmpFState; } - // fixed 05/12/09, it's very likely to repeatedly send an object to the same core - // as sheduled - //if(!contains(sendto, tmpinfo)) { - qinfo = outputtransqueues(tmpinfo.fs, targetcores.elementAt(i), output); + qinfo = outputtransqueues(tmpinfo.fs, targetcore, output); output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));"); output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";"); - output.println("tmpObjInfo->targetcore = "+targetcores.elementAt(i).toString()+";"); + output.println("tmpObjInfo->targetcore = "+targetcore+";"); output.println("tmpObjInfo->queues = " + qinfo.qname + ";"); output.println("tmpObjInfo->length = " + qinfo.length + ";"); output.println("addNewItem(totransobjqueue, (void*)tmpObjInfo);"); - //sendto.add(tmpinfo); - //} - output.println("}"); - //} + output.println("}"); + sendto.addElement(targetcore); + } } } } diff --git a/Robust/src/Runtime/ThreadSim/task_arch.c b/Robust/src/Runtime/ThreadSim/task_arch.c index 8ff4aae7..8feaaab5 100644 --- a/Robust/src/Runtime/ThreadSim/task_arch.c +++ b/Robust/src/Runtime/ThreadSim/task_arch.c @@ -424,9 +424,10 @@ int receiveObject() { int size=classsize[type]; struct ___Object___ * newobj=RUNMALLOC(size); memcpy(newobj, tmpptr, size); - if(0 == newobj->isolate) { + // TODO no longer use isolate flag + /*if(0 == newobj->isolate) { newobj->original=tmpptr; - } + }*/ RUNFREE(msgptr); tmpptr = NULL; int k = 0; @@ -712,7 +713,8 @@ newtask: numparams=currtpd->task->numParameters; numtotal=currtpd->task->numTotal; - int isolateflags[numparams]; + // TODO no longer use isolate flag + int isolateflags[numparams]; // clear the lockRedirectTbl (TODO, this table should be empty after all locks are released) // TODO: need modification according to added alias locks @@ -724,6 +726,7 @@ newtask: for(i=0; iparameterArray[i]; tmpparam = (struct ___Object___ *)parameter; + // TODO no longer use isolate flag if(0 == tmpparam->isolate) { isolateflags[i] = 0; // shared object, need to flush with current value diff --git a/Robust/src/Runtime/mem.c b/Robust/src/Runtime/mem.c index 684100bc..be74ff42 100644 --- a/Robust/src/Runtime/mem.c +++ b/Robust/src/Runtime/mem.c @@ -18,6 +18,9 @@ void * mycalloc(int m, int size) { int isize = 2*BAMBOO_CACHE_LINE_SIZE-4+(size-1)&(~BAMBOO_CACHE_LINE_MASK); BAMBOO_START_CRITICAL_SECTION_MEM(); p = BAMBOO_LOCAL_MEM_CALLOC(m, isize); // calloc(m, isize); + if(p == NULL) { + exit(0xa024); + } BAMBOO_CLOSE_CRITICAL_SECTION_MEM(); return (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK)); } @@ -27,6 +30,9 @@ void * mycalloc_share(int m, int size) { int isize = 2*BAMBOO_CACHE_LINE_SIZE-4+(size-1)&(~BAMBOO_CACHE_LINE_MASK); BAMBOO_START_CRITICAL_SECTION_MEM(); p = 
BAMBOO_SHARE_MEM_CALLOC_I(m, isize); // calloc(m, isize); + if(p == NULL) { + exit(0xa025); + } BAMBOO_CLOSE_CRITICAL_SECTION_MEM(); return (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK)); } @@ -35,6 +41,9 @@ void * mycalloc_i(int m, int size) { void * p = NULL; int isize = 2*BAMBOO_CACHE_LINE_SIZE-4+(size-1)&(~BAMBOO_CACHE_LINE_MASK); p = BAMBOO_LOCAL_MEM_CALLOC(m, isize); // calloc(m, isize); + if(p == NULL) { + exit(0xa026); + } return (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK)); } diff --git a/Robust/src/Runtime/multicoreruntime.c b/Robust/src/Runtime/multicoreruntime.c index 69dfd479..414e5f7e 100644 --- a/Robust/src/Runtime/multicoreruntime.c +++ b/Robust/src/Runtime/multicoreruntime.c @@ -192,7 +192,6 @@ void CALL01(___System______printString____L___String___,struct ___String___ * __ void * allocate_new(void * ptr, int type) { struct ___Object___ * v=(struct ___Object___ *) mygcmalloc((struct garbagelist *) ptr, classsize[type]); v->type=type; - v->isolate = 1; v->version = 0; //v->numlocks = 0; v->lock = NULL; @@ -209,7 +208,6 @@ void * allocate_new(void * ptr, int type) { struct ArrayObject * allocate_newarray(void * ptr, int type, int length) { struct ArrayObject * v=mygcmalloc((struct garbagelist *) ptr, sizeof(struct ArrayObject)+length*classsize[type]); v->type=type; - v->isolate = 1; v->version = 0; //v->numlocks = 0; v->lock = NULL; @@ -232,7 +230,6 @@ struct ArrayObject * allocate_newarray(void * ptr, int type, int length) { void * allocate_new(int type) { struct ___Object___ * v=FREEMALLOC(classsize[type]); v->type=type; - v->isolate = 1; v->version = 0; //v->numlocks = 0; v->lock = NULL; @@ -244,7 +241,6 @@ void * allocate_new(int type) { struct ArrayObject * allocate_newarray(int type, int length) { struct ArrayObject * v=FREEMALLOC(sizeof(struct ArrayObject)+length*classsize[type]); v->type=type; - v->isolate = 1; v->version = 0; //v->numlocks = 0; v->lock = NULL; diff --git a/Robust/src/Runtime/multicoreruntime.h b/Robust/src/Runtime/multicoreruntime.h index 0c5857b3..1601f536 100644 --- a/Robust/src/Runtime/multicoreruntime.h +++ b/Robust/src/Runtime/multicoreruntime.h @@ -10,7 +10,8 @@ int msgdata[30]; int msgtype; int msgdataindex; int msglength; -int outmsgdata[30]; +#define BAMBOO_OUT_BUF_LENGTH 300 +int outmsgdata[BAMBOO_OUT_BUF_LENGTH]; int outmsgindex; int outmsglast; int outmsgleft; @@ -46,6 +47,12 @@ bool lockflag; struct Queue objqueue; // data structures for shared memory allocation +#define BAMBOO_NUM_PAGES 1024 * 512 +#define BAMBOO_PAGE_SIZE 4096 +#define BAMBOO_SHARED_MEM_SIZE BAMBOO_PAGE_SIZE * BAMBOO_PAGE_SIZE +#define BAMBOO_BASE_VA 0xd000000 +#define BAMBOO_SMEM_SIZE 16 * BAMBOO_PAGE_SIZE + bool smemflag; struct bamboo_shared_mem { mspace msp; diff --git a/Robust/src/Runtime/multicoretask.c b/Robust/src/Runtime/multicoretask.c index 6b7a2e80..76e945b7 100644 --- a/Robust/src/Runtime/multicoretask.c +++ b/Robust/src/Runtime/multicoretask.c @@ -447,7 +447,7 @@ objqueuebreak: } } #endif - terminate(); // All done. + terminate(); // All done. 
} // if-else of line 364: if(!waitconfirm) } else { // still some objects on the fly on the network @@ -546,7 +546,6 @@ void createstartupobject(int argc, char ** argv) { ((void **)(((char *)&stringarray->___length___)+sizeof(int)))[i-1]=newstring; } - startupobject->isolate = 1; startupobject->version = 0; startupobject->lock = NULL; @@ -1616,14 +1615,24 @@ msg: void * mem = mspace_calloc(cur_mem->msp, 1, msgdata[1]); struct bamboo_shared_mem * failmem = cur_mem; while(mem == NULL) { - // move current head to the tail - bamboo_free_msps->tail->next = cur_mem; - bamboo_free_msps->tail = cur_mem; - bamboo_free_msps->head = cur_mem->next; - cur_mem->next = NULL; - cur_mem = bamboo_free_msps->head; - if(cur_mem == failmem) { - BAMBOO_EXIT(0xa016); + if(msgdata[1] > BAMBOO_SMEM_SIZE) { + // move current head to the tail + bamboo_free_msps->tail->next = cur_mem; + bamboo_free_msps->tail = cur_mem; + bamboo_free_msps->head = cur_mem->next; + cur_mem->next = NULL; + cur_mem = bamboo_free_msps->head; + if(cur_mem == failmem) { + BAMBOO_EXIT(0xa016); + } + } else { + // remove the head + bamboo_free_msps->head = cur_mem->next; + RUNFREE(cur_mem); + cur_mem = bamboo_free_msps->head; + if(cur_mem == NULL) { + BAMBOO_EXIT(0xa017); + } } mem = mspace_calloc(cur_mem->msp, 1, msgdata[1]); } @@ -1708,7 +1717,7 @@ int processlockrequest(int locktype, int lock, int obj, int requestcore, int roo BAMBOO_DEBUGPRINT_REG(lock); BAMBOO_DEBUGPRINT_REG(corenum); #endif - BAMBOO_EXIT(0xa017); + BAMBOO_EXIT(0xa018); } /*if((corenum == STARTUPCORE) && waitconfirm) { waitconfirm = false; @@ -1831,7 +1840,7 @@ bool getreadlock(void * ptr) { #endif } else { // conflicts on lockresults - BAMBOO_EXIT(0xa018); + BAMBOO_EXIT(0xa019); } } return true; @@ -1861,7 +1870,7 @@ void releasereadlock(void * ptr) { // reside on this core if(!RuntimeHashcontainskey(locktbl, reallock)) { // no locks for this object, something is wrong - BAMBOO_EXIT(0xa019); + BAMBOO_EXIT(0xa01a); } else { int rwlock_obj = 0; struct LockValue * lockvalue = NULL; @@ -1917,7 +1926,7 @@ bool getreadlock_I_r(void * ptr, void * redirectlock, int core, bool cache) { #endif } else { // conflicts on lockresults - BAMBOO_EXIT(0xa01a); + BAMBOO_EXIT(0xa01b); } return true; } else { @@ -2001,7 +2010,7 @@ bool getwritelock(void * ptr) { #endif } else { // conflicts on lockresults - BAMBOO_EXIT(0xa01b); + BAMBOO_EXIT(0xa01c); } } return true; @@ -2038,7 +2047,7 @@ void releasewritelock(void * ptr) { // reside on this core if(!RuntimeHashcontainskey(locktbl, reallock)) { // no locks for this object, something is wrong - BAMBOO_EXIT(0xa01c); + BAMBOO_EXIT(0xa01d); } else { int rwlock_obj = 0; struct LockValue * lockvalue = NULL; @@ -2078,7 +2087,7 @@ void releasewritelock_r(void * lock, void * redirectlock) { // reside on this core if(!RuntimeHashcontainskey(locktbl, reallock)) { // no locks for this object, something is wrong - BAMBOO_EXIT(0xa01d); + BAMBOO_EXIT(0xa01e); } else { int rwlock_obj = 0; struct LockValue * lockvalue = NULL; @@ -2155,7 +2164,7 @@ bool getwritelock_I(void * ptr) { #endif } else { // conflicts on lockresults - BAMBOO_EXIT(0xa01e); + BAMBOO_EXIT(0xa01f); } return true; } @@ -2213,7 +2222,7 @@ bool getwritelock_I_r(void * ptr, void * redirectlock, int core, bool cache) { #endif } else { // conflicts on lockresults - BAMBOO_EXIT(0xa01f); + BAMBOO_EXIT(0xa020); } return true; } else { @@ -2259,7 +2268,7 @@ void releasewritelock_I(void * ptr) { // reside on this core if(!RuntimeHashcontainskey(locktbl, reallock)) { // no locks for this object, 
something is wrong - BAMBOO_EXIT(0xa020); + BAMBOO_EXIT(0xa021); } else { int rwlock_obj = 0; struct LockValue * lockvalue = NULL; @@ -2291,7 +2300,7 @@ void releasewritelock_I_r(void * lock, void * redirectlock) { // reside on this core if(!RuntimeHashcontainskey(locktbl, reallock)) { // no locks for this object, something is wrong - BAMBOO_EXIT(0xa021); + BAMBOO_EXIT(0xa022); } else { int rwlock_obj = 0; struct LockValue * lockvalue = NULL; @@ -2878,7 +2887,7 @@ parameterpresent: reverse=NULL; #endif // #if 0: for recovery BAMBOO_DEBUGPRINT_REG(x); - BAMBOO_EXIT(0xa022); + BAMBOO_EXIT(0xa023); } else { #endif // #ifndef MULTICORE #if 0 @@ -2969,6 +2978,7 @@ execute: #endif #ifdef DEBUG BAMBOO_DEBUGPRINT(0xe99a); + //BAMBOO_DEBUGPRINT_REG(hashsize(activetasks)); #endif #ifndef MULTICORE } // line 2946: if (x=setjmp(error_handler)) -- 2.34.1
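
The mem.c hunks above make each of the mycalloc wrappers abort with a distinct exit code (0xa024, 0xa025, 0xa026) when the underlying BAMBOO_*_CALLOC call returns NULL, instead of handing a null pointer back to the allocator's caller. The sketch below restates that pattern as a self-contained C translation unit; plain calloc()/exit() stand in for the BAMBOO_* macros, and aligned_calloc, CACHE_LINE_SIZE and the failcode parameter are illustrative names that do not appear in the patch.

/* Pad the request so the returned pointer can be realigned to a cache-line
 * boundary, and exit with a caller-supplied code instead of returning NULL. */
#include <stdlib.h>
#include <stdint.h>

#define CACHE_LINE_SIZE 64
#define CACHE_LINE_MASK (CACHE_LINE_SIZE - 1)

void * aligned_calloc(size_t nmemb, size_t size, int failcode) {
  /* Reserve an extra cache line of slack so the result can be realigned. */
  size_t isize = (2 * CACHE_LINE_SIZE - 4 + (size - 1)) & ~((size_t)CACHE_LINE_MASK);
  void * p = calloc(nmemb, isize);
  if (p == NULL) {
    exit(failcode);   /* 0xa024-0xa026 in the patch identify which allocator failed */
  }
  /* Round the raw pointer up to the next cache-line boundary. */
  return (void *)((CACHE_LINE_SIZE + ((uintptr_t)p - 1)) & ~((uintptr_t)CACHE_LINE_MASK));
}

int main(void) {
  char * buf = aligned_calloc(1, 100, 0xa024);
  buf[0] = 'x';       /* the aligned block is usable as ordinary memory */
  return 0;
}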
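The multicoretask.c hunk after the `msg:` label changes what happens when mspace_calloc fails on the head of the free shared-memory list: requests larger than BAMBOO_SMEM_SIZE rotate the head to the tail and retry on the next space (exiting with 0xa016 once every space has been tried), while smaller requests treat the head as exhausted, unlink and free it, and retry, exiting with 0xa017 if the list runs empty. A minimal sketch of that fallback loop, assuming a hypothetical mem_pool/pool_calloc pair in place of struct bamboo_shared_mem and dlmalloc's mspace_calloc:

/* Illustrative stand-ins: mem_pool plays the role of struct bamboo_shared_mem,
 * pool_calloc the role of mspace_calloc, SMALL_REQUEST_MAX the role of
 * BAMBOO_SMEM_SIZE. */
#include <stdlib.h>

#define SMALL_REQUEST_MAX (16 * 4096)

struct mem_pool {
  size_t remaining;               /* bytes this space can still hand out */
  struct mem_pool * next;
};

struct pool_list {
  struct mem_pool * head;
  struct mem_pool * tail;
};

void * pool_calloc(struct mem_pool * pool, size_t n, size_t size) {
  if (pool->remaining < n * size) {
    return NULL;                  /* this space cannot cover the request */
  }
  pool->remaining -= n * size;
  return calloc(n, size);
}

void * shared_alloc(struct pool_list * list, size_t size) {
  struct mem_pool * cur = list->head;
  struct mem_pool * failmem = cur;
  void * mem = pool_calloc(cur, 1, size);
  while (mem == NULL) {
    if (size > SMALL_REQUEST_MAX) {
      /* Large request: move the current head to the tail and retry;
       * give up once we come back to the space that failed first. */
      list->tail->next = cur;
      list->tail = cur;
      list->head = cur->next;
      cur->next = NULL;
      cur = list->head;
      if (cur == failmem) {
        exit(0xa016);
      }
    } else {
      /* Small request: the head is exhausted, drop it from the free list. */
      list->head = cur->next;
      free(cur);
      cur = list->head;
      if (cur == NULL) {
        exit(0xa017);
      }
    }
    mem = pool_calloc(cur, 1, size);
  }
  return mem;
}

int main(void) {
  struct mem_pool * fresh = malloc(sizeof *fresh);
  struct mem_pool * exhausted = malloc(sizeof *exhausted);
  fresh->remaining = 1 << 20;   fresh->next = NULL;
  exhausted->remaining = 256;   exhausted->next = fresh;  /* nearly empty space first */
  struct pool_list list = { exhausted, fresh };
  void * obj = shared_alloc(&list, 1024);   /* drops the head, succeeds on fresh */
  return obj != NULL ? 0 : 1;
}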
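In BuildCodeMultiCore.java, the ally-core broadcast of a shared object now records which cores have already been targeted in a `sendto` vector and, as the TODO notes, only considers the first two entries of the ally list (a core may appear more than once because repetition encodes scheduling probability). A rough C rendering of the behaviour of the generated code, with send_object() standing in for the transObjInfo/addNewItem sequence it actually emits:

#include <stdio.h>

void send_object(void * obj, int core) {
  /* Placeholder for building a struct transObjInfo and queueing it. */
  printf("queue object %p for core %d\n", obj, core);
}

void send_to_allies(void * obj, const int * allycores, int nallies) {
  int sent[2];                                  /* cores already targeted */
  int nsent = 0;
  int upperbound = nallies > 2 ? 2 : nallies;   /* cap at the first two entries */
  for (int k = 0; k < upperbound; ++k) {
    int target = allycores[k];
    int dup = 0;
    for (int j = 0; j < nsent; ++j) {
      if (sent[j] == target) { dup = 1; break; }
    }
    if (!dup) {                                 /* never enqueue twice for one core */
      send_object(obj, target);
      sent[nsent++] = target;
    }
  }
}

int main(void) {
  int allies[] = { 3, 3, 5, 7 };   /* duplicates reflect scheduling weights */
  int x = 42;
  /* Only core 3 gets the object: the second entry is a duplicate, and
   * cores 5 and 7 fall outside the two-entry cap. */
  send_to_allies(&x, allies, 4);
  return 0;
}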
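multicoreruntime.h replaces the hard-coded 30-word outmsgdata array with BAMBOO_OUT_BUF_LENGTH (300), so more outgoing message words can be buffered before the sender has to stall. The neighbouring outmsgindex/outmsglast/outmsgleft fields suggest the array is driven as a ring buffer; the sketch below is only an assumption about that usage (the patch itself merely enlarges the array), and out_enqueue/out_dequeue are hypothetical helpers, not functions from the runtime.

/* Assumed ring-buffer usage of outmsgdata; only BAMBOO_OUT_BUF_LENGTH and the
 * variable names are taken from the patch. */
#include <stdbool.h>

#define BAMBOO_OUT_BUF_LENGTH 300

int outmsgdata[BAMBOO_OUT_BUF_LENGTH];
int outmsgindex = 0;   /* next word to send */
int outmsglast  = 0;   /* next free slot    */
int outmsgleft  = 0;   /* words waiting     */

bool out_enqueue(int word) {
  if (outmsgleft == BAMBOO_OUT_BUF_LENGTH) {
    return false;                        /* buffer full: caller must back off */
  }
  outmsgdata[outmsglast] = word;
  outmsglast = (outmsglast + 1) % BAMBOO_OUT_BUF_LENGTH;
  outmsgleft++;
  return true;
}

bool out_dequeue(int * word) {
  if (outmsgleft == 0) {
    return false;                        /* nothing pending */
  }
  *word = outmsgdata[outmsgindex];
  outmsgindex = (outmsgindex + 1) % BAMBOO_OUT_BUF_LENGTH;
  outmsgleft--;
  return true;
}

int main(void) {
  int w;
  out_enqueue(0xbeef);
  return (out_dequeue(&w) && w == 0xbeef) ? 0 : 1;
}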