ScheduleSimulator scheduleSimulator;
int coreNum;
- int scheduleThreshold; // how many starting points generated by schedule analysis
- int probThreshold; // the probability to stop when no accelaration achieved in the
- // directed simulated annealing
- int generateThreshold; // how many optimized implementation generated in each iteration
- // of the directed simulated annealing
+ int scheduleThreshold; // # of starting points generated by schedule analysis
+ int probThreshold; // the probability to stop when no acceleration achieved
+ // in the directed simulated annealing
+ int generateThreshold; // how many optimized implementations generated in
+ // each iteration of the directed simulated annealing
public MCImplSynthesis(State state,
TaskAnalysis ta,
// check all multi-parameter tasks
Vector<TaskDescriptor> multiparamtds = new Vector<TaskDescriptor>();
- Iterator it_tasks = this.state.getTaskSymbolTable().getDescriptorsIterator();
+ Iterator it_tasks =
+ this.state.getTaskSymbolTable().getDescriptorsIterator();
while(it_tasks.hasNext()) {
TaskDescriptor td = (TaskDescriptor)it_tasks.next();
if(td.numParameters() > 1) {
schedulinggraph.clear();
}
scheduling = schedulings.elementAt(selectedSchedulings.elementAt(0));
- schedulinggraph = scheduleGraphs.elementAt(selectedSchedulings.elementAt(0));
- System.out.print("end of: #" + tryindex + " (bestexetime: " + bestexetime + ")\n");
+ schedulinggraph = scheduleGraphs.elementAt(
+ selectedSchedulings.elementAt(0));
+ System.out.print("end of: #" + tryindex + " (bestexetime: "
+ + bestexetime + ")\n");
System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
tryindex++;
} else if(tmpexetime == bestexetime) {
- System.out.print("end of: #" + tryindex + " (bestexetime: " + bestexetime + ")\n");
+ System.out.print("end of: #" + tryindex + " (bestexetime: "
+ + bestexetime + ")\n");
System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
tryindex++;
if((Math.abs(rand.nextInt()) % 100) < this.probThreshold) {
}
selectedSimExeGraphs.clear();
selectedSimExeGraphs = null;
+
multiparamtds.clear();
multiparamtds = null;
Vector<ClassNode> cNodes = sn.getClassNodes();
for(int k = 0; k < cNodes.size(); k++) {
Iterator it_flags = cNodes.elementAt(k).getFlags();
+ Vector rootnodes = this.taskAnalysis.getRootNodes(
+ cNodes.elementAt(k).getClassDescriptor());
while(it_flags.hasNext()) {
FlagState fs = (FlagState)it_flags.next();
Iterator it_edges = fs.edges();
while(it_edges.hasNext()) {
- TaskDescriptor td = ((FEdge)it_edges.next()).getTask();
+ FEdge tmpfe = (FEdge)it_edges.next();
+ TaskDescriptor td = (tmpfe).getTask();
tmpSchedule.addTask(td);
if(!td2cores.containsKey(td)) {
td2cores.put(td, new Vector<Schedule>());
tmpcores.add(tmpSchedule);
}
tmpcores = null;
- // if the FlagState can be fed to some multi-param tasks,
- // need to record corresponding ally cores later
+ // If the FlagState can be fed to some multi-param tasks,
+ // need to record corresponding ally cores later.
if(td.numParameters() > 1) {
tmpSchedule.addFState4TD(td, fs);
}
}
cNodes = null;
- // For each of the ScheduleEdge out of this ScheduleNode, add the target ScheduleNode into the queue inside sn
+ // For each of the ScheduleEdge out of this ScheduleNode, add the
+ // target ScheduleNode into the queue inside sn
Iterator it_edges = sn.edges();
while(it_edges.hasNext()) {
ScheduleEdge se = (ScheduleEdge)it_edges.next();
switch(se.getType()) {
case ScheduleEdge.NEWEDGE: {
for(int k = 0; k < se.getNewRate(); k++) {
- tmpSchedule.addTargetCore(se.getFstate(), targetcore);
+ FlagState fs = se.getFstate();
+ tmpSchedule.addTargetCore(fs, targetcore);
+ // Check if the new obj could be fed to some
+ // multi-parameter task, if so, add for ally cores
+ // checking
+ Iterator it = fs.edges();
+ while(it.hasNext()) {
+ TaskDescriptor td = ((FEdge)it.next()).getTask();
+ if(td.numParameters() > 1) {
+ tmpSchedule.addFState4TD(td, fs);
+ }
+ }
}
break;
}
tmpSchedule.addTargetCore(se.getFstate(),
targetcore,
se.getTargetFState());
- // check if missed some FlagState associated with some multi-parameter
- // task, which has been cloned when splitting a ClassNode
+ // check if missed some FlagState associated with some
+ // multi-parameter task, which has been cloned when
+ // splitting a ClassNode
FlagState fs = se.getSourceFState();
FlagState tfs = se.getTargetFState();
Iterator it = tfs.edges();
while(it.hasNext()) {
TaskDescriptor td = ((FEdge)it.next()).getTask();
if(td.numParameters() > 1) {
- if(tmpSchedule.getTasks().contains(td)) {
- tmpSchedule.addFState4TD(td, fs);
- }
+ tmpSchedule.addFState4TD(td, fs);
}
}
break;
for(int k = 0; k < cores.size(); ++k) {
Schedule tmpSchedule = cores.elementAt(k);
+ // Make sure all the parameter objs of a multi-parameter
+ // task would be sent to the right place
for(int h = 0; h < fes.size(); ++h) {
FEdge tmpfe = fes.elementAt(h);
FlagState tmpfs = (FlagState)tmpfe.getTarget();
- Vector<TaskDescriptor> tmptds = new Vector<TaskDescriptor>();
+ Vector<TaskDescriptor> tmptds =
+ new Vector<TaskDescriptor>();
if((tmpSchedule.getTargetCoreTable() == null)
|| (!tmpSchedule.getTargetCoreTable().containsKey(tmpfs))) {
// add up all possible cores' info
tmptds = null;
}
+ // Make sure all objs which could be fed to a multi-parameter
+ // task would be sent to all the possible task instances
if(cores.size() > 1) {
Vector<FlagState> tmpfss = tmpSchedule.getFStates4TD(td);
for(int h = 0; h < tmpfss.size(); ++h) {
this.allyCores.put(fstate, new Vector<Integer>());
}
if((this.coreNum != targetCore.intValue()) && (!this.allyCores.get(fstate).contains(targetCore))) {
- this.allyCores.get(fstate).add(targetCore); // there may have some duplicate items,
- // which reflects probabilities.
+ this.allyCores.get(fstate).add(targetCore);
}
}
this.td2fs.put(td, new Vector<FlagState>());
}
if(!this.td2fs.get(td).contains(fstate)) {
- this.td2fs.get(td).add(fstate);
+ this.td2fs.get(td).addElement(fstate);
}
}
this.tasks.add(task);
}
}
-}
\ No newline at end of file
+}
if(!state.MULTICORE) {
outclassdefs.println(" void * flagptr;");
} else {
- outclassdefs.println(" int isolate;"); // indicate if this object is shared or not
outclassdefs.println(" int version;");
outclassdefs.println(" struct ___Object___ * original;");
//outclassdefs.println(" int numlocks;"); // array for locks
if((!state.MULTICORE) || (cn.getSymbol().equals("TagDescriptor"))) {
classdefout.println(" void * flagptr;");
} else if (state.MULTICORE) {
- classdefout.println(" int isolate;"); // indicate if this object is shared or not
classdefout.println(" int version;");
classdefout.println(" struct ___Object___ * original;");
//classdefout.println(" int numlocks;"); // array for locks
isolate = (this.currentSchedule.getAllyCoreTable().get(tmpFState) == null) ||
(this.currentSchedule.getAllyCoreTable().get(tmpFState).size() == 0);
}
+ /* no longer use the isolate flag in object structure
if(!isolate) {
// indentify this object as a shared object
// isolate flag is initially set as 1, once this flag is set as 0, it is never reset to 1, i.e. once an object
output.println(" " + super.generateTemp(fm, temp, lb) + "->original = (struct ___Object___ *)" + super.generateTemp(fm, temp, lb) + ";");
output.println("}");
}
+ */
- //Vector<TranObjInfo> sendto = new Vector<TranObjInfo>();
+ Vector<Integer> sendto = new Vector<Integer>();
Queue<Integer> queue = null;
if(targetCoreTbl != null) {
queue = targetCoreTbl.get(tmpFState);
} else {
tmpinfo.fs = tmpFState;
}
- // fixed 05/12/09, it's very likely to repeatedly send an object to the same core
- // as sheduled
- //if(!contains(sendto, tmpinfo)) {
qinfo = outputtransqueues(tmpinfo.fs, targetcore, output);
output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));");
output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";");
output.println("tmpObjInfo->queues = " + qinfo.qname + ";");
output.println("tmpObjInfo->length = " + qinfo.length + ";");
output.println("addNewItem(totransobjqueue, (void*)tmpObjInfo);");
- //sendto.add(tmpinfo);
- //}
output.println("}");
}
output.println("break;");
} else {
tmpinfo.fs = tmpFState;
}
- // fixed 05/12/09, it's very likely to repeatedly send an object to the same core
- // as sheduled
- //if(!contains(sendto, tmpinfo)) {
qinfo = outputtransqueues(tmpinfo.fs, targetcore, output);
output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));");
output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";");
output.println("tmpObjInfo->queues = " + qinfo.qname + ";");
output.println("tmpObjInfo->length = " + qinfo.length + ";");
output.println("addNewItem(totransobjqueue, (void*)tmpObjInfo);");
- //sendto.add(tmpinfo);
- //}
output.println("}");
}
output.println("/* increase index*/");
// need to be send to other cores
Vector<Integer> targetcores = this.currentSchedule.getAllyCores(tmpFState);
output.println("/* send the shared object to possible queues on other cores*/");
- for(int k = 0; k < targetcores.size(); ++k) {
+ // TODO, temporary solution, send to at most the first two
+ int upperbound = targetcores.size() > 2? 2: targetcores.size();
+ for(int k = 0; k < upperbound; ++k) {
// TODO
// add the information of exactly which queue
- //if(!sendto.contains(targetcores.elementAt(i))) {
+ int targetcore = targetcores.elementAt(k).intValue();
+ if(!sendto.contains(targetcore)) {
// previously not sended to this target core
// enqueue this object and its destinations for later process
output.println("{");
QueueInfo qinfo = null;
TranObjInfo tmpinfo = new TranObjInfo();
tmpinfo.name = super.generateTemp(fm, temp, lb);
- tmpinfo.targetcore = targetcores.elementAt(i);
+ tmpinfo.targetcore = targetcore;
FlagState targetFS = this.currentSchedule.getTargetFState(tmpFState);
if(targetFS != null) {
tmpinfo.fs = targetFS;
} else {
tmpinfo.fs = tmpFState;
}
- // fixed 05/12/09, it's very likely to repeatedly send an object to the same core
- // as sheduled
- //if(!contains(sendto, tmpinfo)) {
- qinfo = outputtransqueues(tmpinfo.fs, targetcores.elementAt(i), output);
+ qinfo = outputtransqueues(tmpinfo.fs, targetcore, output);
output.println("tmpObjInfo = RUNMALLOC(sizeof(struct transObjInfo));");
output.println("tmpObjInfo->objptr = (void *)" + tmpinfo.name + ";");
- output.println("tmpObjInfo->targetcore = "+targetcores.elementAt(i).toString()+";");
+ output.println("tmpObjInfo->targetcore = "+targetcore+";");
output.println("tmpObjInfo->queues = " + qinfo.qname + ";");
output.println("tmpObjInfo->length = " + qinfo.length + ";");
output.println("addNewItem(totransobjqueue, (void*)tmpObjInfo);");
- //sendto.add(tmpinfo);
- //}
- output.println("}");
- //}
+ output.println("}");
+ sendto.addElement(targetcore);
+ }
}
}
}
int size=classsize[type];
struct ___Object___ * newobj=RUNMALLOC(size);
memcpy(newobj, tmpptr, size);
- if(0 == newobj->isolate) {
+ // TODO no longer use isolate flag
+ /*if(0 == newobj->isolate) {
newobj->original=tmpptr;
- }
+ }*/
RUNFREE(msgptr);
tmpptr = NULL;
int k = 0;
numparams=currtpd->task->numParameters;
numtotal=currtpd->task->numTotal;
- int isolateflags[numparams];
+ // TODO no longer use isolate flag
+ int isolateflags[numparams];
// clear the lockRedirectTbl (TODO, this table should be empty after all locks are released)
// TODO: need modification according to added alias locks
for(i=0; i<numparams; i++) {
void * parameter=currtpd->parameterArray[i];
tmpparam = (struct ___Object___ *)parameter;
+ // TODO no longer use isolate flag
if(0 == tmpparam->isolate) {
isolateflags[i] = 0;
// shared object, need to flush with current value
int isize = 2*BAMBOO_CACHE_LINE_SIZE-4+(size-1)&(~BAMBOO_CACHE_LINE_MASK);
BAMBOO_START_CRITICAL_SECTION_MEM();
p = BAMBOO_LOCAL_MEM_CALLOC(m, isize); // calloc(m, isize);
+ if(p == NULL) {
+ exit(0xa024);
+ }
BAMBOO_CLOSE_CRITICAL_SECTION_MEM();
return (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK));
}
int isize = 2*BAMBOO_CACHE_LINE_SIZE-4+(size-1)&(~BAMBOO_CACHE_LINE_MASK);
BAMBOO_START_CRITICAL_SECTION_MEM();
p = BAMBOO_SHARE_MEM_CALLOC_I(m, isize); // calloc(m, isize);
+ if(p == NULL) {
+ exit(0xa025);
+ }
BAMBOO_CLOSE_CRITICAL_SECTION_MEM();
return (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK));
}
void * p = NULL;
int isize = 2*BAMBOO_CACHE_LINE_SIZE-4+(size-1)&(~BAMBOO_CACHE_LINE_MASK);
p = BAMBOO_LOCAL_MEM_CALLOC(m, isize); // calloc(m, isize);
+ if(p == NULL) {
+ exit(0xa026);
+ }
return (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK));
}
void * allocate_new(void * ptr, int type) {
struct ___Object___ * v=(struct ___Object___ *) mygcmalloc((struct garbagelist *) ptr, classsize[type]);
v->type=type;
- v->isolate = 1;
v->version = 0;
//v->numlocks = 0;
v->lock = NULL;
struct ArrayObject * allocate_newarray(void * ptr, int type, int length) {
struct ArrayObject * v=mygcmalloc((struct garbagelist *) ptr, sizeof(struct ArrayObject)+length*classsize[type]);
v->type=type;
- v->isolate = 1;
v->version = 0;
//v->numlocks = 0;
v->lock = NULL;
void * allocate_new(int type) {
struct ___Object___ * v=FREEMALLOC(classsize[type]);
v->type=type;
- v->isolate = 1;
v->version = 0;
//v->numlocks = 0;
v->lock = NULL;
struct ArrayObject * allocate_newarray(int type, int length) {
struct ArrayObject * v=FREEMALLOC(sizeof(struct ArrayObject)+length*classsize[type]);
v->type=type;
- v->isolate = 1;
v->version = 0;
//v->numlocks = 0;
v->lock = NULL;
int msgtype;
int msgdataindex;
int msglength;
-int outmsgdata[30];
+#define BAMBOO_OUT_BUF_LENGTH 300
+int outmsgdata[BAMBOO_OUT_BUF_LENGTH];
int outmsgindex;
int outmsglast;
int outmsgleft;
struct Queue objqueue;
// data structures for shared memory allocation
+#define BAMBOO_NUM_PAGES 1024 * 512
+#define BAMBOO_PAGE_SIZE 4096
+#define BAMBOO_SHARED_MEM_SIZE BAMBOO_PAGE_SIZE * BAMBOO_PAGE_SIZE
+#define BAMBOO_BASE_VA 0xd000000
+#define BAMBOO_SMEM_SIZE 16 * BAMBOO_PAGE_SIZE
+
bool smemflag;
struct bamboo_shared_mem {
mspace msp;
}
}
#endif
- terminate(); // All done.
+ terminate(); // All done.
} // if-else of line 364: if(!waitconfirm)
} else {
// still some objects on the fly on the network
((void **)(((char *)&stringarray->___length___)+sizeof(int)))[i-1]=newstring;
}
- startupobject->isolate = 1;
startupobject->version = 0;
startupobject->lock = NULL;
void * mem = mspace_calloc(cur_mem->msp, 1, msgdata[1]);
struct bamboo_shared_mem * failmem = cur_mem;
while(mem == NULL) {
- // move current head to the tail
- bamboo_free_msps->tail->next = cur_mem;
- bamboo_free_msps->tail = cur_mem;
- bamboo_free_msps->head = cur_mem->next;
- cur_mem->next = NULL;
- cur_mem = bamboo_free_msps->head;
- if(cur_mem == failmem) {
- BAMBOO_EXIT(0xa016);
+ if(msgdata[1] > BAMBOO_SMEM_SIZE) {
+ // move current head to the tail
+ bamboo_free_msps->tail->next = cur_mem;
+ bamboo_free_msps->tail = cur_mem;
+ bamboo_free_msps->head = cur_mem->next;
+ cur_mem->next = NULL;
+ cur_mem = bamboo_free_msps->head;
+ if(cur_mem == failmem) {
+ BAMBOO_EXIT(0xa016);
+ }
+ } else {
+ // remove the head
+ bamboo_free_msps->head = cur_mem->next;
+ RUNFREE(cur_mem);
+ cur_mem = bamboo_free_msps->head;
+ if(cur_mem == NULL) {
+ BAMBOO_EXIT(0xa017);
+ }
}
mem = mspace_calloc(cur_mem->msp, 1, msgdata[1]);
}
BAMBOO_DEBUGPRINT_REG(lock);
BAMBOO_DEBUGPRINT_REG(corenum);
#endif
- BAMBOO_EXIT(0xa017);
+ BAMBOO_EXIT(0xa018);
}
/*if((corenum == STARTUPCORE) && waitconfirm) {
waitconfirm = false;
#endif
} else {
// conflicts on lockresults
- BAMBOO_EXIT(0xa018);
+ BAMBOO_EXIT(0xa019);
}
}
return true;
// reside on this core
if(!RuntimeHashcontainskey(locktbl, reallock)) {
// no locks for this object, something is wrong
- BAMBOO_EXIT(0xa019);
+ BAMBOO_EXIT(0xa01a);
} else {
int rwlock_obj = 0;
struct LockValue * lockvalue = NULL;
#endif
} else {
// conflicts on lockresults
- BAMBOO_EXIT(0xa01a);
+ BAMBOO_EXIT(0xa01b);
}
return true;
} else {
#endif
} else {
// conflicts on lockresults
- BAMBOO_EXIT(0xa01b);
+ BAMBOO_EXIT(0xa01c);
}
}
return true;
// reside on this core
if(!RuntimeHashcontainskey(locktbl, reallock)) {
// no locks for this object, something is wrong
- BAMBOO_EXIT(0xa01c);
+ BAMBOO_EXIT(0xa01d);
} else {
int rwlock_obj = 0;
struct LockValue * lockvalue = NULL;
// reside on this core
if(!RuntimeHashcontainskey(locktbl, reallock)) {
// no locks for this object, something is wrong
- BAMBOO_EXIT(0xa01d);
+ BAMBOO_EXIT(0xa01e);
} else {
int rwlock_obj = 0;
struct LockValue * lockvalue = NULL;
#endif
} else {
// conflicts on lockresults
- BAMBOO_EXIT(0xa01e);
+ BAMBOO_EXIT(0xa01f);
}
return true;
}
#endif
} else {
// conflicts on lockresults
- BAMBOO_EXIT(0xa01f);
+ BAMBOO_EXIT(0xa020);
}
return true;
} else {
// reside on this core
if(!RuntimeHashcontainskey(locktbl, reallock)) {
// no locks for this object, something is wrong
- BAMBOO_EXIT(0xa020);
+ BAMBOO_EXIT(0xa021);
} else {
int rwlock_obj = 0;
struct LockValue * lockvalue = NULL;
// reside on this core
if(!RuntimeHashcontainskey(locktbl, reallock)) {
// no locks for this object, something is wrong
- BAMBOO_EXIT(0xa021);
+ BAMBOO_EXIT(0xa022);
} else {
int rwlock_obj = 0;
struct LockValue * lockvalue = NULL;
reverse=NULL;
#endif // #if 0: for recovery
BAMBOO_DEBUGPRINT_REG(x);
- BAMBOO_EXIT(0xa022);
+ BAMBOO_EXIT(0xa023);
} else {
#endif // #ifndef MULTICORE
#if 0
#endif
#ifdef DEBUG
BAMBOO_DEBUGPRINT(0xe99a);
+ //BAMBOO_DEBUGPRINT_REG(hashsize(activetasks));
#endif
#ifndef MULTICORE
} // line 2946: if (x=setjmp(error_handler))