2 #include "multicoregccompact.h"
3 #include "runtime_arch.h"
4 #include "multicoreruntime.h"
5 #include "multicoregarbage.h"
// Poll per-core GC status words: a nonzero gccorestatus[i] means core i has
// not yet finished the current GC phase.
// NOTE(review): the return statements and closing braces of this function are
// elided from this chunk; presumably it returns true while any core is busy —
// confirm against the full file (the caller at master_compact spins on it).
8 bool gc_checkCoreStatus() {
9 for(int i = 0; i < NUMCORES4GC; ++i) {
10 if(gccorestatus[i] != 0) {
// Reset the per-core GC status array before starting a new phase.
// NOTE(review): loop body and closing braces are elided from this chunk;
// presumably it writes a "busy" sentinel into gccorestatus[i] — confirm.
17 void gc_resetCoreStatus() {
18 for(int i = 0; i < NUMCORES4GC; ++i) {
// Initialize the two compaction cursors for this core:
//   to   - destination cursor; starts at this core's local block 0
//   orig - source cursor; starts at the same address (objects are slid
//          downward in place within the core's region)
// BASEPTR/BLOCKSIZE are macros mapping (core, local block number) to a
// shared-heap address and that block's size.
23 void initOrig_Dst(struct moveHelper * orig,struct moveHelper * to) {
25 to->localblocknum = 0;
26 BASEPTR(to->base, BAMBOO_NUM_OF_CORE, to->localblocknum);
28 to->bound=to->base+BLOCKSIZE(to->localblocknum);
// Source starts where the destination starts; ptr/base/bound all track the
// same first local block.
31 orig->localblocknum = 0;
32 orig->ptr=orig->base = to->base;
33 orig->bound = orig->base + BLOCKSIZE(orig->localblocknum);
// Advance the destination cursor to this core's next local block.
// NOTE(review): the increment of to->localblocknum (and the assignment of
// to->ptr) appears to be on lines elided from this chunk — confirm.
36 void getSpaceLocally(struct moveHelper *to) {
37 //we have space on our core...just keep going
39 BASEPTR(to->base,BAMBOO_NUM_OF_CORE, to->localblocknum);
41 to->bound = to->base + BLOCKSIZE(to->localblocknum);
// Ask the master core (STARTUPCORE) for compaction space on another core.
// Sends a GCFINISHCOMPACT message carrying our current write pointer and the
// minimum number of bytes we still need, then blocks until the master replies
// with a start address (delivered via the global gcmovestartaddr).
// NOTE(review): the flag set/wait loop (lines 47, 50-53 of the original) is
// elided from this chunk — the synchronization protocol cannot be fully
// verified here.
44 void getSpaceRemotely(struct moveHelper *to, unsigned int minimumbytes) {
45 //need to get another block from elsewhere
46 //set flag to wait for memory
48 //send request for memory
49 send_msg_4(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, to->ptr, minimumbytes);
50 //wait for flag to be set that we received message
// Adopt the address the master handed us.
54 to->ptr = gcmovestartaddr;
56 //set localblock number to high number to indicate this block isn't local
// MAXBLOCK acts as a sentinel: getSpace() compares localblocknum against
// gcblock2fill, so a huge value forces future requests to go remote too.
57 to->localblocknum = MAXBLOCK;
58 unsigned int globalblocknum;
// Recompute base/bound from the global block index of the remote address.
59 BLOCKINDEX(globalblocknum, to->ptr);
60 to->base = gcbaseva + OFFSET2BASEVA(globalblocknum);
61 to->bound = gcbaseva + BOUNDPTR(globalblocknum);
// Obtain more destination space: use the next local block while we are still
// under this core's fill quota (gcblock2fill, set by the master during
// master_compact); otherwise request space from another core.
// NOTE(review): the getSpaceLocally(to) call in the if-branch is on a line
// elided from this chunk — confirm.
64 void getSpace(struct moveHelper *to, unsigned int minimumbytes) {
65 //need more space to compact into
66 if (to->localblocknum < gcblock2fill) {
69 getSpaceRemotely(to, minimumbytes);
// Drive the compaction loop for this core: repeatedly call compactblocks()
// to slide marked objects from 'orig' into 'to', advancing 'orig' block by
// block and requesting destination space as needed.  When the whole local
// region has been scanned, report completion to the master.
// NOTE(review): the enclosing while-loop header and the exit branch after the
// end-of-heap check (original lines ~74, 84-85, 88-90) are elided — the exact
// loop/termination structure cannot be verified from this chunk.
73 void compacthelper(struct moveHelper * orig,struct moveHelper * to) {
// Nonzero return means compactblocks ran out of destination space and needs
// at least this many bytes to place the next object.
75 unsigned int minimumbytes=compactblocks(orig, to);
76 if (orig->ptr==orig->bound) {
77 //need more data to compact
// Move the source cursor to this core's next local block.
79 orig->localblocknum++;
80 BASEPTR(orig->base,BAMBOO_NUM_OF_CORE, orig->localblocknum);
82 orig->bound = orig->base + BLOCKSIZE(orig->localblocknum);
// Past the end of the shared heap: nothing left to compact on this core.
83 if (orig->base >= gcbaseva+BAMBOO_SHARED_MEM_SIZE)
86 if (minimumbytes!=0) {
87 getSpace(to, minimumbytes);
// Tell the master we finished; minimumbytes==0 signals no outstanding need.
91 send_msg_4(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, to->ptr, 0);
94 /* Should be invoked with interrupt turned off. */
// Hand 'requiredmem' bytes of unused space from stopped core 'sourcecore' to
// the requester.  _I suffix = interrupt-off variant (caller must already be
// in runtime mode).  NOTE(review): the entire body is elided from this chunk;
// the return value is presumably the assigned start address — confirm.
96 void * assignSpareMem_I(unsigned int sourcecore, unsigned int requiredmem) {
// Interrupt-safe wrapper: enter runtime mode (interrupts off), delegate to
// assignSpareMem_I, then restore client mode.
// NOTE(review): the 'return retval;' and closing brace are elided from this
// chunk — confirm retval is returned in the full file.
100 void * assignSpareMem(unsigned int sourcecore,unsigned int requiredmem) {
101 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
102 void * retval=assignSpareMem_I(sourcecore, requiredmem);
103 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
107 /* should be invoked with interrupt turned off */
// Search the stopped cores (status 0, still under their block quota) for one
// with spare compaction space, and assign 'requiredmem' bytes from it.  If
// none qualifies, record the request in gcrequiredmems[] so the master can
// satisfy it later when a core finishes.
// NOTE(review): declared to return void*, but the caller gcfindSpareMem
// stores the result in a bool — likely a leftover from an API change; the
// success/return path here is elided, so this cannot be fixed from this
// chunk.  TODO confirm intended return type against the full file.
109 void * gcfindSpareMem_I(unsigned int requiredmem,unsigned int requiredcore) {
111 for(int k = 0; k < NUMCORES4GC; k++) {
112 if((gccorestatus[k] == 0) && (gcfilledblocks[k] < gcstopblock[k])) {
113 // check if this stopped core has enough mem
114 startaddr=assignSpareMem_I(k, requiredmem);
118 // If we cannot find spare mem right now, hold the request
// Deferred: remember how much core 'requiredcore' still needs.
119 gcrequiredmems[requiredcore] = requiredmem;
// Interrupt-safe wrapper around gcfindSpareMem_I: enter runtime mode, search
// for spare memory, restore client mode.
// NOTE(review): gcfindSpareMem_I is declared void* (line 109) but its result
// is assigned to a bool here — the pointer collapses to true/false.  Looks
// like a type-consistency bug to fix once the elided lines are visible.
// NOTE(review): the 'return retval;' and closing brace are elided from this
// chunk.
124 bool gcfindSpareMem(unsigned int requiredmem,unsigned int requiredcore) {
125 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
126 bool retval=gcfindSpareMem_I(requiredmem, requiredcore);
127 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
131 /* This function is performance critical... spend more time optimizing it */
// Core compaction kernel: walk the source region [orig->ptr, orig->bound),
// skipping unmarked areas quickly via the mark-bit table (gcmarktbl), and
// copy each marked object to the destination cursor, recording the
// old->new address in gcmappingtbl for the later pointer-update phase.
// Returns 0 on completion of the source block, or the byte count of the
// object that did not fit in the destination block (caller must getSpace()).
// NOTE(review): many interior lines (do-loop header, copy/memcpy of object
// bodies, toptr advancement, return statements) are elided from this chunk;
// the description of the copy path is inferred from the mapping-table write
// and should be confirmed against the full file.
133 unsigned int compactblocks(struct moveHelper * orig, struct moveHelper * to) {
// Cache hot fields in locals; origendoffset is the mark-table index of the
// end of the source block, used to detect "rest of block is unmarked".
135 void *tobound=to->bound;
136 void *origptr=orig->ptr;
137 void *origbound=orig->bound;
138 unsigned INTPTR origendoffset=ALIGNTOTABLEINDEX((unsigned INTPTR)(origbound-gcbaseva));
139 unsigned int objlength;
141 while(origptr<origbound) {
142 //Try to skip over stuff fast first
// Coarse skip: scan the mark table one entry at a time until a marked
// entry is found or the block ends.
143 unsigned INTPTR offset=(unsigned INTPTR) (origptr-gcbaseva);
144 unsigned INTPTR arrayoffset=ALIGNTOTABLEINDEX(offset);
145 if (!gcmarktbl[arrayoffset]) {
148 if (arrayoffset<origendoffset) {
149 //finished with block...
155 } while(!gcmarktbl[arrayoffset]);
// Resume the fine-grained scan at the first marked table entry.
156 origptr=CONVERTTABLEINDEXTOPTR(arrayoffset);
159 //Scan more carefully next
// getMarkedLength reads the per-object mark/length encoding at origptr.
160 objlength=getMarkedLength(origptr);
162 if (objlength!=NOTMARKED) {
163 unsigned int length=ALIGNSIZETOBYTES(objlength);
164 void *endtoptr=toptr+length;
// Destination block cannot hold this object: caller must fetch space.
165 if (endtoptr>tobound) {
171 //good to move objects and update pointers
// Record forwarding address before/while moving the object.
172 gcmappingtbl[OBJMAPPINGINDEX(origptr)]=toptr;
// Unmarked slot: step one alignment unit and rescan.
176 origptr+=ALIGNMENTSIZE;
// NOTE(review): the function header for this block (presumably the per-core
// 'compact' phase entry, original line ~180) is elided from this chunk, as is
// the code between the assert and the struct initializers (lines 182-183) and
// after compacthelper (192-193).  Documentation below covers the visible body.
// Per-core compact-phase driver: verify we are in COMPACTPHASE, set up the
// source/destination cursors, and run the compaction loop.
181 BAMBOO_ASSERT(COMPACTPHASE == gc_status_info.gcphase);
184 // initialize structs for compacting
// Zero-initialize both moveHelper cursors, then point them at this core's
// first local block.
185 struct moveHelper orig={0,NULL,NULL,0,NULL,0,0,0,0};
186 struct moveHelper to={0,NULL,NULL,0,NULL,0,0,0,0};
187 initOrig_Dst(&orig, &to);
// Cache-adaptivity sampling hook (no-op unless CACHEADAPT is configured).
189 CACHEADAPT_SAMPLING_DATA_REVISE_INIT(&orig, &to);
191 compacthelper(&orig, &to);
// Master-core driver for the compact phase: balance the predicted load,
// initialize per-core bookkeeping, broadcast GCSTARTCOMPACT to every core
// (setting its own quota directly), then wait for all cores to report done.
// NOTE(review): several interior lines are elided from this chunk (the body
// of the wait loop at 222, resend/space-handling between 214-219, and the
// tail after 227) — the full synchronization logic cannot be verified here.
194 void master_compact() {
195 // predict number of blocks to fill for each core
196 void * tmpheaptop = 0;
// loadbalance() also yields the topmost block/core via out-params.
197 int numblockspercore = loadbalance(&tmpheaptop, &gctopblock, &gctopcore);
199 GC_PRINTF("mark phase finished \n");
// Mark every core "busy" again before broadcasting the compact command.
201 gc_resetCoreStatus();
202 tmpheaptop = gcbaseva + BAMBOO_SHARED_MEM_SIZE;
203 for(int i = 0; i < NUMCORES4GC; i++) {
204 // init some data strutures for compact phase
205 gcfilledblocks[i] = 0;
206 gcrequiredmems[i] = 0;
208 //send start compact messages to all cores
209 gcstopblock[i] = numblockspercore;
210 if(i != STARTUPCORE) {
211 send_msg_2(i, GCSTARTCOMPACT, numblockspercore);
// The master core sets its own quota directly instead of messaging itself.
213 gcblock2fill = numblockspercore;
220 /* wait for all cores to finish compacting */
// Spin until every gccorestatus[] entry reads 0 (see gc_checkCoreStatus).
222 while(gc_checkCoreStatus())
227 GC_PRINTF("compact phase finished \n");
230 #endif // MULTICORE_GC