2 #include "multicoregccompact.h"
3 #include "runtime_arch.h"
4 #include "multicoreruntime.h"
5 #include "multicoregarbage.h"
8 int gc_countRunningCores() {
10 for(int i = 0; i < NUMCORES4GC; ++i) {
11 if(gccorestatus[i] != 0) {
18 bool gc_checkCoreStatus() {
19 for(int i = 0; i < NUMCORES4GC; ++i) {
20 if(gccorestatus[i] != 0) {
27 void gc_resetCoreStatus() {
28 for(int i = 0; i < NUMCORES4GC; ++i) {
33 void initOrig_Dst(struct moveHelper * orig,struct moveHelper * to) {
35 to->localblocknum = 0;
36 BASEPTR(to->base, BAMBOO_NUM_OF_CORE, to->localblocknum);
38 to->bound=to->base+BLOCKSIZE(to->localblocknum);
41 orig->localblocknum = 0;
42 orig->ptr=orig->base = to->base;
43 orig->bound = orig->base + BLOCKSIZE(orig->localblocknum);
46 void getSpaceLocally(struct moveHelper *to) {
47 //we have space on our core...just keep going
49 BASEPTR(to->base,BAMBOO_NUM_OF_CORE, to->localblocknum);
51 to->bound = to->base + BLOCKSIZE(to->localblocknum);
/* Obtain a destination block from another core via the master
 * (STARTUPCORE): send a request, wait for the reply, then retarget the
 * 'to' cursor at the granted block.
 * NOTE(review): this listing is incomplete -- the statement that clears
 * the wait flag (original line 57) and the busy-wait loop for the reply
 * (original lines 61-63) appear to be elided; gcmovestartaddr is
 * presumably filled in by the message handler before we read it below.
 * Verify against the full source. */
54 void getSpaceRemotely(struct moveHelper *to, unsigned int minimumbytes) {
55 //need to get another block from elsewhere
56 //set flag to wait for memory
58 //send request for memory
59 send_msg_3(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, minimumbytes);
60 //wait for flag to be set that we received message
// reply has arrived: gcmovestartaddr is the start of the granted space
64 to->ptr = gcmovestartaddr;
66 //set localblock number to high number to indicate this block isn't local
67 to->localblocknum = MAXBLOCK;
// recompute base/bound from the global block containing the granted address
68 unsigned int globalblocknum;
69 BLOCKINDEX(globalblocknum, to->ptr);
70 to->base = gcbaseva + OFFSET2BASEVA(globalblocknum);
71 to->bound = gcbaseva + BOUNDPTR(globalblocknum);
74 void getSpace(struct moveHelper *to, unsigned int minimumbytes) {
75 //need more space to compact into
76 if (to->localblocknum < gcblock2fill) {
79 getSpaceRemotely(to, minimumbytes);
83 void compacthelper(struct moveHelper * orig,struct moveHelper * to) {
84 bool senttopmessage=false;
86 if ((gcheaptop < ((unsigned INTPTR)(to->bound-to->ptr)))&&!senttopmessage) {
87 //This block is the last for this core...let the startup know
88 send_msg_3(STARTUPCORE, GCRETURNMEM, BAMBOO_NUM_OF_CORE, to->ptr+gcheaptop);
89 //Only send the message once
93 unsigned int minimumbytes=compactblocks(orig, to);
94 if (orig->ptr==orig->bound) {
95 //need more data to compact
97 orig->localblocknum++;
98 BASEPTR(orig->base,BAMBOO_NUM_OF_CORE, orig->localblocknum);
100 orig->bound = orig->base + BLOCKSIZE(orig->localblocknum);
101 if (orig->base >= gcbaseva+BAMBOO_SHARED_MEM_SIZE)
104 if (minimumbytes!=0) {
105 getSpace(to, minimumbytes);
109 send_msg_3(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, 0);
/* Look for a free block among corenum's neighbor cores (core2test table)
 * with at least requiredmem usable bytes (rounded down to a cache line).
 * On success marks the block BS_USED and returns a pointer just past its
 * cache-line-aligned used region; the fall-through "return NULL" path is
 * elided from this listing.
 * NOTE(review): 'allocation.lowestfreeblock' here vs
 * 'allocationinfo.lowestfreeblock' elsewhere (see globalSearch) -- one of
 * the two spellings is presumably a typo; confirm against the struct
 * definitions. 'toplocalblock' is computed but unused in the visible code. */
112 void * checkNeighbors(int corenum, unsigned INTPTR requiredmem) {
113 int minblockindex=allocation.lowestfreeblock/NUMCORES4GC;
114 block_t toplocalblock=topblock/NUMCORES4GC;
115 for(int i=0;i<NUM_CORES2TEST;i++) {
116 int neighborcore=core2test[corenum][i];
// -1 marks an empty slot in the neighbor table
117 if (neighborcore!=-1) {
118 for(block_t lblock=minblockindex;lblock<numblockspercore;lblock++) {
119 block_t globalblockindex=BLOCKINDEX2(neighborcore, lblock);
120 struct blockrecord * block=&allocationinfo.blocktable[globalblockindex];
121 if (block->status==BS_FREE) {
// round free space down to a whole cache line
122 unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
123 if (requiredmem<freespace) {
// claim the block
126 block->status=BS_USED;
127 void *blockptr=OFFSET2BASEVA(globalblockindex)+gcbaseva;
// round used space up to the next cache line boundary
128 unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
129 return blockptr+usedspace;
/* Scan the whole block table from the lowest known free block up to
 * 'topblock' for a free block with enough space; also tracks the first
 * free block seen so allocationinfo.lowestfreeblock can be updated.
 * NOTE(review): the visible signature takes only 'topblock', yet the body
 * uses 'requiredmem' and 'globalblockindex', and the caller
 * (gcfindSpareMem_I) passes two arguments -- the parameter list
 * (likely "..., unsigned INTPTR requiredmem") appears truncated in this
 * listing; confirm against the full source. */
138 void * globalSearch(unsigned int topblock) {
139 unsigned int firstfree=NOFREEBLOCK;
140 for(block_t i=allocationinfo.lowestfreeblock;i<topblock;i++) {
141 struct blockrecord * block=&allocationinfo.blocktable[i];
142 if (block->status==BS_FREE) {
// remember the first free block for the lowestfreeblock update below
143 if(firstfree==NOFREEBLOCK)
// round free space down to a whole cache line
145 unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
146 if (requiredmem<freespace) {
// claim the block and return a pointer past its used region
149 block->status=BS_USED;
150 void *blockptr=OFFSET2BASEVA(globalblockindex)+gcbaseva;
151 unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
152 allocationinfo.lowestfreeblock=firstfree;
153 return blockptr+usedspace;
// no block large enough: still record the lowest free block we saw
157 allocationinfo.lowestfreeblock=firstfree;
161 /* should be invoked with interrupt turned off */
/* Find spare memory for 'requiredcore': first try its neighbors, then a
 * global search. If nothing is available, park the request in
 * gcrequiredmems[] to be satisfied later; when all running cores have a
 * pending move, memory is handed out in bulk (handler elided here).
 * Must be called with interrupts off (see comment above).
 * NOTE(review): declared to return void* but the caller (gcfindSpareMem)
 * stores the result in a bool -- presumably intended as a success flag;
 * confirm the intended return contract. The assignments inside the if
 * conditions below are intentional but unparenthesized -- consider
 * "if ((memblock=...))" to silence -Wparentheses. */
163 void * gcfindSpareMem_I(unsigned int requiredmem,unsigned int requiredcore) {
164 if (allocationinfo.lowestfreeblock!=NOFREEBLOCK) {
165 //There are spare blocks
166 unsigned int topblock=numblockspercore*NUMCORES4GC;
169 if (memblock=checkNeighbors(requiredcore, requiredmem)) {
171 } else if (memblock=globalSearch(topblock, requiredmem)) {
176 // If we cannot find spare mem right now, hold the request
177 gcrequiredmems[requiredcore] = requiredmem;
180 int count=gc_countRunningCores();
181 if (gcmovepending==count) {
182 // All cores have stopped...hand out memory as necessary to handle all requests
/* Interrupt-safe wrapper around gcfindSpareMem_I: enter runtime mode
 * (interrupts off) around the search and propagate its result.
 * Returns true iff spare memory was found (the void* result of
 * gcfindSpareMem_I converts to bool: non-NULL == success).
 * Fix: the listing was missing the final "return retval;" -- falling off
 * the end of a non-void function whose value is used is undefined
 * behavior. */
bool gcfindSpareMem(unsigned int requiredmem,unsigned int requiredcore) {
  BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
  bool retval=gcfindSpareMem_I(requiredmem, requiredcore);
  BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
  return retval;
}
196 /* This function is performance critical... spend more time optimizing it */
/* Compact one stretch of the heap: slide marked objects from the region
 * [orig->ptr, orig->bound) down to to->ptr, recording forwarding
 * pointers in gcmappingtbl. Returns 0 when the source region is
 * exhausted, or the byte length of the object that did not fit in the
 * destination block (caller must then call getSpace).
 * NOTE(review): this listing is heavily elided -- the declaration of
 * 'toptr', the do-loop header for the fast-skip, the object copy, and
 * the cursor write-backs to orig/to are missing; code below is kept
 * verbatim and only annotated. */
198 unsigned int compactblocks(struct moveHelper * orig, struct moveHelper * to) {
// snapshot cursors into locals (performance-critical loop)
199 void *toptrinit=to->ptr;
201 void *tobound=to->bound;
202 void *origptr=orig->ptr;
203 void *origbound=orig->bound;
// precompute the mark-table index of the end of the source region
204 unsigned INTPTR origendoffset=ALIGNTOTABLEINDEX((unsigned INTPTR)(origbound-gcbaseva));
205 unsigned int objlength;
207 while(origptr<origbound) {
208 //Try to skip over stuff fast first
209 unsigned INTPTR offset=(unsigned INTPTR) (origptr-gcbaseva);
210 unsigned INTPTR arrayoffset=ALIGNTOTABLEINDEX(offset);
// zero mark-table entry => nothing live here; skip in table-granularity steps
211 if (!gcmarktbl[arrayoffset]) {
214 if (arrayoffset<origendoffset) {
215 //finished with block...
// account for the bytes consumed in the destination so far
219 gcheaptop-=(unsigned INTPTR)(toptr-toptrinit)
222 } while(!gcmarktbl[arrayoffset]);
223 origptr=CONVERTTABLEINDEXTOPTR(arrayoffset);
226 //Scan more carefully next
227 objlength=getMarkedLength(origptr);
229 if (objlength!=NOTMARKED) {
230 unsigned int length=ALIGNSIZETOBYTES(objlength);
// would this object overflow the destination block?
231 void *endtoptr=toptr+length;
232 if (endtoptr>tobound) {
233 gcheaptop-=(unsigned INTPTR)(toptr-toptrinit)
238 //good to move objects and update pointers
// record forwarding pointer old->new for the pointer-update phase
239 gcmappingtbl[OBJMAPPINGINDEX(origptr)]=toptr;
// unmarked slot: advance by the minimum alignment unit
243 origptr+=ALIGNMENTSIZE;
/* Per-core compact-phase entry body. NOTE(review): the enclosing function
 * header (presumably "void compact()") is elided before this point in the
 * listing; these statements assert we are in the compact phase, set up the
 * two cursors, and run the compaction loop. */
248 BAMBOO_ASSERT(COMPACTPHASE == gc_status_info.gcphase);
251 // initialize structs for compacting
252 struct moveHelper orig={0,NULL,NULL,0,NULL,0,0,0,0};
253 struct moveHelper to={0,NULL,NULL,0,NULL,0,0,0,0};
254 initOrig_Dst(&orig, &to);
// cache-adaptation sampling hook (no-op unless GC_CACHE_ADAPT is enabled)
256 CACHEADAPT_SAMPLING_DATA_REVISE_INIT(&orig, &to);
258 compacthelper(&orig, &to);
/* Master-core driver for the compact phase: predict per-core block
 * quotas via loadbalance(), initialize the global block table, broadcast
 * GCSTARTCOMPACT to all worker cores, and wait for completion.
 * NOTE(review): the listing elides the master's own compact call and the
 * message-pump inside the wait loop. Also 'NOFREEBLOCKS' below vs
 * 'NOFREEBLOCK' used in globalSearch/gcfindSpareMem_I -- presumably the
 * same constant; confirm which spelling is declared. */
261 void master_compact() {
262 // predict number of blocks to fill for each core
263 numblockspercore = loadbalance()+1;
265 GC_PRINTF("mark phase finished \n");
// mark all cores as running before broadcasting the compact command
267 gc_resetCoreStatus();
268 //initialize local data structures first....we don't want remote requests messing data up
269 unsigned int initblocks=numblockspercore*NUMCORES4GC;
270 allocationinfo.lowestfreeblock=NOFREEBLOCKS;
// blocks inside each core's quota start out used
273 for(int i=0;i<initblocks;i++) {
274 allocationinfo.blocktable[i].status=BS_USED;
// blocks beyond the quota are free and empty
278 for(int i=initblocks;i<GCNUMBLOCK;i++) {
279 allocationinfo.blocktable[i].status=BS_FREE;
280 allocationinfo.blocktable[i].usedspace=0;
281 //this is true because all cores have at least one block already...
282 allocationinfo.blocktable[i].freespace=BLOCKSIZE(1);
285 //start all of the cores
286 for(int i = 0; i < NUMCORES4GC; i++) {
287 // init some data strutures for compact phase
288 gcrequiredmems[i] = 0;
290 //send start compact messages to all cores
291 if(i != STARTUPCORE) {
292 send_msg_2(i, GCSTARTCOMPACT, numblockspercore);
// the master core sets its own quota directly instead of messaging itself
294 gcblock2fill = numblockspercore;
301 /* wait for all cores to finish compacting */
303 while(gc_checkCoreStatus())
308 GC_PRINTF("compact phase finished \n");
311 #endif // MULTICORE_GC