[IRC.git] / Robust / src / Runtime / bamboo / multicoregccompact.c
#ifdef MULTICORE_GC
#include "structdefs.h"
#include "multicoregccompact.h"
#include "runtime_arch.h"
#include "multicoreruntime.h"
#include "multicoregarbage.h"
#include "markbit.h"
#include "multicoremem_helper.h"
#include "gcqueue.h"

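// Count the cores that have not yet returned their spare memory to the
// master core; returnedmem[i] stays nonzero until core i reports its heap
// top via handleReturnMem_I.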
int gc_countRunningCores() {
  int count=0;
  for(int i = 0; i < NUMCORES4GC; i++) {
    if(returnedmem[i]) {
      count++;
    }
  }
  return count;
}

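// Initialize the source (orig) and destination (to) cursors for the
// compaction phase: both start at the base of this core's first local
// block, so live objects are slid toward the start of the core's region.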
void initOrig_Dst(struct moveHelper * orig,struct moveHelper * to) {
  // init the dst ptr
  to->localblocknum = 0;
  BASEPTR(to->base, BAMBOO_NUM_OF_CORE, to->localblocknum);
  to->ptr = to->base;
  to->bound=to->base+BLOCKSIZE(to->localblocknum);

  // init the orig ptr
  orig->localblocknum = 0;
  orig->ptr=orig->base = to->base;
  orig->bound=orig->base+BLOCKSIZE(orig->localblocknum);
#ifdef GC_CACHE_ADAPT
  to->pagebound=to->base+BAMBOO_PAGE_SIZE;
  orig->pagebound=orig->base+BAMBOO_PAGE_SIZE;
#endif
}

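// Advance the destination cursor to the next local block on this core.
// Only called while another local block is still available (see getSpace).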
void getSpaceLocally(struct moveHelper *to) {
  //we have space on our core...just keep going
  to->localblocknum++;
  BASEPTR(to->base,BAMBOO_NUM_OF_CORE, to->localblocknum);
  to->ptr=to->base;
  to->bound=to->base+BLOCKSIZE(to->localblocknum);
#ifdef GC_CACHE_ADAPT
  to->pagebound=to->base+BAMBOO_PAGE_SIZE;
#endif
}

//This function is called on the master core only...and typically by
//the message interrupt handler.  Core cnum has reported the top of its
//used space (heaptop): record the free space in that block, mark any
//higher blocks owned by that core as free, and try to satisfy pending
//memory requests.

void handleReturnMem_I(unsigned int cnum, void *heaptop) {
  unsigned int blockindex;
  BLOCKINDEX(blockindex, heaptop);
  unsigned INTPTR localblocknum=GLOBALBLOCK2LOCAL(blockindex);
  //this core is done as far as memory usage is concerned
  returnedmem[cnum]=0;

  struct blockrecord * blockrecord=&allocationinfo.blocktable[blockindex];

  blockrecord->status=BS_FREE;
  blockrecord->usedspace=(unsigned INTPTR)(heaptop-OFFSET2BASEVA(blockindex)-gcbaseva);
  blockrecord->freespace=BLOCKSIZE(localblocknum)-blockrecord->usedspace;
  /* Update the lowest free block */
  if (blockindex < allocationinfo.lowestfreeblock) {
    allocationinfo.lowestfreeblock=blockindex;
  }

  /* This is our own block...means we should mark other blocks above us as free*/

  if (cnum==blockrecord->corenum) {
    unsigned INTPTR nextlocalblocknum=localblocknum+1;
    for(;nextlocalblocknum<numblockspercore;nextlocalblocknum++) {
      unsigned INTPTR blocknum=BLOCKINDEX2(cnum, nextlocalblocknum);
      struct blockrecord * nextblockrecord=&allocationinfo.blocktable[blocknum];
      nextblockrecord->status=BS_FREE;
      nextblockrecord->usedspace=0;
      //this is true because this cannot be the lowest block
      nextblockrecord->freespace=BLOCKSIZE(1);
    }
  }

  //this could be the last one....
  int count=gc_countRunningCores();
  if (gcmovepending==count) {
    // All cores have stopped...hand out memory as necessary to handle all requests
    handleMemoryRequests_I();
  } else {
    //see if returned memory blocks let us resolve requests
    useReturnedMem(cnum, allocationinfo.lowestfreeblock);
  }
}

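// A core has just returned memory: scan its blocks from localblockindex
// upward and use any free block that is large enough to satisfy an
// outstanding request from another core.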
void useReturnedMem(unsigned int retcorenum, block_t localblockindex) {
  for(int i=0;i<NUMCORES4GC;i++) {
    unsigned INTPTR requiredmem=gcrequiredmems[i];
    if (requiredmem) {
      unsigned INTPTR desiredmem=maxusefulmems[i];
      unsigned INTPTR threshold=(desiredmem<MINMEMORYCHUNKSIZE)? desiredmem: MINMEMORYCHUNKSIZE;
      unsigned INTPTR memcheck=requiredmem>threshold?requiredmem:threshold;

      for(block_t nextlocalblocknum=localblockindex;nextlocalblocknum<numblockspercore;nextlocalblocknum++) {
        unsigned INTPTR blocknum=BLOCKINDEX2(retcorenum, nextlocalblocknum);
        struct blockrecord * nextblockrecord=&allocationinfo.blocktable[blocknum];
        if (nextblockrecord->status==BS_FREE) {
          unsigned INTPTR freespace=nextblockrecord->freespace&~BAMBOO_CACHE_LINE_MASK;
          if (freespace>=memcheck) {
            nextblockrecord->status=BS_USED;
            void *blockptr=OFFSET2BASEVA(blocknum)+gcbaseva;
            unsigned INTPTR usedspace=((nextblockrecord->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
            //taken care of one block
            gcmovepending--;
            void *startaddr=blockptr+usedspace;
            gcrequiredmems[i]=0;
            maxusefulmems[i]=0;
            if (i==STARTUPCORE) {
              gctomove = true;
              gcmovestartaddr = startaddr;
            } else if(BAMBOO_CHECK_SEND_MODE()) {
              cache_msg_2_I(i,GCMOVESTART,startaddr);
            } else {
              send_msg_2_I(i,GCMOVESTART,startaddr);
            }
            //this request is satisfied...don't hand the same core a second block
            break;
          }
        }
      }
    }
  }
}

void handleReturnMem(unsigned int cnum, void *heaptop) {
  BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
  handleReturnMem_I(cnum, heaptop);
  BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
}

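// The destination core has used up its local quota: ask the master core
// for a block (directly if we are the master, via a GCFINISHCOMPACT
// message otherwise) and spin on gctomove until a start address arrives.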
void getSpaceRemotely(struct moveHelper *to, unsigned int minimumbytes) {
  //need to get another block from elsewhere
  //set flag to wait for memory

  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    gctomove=false;
    BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
    void *startaddr=handlegcfinishcompact_I(BAMBOO_NUM_OF_CORE, minimumbytes, gccurr_heaptop);
    BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();

    if (startaddr) {
      gcmovestartaddr=startaddr;
    } else {
      while(!gctomove) ;
    }
  } else {
    gctomove=false;
    //send request for memory
    send_msg_4(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, minimumbytes, gccurr_heaptop);
    //wait for flag to be set that we received message
    while(!gctomove)
      ;
  }

  //store pointer
  to->ptr = gcmovestartaddr;

  //set localblock number to high number to indicate this block isn't local
  to->localblocknum = MAXBLOCK;
  unsigned int globalblocknum;
  BLOCKINDEX(globalblocknum, to->ptr);
  to->base = gcbaseva + OFFSET2BASEVA(globalblocknum);
  to->bound=gcbaseva+BOUNDPTR(globalblocknum);
#ifdef GC_CACHE_ADAPT
  to->pagebound=(void *)((int)((int)(to->ptr)&(~(BAMBOO_PAGE_SIZE-1)))+BAMBOO_PAGE_SIZE);
#endif
}

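// Pick the next destination block: use another local block while we are
// still below our quota (gcblock2fill), otherwise request one remotely.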
void getSpace(struct moveHelper *to, unsigned int minimumbytes) {
  //need more space to compact into
  if ((to->localblocknum+1) < gcblock2fill) {
    getSpaceLocally(to);
  } else {
    getSpaceRemotely(to, minimumbytes);
  }
}

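// Per-core compaction loop: repeatedly compact units from orig into to.
// Once the remaining live data fits in the current destination block, the
// final heap top is reported to the master; whenever a unit does not fit,
// more destination space is requested.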
void compacthelper(struct moveHelper * orig,struct moveHelper * to) {
  bool senttopmessage=false;
  while(true) {
    if ((gccurr_heaptop <= ((unsigned INTPTR)(to->bound-to->ptr)))&&!senttopmessage) {
      //This block is the last for this core...let the startup core know
      if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
        handleReturnMem(BAMBOO_NUM_OF_CORE, to->ptr+gccurr_heaptop);
      } else {
        send_msg_3(STARTUPCORE, GCRETURNMEM, BAMBOO_NUM_OF_CORE, to->ptr+gccurr_heaptop);
      }
      //Only send the message once
      senttopmessage=true;
    }
    unsigned int minimumbytes=COMPACTUNITS(orig, to);
    if (orig->ptr==orig->bound) {
      //need more data to compact
      //advance to the next local block
      orig->localblocknum++;
      BASEPTR(orig->base,BAMBOO_NUM_OF_CORE, orig->localblocknum);
      orig->ptr=orig->base;
      orig->bound=orig->base+BLOCKSIZE(orig->localblocknum);
#ifdef GC_CACHE_ADAPT
      orig->pagebound=orig->base+BAMBOO_PAGE_SIZE;
#endif
      if (orig->base >= gcbaseva+BAMBOO_SHARED_MEM_SIZE) {
        CACHEADAPT_FINISH_COMPACT(to->ptr);
        break;
      }
    }
    if (minimumbytes!=0) {
      getSpace(to, minimumbytes);
    }
  }
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
    handlegcfinishcompact_I(BAMBOO_NUM_OF_CORE, 0, 0);
    BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
  } else {
    send_msg_4(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, 0, 0);
  }
}

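// Look for a free block on one of the requesting core's neighbors (as
// given by the core2test table), preferring the lowest local block level;
// returns a start address inside a suitable block, or NULL.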
void * checkNeighbors_I(int ncorenum, unsigned INTPTR requiredmem, unsigned INTPTR desiredmem) {
  int minblockindex=allocationinfo.lowestfreeblock/NUMCORES4GC;
  unsigned INTPTR threshold=(desiredmem<MINMEMORYCHUNKSIZE)? desiredmem: MINMEMORYCHUNKSIZE;
  unsigned INTPTR memcheck=requiredmem>threshold?requiredmem:threshold;

  for(block_t lblock=minblockindex;lblock<numblockspercore;lblock++) {
    for(int i=0;i<NUM_CORES2TEST;i++) {
      int neighborcore=core2test[ncorenum][i];
      if (neighborcore!=-1) {
        block_t globalblockindex=BLOCKINDEX2(neighborcore, lblock);
        struct blockrecord * block=&allocationinfo.blocktable[globalblockindex];
        if (block->status==BS_FREE) {
          unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
          if (memcheck<=freespace) {
            //we have a block
            //mark block as used
            block->status=BS_USED;
            void *blockptr=OFFSET2BASEVA(globalblockindex)+gcbaseva;
            unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
            return blockptr+usedspace;
          }
        }
      }
    }
  }
  return NULL;
}

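// Linear scan of the block table from the lowest known free block up to
// topblock; takes the first free block that satisfies the request and
// updates allocationinfo.lowestfreeblock as a side effect.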
void * globalSearch_I(unsigned int topblock, unsigned INTPTR requiredmem, unsigned INTPTR desiredmem) {
  unsigned int firstfree=NOFREEBLOCK;
  unsigned INTPTR threshold=(desiredmem<MINMEMORYCHUNKSIZE)? desiredmem: MINMEMORYCHUNKSIZE;
  unsigned INTPTR memcheck=requiredmem>threshold?requiredmem:threshold;

  for(block_t i=allocationinfo.lowestfreeblock;i<topblock;i++) {
    struct blockrecord * block=&allocationinfo.blocktable[i];
    if (block->status==BS_FREE) {
      if(firstfree==NOFREEBLOCK)
        firstfree=i;
      unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
      if (memcheck<=freespace) {
        //we have a block
        //mark block as used
        block->status=BS_USED;
        void *blockptr=OFFSET2BASEVA(i)+gcbaseva;
        unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
        allocationinfo.lowestfreeblock=firstfree;
        return blockptr+usedspace;
      }
    }
  }
  allocationinfo.lowestfreeblock=firstfree;
  return NULL;
}

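// Satisfy one pending request from the given core, scanning all blocks
// from lowestblock upward.  Blocks owned by cores that have not returned
// their memory yet are skipped; if nothing fits, the runtime exits.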
void handleOneMemoryRequest(int core, unsigned int lowestblock) {
  unsigned INTPTR requiredmem=gcrequiredmems[core];
  unsigned INTPTR desiredmem=maxusefulmems[core];
  block_t firstfree=NOFREEBLOCK;
  unsigned INTPTR threshold=(desiredmem<MINMEMORYCHUNKSIZE)? desiredmem: MINMEMORYCHUNKSIZE;
  unsigned INTPTR memcheck=requiredmem>threshold?requiredmem:threshold;

  for(block_t searchblock=lowestblock;searchblock<GCNUMBLOCK;searchblock++) {
    struct blockrecord * block=&allocationinfo.blocktable[searchblock];
    if (block->status==BS_FREE) {
      if(firstfree==NOFREEBLOCK)
        firstfree=searchblock;
      //don't take a block from another core that hasn't returned its memory yet
      if (block->corenum!=core&&returnedmem[block->corenum])
        continue;

      unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
      if (freespace>=memcheck) {
        //TODO: should check memory block at same level on our own core...if that works, use it to preserve locality

        //we have a block
        //mark block as used
        block->status=BS_USED;
        void *blockptr=OFFSET2BASEVA(searchblock)+gcbaseva;
        unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
        allocationinfo.lowestfreeblock=firstfree;
        //taken care of one block
        gcmovepending--;
        void *startaddr=blockptr+usedspace;
        if(BAMBOO_CHECK_SEND_MODE()) {
          cache_msg_2_I(core,GCMOVESTART,startaddr);
        } else {
          send_msg_2_I(core,GCMOVESTART,startaddr);
        }
        return;
      }
    }
  }
  //this is bad...ran out of memory
  printf("Out of memory.  Was trying for %u bytes\n", threshold);
  BAMBOO_EXIT();
}

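// Called once every core has either finished compacting or blocked on a
// memory request: walk the pending requests and hand out free blocks.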
void handleMemoryRequests_I() {
  unsigned int lowestblock=allocationinfo.lowestfreeblock;
  if (lowestblock==NOFREEBLOCK) {
    lowestblock=numblockspercore*NUMCORES4GC;
  }

  for(int i=0;i < NUMCORES4GC; i++) {
    if (gcrequiredmems[i]) {
      handleOneMemoryRequest(i, lowestblock);
      lowestblock=allocationinfo.lowestfreeblock;
    }
  }
}

/* Find a spare block for the requesting core, or record the request if
   nothing is available yet.  Should be invoked with interrupts turned off. */

void * gcfindSpareMem_I(unsigned INTPTR requiredmem, unsigned INTPTR desiredmem,unsigned int requiredcore) {
  if (allocationinfo.lowestfreeblock!=NOFREEBLOCK) {
    //There are spare blocks
    unsigned int topblock=numblockspercore*NUMCORES4GC;
    void *memblock;

    if ((memblock=checkNeighbors_I(requiredcore, requiredmem, desiredmem))!=NULL) {
      return memblock;
    } else if ((memblock=globalSearch_I(topblock, requiredmem, desiredmem))!=NULL) {
      return memblock;
    }
  }

  // If we cannot find spare mem right now, hold the request
  gcrequiredmems[requiredcore] = requiredmem;
  maxusefulmems[requiredcore]=desiredmem;
  gcmovepending++;

  int count=gc_countRunningCores();
  if (gcmovepending==count) {
    // All cores have stopped...hand out memory as necessary to handle all requests
    handleMemoryRequests_I();
  }

  return NULL;
}

#ifdef GC_CACHE_ADAPT
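// Page-by-page wrapper around compactblocks used when cache adaptation is
// enabled, so the CACHEADAPT_FINISH_SRC_PAGE/DST_PAGE hooks run once per
// source and destination page that is compacted.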
unsigned int compactblockshelper(struct moveHelper * orig, struct moveHelper * to) {
  unsigned int minimumbytes=0;
  void *origptr=orig->ptr;
  void *origbound=orig->bound;
  void * tmporig=orig->ptr;
  void * tmpto=to->ptr;

  while(true) {
    //call compactblocks using the page boundaries at the current bounds
    minimumbytes=compactblocks(orig, to);
    if(minimumbytes == 0) {
      //bump the orig page bound...
      //use old orig pointer to make sure we get correct block
      CACHEADAPT_FINISH_SRC_PAGE(tmporig, tmpto, to->ptr);
      if (orig->ptr<origbound) {
        tmporig=orig->ptr;
        tmpto=to->ptr;
        orig->pagebound=orig->pagebound+BAMBOO_PAGE_SIZE;
      } else {
        return 0;
      }
    } else {
      // require more memory
      void *endtoptr=to->ptr+minimumbytes;
      if (endtoptr>to->bound) {
        CACHEADAPT_FINISH_DST_PAGE(orig->ptr, tmpto, to->ptr, 0);
        return minimumbytes;
      } else {
        CACHEADAPT_FINISH_DST_PAGE(orig->ptr, tmpto, to->ptr, minimumbytes);
        to->pagebound=((((unsigned INTPTR)endtoptr)-1)&~(BAMBOO_PAGE_SIZE-1))+BAMBOO_PAGE_SIZE;
        //update pointers to avoid double counting the stuff we already added in
        tmporig=orig->ptr+minimumbytes;
        tmpto=to->ptr+minimumbytes;
      }
    }
  }
}
#endif

/* This function is performance critical...  spend more time optimizing it */

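// Walk the marked objects in the current source region and assign each one
// a new, compacted destination address, recorded in gcmappingtbl.  Returns
// 0 when the source region is exhausted, or the size of the first object
// that does not fit in the destination region (the caller must then obtain
// more space).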
unsigned int compactblocks(struct moveHelper * orig, struct moveHelper * to) {
  void *toptrinit=to->ptr;
  void *toptr=toptrinit;
  void *origptr=orig->ptr;
#ifdef GC_CACHE_ADAPT
  void *origbound=orig->pagebound;
  void *tobound=to->pagebound;
#else
  void *origbound=orig->bound;
  void *tobound=to->bound;
#endif
  unsigned INTPTR origendoffset=ALIGNTOTABLEINDEX((unsigned INTPTR)(origbound-gcbaseva));
  unsigned int objlength;

  while(origptr<origbound) {
    //Try to skip over unmarked space quickly first
    unsigned INTPTR offset=(unsigned INTPTR) (origptr-gcbaseva);
    unsigned INTPTR arrayoffset=ALIGNTOTABLEINDEX(offset);
    if (!gcmarktbl[arrayoffset]) {
      do {
        arrayoffset++;
        if (arrayoffset>=origendoffset) {
          //finished with the block (a page in the CACHE_ADAPT version)...
          to->ptr=toptr;
          orig->ptr=origbound;
          gccurr_heaptop-=(unsigned INTPTR)(toptr-toptrinit);
          return 0;
        }
      } while(!gcmarktbl[arrayoffset]);
      origptr=CONVERTTABLEINDEXTOPTR(arrayoffset);
    }

    //Scan more carefully next
    objlength=getMarkedLength(origptr);

    if (objlength!=NOTMARKED) {
      unsigned int length=ALIGNSIZETOBYTES(objlength);

      //code between this and next comment should be removed
#ifdef GC_DEBUG
      unsigned int size;
      unsigned int type;
      gettype_size(origptr, &type, &size);
      size=((size-1)&(~(ALIGNMENTSIZE-1)))+ALIGNMENTSIZE;

      if (size!=length) {
        tprintf("BAD SIZE IN BITMAP: type=%u object=%x size=%u length=%u\n", type, origptr, size, length);
        unsigned INTPTR alignsize=ALIGNOBJSIZE((unsigned INTPTR)(origptr-gcbaseva));
        unsigned INTPTR hibits=alignsize>>4;
        unsigned INTPTR lobits=(alignsize&15)<<1;
        tprintf("hibits=%x lobits=%x\n", hibits, lobits);
        tprintf("hi=%x lo=%x\n", gcmarktbl[hibits], gcmarktbl[hibits+1]);
      }
#endif
      //end of code to remove

      void *endtoptr=toptr+length;
      if (endtoptr>tobound) {
        gccurr_heaptop-=(unsigned INTPTR)(toptr-toptrinit);
        to->ptr=toptr;
        orig->ptr=origptr;
        return length;
      }
      //good to move objects and update pointers

      gcmappingtbl[OBJMAPPINGINDEX(origptr)]=toptr;

      origptr+=length;
      toptr=endtoptr;
    } else
      origptr+=ALIGNMENTSIZE;
  }
  to->ptr=toptr;
  orig->ptr=origptr;
  gccurr_heaptop-=(unsigned INTPTR)(toptr-toptrinit);
  return 0;
}

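// Per-core entry point for the compaction phase: set up the move cursors
// and run the compaction loop.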
void compact() {
  BAMBOO_ASSERT(COMPACTPHASE == gc_status_info.gcphase);

  // initialize structs for compacting
  struct moveHelper orig;
  struct moveHelper to;
  initOrig_Dst(&orig, &to);

  compacthelper(&orig, &to);
}

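// Run on the master core: size each core's region via load balancing,
// reset the block table, start the compact phase on every core, take part
// in compaction itself, and wait for all cores to finish.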
void master_compact() {
  // predict number of blocks to fill for each core
  numblockspercore = loadbalance()+1;

  GC_PRINTF("mark phase finished \n");

  gc_resetCoreStatus();
  //initialize local data structures first....we don't want remote requests messing data up
  unsigned int initblocks=numblockspercore*NUMCORES4GC;
  allocationinfo.lowestfreeblock=NOFREEBLOCK;

  //assigned blocks
  for(int i=0;i<initblocks;i++) {
    allocationinfo.blocktable[i].status=BS_USED;
  }

  //free blocks
  for(int i=initblocks;i<GCNUMBLOCK;i++) {
    allocationinfo.blocktable[i].status=BS_FREE;
    allocationinfo.blocktable[i].usedspace=0;
    //this is true because all cores have at least one block already...
    allocationinfo.blocktable[i].freespace=BLOCKSIZE(1);
  }

  //start all of the cores
  for(int i = 0; i < NUMCORES4GC; i++) {
    // init some data structures for the compact phase
    gcrequiredmems[i] = 0;
    gccorestatus[i] = 1;
    returnedmem[i] = 1;
    //send start compact messages to all cores
    if(i != STARTUPCORE) {
      send_msg_2(i, GCSTARTCOMPACT, numblockspercore);
    } else {
      gcblock2fill = numblockspercore;
    }
  }
  GCPROFILE_ITEM();
  // compact phase
  compact();

  /* wait for all cores to finish compacting */
  while(!gc_checkCoreStatus())
    ;

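// Sanity check (debug builds only): walk the whole shared heap and verify
// that the mark bitmap and the forwarding table agree, that recorded object
// sizes match the type information, and that no forwarding pointer points
// into space the block table considers free.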
#ifdef GC_DEBUG
  void *nextvalid=gcbaseva;
  for(void *tmp=gcbaseva; tmp<gcbaseva+BAMBOO_SHARED_MEM_SIZE;tmp+=ALIGNMENTSIZE) {
    unsigned int objlength=getMarkedLength(tmp);
    void *forwarding=gcmappingtbl[OBJMAPPINGINDEX(tmp)];
    if (tmp>=nextvalid&&((objlength!=0)!=(forwarding!=NULL))) {
      tprintf("Maps disagree tmp=%x olength=%u forwarding=%x\n",tmp, objlength, forwarding);
    }
    if (tmp<nextvalid&&forwarding!=NULL) {
      tprintf("Weird forwarding pointer\n");
    }
    if (tmp>=nextvalid&&(objlength!=0||forwarding!=NULL)) {
      unsigned int length=ALIGNSIZETOBYTES(objlength);
      unsigned int size;
      unsigned int type;
      nextvalid=tmp+length;
      gettype_size(tmp, &type, &size);
      size=((size-1)&(~(ALIGNMENTSIZE-1)))+ALIGNMENTSIZE;
      if (size!=length) {
        tprintf("Bad size in bitmap: tmp=%x length=%u size=%u type=%u\n", tmp, length, size, type);
      }
      block_t blockindex;
      BLOCKINDEX(blockindex, forwarding);
      struct blockrecord * block=&allocationinfo.blocktable[blockindex];
      void *blockptr=OFFSET2BASEVA(blockindex)+gcbaseva;

      if (block->status==BS_FREE) {
        if (forwarding>(blockptr+block->usedspace)) {
          tprintf("Pointer references free space forwarding=%x tmp=%x length=%u type=%u blockindex=%u, baseptr=%x, usedspace=%u, status=%u\n", forwarding, tmp, length, type,blockindex, blockptr, block->usedspace, block->status);
        }
      }
    }
  }
#endif

  GCPROFILE_ITEM();

  //just in case we didn't get blocks back...
  if (allocationinfo.lowestfreeblock==NOFREEBLOCK)
    allocationinfo.lowestfreeblock=numblockspercore*NUMCORES4GC;

  GC_PRINTF("compact phase finished \n");
}

#endif // MULTICORE_GC