1 #ifndef BAMBOO_MULTICORE_GARBAGE_H
2 #define BAMBOO_MULTICORE_GARBAGE_H
5 #include "multicoregc.h"
6 #include "multicorehelper.h" // for mappings between core # and block #
7 #include "structdefs.h"
8 #include "multicoregcprofile.h"
// Debug print hook. The two alternative definitions below are normally
// selected by a preprocessor conditional (e.g. a GC debug switch) whose
// #ifdef/#else lines are not visible in this excerpt — NOTE(review): confirm.
#define GC_PRINTF tprintf
// Release variant: the if(0) compiles the call away while keeping arguments parsed.
#define GC_PRINTF if(0) tprintf
// data structures for GC
// A "large" (L) block is twice the size of a regular shared-memory block.
#define BAMBOO_SMEM_SIZE_L (BAMBOO_SMEM_SIZE * 2)
// Byte offset where the per-core large-block region ends and small blocks begin.
#define BAMBOO_LARGE_SMEM_BOUND (BAMBOO_SMEM_SIZE_L*NUMCORES4GC)
// let each gc core to have one big block, this is very important
// for the computation of NUMBLOCKS(s, n), DO NOT change this!
// GC phase constants. These are members of the GCPHASETYPE enum; the opening
// "typedef enum {" and several intermediate phases fall outside this excerpt.
// NOTE(review): FINISHPHASE's "0x6/0x7" comment suggests one phase is
// conditionally compiled — confirm against the full header.
INITPHASE = 0x0, // 0x0
SUBTLECOMPACTPHASE, // 0x3
PREFINISHPHASE, // 0x6
FINISHPHASE // 0x6/0x7
// Shared GC status record consulted by all cores. The struct's closing brace
// and the "} gc_status_t;" typedef name fall outside this excerpt.
typedef struct gc_status {
  volatile bool gcprocessing; // NOTE(review): presumably true while a collection runs — confirm
  volatile GCPHASETYPE gcphase; // indicating GC phase
  volatile bool gcbusystatus; // NOTE(review): per-core busy flag for phase termination — confirm
// (intervening lines, including the struct close, are missing from this excerpt)
extern volatile bool gcflag; // set when a garbage collection has been requested
extern gc_status_t gc_status_info; // the single shared status instance (defined elsewhere)
// ---- mark-phase bookkeeping ----
// NOTE(review): these are tentative definitions in a header (not extern);
// this relies on every core compiling its own image or on common-symbol
// linkage — confirm the build model before changing.
volatile bool gcprecheck; // indicates if there are updated pregc information
unsigned int gccurr_heaptop; // current top of this core's local heap, in bytes
struct MGCHash * gcforwardobjtbl; // cache forwarded objs in mark phase
// for mark phase termination
volatile unsigned int gccorestatus[NUMCORESACTIVE];//records status of each core
volatile unsigned int gcnumsendobjs[2][NUMCORESACTIVE];//# of objects sent out
volatile unsigned int gcnumreceiveobjs[2][NUMCORESACTIVE];//# of objects received
volatile unsigned int gcnumsrobjs_index;//indicates which entry to record the
// info received before phase 1 of the mark finish
// the info received in phase 2 must be
// recorded in the other entry
unsigned int gcself_numsendobjs; // this core's own sent-object count
unsigned int gcself_numreceiveobjs; // this core's own received-object count
unsigned int gcheaptop; // top of the shared heap
unsigned int gcloads[NUMCORES4GC]; // per-core live-data load, used to plan compaction
unsigned int gctopcore; // the core host the top of the heap
unsigned int gctopblock; // the number of current top block
unsigned int gcnumlobjs; // number of large objects
// ---- compact-phase bookkeeping ----
// compact instruction
unsigned int gcmarkedptrbound; // highest marked address seen during mark
unsigned int gcblock2fill; // next block this core should fill during compact
unsigned int gcstopblock[NUMCORES4GC]; // indicate when to stop compact phase
unsigned int gcfilledblocks[NUMCORES4GC]; //indicate how many blocks have been fulfilled
// move instruction: destination for data being evacuated off a core
unsigned int gcmovestartaddr; // address to move data to
unsigned int gcdstcore; // destination core for the move
volatile bool gctomove; // signals that a move destination has been granted
unsigned int gcrequiredmems[NUMCORES4GC]; //record pending mem requests
volatile unsigned int gcmovepending; // number of cores still waiting for move space
// shared memory pointer for pointer mapping tbls
// In GC version, this block of memory is located at the bottom of the
// shared memory, right on the top of the smem tbl.
// The bottom of the shared memory = sbstart tbl + smemtbl + bamboo_rmsp
// These three types of table are always reside at the bottom of the shared
// memory and will never be moved or garbage collected
unsigned int * gcmappingtbl; // old-address -> new-address table used by updateref
unsigned int bamboo_rmsp_size; // size in bytes of the mapping-table region
unsigned int bamboo_baseobjsize; // minimum object granularity; divisor in OBJMAPPINGINDEX
// table recording the starting address of each small block
// (size is BAMBOO_SMEM_SIZE)
// Note: 1. this table always resides on the very bottom of the shared memory
// 2. it is not counted in the shared heap, would never be garbage
unsigned int gcsbstarttbl_len; // length of the sbstart table
unsigned int gcreservedsb; // number of reserved sblock for sbstarttbl
unsigned int gcnumblock; // number of total blocks in the shared mem
unsigned int gcbaseva; // base va for shared memory without reserved sblocks
// ---- cache-adaptation support (optional feature) ----
// NOTE(review): the matching #endif for this #ifdef is outside this excerpt.
#ifdef GC_CACHE_ADAPT
unsigned int gctopva; // top va for shared memory without reserved sblocks
volatile bool gccachestage; // true while the cache-adaptation stage is active
// table recording the sampling data collected for cache adaption
int * gccachesamplingtbl; // global sampling table (shared)
int * gccachesamplingtbl_local; // this core's slice of the sampling table
unsigned int size_cachesamplingtbl_local; // bytes in the local sampling slice
int * gccachesamplingtbl_r; // second (read) sampling table — NOTE(review): confirm _r semantics
int * gccachesamplingtbl_local_r; // this core's slice of the _r table
unsigned int size_cachesamplingtbl_local_r; // bytes in the local _r slice
int * gccachepolicytbl; // chosen cache policy per page/block
unsigned int size_cachepolicytbl; // bytes in the policy table
// Busy-wait until the global GC phase equals the given phase value.
// Fix: parenthesize the macro argument so expression arguments associate
// correctly inside the comparison (CERT PRE01-C).
#define WAITFORGCPHASE(phase) while(gc_status_info.gcphase != (phase)) ;
// Map a shared-heap address p to its index in the pointer-mapping table:
// byte offset from the heap base, divided by the minimum object granularity.
// Fix: parenthesize p — previously the cast bound only to p's first token,
// so OBJMAPPINGINDEX(base+off) cast base alone (CERT PRE01-C).
#define OBJMAPPINGINDEX(p) ((((unsigned int)(p))-gcbaseva)/bamboo_baseobjsize)
// True iff address p lies inside the shared heap [gcbaseva, gcbaseva+size).
// Fix: parenthesize p in both casts — an expression argument such as
// ISSHAREDOBJ(base+off) previously had the cast applied to base alone.
#define ISSHAREDOBJ(p) \
  ((((unsigned int)(p))>=gcbaseva)&&(((unsigned int)(p))<(gcbaseva+(BAMBOO_SHARED_MEM_SIZE))))
// Round size s up to the next cache-line multiple and store the result
// through the unsigned int pointer as: *(as) = ((s-1) & ~mask) + line_size.
// Fix: parenthesize both arguments — previously ALIGNSIZE(a|b, p) computed
// (a | (b-1)) because the subtraction bound tighter than the caller's
// operator, and the destination cast applied only to as's first token.
#define ALIGNSIZE(s, as) \
  (*((unsigned int*)(as)))=(((((unsigned int)(s))-1)&(~(BAMBOO_CACHE_LINE_MASK)))+(BAMBOO_CACHE_LINE_SIZE))
// mapping of pointer to block # (start from 0), here the block # is
// the global index across all cores: the first NUMCORES4GC indices are the
// large (L) blocks, the remainder are small blocks above the bound.
// NOTE(review): the macro's brace/else/closing lines are missing from this
// excerpt — the two assignments below are the then/else branches.
#define BLOCKINDEX(p, b) \
  unsigned int t = (p) - gcbaseva; \
  if(t < (BAMBOO_LARGE_SMEM_BOUND)) { \
    (*((unsigned int*)b)) = t / (BAMBOO_SMEM_SIZE_L); \
    (*((unsigned int*)b)) = NUMCORES4GC+((t-(BAMBOO_LARGE_SMEM_BOUND))/(BAMBOO_SMEM_SIZE)); \
// mapping of pointer to core #
// Single-core GC trivially maps everything to core 0; otherwise translate
// the pointer to a global block index and look up its host core in the
// gc_block2core table (block layout repeats every NUMCORES4GC*2 blocks).
// NOTE(review): brace/else/closing lines of this macro are missing here.
#define RESIDECORE(p, c) \
  if(1 == (NUMCORES4GC)) { \
    (*((unsigned int*)c)) = 0; \
    BLOCKINDEX((p), &b); \
    (*((unsigned int*)c)) = gc_block2core[(b%(NUMCORES4GC*2))]; \
// NOTE: n starts from 0
// mapping of heaptop (how many bytes there are in the local heap) to
// the number of the block
// the number of the block indicates that the block is the xth block on
// this core: block 0 is the core's large (L) block, later blocks are small.
// NOTE(review): the else/closing lines of NUMBLOCKS and OFFSET are missing
// from this excerpt; each shows only its then- and else-branch assignments.
#define NUMBLOCKS(s, n) \
  if(s < (BAMBOO_SMEM_SIZE_L)) { \
    (*((unsigned int*)(n))) = 0; \
    (*((unsigned int*)(n))) = 1 + ((s) - (BAMBOO_SMEM_SIZE_L)) / (BAMBOO_SMEM_SIZE); \
#define OFFSET(s, o) \
  if(s < BAMBOO_SMEM_SIZE_L) { \
    (*((unsigned int*)(o))) = (s); \
    (*((unsigned int*)(o))) = ((s)-(BAMBOO_SMEM_SIZE_L))%(BAMBOO_SMEM_SIZE); \
// Base offset (relative to gcbaseva) of global block i: the first
// NUMCORES4GC blocks are the per-core large (L) blocks; any later block is a
// small block placed above BAMBOO_LARGE_SMEM_BOUND.
#define OFFSET2BASEVA(i) \
  (((i)>=NUMCORES4GC)?((BAMBOO_LARGE_SMEM_BOUND)+(BAMBOO_SMEM_SIZE)*((i)-NUMCORES4GC)):((BAMBOO_SMEM_SIZE_L)*(i)))
// Block size selector: nonzero c selects the large (L) block size,
// zero selects the regular small-block size.
#define BLOCKSIZE(c) \
  ((c)?BAMBOO_SMEM_SIZE_L:BAMBOO_SMEM_SIZE)
// mapping of (core #, index of the block) to the global block index
// Uses the gc_core2block table for the first two local blocks of the core,
// then advances by whole layout rounds (NUMCORES4GC*2 blocks) per pair.
#define BLOCKINDEX2(c, n) \
  (gc_core2block[(2*(c))+((n)%2)]+((NUMCORES4GC*2)*((n)/2)))
// Exclusive upper byte bound (relative to gcbaseva) of global block b:
// large (L) blocks for b < NUMCORES4GC, small blocks above the bound after.
#define BOUNDPTR(b) \
  (((b)>=NUMCORES4GC)?((BAMBOO_LARGE_SMEM_BOUND)+((b)-NUMCORES4GC+1)*(BAMBOO_SMEM_SIZE)):(((b)+1)*(BAMBOO_SMEM_SIZE_L)))
// Upper bound of the n-th local block on a core: block 0 is the core's
// large (L) block, later blocks add small-block increments.
#define BLOCKBOUND(n) \
  (((n)==0)?BAMBOO_SMEM_SIZE_L:BAMBOO_SMEM_SIZE_L+BAMBOO_SMEM_SIZE*(n))
// mapping of (core #, number of the block) to the base pointer of the block
// Translates the local block number to a global index via BLOCKINDEX2, then
// to an absolute address. NOTE(review): the macro's brace/else/closing lines
// are missing from this excerpt.
#define BASEPTR(c, n, p) \
  unsigned int b = BLOCKINDEX2((c), (n)); \
  if(b < (NUMCORES4GC)) { \
    (*((unsigned int*)p)) = gcbaseva + b * (BAMBOO_SMEM_SIZE_L); \
    (*((unsigned int*)p)) = gcbaseva+(BAMBOO_LARGE_SMEM_BOUND)+ \
      (b-(NUMCORES4GC))*(BAMBOO_SMEM_SIZE); \
// the next core in the top of the heap
#define NEXTTOPCORE(b) (gc_block2core[((b)+1)%(NUMCORES4GC*2)])
// ---- block close / termination-check / broadcast helpers ----
// NOTE(review): the three multi-line macros below are each missing their
// closing lines in this excerpt; bodies shown are partial.
// close current block, fill the header
#define CLOSEBLOCK(base, size) \
  BAMBOO_MEMSET_WH((base), '\0', BAMBOO_CACHE_LINE_SIZE); \
  *((int*)(base)) = (size); \
// check if all cores are stall now
#define GC_CHECK_ALL_CORE_STATUS(f) \
  gccorestatus[BAMBOO_NUM_OF_CORE] = 0; \
  if(gc_checkAllCoreStatus()) { \
// send a 1-word msg to all clients
#define GC_SEND_MSG_1_TO_CLIENT(m) \
  for(int i = 0; i < NUMCORESACTIVE; ++i) { \
    gccorestatus[i] = 1; \
    if(BAMBOO_NUM_OF_CORE != i) { \
      send_msg_1(i, (m)); \
// True iff the object pointed to by p is hosted on this core.
#define ISLOCAL(p) (hostcore(p)==BAMBOO_NUM_OF_CORE)
// ---- GC entry points ----
// Fix: declare no-argument functions as (void) — in C, an empty parameter
// list "()" is an unprototyped declaration and disables argument checking.
void initmulticoregcdata(void); // allocate and initialize shared GC data
void dismulticoregcdata(void); // tear down shared GC data
bool gc_checkAllCoreStatus(void); // true when every core reports stalled
bool gc(struct garbagelist * stackptr); // core coordinator routine
void gc_collect(struct garbagelist* stackptr); //core collector routine
void gc_nocollect(struct garbagelist* stackptr); //non-gc core collector routine
// master-core steps of a collection, in execution order
void master_mark(struct garbagelist *stackptr);
void master_getlargeobjs(void);
void master_compact(void);
void master_updaterefs(void);
void master_finish(void);
void gc_master(struct garbagelist * stackptr);
// _I suffix: NOTE(review): appears to follow the runtime's convention of
// "call with interrupts disabled" — confirm against the runtime docs.
void transferMarkResults_I(void);
bool gcfindSpareMem_I(unsigned int * startaddr,unsigned int * tomove,unsigned int * dstcore,unsigned int requiredmem,unsigned int requiredcore);
// Wrappers so callers need no #ifdef: real calls when MULTICORE_GC is on,
// no-ops otherwise. NOTE(review): the matching "#ifdef MULTICORE_GC" is
// outside this excerpt.
#define INITMULTICOREGCDATA() initmulticoregcdata()
#define DISMULTICOREGCDATA() dismulticoregcdata()
#else // MULTICORE_GC
#define INITMULTICOREGCDATA()
#define DISMULTICOREGCDATA()
#endif // MULTICORE_GC
#endif // BAMBOO_MULTICORE_GARBAGE_H