1 #ifndef BAMBOO_MULTICORE_GARBAGE_H
2 #define BAMBOO_MULTICORE_GARBAGE_H
5 #include "multicoregc.h"
6 #include "multicorehelper.h" // for mappings between core # and block #
7 #include "structdefs.h"
8 #include "multicoregcprofile.h"
9 #include "multicorecache.h"
12 #define GC_PRINTF tprintf
14 #define GC_PRINTF if(0) tprintf
// data structures for GC
// Block-size layout: each GC core owns one "large" block of
// BAMBOO_SMEM_SIZE_L bytes at the bottom of the shared heap; everything
// above BAMBOO_LARGE_SMEM_BOUND is divided into BAMBOO_SMEM_SIZE blocks
// (see BLOCKINDEX/BASEPTR below).
#define BAMBOO_SMEM_SIZE_L (BAMBOO_SMEM_SIZE * 2)
#define BAMBOO_LARGE_SMEM_BOUND (BAMBOO_SMEM_SIZE_L*NUMCORES4GC)
// each gc core has exactly one big block; this is very important
// for the computation of NUMBLOCKS(s, n), DO NOT change this!
#define GC_NUM_FLUSH_DTLB 1
unsigned int gc_num_flush_dtlb; // DTLB-flush counter — presumably profiling; confirm against uses
37 INITPHASE = 0x0, // 0x0
40 SUBTLECOMPACTPHASE, // 0x3
44 PREFINISHPHASE, // 0x6
46 FINISHPHASE // 0x6/0x7
// NOTE(review): these header-scope variables are tentative definitions;
// they rely on the linker merging common symbols. With -fno-common
// (default since GCC 10) they should become `extern` declarations with a
// single definition in one .c file — confirm build flags before changing.
volatile bool gcprocessing; // true while a GC cycle is in progress — confirm setter
volatile GCPHASETYPE gcphase; // indicating GC phase
volatile bool gcpreinform; // NOTE(review): comment said "counter for stopped cores" but this is a bool — presumably a pre-GC notification flag; confirm
volatile bool gcprecheck; // indicates if there are updated pregc information
unsigned int gccurr_heaptop; // presumably current top of this core's local heap; confirm
struct MGCHash * gcforwardobjtbl; // cache forwarded objs in mark phase
// for mark phase termination
volatile unsigned int gccorestatus[NUMCORESACTIVE];//records status of each core
volatile unsigned int gcnumsendobjs[2][NUMCORESACTIVE];//# of objects sent out
volatile unsigned int gcnumreceiveobjs[2][NUMCORESACTIVE];//# of objects received
volatile unsigned int gcnumsrobjs_index;//indicates which entry to record the
// info received before phase 1 of the mark finish
// the info received in phase 2 must be
// recorded in the other entry
volatile bool gcbusystatus; // presumably: this core still has marking work pending; confirm
unsigned int gcself_numsendobjs; // this core's own sent-object count
unsigned int gcself_numreceiveobjs; // this core's own received-object count
// heap-top / load-balancing bookkeeping used by the compact phase
unsigned int gcheaptop; // presumably top address of the (local) heap; confirm
unsigned int gcloads[NUMCORES4GC]; // per-core heap load — presumably bytes in use; confirm
unsigned int gctopcore; // the core hosting the top of the heap
unsigned int gctopblock; // the number of the current top block
unsigned int gcnumlobjs; // presumably count of large objects; confirm
// compact instruction
unsigned int gcmarkedptrbound;
unsigned int gcblock2fill;
unsigned int gcstopblock[NUMCORES4GC]; // indicate when to stop compact phase
unsigned int gcfilledblocks[NUMCORES4GC]; //indicate how many blocks have been fulfilled
// move instruction
unsigned int gcmovestartaddr;
unsigned int gcdstcore;
volatile bool gctomove; // presumably set when a pending move is ready for this core; confirm
unsigned int gcrequiredmems[NUMCORES4GC]; //record pending mem requests
volatile unsigned int gcmovepending; // count of pending move/mem requests — confirm units
// shared memory pointer for pointer mapping tbls
// In GC version, this block of memory is located at the bottom of the
// shared memory, right on top of the smem tbl.
// The bottom of the shared memory = sbstart tbl + smemtbl + bamboo_rmsp
// These three types of table always reside at the bottom of the shared
// memory and are never moved or garbage collected.
unsigned int * gcmappingtbl; // old-address -> new-address mapping slots, indexed by OBJMAPPINGINDEX
unsigned int bamboo_rmsp_size;
unsigned int bamboo_baseobjsize; // minimum object granularity; divisor in OBJMAPPINGINDEX below

// table recording the starting address of each small block
// (size is BAMBOO_SMEM_SIZE)
// Note: 1. this table always resides on the very bottom of the shared memory
// 2. it is not counted in the shared heap, would never be garbage
// collected
unsigned int gcsbstarttbl_len; // length of the sbstart table
unsigned int gcreservedsb; // number of reserved sblocks for sbstarttbl
unsigned int gcnumblock; // number of total blocks in the shared mem
unsigned int gcbaseva; // base va for shared memory without reserved sblocks
#ifdef GC_CACHE_ADAPT
unsigned int gctopva; // top va for shared memory without reserved sblocks
volatile bool gccachestage; // presumably: true while in a cache-adaptation stage; confirm
// tables recording the sampling data collected for cache adaption
int * gccachesamplingtbl; // shared sampling table — semantics in multicorecache.c; confirm
int * gccachesamplingtbl_local; // this core's slice/copy of the sampling table — confirm
unsigned int size_cachesamplingtbl_local; // byte size of the local sampling table
int * gccachesamplingtbl_r; // second sampling table (read phase?) — confirm semantics
int * gccachesamplingtbl_local_r;
unsigned int size_cachesamplingtbl_local_r;
int * gccachepolicytbl; // chosen cache policy entries — confirm layout
unsigned int size_cachepolicytbl;
#endif // GC_CACHE_ADAPT
129 #define OBJMAPPINGINDEX(p) (((unsigned int)p-gcbaseva)/bamboo_baseobjsize)
// True iff p lies inside the GC-managed shared heap
// [gcbaseva, gcbaseva + BAMBOO_SHARED_MEM_SIZE).
// NOTE: p is evaluated twice — do not pass expressions with side effects.
// FIX: parenthesize the macro parameter p inside both casts.
#define ISSHAREDOBJ(p) \
  ((((unsigned int)(p))>=gcbaseva)&&(((unsigned int)(p))<(gcbaseva+(BAMBOO_SHARED_MEM_SIZE))))
// Round s up to the next cache-line multiple (for s >= 1) and store the
// result through the unsigned int pointer as. s is evaluated once.
// FIX: parenthesize the macro parameters — previously `(s-1)` mis-bound
// for compound arguments such as conditional or bitwise expressions.
#define ALIGNSIZE(s, as) \
  (*((unsigned int*)(as)))=((((unsigned int)((s)-1))&(~(BAMBOO_CACHE_LINE_MASK)))+(BAMBOO_CACHE_LINE_SIZE))
// Map a heap pointer p to its global block index (starting from 0), stored
// through the unsigned int pointer b. The first NUMCORES4GC blocks are the
// large (BAMBOO_SMEM_SIZE_L) blocks at the heap bottom; every block above
// BAMBOO_LARGE_SMEM_BOUND is a small (BAMBOO_SMEM_SIZE) block.
// FIX: wrap in do { } while(0) so the macro is a single statement that is
// safe in unbraced if/else, and parenthesize the b parameter.
#define BLOCKINDEX(p, b) \
  do { \
    unsigned int t = (p) - gcbaseva; \
    if(t < (BAMBOO_LARGE_SMEM_BOUND)) { \
      (*((unsigned int*)(b))) = t / (BAMBOO_SMEM_SIZE_L); \
    } else { \
      (*((unsigned int*)(b))) = NUMCORES4GC+((t-(BAMBOO_LARGE_SMEM_BOUND))/(BAMBOO_SMEM_SIZE)); \
    } \
  } while(0)
// Map a heap pointer p to the number of the core hosting it, stored through
// the unsigned int pointer c. With a single GC core everything is on core 0;
// otherwise the global block index (via BLOCKINDEX) is folded into the
// 2*NUMCORES4GC-entry block-to-core interleaving table gc_block2core.
// FIX: wrap in do { } while(0) for single-statement safety and parenthesize c.
#define RESIDECORE(p, c) \
  do { \
    if(1 == (NUMCORES4GC)) { \
      (*((unsigned int*)(c))) = 0; \
    } else { \
      unsigned int b; \
      BLOCKINDEX((p), &b); \
      (*((unsigned int*)(c))) = gc_block2core[(b%(NUMCORES4GC*2))]; \
    } \
  } while(0)
// NOTE: n starts from 0
// mapping of heaptop (how many bytes there are in the local heap) to
// the number of the block: the result is the index of the block the
// heaptop falls into on this core (0 = the core's large block, then one
// per BAMBOO_SMEM_SIZE small block; a heaptop exactly on a boundary
// belongs to the next block).
// FIX: wrap in do { } while(0) and parenthesize the s parameter.
#define NUMBLOCKS(s, n) \
  do { \
    if((s) < (BAMBOO_SMEM_SIZE_L)) { \
      (*((unsigned int*)(n))) = 0; \
    } else { \
      (*((unsigned int*)(n))) = 1 + ((s) - (BAMBOO_SMEM_SIZE_L)) / (BAMBOO_SMEM_SIZE); \
    } \
  } while(0)
// Map heaptop s to its byte offset within its containing block, stored
// through the unsigned int pointer o (companion of NUMBLOCKS above).
// FIX: wrap in do { } while(0) and parenthesize the s parameter.
#define OFFSET(s, o) \
  do { \
    if((s) < (BAMBOO_SMEM_SIZE_L)) { \
      (*((unsigned int*)(o))) = (s); \
    } else { \
      (*((unsigned int*)(o))) = ((s)-(BAMBOO_SMEM_SIZE_L))%(BAMBOO_SMEM_SIZE); \
    } \
  } while(0)
// mapping of (core #, index of the block on that core) to the global block
// index: every round of NUMCORES4GC*2 global blocks carries two blocks per
// core, and gc_core2block gives the core's positions within a round.
#define BLOCKINDEX2(c, n) \
  (((NUMCORES4GC*2)*((n)/2))+gc_core2block[(2*(c))+((n)%2)])
// mapping of (core #, number of the block on that core) to the base address
// of the block, stored through the unsigned int pointer p. Global blocks
// below NUMCORES4GC are the large blocks at the heap bottom; the rest are
// small blocks starting at BAMBOO_LARGE_SMEM_BOUND.
// FIX: wrap in do { } while(0) for single-statement safety, parenthesize p.
#define BASEPTR(c, n, p) \
  do { \
    unsigned int b = BLOCKINDEX2((c), (n)); \
    if(b < (NUMCORES4GC)) { \
      (*((unsigned int*)(p))) = gcbaseva + b * (BAMBOO_SMEM_SIZE_L); \
    } else { \
      (*((unsigned int*)(p))) = gcbaseva+(BAMBOO_LARGE_SMEM_BOUND)+ \
        (b-(NUMCORES4GC))*(BAMBOO_SMEM_SIZE); \
    } \
  } while(0)
// the next core in the top of the heap: given the current top block index b,
// the core hosting block b+1 (wrapping around the 2*NUMCORES4GC-entry
// block-to-core interleaving table gc_block2core).
#define NEXTTOPCORE(b) (gc_block2core[((b)+1)%(NUMCORES4GC*2)])
// close the current block: zero its first cache line, then record the
// block's used size in the header word at its base.
// FIX: wrap in do { } while(0) so the two statements form one unit that is
// safe inside unbraced if/else.
#define CLOSEBLOCK(base, size) \
  do { \
    BAMBOO_MEMSET_WH((base), '\0', BAMBOO_CACHE_LINE_SIZE); \
    *((int*)(base)) = (size); \
  } while(0)
206 // check if all cores are stall now
207 #define GC_CHECK_ALL_CORE_STATUS(f) \
209 gccorestatus[BAMBOO_NUM_OF_CORE] = 0; \
211 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT(); \
212 if(gc_checkAllCoreStatus_I()) { \
213 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME(); \
216 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME(); \
220 // send a 1-word msg to all clients
221 #define GC_SEND_MSG_1_TO_CLIENT(m) \
223 for(int i = 0; i < NUMCORESACTIVE; ++i) { \
224 gccorestatus[i] = 1; \
225 if(BAMBOO_NUM_OF_CORE != i) { \
226 send_msg_1(i, (m), false); \
231 #define ISLOCAL(p) (hostcore(p)==BAMBOO_NUM_OF_CORE)
// GC entry points. NOTE(review): the empty () parameter lists are
// unprototyped in C — consider (void). The _I suffix presumably marks
// routines that must run with interrupts disabled / in runtime mode; confirm.
INLINE void initmulticoregcdata(); // set up the GC bookkeeping declared above
INLINE void dismulticoregcdata(); // tear it down
INLINE bool gc_checkAllCoreStatus_I(); // true when every core reports stalled
INLINE bool gc(struct garbagelist * stackptr); // core coordinator routine
INLINE void gc_collect(struct garbagelist* stackptr); //core collector routine
INLINE void gc_nocollect(struct garbagelist* stackptr); //non-gc core collector routine
INLINE void transferMarkResults_I();
// find spare memory to satisfy a request of requiredmem bytes for core
// requiredcore; presumably writes the donor start address, byte count to
// move, and donor core through the three out-pointers — confirm in .c file.
INLINE bool gcfindSpareMem_I(unsigned int * startaddr,
unsigned int * tomove,
unsigned int * dstcore,
unsigned int requiredmem,
unsigned int requiredcore);
// With MULTICORE_GC: route the init/teardown hooks to the real routines.
#define INITMULTICOREGCDATA() initmulticoregcdata()
#define DISMULTICOREGCDATA() dismulticoregcdata()
#else // MULTICORE_GC
// Without MULTICORE_GC the hooks compile away to nothing.
#define INITMULTICOREGCDATA()
#define DISMULTICOREGCDATA()
#endif // MULTICORE_GC
252 #endif // BAMBOO_MULTICORE_GARBAGE_H