1 #if defined(MULTICORE)&&defined(MULTICORE_GC)
2 #include "runtime_arch.h"
3 #include "multicoreruntime.h"
4 #include "multicoregarbage.h"
5 #include "multicorehelper.h"
6 #include "multicoremem_helper.h"
8 // Only allocate local mem chunks to each core.
9 // If a core has used up its local shared memory, start gc.
10 void * localmalloc_I(int coren, unsigned INTPTR memcheck, unsigned INTPTR * allocsize) {
11 for(block_t localblocknum=0;localblocknum<GCNUMLOCALBLOCK;localblocknum++) {
12 block_t searchblock=BLOCKINDEX2(coren, localblocknum);
13 if (searchblock>=GCNUMBLOCK)
15 struct blockrecord * block=&allocationinfo.blocktable[searchblock];
16 if (block->status==BS_FREE) {
17 unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
18 if (freespace>=memcheck) {
21 block->status=BS_USED;
22 void *blockptr=OFFSET2BASEVA(searchblock)+gcbaseva;
23 unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
24 void *startaddr=blockptr+usedspace;
34 // Allocate the local shared memory to each core with the highest priority,
35 // if a core has used up its local shared memory, try to allocate the
36 // shared memory that belong to its neighbours, if also failed, start gc.
37 void * fixedmalloc_I(int coren, unsigned INTPTR memcheck, unsigned INTPTR * allocsize) {
39 void * mem=localmalloc_I(coren,memcheck,allocsize);
43 //failed try neighbors...in a round robin fashion
44 for(block_t lblock=0;lblock<MAXNEIGHBORALLOC;lblock++) {
45 for(int i=0;i<NUM_CORES2TEST;i++) {
46 int neighborcore=core2test[coren][i];
47 if (neighborcore!=-1) {
48 block_t globalblockindex=BLOCKINDEX2(neighborcore, lblock);
49 if (globalblockindex>=GCNUMBLOCK||globalblockindex<0)
51 struct blockrecord * block=&allocationinfo.blocktable[globalblockindex];
52 if (block->status==BS_FREE) {
53 unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
54 if (freespace>=memcheck) {
57 block->status=BS_USED;
58 void *blockptr=OFFSET2BASEVA(globalblockindex)+gcbaseva;
59 unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
61 return blockptr+usedspace;
72 // Allocate the local shared memory to each core with the highest priority,
73 // if a core has used up its local shared memory, try to allocate the
74 // shared memory that belong to its neighbours first, if failed, check
75 // current memory allocation rate, if it has already reached the threshold,
76 // start gc, otherwise, allocate the shared memory globally. If all the
77 // shared memory has been used up, start gc.
78 void * mixedmalloc_I(int coren, unsigned INTPTR isize, unsigned INTPTR * allocsize) {
79 void * mem=fixedmalloc_I(coren,isize,allocsize);
83 //try global allocator instead
84 return globalmalloc_I(coren, isize, allocsize);
88 // Allocate all the memory chunks globally, do not consider the host cores
89 // When all the shared memory are used up, start gc.
90 void * globalmalloc_I(int coren, unsigned INTPTR memcheck, unsigned INTPTR * allocsize) {
91 block_t firstfree=NOFREEBLOCK;
92 block_t lowestblock=allocationinfo.lowestfreeblock;
93 for(block_t searchblock=lowestblock;searchblock<GCNUMBLOCK;searchblock++) {
94 struct blockrecord * block=&allocationinfo.blocktable[searchblock];
95 if (block->status==BS_FREE) {
96 if(firstfree==NOFREEBLOCK)
97 firstfree=searchblock;
98 unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
99 if (freespace>=memcheck) {
102 block->status=BS_USED;
103 void *blockptr=OFFSET2BASEVA(searchblock)+gcbaseva;
104 unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
105 allocationinfo.lowestfreeblock=firstfree;
106 void *startaddr=blockptr+usedspace;
107 *allocsize=freespace;
115 void * smemalloc(int coren, unsigned INTPTR isize, unsigned INTPTR * allocsize) {
116 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
117 void *retval=smemalloc_I(coren, isize, allocsize);
118 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
119 GCPROFILE_RECORD_ALLOCATED_OBJ(*allocsize);
// malloc from the shared memory
// Dispatches to one of the allocator strategies and, on failure, kicks off
// a garbage collection round by notifying all active cores.
// _I suffix: must be called with interrupts disabled / in runtime mode.
void * smemalloc_I(int coren, unsigned INTPTR isize, unsigned INTPTR * allocsize) {
  // NOTE(review): the four declarations below are clearly mutually
  // exclusive allocator strategies (local / fixed / mixed / global); the
  // preprocessor conditionals that select exactly one of them appear to
  // have been lost from this copy of the file -- as written, 'mem' is
  // redeclared four times and will not compile.  Restore the #if/#elif
  // structure from upstream before building.
  void *mem = localmalloc_I(coren, isize, allocsize);
  void *mem = fixedmalloc_I(coren, isize, allocsize);
  void *mem = mixedmalloc_I(coren, isize, allocsize);
  void *mem = globalmalloc_I(coren, isize, allocsize);
  // not enough shared global memory
  // NOTE(review): an `if (mem == NULL) {` guard presumably wrapped the GC
  // trigger below, and a trailing `return mem;` closed the function --
  // both are missing here; confirm against upstream.
  if(!gc_status_info.gcprocessing) {
    // inform other cores to stop and wait for gc
    for(int i = 0; i < NUMCORESACTIVE; i++) {
      // reuse the gcnumsendobjs & gcnumreceiveobjs
      gcnumsendobjs[0][i] = 0;
      gcnumreceiveobjs[0][i] = 0;
      // broadcast the GC-start-precheck message to client cores
      GC_SEND_MSG_1_TO_CLIENT(GCSTARTPRE);
155 // malloc from the shared memory
156 void * smemalloc_I(int coren, unsigned INTPTR size, unsigned INTPTR * allocsize) {
158 int toallocate = (size>(BAMBOO_SMEM_SIZE)) ? (size) : (BAMBOO_SMEM_SIZE);
159 if(toallocate > bamboo_free_smem_size) {
163 mem = (void *)bamboo_free_smemp;
164 bamboo_free_smemp = ((void*)bamboo_free_smemp) + toallocate;
165 bamboo_free_smem_size -= toallocate;
167 *allocsize = toallocate;
169 // no enough shared global memory