Add gcprofile to PMC garbage collector and compute the size of allocated obj instead...
[IRC.git] / Robust / src / Runtime / bamboo / pmc_garbage.c
1 #include "multicoregc.h"
2 #include "multicoreruntime.h"
3 #include "multicoregcprofile.h"
4 #include "pmc_garbage.h"
5 #include "runtime_arch.h"
6 #include "pmc_mark.h"
7 #include "pmc_forward.h"
8 #include "pmc_refupdate.h"
9 #ifdef PERFCOUNT
10 #include "bme_perf_counter.h"
11 #endif
12
/* Shared descriptor for the whole PMC heap; one instance visible to all cores. */
struct pmc_heap * pmc_heapptr;
/* This core's mark queue; aliases pmc_heapptr->regions[BAMBOO_NUM_OF_CORE].markqueue
   (set in pmc_onceInit). */
struct pmc_queue * pmc_localqueue;
/* Collection-request flag; cleared at the end of gc().  NOTE(review): presumably
   set elsewhere by the allocator when a collection is needed -- confirm. */
volatile bool gcflag;
16
17 void incrementthreads() {
18   tmc_spin_mutex_lock(&pmc_heapptr->lock);
19   pmc_heapptr->numthreads++;
20   tmc_spin_mutex_unlock(&pmc_heapptr->lock);
21 }
22
23 void decrementthreads() {
24   tmc_spin_mutex_lock(&pmc_heapptr->lock);
25   pmc_heapptr->numthreads--;
26   tmc_spin_mutex_unlock(&pmc_heapptr->lock);
27 }
28
29 void * pmc_unitend(unsigned int index) {
30   return gcbaseva+(index+1)*UNITSIZE;
31 }
32
/* One-time GC initialization, run on every core before the first collection.
   Each core sets up its own mark queue; the startup core additionally
   initializes the global barrier, the per-unit end pointers, and the
   per-core region bounds. */
void pmc_onceInit() {
  pmc_localqueue=&pmc_heapptr->regions[BAMBOO_NUM_OF_CORE].markqueue;
  pmc_queueinit(pmc_localqueue);
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    tmc_spin_barrier_init(&pmc_heapptr->barrier, NUMCORES4GC);
    /* Precompute the end address of every heap unit. */
    for(int i=0;i<NUMPMCUNITS;i++) {
      pmc_heapptr->units[i].endptr=pmc_unitend(i);
      //tprintf("Ch5: %u-> %x\n", i, pmc_heapptr->units[i].endptr);
    }
    
    /* Regions are configured in pairs; each region spans 4 units
       (lowunit..highunit).  The even region's lastptr starts at the LOW end
       of its unit range, while the odd partner's lastptr is set to the HIGH
       end of its range (units[(i+1)*4+3].endptr) -- presumably the odd
       region allocates downward so a pair's free space meets in the middle;
       TODO confirm against the allocator/compactor. */
    for(int i=0;i<NUMCORES4GC;i+=2) {
      if (i==0) {
        pmc_heapptr->regions[i].lastptr=gcbaseva;
      } else
        pmc_heapptr->regions[i].lastptr=pmc_heapptr->units[i*4-1].endptr;
      pmc_heapptr->regions[i].lowunit=4*i;
      pmc_heapptr->regions[i].highunit=4*(i+1);
      if ((i+1)<NUMCORES4GC) {
        pmc_heapptr->regions[i+1].lastptr=pmc_heapptr->units[(i+1)*4+3].endptr;
        pmc_heapptr->regions[i+1].lowunit=4*(i+1);
        pmc_heapptr->regions[i+1].highunit=4*(i+2);
      }
    }
    //for(int i=0;i<NUMCORES4GC;i++) {
      //tprintf("%u lastptr=%x\n", i, pmc_heapptr->regions[i].lastptr);
    //}
  }
}
61
/* Per-collection setup.  The startup core pads the free gap of every region
   pair so the heap remains parseable object-by-object; every core then pads
   its own leftover allocation buffer and waits at the barrier. */
void pmc_init() {
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    pmc_heapptr->numthreads=NUMCORES4GC;
    /* For each pair, free space lies between the even region's lastptr
       (growing up) and the odd region's lastptr (growing down, per
       pmc_onceInit); the last pair without a partner runs to its own endptr. */
    for(int i=0;i<NUMCORES4GC;i+=2) {
      void *startptr=pmc_heapptr->regions[i].lastptr;
      void *finishptr=(i+1)<NUMCORES4GC?pmc_heapptr->regions[i+1].lastptr:pmc_heapptr->regions[i].endptr;
      struct pmc_region *region=&pmc_heapptr->regions[i];
      unsigned int startindex=region->lowunit;
      unsigned int endindex=(i+1)<NUMCORES4GC?pmc_heapptr->regions[i+1].highunit:pmc_heapptr->regions[i].highunit;
      //tprintf("Free space in partition %u from %x to %x\n", i, startptr, finishptr);
      /* Walk the unit boundaries inside the gap, padding segment by segment
         so no unit boundary falls in the middle of unformatted memory. */
      for(unsigned int index=startindex;index<endindex;index++) {
        void *ptr=pmc_heapptr->units[index].endptr;
        if ((ptr>startptr)&&(ptr<=finishptr)) {
          padspace(startptr, (unsigned int)(ptr-startptr));
          startptr=ptr;
        }
        if (ptr>finishptr) {
          /* Boundary is past the gap: pad the remaining tail and stop. */
          padspace(startptr, (unsigned int) (finishptr-startptr));
          break;
        }
      }
    }
  }
  /* Pad this core's unconsumed allocation buffer, if any. */
  if (bamboo_smem_size) {
    //tprintf("Left over alloc space from %x to %x\n", bamboo_cur_msp, bamboo_cur_msp+bamboo_smem_size);
    padspace(bamboo_cur_msp, bamboo_smem_size);  
  }
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
}
91
/* Main entry point of the parallel mark-compact collection.  All GC cores
   execute this; phases are separated by barrier waits, and the startup core
   records a profile timestamp after each phase.  The barrier ordering is
   correctness-critical: each phase consumes data the previous phase wrote. */
void gc(struct garbagelist *gl) {
#ifdef PERFCOUNT
  profile_start(GC_REGION);
#endif
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    tprintf("start GC\n");
    GCPROFILE_START_MASTER();
  }
  /* Phase 0: pad free space so the heap is parseable, then sync. */
  pmc_init();
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    GCPROFILE_ITEM_MASTER();
  }
  //mark live objects
  //tprintf("mark\n");
  pmc_mark(gl);
  //count live objects per unit
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    GCPROFILE_ITEM_MASTER();
  }
  //tprintf("count\n");
  pmc_count();
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    GCPROFILE_ITEM_MASTER();
  }
  //divide up work
  //tprintf("divide\n");
  /* Only the startup core partitions the compaction work across cores. */
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    pmc_processunits();
    GCPROFILE_ITEM_MASTER();
  }
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //set up forwarding pointers
  //tprintf("forward\n");
  pmc_doforward();
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    GCPROFILE_ITEM_MASTER();
  }
  //update pointers
  //tprintf("updaterefs\n");
  pmc_doreferenceupdate(gl);
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    GCPROFILE_ITEM_MASTER();
  }
  //compact data
  //tprintf("compact\n");
  pmc_docompact();
  //reset memory allocation
  /* Invalidate this core's allocation buffer; compaction moved objects,
     so the old buffer contents are stale. */
  bamboo_cur_msp=NULL;
  bamboo_smem_size=0;
  //tprintf("done\n");

  //if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    //    for(int i=0;i<NUMCORES4GC;i+=2) {
    //      void *startptr=pmc_heapptr->regions[i].lastptr;
    //      void *finishptr=pmc_heapptr->regions[i+1].lastptr;
    //      tprintf("Partition %u from %x to %x\n", i, startptr, finishptr);
    //      tprintf("%x %x %x %x\n", pmc_heapptr->regions[i].startptr, pmc_heapptr->regions[i].endptr, pmc_heapptr->regions[i+1].startptr, pmc_heapptr->regions[i+1].endptr);
    //    }
  //  }

  gcflag=false;
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    GCPROFILE_RECORD_SPACE_MASTER();
    GCPROFILE_END_MASTER();
  }

#ifdef PERFCOUNT
  profile_start(APP_REGION);
#endif
  //tprintf("exit GC\n");
}
168
169 void padspace(void *ptr, unsigned int length) {
170   //zero small blocks
171   if (length<sizeof(struct ArrayObject)) {
172     BAMBOO_MEMSET_WH(ptr,0,length);
173   } else {
174     //generate fake arrays for big blocks
175     struct ArrayObject *ao=(struct ArrayObject *)ptr;
176     ao->type=BYTEARRAYTYPE;
177     unsigned arraylength=length-sizeof(struct ArrayObject);
178     ao->___length___=arraylength;
179     ao->marked=0;
180   }
181 }
182
183 void gettype_size(void * ptr, unsigned int * ttype, unsigned int * tsize) {
184   int type = ((int *)ptr)[0];
185   //  if (type>TOTALNUMCLASSANDARRAY) {
186   //    tprintf("ptr=%x type=%u\n", ptr, type);
187   //  }
188
189   if(type < NUMCLASSES) {
190     // a normal object
191     *tsize = classsize[type];
192     *ttype = type;
193   } else {
194     // an array
195     struct ArrayObject *ao=(struct ArrayObject *)ptr;
196     unsigned int elementsize=classsize[type];
197     unsigned int length=ao->___length___;
198     *tsize = sizeof(struct ArrayObject)+length*elementsize;
199     *ttype = type;
200   } 
201 }