changes to support profiling
IRC.git: Robust/src/Runtime/bamboo/pmc_garbage.c
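/* Parallel mark-compact (PMC) collector for the Bamboo multicore
 * runtime (Tilera tmc_spin_* synchronization).  The shared heap is
 * divided into NUMPMCUNITS fixed-size units grouped into one region
 * per GC core; a collection runs in barrier-separated phases: mark,
 * count live data per unit, divide up the compaction work, install
 * forwarding pointers, update references, and compact. */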
#include "multicoregc.h"
#include "multicoreruntime.h"
#include "pmc_garbage.h"
#include "runtime_arch.h"
#include "pmc_mark.h"
#include "pmc_forward.h"
#include "pmc_refupdate.h"
#ifdef PERFCOUNT
#include "bme_perf_counter.h"
#endif

struct pmc_heap * pmc_heapptr;     //shared collector state for all cores
struct pmc_queue * pmc_localqueue; //this core's mark queue
volatile bool gcflag;              //set when a collection has been requested

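//numthreads counts cores that still have marking work: a core that runs
//out of work calls decrementthreads() and one that receives new work
//calls incrementthreads(), so the mark phase (in pmc_mark.c, not shown
//here) can presumably detect termination when the count reaches zero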
void incrementthreads() {
  tmc_spin_mutex_lock(&pmc_heapptr->lock);
  pmc_heapptr->numthreads++;
  tmc_spin_mutex_unlock(&pmc_heapptr->lock);
}

void decrementthreads() {
  tmc_spin_mutex_lock(&pmc_heapptr->lock);
  pmc_heapptr->numthreads--;
  tmc_spin_mutex_unlock(&pmc_heapptr->lock);
}

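//address one past the end of unit "index": unit 0 covers
//[gcbaseva, gcbaseva+UNITSIZE), unit 1 the next UNITSIZE bytes, and so on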
void * pmc_unitend(unsigned int index) {
  return gcbaseva+(index+1)*UNITSIZE;
}

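//one-time setup.  Every core registers and initializes its local mark
//queue; the startup core also initializes the barrier, records each
//unit's end address, and lays regions out in pairs.  Each region owns
//4 units, and a pair shares an 8-unit block: the even region's lastptr
//starts at the bottom of the block while the odd region's lastptr
//starts at the top, so the pair's allocations grow toward each other.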
void pmc_onceInit() {
  pmc_localqueue=&pmc_heapptr->regions[BAMBOO_NUM_OF_CORE].markqueue;
  pmc_queueinit(pmc_localqueue);
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    tmc_spin_barrier_init(&pmc_heapptr->barrier, NUMCORES4GC);
    for(int i=0;i<NUMPMCUNITS;i++) {
      pmc_heapptr->units[i].endptr=pmc_unitend(i);
    }

    for(int i=0;i<NUMCORES4GC;i+=2) {
      //even region: lastptr starts at the bottom of its first unit
      if (i==0) {
        pmc_heapptr->regions[i].lastptr=gcbaseva;
      } else {
        pmc_heapptr->regions[i].lastptr=pmc_heapptr->units[i*4-1].endptr;
      }
      pmc_heapptr->regions[i].lowunit=4*i;
      pmc_heapptr->regions[i].highunit=4*(i+1);
      //odd region: lastptr starts at the top of its last unit
      pmc_heapptr->regions[i+1].lastptr=pmc_heapptr->units[(i+1)*4+3].endptr;
      pmc_heapptr->regions[i+1].lowunit=4*(i+1);
      pmc_heapptr->regions[i+1].highunit=4*(i+2);
    }
  }
}

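//per-collection setup.  The startup core pads the free gap between the
//two lastptrs of each region pair with dummy objects so the heap stays
//parseable, splitting the padding at unit boundaries so no filler
//object crosses a unit; every core then pads its own leftover
//allocation buffer before meeting at the barrier.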
void pmc_init() {
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    pmc_heapptr->numthreads=NUMCORES4GC;
    for(int i=0;i<NUMCORES4GC;i+=2) {
      //free space in this pair lies between the even region's lastptr
      //and the odd region's lastptr
      void *startptr=pmc_heapptr->regions[i].lastptr;
      void *finishptr=pmc_heapptr->regions[i+1].lastptr;
      struct pmc_region *region=&pmc_heapptr->regions[i];
      unsigned int startindex=region->lowunit;
      unsigned int endindex=pmc_heapptr->regions[i+1].highunit;
      for(unsigned int index=startindex;index<endindex;index++) {
        //pad up to each unit boundary that falls inside the gap
        void *ptr=pmc_heapptr->units[index].endptr;
        if ((ptr>startptr)&&(ptr<=finishptr)) {
          padspace(startptr, (unsigned int)(ptr-startptr));
          startptr=ptr;
        }
        if (ptr>finishptr) {
          padspace(startptr, (unsigned int)(finishptr-startptr));
          break;
        }
      }
    }
  }
  if (bamboo_smem_size) {
    //pad this core's leftover allocation buffer as well
    padspace(bamboo_cur_msp, bamboo_smem_size);
  }
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
}

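//entry point for a collection: all GC cores call gc() and synchronize
//at the shared barrier between phases.  With PERFCOUNT defined, the
//whole collection is attributed to GC_REGION for profiling and the
//application region is restored on exit.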
void gc(struct garbagelist *gl) {
#ifdef PERFCOUNT
  profile_start(GC_REGION);
#endif
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE)
    tprintf("start GC\n");
  pmc_init();
  //mark live objects
  pmc_mark(gl);
  //count live objects per unit
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  pmc_count();
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //divide up the compaction work
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    pmc_processunits();
  }
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //set up forwarding pointers
  pmc_doforward();
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //update pointers
  pmc_doreferenceupdate(gl);
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //compact data
  pmc_docompact();
  //reset memory allocation
  bamboo_cur_msp=NULL;
  bamboo_smem_size=0;

  gcflag=false;
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);

#ifdef PERFCOUNT
  profile_start(APP_REGION);
#endif
}

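//fill a dead block so the heap remains parseable: blocks too small to
//hold an array header are zeroed, and anything larger becomes a fake
//byte array sized to cover the block exactly.  For example, a
//1024-byte gap becomes a BYTEARRAYTYPE array with
//___length___ = 1024 - sizeof(struct ArrayObject).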
void padspace(void *ptr, unsigned int length) {
  //zero small blocks
  if (length<sizeof(struct ArrayObject)) {
    BAMBOO_MEMSET_WH(ptr,0,length);
  } else {
    //generate fake arrays for big blocks
    struct ArrayObject *ao=(struct ArrayObject *)ptr;
    ao->type=BYTEARRAYTYPE;
    unsigned arraylength=length-sizeof(struct ArrayObject);
    ao->___length___=arraylength;
    ao->marked=0;
  }
}

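//read an object's header word and return its type and total size;
//classsize[] holds the full object size for ordinary classes and the
//per-element size for array types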
void gettype_size(void * ptr, unsigned int * ttype, unsigned int * tsize) {
  int type = ((int *)ptr)[0];
  if (type < NUMCLASSES) {
    //a normal object
    *tsize = classsize[type];
    *ttype = type;
  } else {
    //an array: total size is the header plus length*elementsize
    struct ArrayObject *ao=(struct ArrayObject *)ptr;
    unsigned int elementsize=classsize[type];
    unsigned int length=ao->___length___;
    *tsize = sizeof(struct ArrayObject)+length*elementsize;
    *ttype = type;
  }
}
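
/* A minimal sketch (not part of this file) of how a later phase could
 * walk a padded region using gettype_size().  It assumes, following
 * padspace() above, that a zero header word marks zero-filled padding
 * that can be skipped one word at a time; the function name and the
 * word-sized step are illustrative assumptions, not part of this
 * runtime's actual API. */
static void pmc_walkregion_sketch(void *bottom, void *top) {
  void *ptr=bottom;
  while(ptr<top) {
    unsigned int type, size;
    if (!((int *)ptr)[0]) {
      //zero header word: padding from a small zeroed block; skip a word
      ptr=((char *)ptr)+sizeof(int);
      continue;
    }
    gettype_size(ptr, &type, &size);
    //either a live object or a fake byte array laid down by padspace()
    ptr=((char *)ptr)+size;
  }
}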