fix odd core pmc bug
[IRC.git] / Robust / src / Runtime / bamboo / pmc_garbage.c
#include "multicoregc.h"
#include "multicoreruntime.h"
#include "pmc_garbage.h"
#include "runtime_arch.h"
#include "pmc_mark.h"
#include "pmc_forward.h"
#include "pmc_refupdate.h"
#ifdef PERFCOUNT
#include "bme_perf_counter.h"
#endif

struct pmc_heap * pmc_heapptr;     /* shared heap metadata: regions, units, lock, barrier */
struct pmc_queue * pmc_localqueue; /* this core's mark queue */
volatile bool gcflag;              /* set to request a collection; cleared at the end of gc() */

/* Adjust the shared count of working cores under the heap lock. */
void incrementthreads() {
  tmc_spin_mutex_lock(&pmc_heapptr->lock);
  pmc_heapptr->numthreads++;
  tmc_spin_mutex_unlock(&pmc_heapptr->lock);
}

void decrementthreads() {
  tmc_spin_mutex_lock(&pmc_heapptr->lock);
  pmc_heapptr->numthreads--;
  tmc_spin_mutex_unlock(&pmc_heapptr->lock);
}

/* Address one past the end of heap unit 'index'. */
void * pmc_unitend(unsigned int index) {
  return gcbaseva+(index+1)*UNITSIZE;
}
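
/* Illustrative sketch (not part of the original file): the inverse of
 * pmc_unitend.  Assuming gcbaseva and UNITSIZE are the same quantities used
 * above, this hypothetical helper recovers the index of the unit containing
 * an address, so pmc_unitend(pmc_unitindex(p)) is the first boundary past p. */
#if 0
static unsigned int pmc_unitindex(void *ptr) {
  return (unsigned int)(((char *)ptr-(char *)gcbaseva)/UNITSIZE);
}
#endif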

/* Run once on every core: set up this core's mark queue.  The startup core
 * additionally initializes the barrier, the per-unit end pointers, and the
 * region geometry.  Each region owns four units; regions are paired so that
 * an even region's lastptr sits at its low boundary and its odd partner's
 * lastptr at its high boundary, leaving the pair's free space to open up in
 * the middle. */
void pmc_onceInit() {
  pmc_localqueue=&pmc_heapptr->regions[BAMBOO_NUM_OF_CORE].markqueue;
  pmc_queueinit(pmc_localqueue);
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    tmc_spin_barrier_init(&pmc_heapptr->barrier, NUMCORES4GC);
    for(int i=0;i<NUMPMCUNITS;i++) {
      pmc_heapptr->units[i].endptr=pmc_unitend(i);
      //tprintf("Ch5: %u-> %x\n", i, pmc_heapptr->units[i].endptr);
    }

    for(int i=0;i<NUMCORES4GC;i+=2) {
      if (i==0) {
        pmc_heapptr->regions[i].lastptr=gcbaseva;
      } else {
        pmc_heapptr->regions[i].lastptr=pmc_heapptr->units[i*4-1].endptr;
      }
      pmc_heapptr->regions[i].lowunit=4*i;
      pmc_heapptr->regions[i].highunit=4*(i+1);
      if ((i+1)<NUMCORES4GC) {
        pmc_heapptr->regions[i+1].lastptr=pmc_heapptr->units[(i+1)*4+3].endptr;
        pmc_heapptr->regions[i+1].lowunit=4*(i+1);
        pmc_heapptr->regions[i+1].highunit=4*(i+2);
      }
    }
    //for(int i=0;i<NUMCORES4GC;i++) {
    //  tprintf("%u lastptr=%x\n", i, pmc_heapptr->regions[i].lastptr);
    //}
  }
}
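
/* Worked example (illustrative only): with NUMCORES4GC==4, and assuming
 * NUMPMCUNITS==16, the loop above produces
 *   region 0: units 0-3,   lastptr = gcbaseva          (low boundary)
 *   region 1: units 4-7,   lastptr = units[7].endptr   (high boundary)
 *   region 2: units 8-11,  lastptr = units[7].endptr   (low boundary)
 *   region 3: units 12-15, lastptr = units[15].endptr  (high boundary)
 * With an odd NUMCORES4GC the last even region has no partner, which is what
 * the (i+1)<NUMCORES4GC guards are for. */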

/* Per-collection setup: the startup core pads the free middle of every region
 * pair at each unit boundary it crosses, so units can later be counted and
 * compacted independently; then each core pads its leftover allocation block
 * and all cores meet at the barrier. */
void pmc_init() {
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    pmc_heapptr->numthreads=NUMCORES4GC;
    for(int i=0;i<NUMCORES4GC;i+=2) {
      void *startptr=pmc_heapptr->regions[i].lastptr;
      void *finishptr=(i+1)<NUMCORES4GC?pmc_heapptr->regions[i+1].lastptr:pmc_heapptr->regions[i].endptr;
      struct pmc_region *region=&pmc_heapptr->regions[i];
      unsigned int startindex=region->lowunit;
      //guard the odd-core case here as well, mirroring finishptr above
      unsigned int endindex=(i+1)<NUMCORES4GC?pmc_heapptr->regions[i+1].highunit:region->highunit;
      //tprintf("Free space in partition %u from %x to %x\n", i, startptr, finishptr);
      for(unsigned int index=startindex;index<endindex;index++) {
        void *ptr=pmc_heapptr->units[index].endptr;
        if ((ptr>startptr)&&(ptr<=finishptr)) {
          padspace(startptr, (unsigned int)(ptr-startptr));
          startptr=ptr;
        }
        if (ptr>finishptr) {
          padspace(startptr, (unsigned int) (finishptr-startptr));
          break;
        }
      }
    }
  }
  if (bamboo_smem_size) {
    //tprintf("Left over alloc space from %x to %x\n", bamboo_cur_msp, bamboo_cur_msp+bamboo_smem_size);
    padspace(bamboo_cur_msp, bamboo_smem_size);
  }
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
}
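
/* Illustrative sketch (not in the original file): the loop above pads the
 * free gap [startptr, finishptr) piecewise at every unit boundary it
 * crosses.  A hypothetical equivalent written against a single gap, with the
 * last piece clamped to the gap end: */
#if 0
static void pad_gap_per_unit(void *startptr, void *finishptr,
                             unsigned int startindex, unsigned int endindex) {
  for(unsigned int index=startindex;index<endindex;index++) {
    void *ptr=pmc_heapptr->units[index].endptr;
    if (ptr>finishptr)
      ptr=finishptr;   /* clamp the final piece */
    if (ptr>startptr) {
      padspace(startptr, (unsigned int)((char *)ptr-(char *)startptr));
      startptr=ptr;
    }
    if (ptr==finishptr)
      break;
  }
}
#endif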

/* Parallel mark-compact collection.  Every one of the NUMCORES4GC cores must
 * call this; the phases below are separated by barriers. */
void gc(struct garbagelist *gl) {
#ifdef PERFCOUNT
  profile_start(GC_REGION);
#endif
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE)
    tprintf("start GC\n");
  pmc_init();
  //mark live objects
  //tprintf("mark\n");
  pmc_mark(gl);
  //count live objects per unit
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //tprintf("count\n");
  pmc_count();
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //divide up work
  //tprintf("divide\n");
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    pmc_processunits();
  }
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //set up forwarding pointers
  //tprintf("forward\n");
  pmc_doforward();
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //update pointers
  //tprintf("updaterefs\n");
  pmc_doreferenceupdate(gl);
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //compact data
  //tprintf("compact\n");
  pmc_docompact();
  //reset memory allocation
  bamboo_cur_msp=NULL;
  bamboo_smem_size=0;
  //tprintf("done\n");

  //if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
  //  for(int i=0;i<NUMCORES4GC;i+=2) {
  //    void *startptr=pmc_heapptr->regions[i].lastptr;
  //    void *finishptr=pmc_heapptr->regions[i+1].lastptr;
  //    tprintf("Partition %u from %x to %x\n", i, startptr, finishptr);
  //    tprintf("%x %x %x %x\n", pmc_heapptr->regions[i].startptr, pmc_heapptr->regions[i].endptr, pmc_heapptr->regions[i+1].startptr, pmc_heapptr->regions[i+1].endptr);
  //  }
  //}

  gcflag=false;
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);

#ifdef PERFCOUNT
  profile_start(APP_REGION);
#endif
  //tprintf("exit GC\n");
}
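
/* Hypothetical caller sketch (not from this file): gcflag is the only trigger
 * visible here, so a mutator-side safe point presumably looks something like
 * this, with gl listing the caller's live roots.  The name gc_checkpoint is
 * illustrative. */
#if 0
static void gc_checkpoint(struct garbagelist *gl) {
  if (gcflag)   /* set elsewhere when a collection has been requested */
    gc(gl);     /* every core must enter, or the barriers above deadlock */
}
#endif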

/* Fill a free gap so the heap stays parseable by a linear object walk:
 * small gaps are zeroed; larger gaps are disguised as fake byte arrays so
 * that a scan can step over them like any other object. */
void padspace(void *ptr, unsigned int length) {
  //zero small blocks
  if (length<sizeof(struct ArrayObject)) {
    BAMBOO_MEMSET_WH(ptr,0,length);
  } else {
    //generate fake arrays for big blocks
    struct ArrayObject *ao=(struct ArrayObject *)ptr;
    ao->type=BYTEARRAYTYPE;
    unsigned arraylength=length-sizeof(struct ArrayObject);
    ao->___length___=arraylength;
    ao->marked=0;
  }
}
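
/* Worked example (illustrative, assuming sizeof(struct ArrayObject)==24 and
 * classsize[BYTEARRAYTYPE]==1): padding a 4096-byte gap stores a byte array
 * with ___length___ == 4096-24 == 4072, and gettype_size() below then reports
 * 24 + 4072*1 == 4096 bytes, i.e. exactly the gap, so a scan steps over it in
 * one move. */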

/* Report the type id and total size in bytes of the object at ptr; the type
 * id is the first word of every object and array header. */
void gettype_size(void * ptr, unsigned int * ttype, unsigned int * tsize) {
  int type = ((int *)ptr)[0];
  //  if (type>TOTALNUMCLASSANDARRAY) {
  //    tprintf("ptr=%x type=%u\n", ptr, type);
  //  }

  if(type < NUMCLASSES) {
    // a normal object
    *tsize = classsize[type];
    *ttype = type;
  } else {
    // an array
    struct ArrayObject *ao=(struct ArrayObject *)ptr;
    unsigned int elementsize=classsize[type];
    unsigned int length=ao->___length___;
    *tsize = sizeof(struct ArrayObject)+length*elementsize;
    *ttype = type;
  }
}
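
/* Illustrative sketch (not in the original file): gettype_size() plus the
 * fake arrays from padspace() are what make a linear heap walk possible.
 * Assuming objects are laid out back to back between bottom and top, a
 * hypothetical scanner looks like this: */
#if 0
static void scan_heap(void *bottom, void *top) {
  void *ptr=bottom;
  while(ptr<top) {
    unsigned int type, size;
    gettype_size(ptr, &type, &size);
    /* ...visit the object of the given type here... */
    ptr=(char *)ptr+size;
  }
}
#endif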