Robust/src/Runtime/bamboo/pmc_garbage.c
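// Parallel mark-compact ("pmc", presumably) collector for the Bamboo
// multicore runtime, synchronized with tmc_* spin locks and barriers.
// Collection runs in barrier-separated phases: mark, count, work division,
// forwarding, reference update, and compaction (see gc() below).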
#include "multicoregc.h"
#include "multicoreruntime.h"
#include "pmc_garbage.h"
#include "runtime_arch.h"
#include "pmc_mark.h"
#include "pmc_forward.h"
#include "pmc_refupdate.h"

struct pmc_heap * pmc_heapptr;      // shared collector state: regions, units, lock, barrier
struct pmc_queue * pmc_localqueue;  // this core's local mark queue
volatile bool gcflag;               // set when a collection is pending; cleared at the end of gc()

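// Track how many cores are still participating in the current collection;
// the counter is protected by the heap's spin lock.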
void incrementthreads() {
  tmc_spin_mutex_lock(&pmc_heapptr->lock);
  pmc_heapptr->numthreads++;
  tmc_spin_mutex_unlock(&pmc_heapptr->lock);
}

void decrementthreads() {
  tmc_spin_mutex_lock(&pmc_heapptr->lock);
  pmc_heapptr->numthreads--;
  tmc_spin_mutex_unlock(&pmc_heapptr->lock);
}

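// Address one past the end of heap unit 'index'.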
void * pmc_unitend(unsigned int index) {
  return gcbaseva+(index+1)*UNITSIZE;
}

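// One-time initialization. Every core sets up its local mark queue; the
// startup core also initializes the phase barrier, records each unit's end
// address, and assigns every region four units. Regions appear to be laid
// out in pairs whose lastptr fields start at opposite ends of the pair, so
// the pair's free space is the gap between the two lastptrs (see pmc_init).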
void pmc_onceInit() {
  pmc_localqueue=&pmc_heapptr->regions[BAMBOO_NUM_OF_CORE].markqueue;
  pmc_queueinit(pmc_localqueue);
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    tmc_spin_barrier_init(&pmc_heapptr->barrier, NUMCORES4GC);
    for(int i=0;i<NUMPMCUNITS;i++) {
      pmc_heapptr->units[i].endptr=pmc_unitend(i);
      //tprintf("Ch5: %u-> %x\n", i, pmc_heapptr->units[i].endptr);
    }

    for(int i=0;i<NUMCORES4GC;i+=2) {
      if (i==0) {
        pmc_heapptr->regions[i].lastptr=gcbaseva;
      } else
        pmc_heapptr->regions[i].lastptr=pmc_heapptr->units[i*4-1].endptr;
      pmc_heapptr->regions[i].lowunit=4*i;
      pmc_heapptr->regions[i].highunit=4*(i+1);
      pmc_heapptr->regions[i+1].lastptr=pmc_heapptr->units[(i+1)*4+3].endptr;
      pmc_heapptr->regions[i+1].lowunit=4*(i+1);
      pmc_heapptr->regions[i+1].highunit=4*(i+2);
    }
    //for(int i=0;i<NUMCORES4GC;i++) {
    //  tprintf("%u lastptr=%x\n", i, pmc_heapptr->regions[i].lastptr);
    //}
  }
}

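// Per-collection setup. The startup core resets the participant count and
// pads the free gap in each region pair so the heap stays parseable as a
// sequence of objects; the gap is padded piecewise at unit boundaries.
// Every core then pads its leftover allocation buffer and waits at the
// barrier.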
void pmc_init() {
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    pmc_heapptr->numthreads=NUMCORES4GC;
    for(int i=0;i<NUMCORES4GC;i+=2) {
      void *startptr=pmc_heapptr->regions[i].lastptr;
      void *finishptr=pmc_heapptr->regions[i+1].lastptr;
      struct pmc_region *region=&pmc_heapptr->regions[i];
      unsigned int startindex=region->lowunit;
      unsigned int endindex=pmc_heapptr->regions[i+1].highunit;
      //tprintf("Free space in partition %u from %x to %x\n", i, startptr, finishptr);
      for(unsigned int index=startindex;index<endindex;index++) {
        void *ptr=pmc_heapptr->units[index].endptr;
        if ((ptr>startptr)&&(ptr<=finishptr)) {
          padspace(startptr, (unsigned int)(ptr-startptr));
          startptr=ptr;
        }
        if (ptr>finishptr) {
          padspace(startptr, (unsigned int) (finishptr-startptr));
          break;
        }
      }
    }
  }
  if (bamboo_smem_size) {
    //tprintf("Left over alloc space from %x to %x\n", bamboo_cur_msp, bamboo_cur_msp+bamboo_smem_size);
    padspace(bamboo_cur_msp, bamboo_smem_size);
  }
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
}

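// Top-level collection routine, run on every GC core. Barriers separate the
// phases: mark live objects, count live data per unit, divide up the
// compaction work (startup core only), install forwarding pointers, update
// references, and compact. Allocation state is then reset and gcflag
// cleared.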
void gc(struct garbagelist *gl) {
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE)
    tprintf("start GC\n");
  pmc_init();
  //mark live objects
  //tprintf("mark\n");
  pmc_mark(gl);
  //count live objects per unit
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //tprintf("count\n");
  pmc_count();
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //divide up work
  //tprintf("divide\n");
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    pmc_processunits();
  }
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //set up forwarding pointers
  //tprintf("forward\n");
  pmc_doforward();
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //update pointers
  //tprintf("updaterefs\n");
  pmc_doreferenceupdate(gl);
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);
  //compact data
  //tprintf("compact\n");
  pmc_docompact();
  //reset memory allocation
  bamboo_cur_msp=NULL;
  bamboo_smem_size=0;
  //tprintf("done\n");

  //if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
  //  for(int i=0;i<NUMCORES4GC;i+=2) {
  //    void *startptr=pmc_heapptr->regions[i].lastptr;
  //    void *finishptr=pmc_heapptr->regions[i+1].lastptr;
  //    tprintf("Partition %u from %x to %x\n", i, startptr, finishptr);
  //    tprintf("%x %x %x %x\n", pmc_heapptr->regions[i].startptr, pmc_heapptr->regions[i].endptr, pmc_heapptr->regions[i+1].startptr, pmc_heapptr->regions[i+1].endptr);
  //  }
  //}

  gcflag=false;
  tmc_spin_barrier_wait(&pmc_heapptr->barrier);

  //tprintf("exit GC\n");
}

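// Fill a free gap so the heap can still be parsed linearly: gaps smaller
// than an array header are zeroed, larger ones are disguised as dead byte
// arrays that span the gap.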
void padspace(void *ptr, unsigned int length) {
  //zero small blocks
  if (length<sizeof(struct ArrayObject)) {
    BAMBOO_MEMSET_WH(ptr,0,length);
  } else {
    //generate fake arrays for big blocks
    struct ArrayObject *ao=(struct ArrayObject *)ptr;
    ao->type=BYTEARRAYTYPE;
    unsigned int arraylength=length-sizeof(struct ArrayObject);
    ao->___length___=arraylength;
    ao->marked=0;
  }
}

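// Read the type word at ptr and return the object's type and total size:
// the fixed class size for an ordinary object, or array header plus
// length*elementsize for an array.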
void gettype_size(void * ptr, unsigned int * ttype, unsigned int * tsize) {
  int type = ((int *)ptr)[0];
  //if (type>TOTALNUMCLASSANDARRAY) {
  //  tprintf("ptr=%x type=%u\n", ptr, type);
  //}

  if(type < NUMCLASSES) {
    // a normal object
    *tsize = classsize[type];
    *ttype = type;
  } else {
    // an array
    struct ArrayObject *ao=(struct ArrayObject *)ptr;
    unsigned int elementsize=classsize[type];
    unsigned int length=ao->___length___;
    *tsize = sizeof(struct ArrayObject)+length*elementsize;
    *ttype = type;
  }
}