#include "workschedule.h"
#include "mlp_runtime.h"
#include "coreprof/coreprof.h"

// NOTE: Converting this from a work-stealing strategy
// to a single-queue thread pool protected by a single
// lock. This will not scale, but it will support
// development of the system for now.
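// The shared work queue below is a two-lock linked list with a dummy
// head node: systemLockIn guards the tail (producer side, used by
// workScheduleSubmit) and systemLockOut guards the head (consumer
// side, used by workerMain). The node type struct QI is defined
// elsewhere; judging from how headqi/tailqi, ->next and ->value are
// used in this file, it presumably looks roughly like:
//
//   struct QI {
//     struct QI* next;   // next node toward the tail
//     void*      value;  // the work unit itself
//   };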
typedef struct Queue deq;

typedef struct workerData_t {
  pthread_t workerThread;
  int       id;
} WorkerData;
static pthread_mutex_t systemLockIn;
static pthread_mutex_t systemLockOut;

// implementation internal data
static WorkerData* workerDataArray;
static pthread_t*  workerArray;

static int numWorkers;
static int systemStarted = 0;

static pthread_cond_t systemBeginCond = PTHREAD_COND_INITIALIZER;
static void(*workFunc)(void*);

static pthread_cond_t workAvailCond = PTHREAD_COND_INITIALIZER;

static struct QI* headqi;
static struct QI* tailqi;
pthread_mutex_t gclock;
pthread_mutex_t gclistlock;
pthread_cond_t  gccond;

extern struct listitem* list;
extern __thread struct listitem litem;
extern __thread SESEcommon* seseCommon;
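// shared with the rest of the runtime: while a worker executes a work
// unit it links its thread-local litem onto the global list (under
// gclistlock), presumably so the garbage collector can find the SESE
// records that are currently in flight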
void workerExit( void* arg ) {
  //printf( "Thread %d canceled.\n", pthread_self() );
}
void* workerMain( void* arg ) {
  void*       workUnit;
  WorkerData* myData = (WorkerData*) arg;

  // once-per-thread stuff

  //pthread_cleanup_push( workerExit, NULL );

  // ensure that object IDs start at 1 so that an
  // oid with value 0 indicates an invalid object

  //pthread_setcanceltype ( PTHREAD_CANCEL_ASYNCHRONOUS, &oldState );
  //pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, &oldState );
  // then continue to process work
  while( 1 ) {

    //CP_LOGEVENT( CP_EVENTID_WORKSCHEDGRAB, CP_EVENTTYPE_BEGIN );
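    // grab the next work unit from the head (consumer) end of the
    // shared queue; systemLockOut serializes the consumers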
    pthread_mutex_lock( &systemLockOut );
    if( headqi->next == NULL ) {
      // nothing queued yet: drop the lock, let others run, try again
      pthread_mutex_unlock( &systemLockOut );
      sched_yield();
      continue;
    }

    // pop: step past the dummy head node and take the new head's value
    struct QI* tmp = headqi;
    headqi   = headqi->next;
    workUnit = headqi->value;
    pthread_mutex_unlock( &systemLockOut );
    free( tmp );  // assumes RUNMALLOC memory is released with free

    //CP_LOGEVENT( CP_EVENTID_WORKSCHEDGRAB, CP_EVENTTYPE_END );
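    // before running the work unit, publish it on the global list so
    // the rest of the runtime can see what this worker is executing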
    pthread_mutex_lock( &gclistlock );
    litem.seseCommon = (void*) workUnit;
    litem.prev = NULL;
    litem.next = list;
    if( list != NULL ) { list->prev = &litem; }
    list = &litem;
    seseCommon = (SESEcommon*) workUnit;
    pthread_mutex_unlock( &gclistlock );
    workFunc( workUnit );
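    // the work unit has finished; unlink this thread's litem from the
    // global list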
    pthread_mutex_lock( &gclistlock );
    if( litem.prev == NULL ) {
      list = litem.next;
    } else {
      litem.prev->next = litem.next;
    }
    if( litem.next != NULL ) {
      litem.next->prev = litem.prev;
    }
    pthread_mutex_unlock( &gclistlock );

    //pthread_cleanup_pop( 0 );
  }
  return NULL;
}
void workScheduleInit( int numProcessors,
                       void(*func)(void*) ) {
  int i;
  int status;

  // the original thread must call this now to protect the memory
  // allocation events that are coming, but it will also add itself
  // to the worker pool and therefore try to call it again;
  // CP_CREATE should just ignore the duplicate call
  pthread_mutex_init( &gclock,     NULL );
  pthread_mutex_init( &gclistlock, NULL );
  pthread_cond_init ( &gccond,     NULL );

  numWorkers = numProcessors + 1;
  workFunc   = func;
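  // the queue always keeps one dummy node at the head, so
  // headqi == tailqi and headqi->next == NULL means "empty"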
  headqi = tailqi = RUNMALLOC( sizeof( struct QI ) );
  headqi->next = NULL;
  status = pthread_mutex_init( &systemLockIn,  NULL );
  status = pthread_mutex_init( &systemLockOut, NULL );

  // allocate space for one more--the original thread (running
  // this code) will become a worker thread after setup
  workerDataArray = RUNMALLOC( sizeof( WorkerData ) * (numWorkers+1) );
  for( i = 0; i < numWorkers; ++i ) {

    // the original thread is ID 1, start counting from there
    workerDataArray[i].id = 2 + i;

    status = pthread_create( &(workerDataArray[i].workerThread),
                             NULL,
                             workerMain,
                             (void*) &(workerDataArray[i])
                           );
    if( status != 0 ) { printf( "Error\n" ); exit( -1 ); }
  }

  // yield and let all workers get to the begin
  // condition variable, waiting--we have to hold them
  // so they don't all see empty work queues right away
  if( sched_yield() == -1 ) { printf( "Error thread trying to yield.\n" ); exit( -1 ); }
}
void workScheduleSubmit( void* workUnit ) {
  struct QI* item = RUNMALLOC( sizeof( struct QI ) );
  item->value = workUnit;
  item->next  = NULL;

  pthread_mutex_lock  ( &systemLockIn );
  tailqi->next = item;
  tailqi       = item;
  pthread_mutex_unlock( &systemLockIn );
}
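// Expected driver sequence, inferred from the functions in this file
// (the real call sites live in generated code elsewhere); the work
// function and work unit names below are placeholders:
//
//   workScheduleInit( numProcessors, processWorkUnit );
//   workScheduleSubmit( firstWorkUnit );  // seed the queue
//   workScheduleBegin();                  // caller becomes worker 1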
// really should be named "add original thread as a worker"
void workScheduleBegin() {

  // space was saved for the original thread to become a
  // worker after setup is complete
  workerDataArray[numWorkers].id           = 1;
  workerDataArray[numWorkers].workerThread = pthread_self();

  workerMain( &(workerDataArray[numWorkers]) );
}
// The above function does NOT naturally join all the worker
// threads at exit. Once the main SESE/Rblock/Task completes we
// know all worker threads are finished executing other tasks, so
// we can explicitly kill the workers and thereby trigger any
// worker-specific cleanup (like coreprof!)
void workScheduleExit() {

  // This is not working well--canceled threads don't run their
  // thread-level exit routines? Anyway, it's not critical for
  // coreprof, but if we ever need a per-worker exit routine to
  // run we'll have to look back into this.
  //printf( "Thread %d performing schedule exit.\n", pthread_self() );

  //for( i = 0; i < numWorkers; ++i ) {
  //  if( pthread_self() != workerDataArray[i].workerThread ) {
  //    pthread_cancel( workerDataArray[i].workerThread );
  //  }
  //}

  //// how to let all the threads actually get canceled?
}