4 #include "model-assert.h"
8 #define relaxed memory_order_relaxed
9 #define release memory_order_release
10 #define acquire memory_order_acquire
12 #define MAX_FREELIST 4 /* Each thread can own up to MAX_FREELIST free nodes */
13 #define INITIAL_FREE 3 /* Each thread starts with INITIAL_FREE free nodes */
15 #define POISON_IDX 0x666
17 static unsigned int (*free_lists)[MAX_FREELIST];
19 /* Search this thread's free list for a "new" node */
20 static unsigned int new_node()
/* NOTE(review): only a sampled subset of this function's lines is visible
 * here. The test that a slot is non-empty, the store that clears the claimed
 * slot, and the return paths fall in the gaps between visible lines --
 * confirm against the full file before editing. */
/* Per-thread free lists, so allocation never races with other threads. */
23 int t = get_thread_num();
/* Scan this thread's row of free_lists for a claimable node index. */
24 for (i = 0; i < MAX_FREELIST; i++) {
/* Commented-out load_32/plain-load variants kept by the benchmark authors
 * to toggle which accesses the model checker treats as instrumented. */
25 //unsigned int node = load_32(&free_lists[t][i]);
26 unsigned int node = free_lists[t][i];
27 //unsigned int node = free_lists[t][i];
29 //store_32(&free_lists[t][i], 0);
31 //free_lists[t][i] = 0;
/* Falling out of the loop means every slot was empty (index 0). */
35 /* free_list is empty? */
40 /* Place this node index back on this thread's free list */
41 static void reclaim(unsigned int node)
/* NOTE(review): sampled view -- the NULL-node guard body, the empty-slot
 * test on `idx`, and the early return after storing are in lines not shown
 * here. Comments describe only the visible lines. */
/* Nodes are returned to the reclaiming thread's own row; index 0 doubles
 * as both the NULL pointer value and the empty-slot marker. */
44 int t = get_thread_num();
46 /* Don't reclaim NULL node */
49 for (i = 0; i < MAX_FREELIST; i++) {
50 /* Should never race with our own thread here */
/* Plain (non-atomic) accesses are deliberate: free_lists[t] is only ever
 * touched by thread t, per the comment above. */
51 //unsigned int idx = load_32(&free_lists[t][i]);
52 unsigned int idx = free_lists[t][i];
53 //unsigned int idx = free_lists[t][i];
55 /* Found empty spot in free list */
57 //store_32(&free_lists[t][i], node);
58 free_lists[t][i] = node;
59 //free_lists[t][i] = node;
/* Reaching here would mean more reclaims than MAX_FREELIST slots. */
63 /* free list is full? */
/* Set up the queue and the per-thread node free lists.
 * Node indexing scheme (from the visible initializers): index 0 is NULL,
 * index 1 is the initial dummy node, and indices 2.. are handed out to
 * threads' free lists. NOTE(review): sampled view -- declarations of i/j
 * and the loops' closing braces are in lines not shown here. */
67 void init_queue(queue_t *q, int num_threads)
71 /* Initialize each thread's free list with INITIAL_FREE pointers */
72 /* The actual nodes are initialized with poison indexes */
/* NOTE(review): malloc result is not checked before use in the loop below
 * -- acceptable in a model-checking harness, but worth confirming. */
73 free_lists = malloc(num_threads * sizeof(*free_lists));
74 for (i = 0; i < num_threads; i++) {
75 for (j = 0; j < INITIAL_FREE; j++) {
/* Thread i owns indices [2 + i*MAX_FREELIST, 2 + i*MAX_FREELIST + INITIAL_FREE). */
76 free_lists[i][j] = 2 + i * MAX_FREELIST + j;
/* POISON_IDX in `next` lets MODEL_ASSERTs catch reads of uninitialized links. */
77 atomic_init(&q->nodes[free_lists[i][j]].next, MAKE_POINTER(POISON_IDX, 0));
81 /* initialize queue */
/* Head and tail both start at the dummy node (index 1) with ABA count 0;
 * the dummy's next is NULL (index 0). */
82 atomic_init(&q->head, MAKE_POINTER(1, 0));
83 atomic_init(&q->tail, MAKE_POINTER(1, 0));
84 atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
89 @Interface_define: Enqueue
/* Michael-Scott enqueue: link a freshly claimed node after the current
 * tail, then swing the tail (or help another enqueue swing it).
 * NOTE(review): sampled view -- the node acquisition via new_node(), the
 * retry-loop header, the `break` on success, and the arguments of the
 * final tail-swing CAS are in lines not shown here. The /'**** detected
 * UL / FIXME: miss ****'/ comments are the benchmark authors' annotations
 * of memory-order choices the model checker flagged; do not remove them. */
92 void enqueue(queue_t *q, unsigned int val)
/* Publish the payload and reset the new node's next link to NULL before
 * it becomes reachable. relaxed is sufficient here: the release CAS that
 * links the node orders these writes for readers. */
101 //store_32(&q->nodes[node].value, val);
102 q->nodes[node].value = val;
103 //q->nodes[node].value = val;
104 tmp = atomic_load_explicit(&q->nodes[node].next, relaxed);
105 set_ptr(&tmp, 0); // NULL
106 atomic_store_explicit(&q->nodes[node].next, tmp, relaxed);
109 /**** detected UL (2 threads, 1 enqueue & 1 dequeue) ****/
/* Snapshot tail, then tail->next; acquire pairs with the release CASes. */
110 tail = atomic_load_explicit(&q->tail, acquire);
111 /****FIXME: miss ****/
112 next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire);
/* Re-read tail to validate the snapshot is still current (classic MS-queue
 * consistency check; the ABA count in `pointer` makes equality meaningful). */
113 if (tail == atomic_load_explicit(&q->tail, relaxed)) {
115 /* Check for uninitialized 'next' */
116 //MODEL_ASSERT(get_ptr(next) != POISON_IDX);
/* Tail was the last node: try to link our node after it. */
118 if (get_ptr(next) == 0) { // == NULL
/* Bump the ABA count when installing the new pointer. */
119 pointer value = MAKE_POINTER(node, get_count(next) + 1);
120 /**** correctness error (1 dequeue & 1 enqueue) ****/
121 success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
122 &next, value, release, relaxed);
125 @Commit_point_define_check: success == true
126 @Label: Enqueue_Success_Point
/* Tail was lagging: help the other enqueue by swinging q->tail forward. */
131 // This routine helps the other enqueue to update the tail
132 /**** detected UL (2 threads, 1 enqueue & 1 dequeue) ****/
133 unsigned int ptr = get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire));
134 pointer value = MAKE_POINTER(ptr,
135 get_count(tail) + 1);
136 /****FIXME: miss ****/
/* Failure is fine: someone else already advanced the tail. */
138 succ = atomic_compare_exchange_strong_explicit(&q->tail,
139 &tail, value, release, relaxed);
141 //printf("miss2_enqueue CAS succ\n");
143 //printf("miss2_enqueue\n");
/* After linking succeeded: best-effort swing of tail to the new node. */
148 /**** correctness error (1 dequeue & 1 enqueue) ****/
149 atomic_compare_exchange_strong_explicit(&q->tail,
151 MAKE_POINTER(node, get_count(tail) + 1),
157 @Interface_define: Dequeue
/* Michael-Scott dequeue: read the value out of head->next, advance head,
 * and recycle the old dummy node. Returns false when the queue is empty.
 * NOTE(review): sampled view -- the retry-loop header, the `&tail`/`&head`
 * expected-value arguments and memory orders of the two CASes, and the
 * final return are in lines not shown here; comments cover only what is
 * visible. */
160 bool dequeue(queue_t *q, unsigned int *retVal)
169 /**** FIXME: miss ****/
/* Snapshot head, tail, and head->next; acquire pairs with enqueue's
 * release CASes so the node's value write is visible below. */
170 head = atomic_load_explicit(&q->head, acquire);
171 // This must be acquire otherwise we have a bug with 1 enqueue &
173 /**** correctness error (1 dequeue & 1 enqueue) ****/
174 tail = atomic_load_explicit(&q->tail, acquire);
175 /**** correctness error (1 dequeue & 1 enqueue) ****/
176 next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, acquire);
177 //printf("miss3_dequeue\n");
/* Validate the head snapshot is still current before acting on it. */
178 if (atomic_load_explicit(&q->head, relaxed) == head) {
/* head == tail: queue is either empty or tail is lagging behind. */
179 if (get_ptr(head) == get_ptr(tail)) {
181 /* Check for uninitialized 'next' */
182 //MODEL_ASSERT(get_ptr(next) != POISON_IDX);
/* Truly empty: dummy node has no successor. */
184 if (get_ptr(next) == 0) { // NULL
187 @Commit_point_define_check: true
188 @Label: Dequeue_Empty_Point
191 return false; // NULL
/* Tail lagging: help swing it forward before retrying. */
193 /****FIXME: miss (not reached) ****/
195 succ = atomic_compare_exchange_strong_explicit(&q->tail,
197 MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
200 //printf("miss4_dequeue CAS succ\n");
202 //printf("miss4_dequeue\n");
/* Read the value BEFORE the CAS advances head -- afterwards another
 * dequeuer could reclaim and reuse the node. */
205 //*retVal = load_32(&q->nodes[get_ptr(next)].value);
206 *retVal = q->nodes[get_ptr(next)].value;
207 //value = q->nodes[get_ptr(next)].value;
208 /**** FIXME: miss (not reached) ****/
/* Advance head to next, bumping the ABA count. */
209 success = atomic_compare_exchange_strong_explicit(&q->head,
211 MAKE_POINTER(get_ptr(next), get_count(head) + 1),
215 @Commit_point_define_check: success == true
216 @Label: Dequeue_Success_Point
/* Old dummy (the node head pointed at) goes back on this thread's free list. */
224 reclaim(get_ptr(head));