4 #include "model-assert.h"
/* Shorthand aliases for the C11 memory_order_* constants used throughout. */
8 #define relaxed memory_order_relaxed
9 #define release memory_order_release
10 #define acquire memory_order_acquire
12 #define MAX_FREELIST 4 /* Each thread can own up to MAX_FREELIST free nodes */
13 #define INITIAL_FREE 3 /* Each thread starts with INITIAL_FREE free nodes */
/* Sentinel written into every node's 'next' link at init time so that a read
 * of an uninitialized link can be detected (see the commented-out
 * MODEL_ASSERT(get_ptr(next) != POISON_IDX) checks in enqueue/dequeue). */
15 #define POISON_IDX 0x666
/* Per-thread free lists: free_lists[t] is thread t's fixed array of
 * MAX_FREELIST node indices; slot value 0 marks an empty slot (index 0 also
 * serves as NULL elsewhere). Allocated in init_queue(). */
17 static unsigned int (*free_lists)[MAX_FREELIST];
19 /* Search this thread's free list for a "new" node */
/* NOTE(review): this excerpt is incomplete — the embedded line numbers jump
 * (20→23, 26→28, 29→33), so the declaration of 'i', the nonzero test on
 * 'node', and the return statements are not visible here. Presumably the
 * function returns the first nonzero index found, or 0/asserts when the
 * list is empty — confirm against the full source. */
20 static unsigned int new_node()
/* Only the calling thread's own list is scanned; reclaim() notes the same
 * slots "should never race with our own thread". */
23 int t = get_thread_num();
24 for (i = 0; i < MAX_FREELIST; i++) {
25 unsigned int node = load_32(&free_lists[t][i]);
26 //unsigned int node = free_lists[t][i];
/* Claim the node by clearing its slot (0 = empty slot). */
28 store_32(&free_lists[t][i], 0);
29 //free_lists[t][i] = 0;
33 /* free_list is empty? */
38 /* Place this node index back on this thread's free list */
/* NOTE(review): excerpt is incomplete — the guard enforcing "don't reclaim
 * NULL" (44→47) and the emptiness test on 'idx' before the store (50→52→54)
 * are not visible here; presumably the store happens only when idx == 0. */
39 static void reclaim(unsigned int node)
42 int t = get_thread_num();
44 /* Don't reclaim NULL node */
47 for (i = 0; i < MAX_FREELIST; i++) {
48 /* Should never race with our own thread here */
49 unsigned int idx = load_32(&free_lists[t][i]);
50 //unsigned int idx = free_lists[t][i];
52 /* Found empty spot in free list */
/* Return the node index to the first empty (0) slot of this thread's list. */
54 store_32(&free_lists[t][i], node);
55 //free_lists[t][i] = node;
59 /* free list is full? */
/* Initialize the queue and the per-thread free lists.
 * NOTE(review): excerpt is incomplete — closing braces of the loops and any
 * declarations of 'i'/'j' are missing (numbering jumps 67→70, 76→80). */
63 void init_queue(queue_t *q, int num_threads)
/* Poison every node's next link so that use of an uninitialized link can be
 * caught via POISON_IDX checks later. */
66 for (i = 0; i < MAX_NODES; i++) {
67 atomic_init(&q->nodes[i].next, MAKE_POINTER(POISON_IDX, 0));
70 /* Initialize each thread's free list with INITIAL_FREE pointers */
71 /* The actual nodes are initialized with poison indexes */
/* NOTE(review): malloc result is unchecked — tolerable in a model-checking
 * harness, but confirm for any other use. */
72 free_lists = malloc(num_threads * sizeof(*free_lists));
73 for (i = 0; i < num_threads; i++) {
74 for (j = 0; j < INITIAL_FREE; j++) {
/* Indices 0 and 1 are reserved (0 acts as NULL, 1 is the initial queue
 * node below), hence the "2 +" base for thread-owned node indices. */
75 free_lists[i][j] = 2 + i * MAX_FREELIST + j;
76 atomic_init(&q->nodes[free_lists[i][j]].next, MAKE_POINTER(POISON_IDX, 0));
80 /* initialize queue */
/* Head and tail both start at node 1 (the initial/dummy node); its next
 * link is NULL, i.e. index 0 with count 0. */
81 atomic_init(&q->head, MAKE_POINTER(1, 0));
82 atomic_init(&q->tail, MAKE_POINTER(1, 0));
83 atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
/* Model-checker specification annotation (commit points for the Enqueue
 * interface); in the full source these @-lines are presumably wrapped in
 * annotation comments — confirm. */
88 @Interface_define: Enqueue
/* Lock-free enqueue in the Michael&Scott style: link the new node after the
 * current tail with a CAS on tail->next, then swing (or help swing) tail.
 * Links are packed pointer+count values (MAKE_POINTER/get_ptr/get_count),
 * with the count bumped on every CAS — presumably as an ABA guard; confirm.
 * NOTE(review): excerpt is incomplete — the declarations of node/tmp/tail/
 * next/success, the acquisition of 'node' (presumably from new_node()),
 * the retry loop header, and several closing braces are not visible here. */
91 void enqueue(queue_t *q, unsigned int val)
100 store_32(&q->nodes[node].value, val);
101 //q->nodes[node].value = val;
/* Reset the (poisoned) next link of the new node to NULL, preserving its
 * packed count field. */
102 tmp = atomic_load_explicit(&q->nodes[node].next, relaxed);
103 set_ptr(&tmp, 0); // NULL
104 atomic_store_explicit(&q->nodes[node].next, tmp, relaxed);
109 @Commit_point_clear: true
110 @Label: Enqueue_Clear
113 /**** detected UL ****/
114 tail = atomic_load_explicit(&q->tail, acquire);
117 @Commit_point_define_check: true
118 @Label: Enqueue_Read_Tail
121 /****FIXME: miss ****/
122 next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire);
123 //printf("miss1_enqueue\n");
/* Make sure tail has not moved since we read it; otherwise retry. */
124 if (tail == atomic_load_explicit(&q->tail, relaxed)) {
126 /* Check for uninitialized 'next' */
127 //MODEL_ASSERT(get_ptr(next) != POISON_IDX);
/* Tail was pointing at the last node: try to link the new node in. */
129 if (get_ptr(next) == 0) { // == NULL
130 pointer value = MAKE_POINTER(node, get_count(next) + 1);
131 /**** detected UL ****/
132 // Second release can be just relaxed
133 success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
134 &next, value, release, relaxed);
137 @Commit_point_define_check: success
138 @Label: Enqueue_UpdateNext
/* else-branch: tail is lagging behind the real last node. */
143 // This routine helps the other enqueue to update the tail
144 /**** detected UL ****/
145 unsigned int ptr = get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire));
146 pointer value = MAKE_POINTER(ptr,
147 get_count(tail) + 1);
148 /****FIXME: miss ****/
149 // Second release can be just relaxed
151 succ = atomic_compare_exchange_strong_explicit(&q->tail,
152 &tail, value, release, relaxed);
154 //printf("miss2_enqueue CAS succ\n");
156 //printf("miss2_enqueue\n");
/* After successfully linking the node, try to swing tail to it; failure is
 * fine — some other thread has already helped.
 * NOTE(review): the expected-value argument of this CAS (between lines 163
 * and 165 of the original numbering) is missing from this excerpt. */
161 /**** detected UL ****/
162 // Second release can be just relaxed
163 bool succ = atomic_compare_exchange_strong_explicit(&q->tail,
165 MAKE_POINTER(node, get_count(tail) + 1),
169 @Additional_ordering_point_define_check: true
170 @Label: Enqueue_Additional_Tail_LoadOrCAS
/* Model-checker specification annotation (commit points for the Dequeue
 * interface); presumably comment-wrapped in the full source — confirm. */
178 @Interface_define: Dequeue
/* Lock-free dequeue in the Michael&Scott style: snapshot head, tail and
 * head->next; return false on empty, help swing a lagging tail, otherwise
 * read the value from the successor node and CAS head forward, then reclaim
 * the old dummy node to this thread's free list.
 * NOTE(review): excerpt is incomplete — the retry loop header, several
 * CAS expected-value arguments, closing braces, the write through retVal,
 * and the final 'return true' are not visible here (embedded numbering
 * jumps throughout). */
181 bool dequeue(queue_t *q, int *retVal)
183 unsigned int value = 0;
192 @Commit_point_clear: true
193 @Label: Dequeue_Clear
196 /**** detected correctness error ****/
197 head = atomic_load_explicit(&q->head, acquire);
200 @Commit_point_define_check: true
201 @Label: Dequeue_Read_Head
204 /** FIXME: A new bug has been found here!!! It should be acquire instead of
205 * relaxed (it introduces a bug when there's two dequeuers and one
/* NOTE(review): the fix described by the FIXME above appears to already be
 * applied — the load below uses 'acquire'. */
208 tail = atomic_load_explicit(&q->tail, acquire);
211 @Potential_commit_point_define: true
212 @Label: Dequeue_Potential_Read_Tail
216 /****FIXME: miss ****/
217 next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, acquire);
220 @Potential_commit_point_define: true
221 @Label: Dequeue_Potential_LoadNext
/* Ensure head has not moved since we read it; otherwise retry. */
225 if (atomic_load_explicit(&q->head, relaxed) == head) {
/* head == tail: queue is either empty or tail is lagging. */
226 if (get_ptr(head) == get_ptr(tail)) {
228 /* Check for uninitialized 'next' */
229 //MODEL_ASSERT(get_ptr(next) != POISON_IDX);
/* No successor: the queue is empty. */
231 if (get_ptr(next) == 0) { // NULL
234 @Commit_point_define: true
235 @Potential_commit_point_label: Dequeue_Potential_Read_Tail
236 @Label: Dequeue_Read_Tail
239 return false; // NULL
/* Tail is lagging: help swing it to the successor before retrying.
 * NOTE(review): this CAS's expected-value argument is missing here. */
241 /****FIXME: miss (not reached) ****/
242 // Second release can be just relaxed
244 succ = atomic_compare_exchange_strong_explicit(&q->tail,
246 MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
249 //printf("miss4_dequeue CAS succ\n");
251 //printf("miss4_dequeue\n");
/* Normal path: read the value out of the successor BEFORE the CAS on head,
 * since the old head node may be reclaimed/reused afterwards. */
254 value = load_32(&q->nodes[get_ptr(next)].value);
255 //value = q->nodes[get_ptr(next)].value;
256 /****FIXME: correctness error ****/
257 success = atomic_compare_exchange_strong_explicit(&q->head,
259 MAKE_POINTER(get_ptr(next), get_count(head) + 1),
263 @Commit_point_define: success
264 @Potential_commit_point_label: Dequeue_Potential_LoadNext
265 @Label: Dequeue_LoadNext
/* Old dummy node is ours now: return its index to this thread's free list. */
273 reclaim(get_ptr(head));