[model-checker-benchmarks.git] / ms-queue / queue_wildcard.c
#include <threads.h>
#include <stdlib.h>
#include <stdbool.h> /* dequeue() below returns bool */
#include "librace.h"
#include "model-assert.h"

#include "queue.h"
#include "wildcard.h"
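
/*
 * A note on wildcard(n), as used below (an assumption from its usage here,
 * not something this file states): wildcard.h supplies placeholder memory
 * orders that a fence-inference tool such as SCFence instantiates with
 * concrete orderings; the trailing "// relaxed" / "// acquire" comments
 * record the orderings the plain ms-queue version used.
 */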

#define MAX_FREELIST 4 /* Each thread can own up to MAX_FREELIST free nodes */
#define INITIAL_FREE 2 /* Each thread starts with INITIAL_FREE free nodes */

#define POISON_IDX 0x666
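
/*
 * Per-thread free lists: free_lists[t] holds up to MAX_FREELIST node indexes
 * owned by thread t. A slot containing 0 is empty; node index 0 is never
 * handed out, so it doubles as the "no node" value.
 */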
static unsigned int (*free_lists)[MAX_FREELIST];

/* Search this thread's free list for a "new" node */
static unsigned int new_node()
{
        int i;
        int t = get_thread_num();
        for (i = 0; i < MAX_FREELIST; i++) {
                //unsigned int node = load_32(&free_lists[t][i]);
                unsigned int node = free_lists[t][i];
                if (node) {
                        //store_32(&free_lists[t][i], 0);
                        free_lists[t][i] = 0;
                        return node;
                }
        }
        /* free_list is empty? */
        MODEL_ASSERT(0);
        return 0;
}

/* Simulate the fact that once a node is recycled, it may be handed back to
 * the same queue or reused for something else */
void simulateRecycledNodeUpdate(queue_t *q, unsigned int node) {
        atomic_store_explicit(&q->nodes[node].next, -1, memory_order_release);
}

/* Place this node index back on this thread's free list */
static void reclaim(unsigned int node)
{
        int i;
        int t = get_thread_num();

        /* Don't reclaim NULL node */
        //MODEL_ASSERT(node);

        for (i = 0; i < MAX_FREELIST; i++) {
                /* Should never race with our own thread here */
                //unsigned int idx = load_32(&free_lists[t][i]);
                unsigned int idx = free_lists[t][i];

                /* Found empty spot in free list */
                if (idx == 0) {
                        //store_32(&free_lists[t][i], node);
                        free_lists[t][i] = node;
                        return;
                }
        }
        /* free list is full? */
        MODEL_ASSERT(0);
}
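
/*
 * new_node() and reclaim() only ever touch the calling thread's own row of
 * free_lists, which is why plain accesses suffice here; the commented-out
 * load_32()/store_32() calls are (presumably) the librace.h-instrumented
 * variants the benchmark used for race checking.
 */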

void init_queue(queue_t *q, int num_threads)
{
        int i, j;

        /* Initialize each thread's free list with INITIAL_FREE node indexes */
        /* The actual nodes are initialized with poison indexes */
        free_lists = malloc(num_threads * sizeof(*free_lists));
        for (i = 0; i < num_threads; i++) {
                for (j = 0; j < INITIAL_FREE; j++) {
                        free_lists[i][j] = 2 + i * MAX_FREELIST + j;
                        atomic_init(&q->nodes[free_lists[i][j]].next, MAKE_POINTER(POISON_IDX, 0));
                }
        }

        /* initialize queue */
        atomic_init(&q->head, MAKE_POINTER(1, 0));
        atomic_init(&q->tail, MAKE_POINTER(1, 0));
        atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
}
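
/*
 * Node-index layout implied by the initialization above: index 0 plays the
 * role of NULL, index 1 is the dummy node that head and tail initially point
 * at, and thread i starts out owning nodes 2 + i * MAX_FREELIST through
 * 2 + i * MAX_FREELIST + INITIAL_FREE - 1, each poisoned until it is first
 * enqueued.
 */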

void enqueue(queue_t *q, unsigned int val, int n)
{
        int success = 0;
        unsigned int node;
        pointer tail;
        pointer next;
        pointer tmp;

        //node = new_node();
        if (n == 0) // caller doesn't want to control node allocation: take one from this thread's free list
                node = new_node();
        else
                node = n;
        //store_32(&q->nodes[node].value, val);
        q->nodes[node].value = val;
        tmp = atomic_load_explicit(&q->nodes[node].next, wildcard(1)); // relaxed
        set_ptr(&tmp, 0); // NULL
        // FIXME: SCFence makes this release, and this is actually a bug!!!
        atomic_store_explicit(&q->nodes[node].next, tmp, wildcard(2)); // relaxed

        while (!success) {
                tail = atomic_load_explicit(&q->tail, wildcard(3)); // acquire
                next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, wildcard(4)); // acquire
                if (tail == atomic_load_explicit(&q->tail, wildcard(5))) { // relaxed

                        /* Check for uninitialized 'next' */
                        //MODEL_ASSERT(get_ptr(next) != POISON_IDX);

                        if (get_ptr(next) == 0) { // == NULL
                                pointer value = MAKE_POINTER(node, get_count(next) + 1);
                                success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
                                                &next, value, wildcard(6), wildcard(7)); // release & relaxed
                        }
                        if (!success) {
                                unsigned int ptr =
                                        get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, wildcard(8))); // acquire
                                pointer value = MAKE_POINTER(ptr,
                                                get_count(tail) + 1);
                                atomic_compare_exchange_strong_explicit(&q->tail,
                                                &tail, value,
                                                wildcard(9), wildcard(10)); // release & relaxed
                                thrd_yield();
                        }
                }
        }
        atomic_compare_exchange_strong_explicit(&q->tail,
                        &tail,
                        MAKE_POINTER(node, get_count(tail) + 1),
                        wildcard(11), wildcard(12)); // release & relaxed
}
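
/*
 * The loop above is the standard Michael-Scott enqueue: CAS the new node onto
 * tail->next, and on failure help swing a lagging q->tail forward before
 * retrying; the final CAS then tries to advance the tail to the new node.
 * dequeue() below mirrors it: when head == tail it either reports an empty
 * queue or helps advance the tail, otherwise it reads the value and CASes
 * head forward. The MAKE_POINTER/get_ptr/get_count/set_ptr macros (presumably
 * from queue.h) pack a node index together with a modification count; bumping
 * the count on every CAS is what protects these CASes against ABA when node
 * indexes are recycled through the free lists.
 */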

bool dequeue(queue_t *q, unsigned int *retVal, unsigned int *reclaimNode)
{
        unsigned int value;
        int success = 0;
        pointer head;
        pointer tail;
        pointer next;

        while (!success) {
                head = atomic_load_explicit(&q->head, wildcard(13)); // acquire
                // SCFence makes this acquire, and we actually need an acquire here!!!
                tail = atomic_load_explicit(&q->tail, wildcard(14)); // relaxed
                next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, wildcard(15)); // acquire
                if (atomic_load_explicit(&q->head, wildcard(16)) == head) { // relaxed
                        if (get_ptr(head) == get_ptr(tail)) {

                                /* Check for uninitialized 'next' */
                                //MODEL_ASSERT(get_ptr(next) != POISON_IDX);

                                if (get_ptr(next) == 0) { // NULL
                                        return false; // NULL
                                }
                                atomic_compare_exchange_strong_explicit(&q->tail,
                                                &tail,
                                                MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
                                                wildcard(17), wildcard(18)); // release & relaxed
                                thrd_yield();
                        } else {
                                //value = load_32(&q->nodes[get_ptr(next)].value);
                                value = q->nodes[get_ptr(next)].value;
                                success = atomic_compare_exchange_strong_explicit(&q->head,
                                                &head, MAKE_POINTER(get_ptr(next), get_count(head) + 1),
                                                wildcard(19), wildcard(20)); // release & relaxed
                                if (!success)
                                        thrd_yield();
                        }
                }
        }

        *reclaimNode = get_ptr(head);
        reclaim(get_ptr(head));
        *retVal = value;
        return true;
}
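
/*
 * Minimal usage sketch, not part of the benchmark (the real driver lives in
 * the harness's own main file). It assumes CDSChecker-style threads.h
 * conventions (void-returning thread functions, a user_main() entry point)
 * and a hypothetical worker(); it only illustrates the calling convention:
 * pass 0 as enqueue's third argument to draw a node from the caller's free
 * list, and give dequeue an address to report the reclaimed node index
 * through.
 *
 *      static queue_t queue;
 *
 *      static void worker(void *arg)
 *      {
 *              unsigned int val, node;
 *              enqueue(&queue, 42, 0);
 *              if (dequeue(&queue, &val, &node))
 *                      simulateRecycledNodeUpdate(&queue, node);
 *      }
 *
 *      int user_main(int argc, char **argv)
 *      {
 *              thrd_t t1, t2;
 *              init_queue(&queue, 2);
 *              thrd_create(&t1, worker, NULL);
 *              thrd_create(&t2, worker, NULL);
 *              thrd_join(t1);
 *              thrd_join(t2);
 *              return 0;
 *      }
 */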