/* benchmark/ms-queue/my_queue.c (cdsspec-compiler.git) */

#include <threads.h>
#include <stdlib.h>
#include "librace.h"
#include "model-assert.h"

#include "my_queue.h"

#define relaxed memory_order_relaxed
#define release memory_order_release
#define acquire memory_order_acquire

#define MAX_FREELIST 4 /* Each thread can own up to MAX_FREELIST free nodes */
#define INITIAL_FREE 2 /* Each thread starts with INITIAL_FREE free nodes */

#define POISON_IDX 0x666

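/*
 * Queue links are "counted pointers": a node index packed together with a
 * modification count, manipulated via MAKE_POINTER/get_ptr/get_count/set_ptr
 * from my_queue.h. The count is bumped on every successful CAS so that a
 * recycled node index is never mistaken for its previous incarnation (the
 * classic ABA problem). One plausible 16/16-bit encoding, shown purely as an
 * illustration (the authoritative definitions live in my_queue.h):
 */
#if 0
typedef unsigned int pointer;
#define MAKE_POINTER(ptr, count)        (((count) << 16) | (ptr))
#define get_ptr(p)                      ((p) & 0xffff)
#define get_count(p)                    ((p) >> 16)
#define set_ptr(pp, ptr)                (*(pp) = (*(pp) & ~0xffffu) | (ptr))
#endif
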
static unsigned int (*free_lists)[MAX_FREELIST];
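/* Each slot of free_lists[t] holds a node index owned by thread t; a slot
 * value of 0 means "empty", since node index 0 doubles as NULL. */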

/* Search this thread's free list for a "new" node */
static unsigned int new_node(void)
{
        int i;
        int t = get_thread_num();
        for (i = 0; i < MAX_FREELIST; i++) {
                unsigned int node = load_32(&free_lists[t][i]);
                if (node) {
                        store_32(&free_lists[t][i], 0);
                        return node;
                }
        }
        /* The free list should never be empty in this benchmark */
        MODEL_ASSERT(0);
        return 0;
}

/* Place this node index back on this thread's free list */
static void reclaim(unsigned int node)
{
        int i;
        int t = get_thread_num();

        /* Don't reclaim the NULL node (index 0) */
        MODEL_ASSERT(node);

        for (i = 0; i < MAX_FREELIST; i++) {
                /* Should never race with our own thread here */
                unsigned int idx = load_32(&free_lists[t][i]);

                /* Found an empty slot in the free list */
                if (idx == 0) {
                        store_32(&free_lists[t][i], node);
                        return;
                }
        }
        /* The free list should never be full in this benchmark */
        MODEL_ASSERT(0);
}

void init_queue(queue_t *q, int num_threads)
{
        int i, j;

        /* Initialize each thread's free list with INITIAL_FREE node indices;
         * the corresponding nodes get poisoned 'next' pointers */
        free_lists = malloc(num_threads * sizeof(*free_lists));
        for (i = 0; i < num_threads; i++) {
                for (j = 0; j < INITIAL_FREE; j++) {
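                        /* Indices 0 (NULL) and 1 (the initial dummy node)
                         * are reserved, so thread i's nodes start at
                         * 2 + i * MAX_FREELIST */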
                        free_lists[i][j] = 2 + i * MAX_FREELIST + j;
                        atomic_init(&q->nodes[free_lists[i][j]].next,
                                        MAKE_POINTER(POISON_IDX, 0));
                }
        }

        /* Initialize the queue itself: both head and tail start out at the
         * dummy node (index 1), whose 'next' is NULL */
        atomic_init(&q->head, MAKE_POINTER(1, 0));
        atomic_init(&q->tail, MAKE_POINTER(1, 0));
        atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
}
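
/*
 * A minimal single-threaded smoke test, included only as an illustrative
 * sketch; the function name and the checks below are this sketch's own, not
 * part of the benchmark. With one thread, init_queue hands that thread
 * INITIAL_FREE (= 2) nodes, enough for two outstanding enqueues; each
 * dequeue recycles a node back onto the free list.
 */
#if 0
static void smoke_test(queue_t *q)
{
        init_queue(q, 1);
        enqueue(q, 17);
        enqueue(q, 42);
        MODEL_ASSERT(dequeue(q) == 17); /* FIFO order */
        MODEL_ASSERT(dequeue(q) == 42);
        MODEL_ASSERT(dequeue(q) == 0);  /* empty queue yields 0 */
}
#endif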

/**
	@Begin
	@Global_define:
		@DeclareVar:
		typedef struct tag_elem {
			Tag id;
			unsigned int data;

			tag_elem(Tag _id, unsigned int _data) {
				id = _id;
				data = _data;
			}
		} tag_elem_t;

		spec_queue<tag_elem_t> queue;
		Tag tag;
		@InitVar:
			queue = spec_queue<tag_elem_t>();
			tag = Tag();
	@Happens_before:
		# Happens-before is only checked between commit points whose
		# commit_point_sets share an ID; "A -> B" means each B must
		# happen after the matching A.
		Enqueue -> Dequeue
	@End
*/
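
/*
 * The @Begin/@End blocks in this file are CDSSpec annotations: plain C
 * comments to the compiler, but the cdsspec-compiler extracts them to
 * generate checks against the sequential specification above.
 */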

/**
	@Begin
	@Interface: Enqueue
	@Commit_point_set: Enqueue_Success_Point
	@ID: __sequential.tag.getCurAndInc()
	@Action:
		# __ID__ is an internal macro that refers to the ID of the
		# current interface call
		@Code:
		__sequential.queue.enqueue(tag_elem_t(__ID__, val));
	@End
*/
void enqueue(queue_t *q, unsigned int val)
{
        int success = 0;
        unsigned int node;
        pointer tail;
        pointer next;
        pointer tmp;

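        /* Prepare the new node privately before it is published: store the
         * value and null out its 'next' pointer */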
        node = new_node();
        store_32(&q->nodes[node].value, val);
        tmp = atomic_load_explicit(&q->nodes[node].next, relaxed);
        set_ptr(&tmp, 0); // NULL
        atomic_store_explicit(&q->nodes[node].next, tmp, relaxed);

        while (!success) {
                tail = atomic_load_explicit(&q->tail, acquire);
                next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire);
                if (tail == atomic_load_explicit(&q->tail, relaxed)) {

                        /* Check for uninitialized 'next' */
                        MODEL_ASSERT(get_ptr(next) != POISON_IDX);

                        if (get_ptr(next) == 0) { // == NULL
                                pointer value = MAKE_POINTER(node, get_count(next) + 1);
                                success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
                                                &next, value, release, relaxed);
                        }
                        if (!success) {
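                                /* The link CAS failed or 'next' was non-NULL:
                                 * another enqueue got in first, so help swing
                                 * the tail forward before retrying */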
                                unsigned int ptr = get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire));
                                pointer value = MAKE_POINTER(ptr, get_count(tail) + 1);
                                int commit_success = 0;
                                commit_success = atomic_compare_exchange_strong_explicit(&q->tail,
                                                &tail, value, release, relaxed);
                                /**
                                        @Begin
                                        @Commit_point_define_check: __ATOMIC_RET__ == true
                                        @Label: Enqueue_Success_Point
                                        @End
                                */
                                thrd_yield();
                        }
                }
        }
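        /* Swing the tail to the newly linked node; failure is benign, since
         * it means another thread has already advanced the tail for us */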
        atomic_compare_exchange_strong_explicit(&q->tail,
                        &tail,
                        MAKE_POINTER(node, get_count(tail) + 1),
                        release, relaxed);
}

/**
	@Begin
	@Interface: Dequeue
	@Commit_point_set: Dequeue_Success_Point
	@ID: __sequential.queue.peak().tag
	@Action:
		@Code:
		unsigned int _Old_Val = __sequential.queue.dequeue().data;
	@Post_check:
		_Old_Val == __RET__
	@End
*/
unsigned int dequeue(queue_t *q)
{
        unsigned int value;
        int success = 0;
        pointer head;
        pointer tail;
        pointer next;

        while (!success) {
                head = atomic_load_explicit(&q->head, acquire);
                tail = atomic_load_explicit(&q->tail, relaxed);
                next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, acquire);
                if (atomic_load_explicit(&q->head, relaxed) == head) {
                        if (get_ptr(head) == get_ptr(tail)) {
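                                /* Either the queue is empty, or the tail is
                                 * lagging behind the last node */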

                                /* Check for uninitialized 'next' */
                                MODEL_ASSERT(get_ptr(next) != POISON_IDX);

                                if (get_ptr(next) == 0) { // NULL
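                                        /* Empty queue; note that a return of
                                         * 0 is indistinguishable from a
                                         * stored value of 0 */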
                                        return 0; // NULL
                                }
                                atomic_compare_exchange_strong_explicit(&q->tail,
                                                &tail,
                                                MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
                                                release, relaxed);
                                thrd_yield();
                        } else {
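                                /* Read the value before the CAS; once the
                                 * head moves, another dequeuer may recycle
                                 * the node */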
                                value = load_32(&q->nodes[get_ptr(next)].value);
                                success = atomic_compare_exchange_strong_explicit(&q->head,
                                                &head,
                                                MAKE_POINTER(get_ptr(next), get_count(head) + 1),
                                                release, relaxed);
                                /**
                                        @Begin
                                        @Commit_point_define_check: __ATOMIC_RET__ == true
                                        @Label: Dequeue_Success_Point
                                        @End
                                */
                                if (!success)
                                        thrd_yield();
                        }
                }
        }
        reclaim(get_ptr(head));
        return value;
}