4 #include "model-assert.h"
8 #define relaxed memory_order_relaxed
9 #define release memory_order_release
10 #define acquire memory_order_acquire
12 #define MAX_FREELIST 4 /* Each thread can own up to MAX_FREELIST free nodes */
13 #define INITIAL_FREE 2 /* Each thread starts with INITIAL_FREE free nodes */
15 #define POISON_IDX 0x666
17 static unsigned int (*free_lists)[MAX_FREELIST];
19 /* Search this thread's free list for a "new" node */
/* NOTE(review): this chunk is sampled — several body lines of this function
 * are missing here (braces do not balance); comments below describe only what
 * the visible lines establish. */
20 static unsigned int new_node()
/* Pops a node index from the calling thread's private free list.
 * Lists are per-thread (indexed by get_thread_num()), so allocation is
 * presumably race-free against other threads — TODO confirm in full source. */
23 int t = get_thread_num();
24 for (i = 0; i < MAX_FREELIST; i++) {
25 unsigned int node = load_32(&free_lists[t][i]);
/* Claim the slot by overwriting it with 0 (0 doubles as "empty"/NULL index,
 * consistent with the NULL checks via get_ptr(...) == 0 elsewhere). */
27 store_32(&free_lists[t][i], 0);
31 /* free_list is empty? */
36 /* Place this node index back on this thread's free list */
/* NOTE(review): sampled chunk — some body lines (e.g. the NULL-node guard's
 * statement and the loop's closing braces) are missing from this view.
 * Stores the index into the first empty (zero) slot of the caller's
 * per-thread list; node index 0 is the NULL sentinel and is not reclaimed. */
37 static void reclaim(unsigned int node)
40 int t = get_thread_num();
42 /* Don't reclaim NULL node */
45 for (i = 0; i < MAX_FREELIST; i++) {
46 /* Should never race with our own thread here */
47 unsigned int idx = load_32(&free_lists[t][i]);
49 /* Found empty spot in free list */
51 store_32(&free_lists[t][i], node);
55 /* free list is full? */
/* Initializes the queue and the per-thread free lists.
 * Node-index convention established below: index 0 is NULL, index 1 is the
 * initial dummy node (head and tail both start at 1, its next is 0), and
 * real nodes start at index 2 (see the "2 + i * MAX_FREELIST + j" formula).
 * Free nodes get a POISON_IDX next pointer so use of an uninitialized node
 * trips the MODEL_ASSERT checks in enqueue/dequeue. */
59 void init_queue(queue_t *q, int num_threads)
63 /* Initialize each thread's free list with INITIAL_FREE pointers */
64 /* The actual nodes are initialized with poison indexes */
/* NOTE(review): malloc result is not checked before use — acceptable in a
 * model-checking benchmark, but worth confirming that is intentional. */
65 free_lists = malloc(num_threads * sizeof(*free_lists));
66 for (i = 0; i < num_threads; i++) {
67 for (j = 0; j < INITIAL_FREE; j++) {
68 free_lists[i][j] = 2 + i * MAX_FREELIST + j;
69 atomic_init(&q->nodes[free_lists[i][j]].next, MAKE_POINTER(POISON_IDX, 0));
73 /* initialize queue */
74 atomic_init(&q->head, MAKE_POINTER(1, 0));
75 atomic_init(&q->tail, MAKE_POINTER(1, 0));
76 atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
83 typedef struct tag_elem {
87 tag_elem(Tag _id, unsigned int _data) {
93 spec_queue<tag_elem_t> queue;
96 queue = spec_queue<tag_elem_t>();
99 # Only check the happens-before relationship according to the ID of the
100 # commit_point_set. For commit point sets that share the same ID, A -> B
101 # means that B happens after the previous A.
109 @Commit_point_set: Enqueue_Success_Point
110 @ID: __sequential.tag.getCurAndInc()
112 # __ID__ is an internal macro that refers to the id of the current
115 __sequential.queue.enqueue(tag_elem_t(__ID__, val));
/* Michael-Scott-style lock-free enqueue (fragment; sampled chunk — the
 * allocation of 'node' via new_node(), the retry loop header, and several
 * closing braces are missing from this view).
 * Pointers pack an index plus a modification count (MAKE_POINTER /
 * get_count), and every CAS installs count+1 — the counter guards against
 * ABA on reused node indices. */
118 void enqueue(queue_t *q, unsigned int val)
/* Prepare the freshly allocated node: store the value, then clear its
 * next pointer to NULL (index 0) before publishing it. */
127 store_32(&q->nodes[node].value, val);
128 tmp = atomic_load_explicit(&q->nodes[node].next, relaxed);
129 set_ptr(&tmp, 0); // NULL
130 atomic_store_explicit(&q->nodes[node].next, tmp, relaxed);
/* Snapshot tail and tail->next, then re-read tail to confirm the
 * snapshot is still consistent before acting on it. */
133 tail = atomic_load_explicit(&q->tail, acquire);
134 next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire);
135 if (tail == atomic_load_explicit(&q->tail, relaxed)) {
137 /* Check for uninitialized 'next' */
138 MODEL_ASSERT(get_ptr(next) != POISON_IDX);
/* Tail was the last node: try to link the new node after it.
 * NOTE(review): C11 forbids memory_order_release as a CAS *failure*
 * order; this is presumably tolerated by the model checker's own
 * atomics — confirm before compiling against a standard libc. */
140 if (get_ptr(next) == 0) { // == NULL
141 pointer value = MAKE_POINTER(node, get_count(next) + 1);
142 success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
143 &next, value, release, release);
/* Tail was lagging: help by swinging q->tail forward to tail->next
 * (the commit_success result feeds the annotation below). */
146 unsigned int ptr = get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire));
147 pointer value = MAKE_POINTER(ptr,
148 get_count(tail) + 1);
149 int commit_success = 0;
150 commit_success = atomic_compare_exchange_strong_explicit(&q->tail,
151 &tail, value, release, release);
154 @Commit_point_define_check: __ATOMIC_RET__ == true
155 @Label: Enqueue_Success_Point
/* After linking succeeded, try to swing q->tail to the new node;
 * failure is benign — another thread already helped. */
162 atomic_compare_exchange_strong_explicit(&q->tail,
164 MAKE_POINTER(node, get_count(tail) + 1),
171 @Commit_point_set: Dequeue_Success_Point
172 @ID: __sequential.queue.peak().tag
175 unsigned int _Old_Val = __sequential.queue.dequeue().data;
/* Michael-Scott-style lock-free dequeue (fragment; sampled chunk — the
 * retry loop header, the empty-queue return path, and closing braces are
 * missing from this view).
 * Reads the value from head->next (head is a dummy), CASes q->head forward,
 * then recycles the old dummy via reclaim(). Counted pointers (get_count+1)
 * guard against ABA, as in enqueue. */
180 unsigned int dequeue(queue_t *q)
/* Snapshot head, tail, and head->next, then re-read head to validate
 * the snapshot before using it. */
189 head = atomic_load_explicit(&q->head, acquire);
190 tail = atomic_load_explicit(&q->tail, relaxed);
191 next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, acquire);
192 if (atomic_load_explicit(&q->head, relaxed) == head) {
/* head == tail: queue is empty, or tail is lagging behind. */
193 if (get_ptr(head) == get_ptr(tail)) {
195 /* Check for uninitialized 'next' */
196 MODEL_ASSERT(get_ptr(next) != POISON_IDX);
198 if (get_ptr(next) == 0) { // NULL
/* Tail lagging: help advance q->tail to next before retrying. */
201 atomic_compare_exchange_strong_explicit(&q->tail,
203 MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
/* Read the value *before* the CAS: once q->head moves, another thread
 * may reclaim and overwrite this node. */
207 value = load_32(&q->nodes[get_ptr(next)].value);
208 success = atomic_compare_exchange_strong_explicit(&q->head,
210 MAKE_POINTER(get_ptr(next), get_count(head) + 1),
214 @Commit_point_define_check: __ATOMIC_RET__ == true
215 @Label: Dequeue_Success_Point
/* The old dummy node (previous head) is now ours to recycle. */
223 reclaim(get_ptr(head));