BENCH := queue
-NORMAL_TESTS := testcase1 testcase2
+NORMAL_TESTS := testcase1 testcase2 testcase3
WILDCARD_TESTS := $(patsubst %, %_wildcard, $(NORMAL_TESTS))
return 0;
}
+/* Simulate the fact that when a node is recycled, it may be reassigned to the
+ * same queue or reused for some other purpose */
+void simulateRecycledNodeUpdate(queue_t *q, unsigned int node) {
+ atomic_store_explicit(&q->nodes[node].next, -1, memory_order_release);
+}
+
+
/* Place this node index back on this thread's free list */
static void reclaim(unsigned int node)
{
atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
}
-void enqueue(queue_t *q, unsigned int val)
+void enqueue(queue_t *q, unsigned int val, bool yield)
{
int success = 0;
unsigned int node;
release, release);
}
-bool dequeue(queue_t *q, unsigned int *retVal)
+bool dequeue(queue_t *q, unsigned int *retVal, unsigned int *reclaimNode)
{
int success = 0;
pointer head;
}
}
}
+ *reclaimNode = get_ptr(head);
reclaim(get_ptr(head));
return true;
}
typedef struct {
pointer_t head;
pointer_t tail;
- node_t nodes[MAX_NODES + 1];
+ node_t nodes[MAX_NODES + 2];
} queue_t;
void init_queue(queue_t *q, int num_threads);
-void enqueue(queue_t *q, unsigned int val);
-bool dequeue(queue_t *q, unsigned int *retVal);
+void enqueue(queue_t *q, unsigned int val, bool yield);
+bool dequeue(queue_t *q, unsigned int *retVal, unsigned int *reclaimNode);
+
+void simulateRecycledNodeUpdate(queue_t *q, unsigned int node);
+
int get_thread_num();
return 0;
}
+/* Simulate the fact that when a node is recycled, it may be reassigned to the
+ * same queue or reused for some other purpose */
+void simulateRecycledNodeUpdate(queue_t *q, unsigned int node) {
+ atomic_store_explicit(&q->nodes[node].next, -1, memory_order_release);
+}
+
/* Place this node index back on this thread's free list */
static void reclaim(unsigned int node)
{
atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
}
-void enqueue(queue_t *q, unsigned int val)
+void enqueue(queue_t *q, unsigned int val, bool yield)
{
int success = 0;
unsigned int node;
while (!success) {
tail = atomic_load_explicit(&q->tail, wildcard(3)); // acquire
- // FIXME: SCFence makes this relaxed
+ // FIXME: SCFence makes this relaxed
+ if (yield)
+ thrd_yield();
next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, wildcard(4)); //acquire
if (tail == atomic_load_explicit(&q->tail, wildcard(5))) { // relaxed
if (get_ptr(next) == 0) { // == NULL
pointer value = MAKE_POINTER(node, get_count(next) + 1);
-
- // ***********************************
- // Inference analysis results have two choices here, it either
- // makes wildcard(6) acq_rel and wildcard(8) relaxed or
- // wildcard(6) release and wildcard(8) acquire. The
- // synchronization here is for enqueue() to dequeue(), and
- // actually either synchronization options will work!!!
success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
&next, value, wildcard(6), wildcard(7)); // release & relaxed
}
wildcard(11), wildcard(12)); // release & relaxed
}
-bool dequeue(queue_t *q, unsigned int *retVal)
+bool dequeue(queue_t *q, unsigned int *retVal, unsigned int *reclaimNode)
{
unsigned int value;
int success = 0;
}
}
}
+
+ *reclaimNode = get_ptr(head);
reclaim(get_ptr(head));
*retVal = value;
return true;
--- /dev/null
+
+Result 1:
+wildcard 1 -> memory_order_relaxed
+wildcard 2 -> memory_order_relaxed
+wildcard 3 -> memory_order_acquire
+wildcard 4 -> memory_order_relaxed
+wildcard 5 -> memory_order_relaxed
+wildcard 6 -> memory_order_acq_rel
+wildcard 8 -> memory_order_relaxed
+wildcard 9 -> memory_order_release
+wildcard 11 -> memory_order_release
+wildcard 13 -> memory_order_acquire
+wildcard 14 -> memory_order_acquire
+wildcard 15 -> memory_order_acquire
+wildcard 16 -> memory_order_relaxed
+wildcard 17 -> memory_order_release
+wildcard 19 -> memory_order_release
--- /dev/null
+Result 0:
+wildcard 1 -> memory_order_relaxed
+wildcard 2 -> memory_order_relaxed
+wildcard 3 -> memory_order_acquire
+wildcard 4 -> memory_order_relaxed
+wildcard 5 -> memory_order_relaxed
+wildcard 6 -> memory_order_acq_rel
+wildcard 8 -> memory_order_acquire
+wildcard 9 -> memory_order_release
+wildcard 11 -> memory_order_release
+wildcard 13 -> memory_order_acquire
+wildcard 14 -> memory_order_acquire
+wildcard 15 -> memory_order_acquire
+wildcard 16 -> memory_order_relaxed
+wildcard 17 -> memory_order_release
+wildcard 19 -> memory_order_release
bool succ1, succ2;
atomic_int x[3];
int idx1, idx2;
+unsigned int reclaimNode;
static int procs = 4;
static void main_task(void *param)
int pid = *((int *)param);
if (pid % 4 == 0) {
atomic_store_explicit(&x[0], 1, memory_order_relaxed);
- enqueue(queue, 0);
+ enqueue(queue, 0, false);
} else if (pid % 4 == 1) {
atomic_store_explicit(&x[1], 1, memory_order_relaxed);
- enqueue(queue, 1);
+ enqueue(queue, 1, false);
} else if (pid % 4 == 2) {
- succ1 = dequeue(queue, &idx1);
+ succ1 = dequeue(queue, &idx1, &reclaimNode);
if (succ1) {
atomic_load_explicit(&x[idx1], memory_order_relaxed);
}
} else if (pid % 4 == 3) {
/*
- succ2 = dequeue(queue, &idx2);
+ succ2 = dequeue(queue, &idx2, &reclaimNode);
if (succ2) {
atomic_load_explicit(&x[idx2], memory_order_relaxed);
}
bool succ1, succ2;
atomic_int x[3];
int idx1, idx2;
+unsigned int reclaimNode;
static int procs = 4;
static void main_task(void *param)
*/
} else if (pid % 4 == 1) {
atomic_store_explicit(&x[1], 1, memory_order_relaxed);
- enqueue(queue, 1);
+ enqueue(queue, 1, false);
} else if (pid % 4 == 2) {
- succ1 = dequeue(queue, &idx1);
+ succ1 = dequeue(queue, &idx1, &reclaimNode);
if (succ1) {
atomic_load_explicit(&x[idx1], memory_order_relaxed);
}
} else if (pid % 4 == 3) {
- succ2 = dequeue(queue, &idx2);
+ succ2 = dequeue(queue, &idx2, &reclaimNode);
if (succ2) {
atomic_load_explicit(&x[idx2], memory_order_relaxed);
}
bool succ1, succ2;
atomic_int x[3];
int idx1, idx2;
+unsigned int reclaimNode1, reclaimNode2;
-static int procs = 4;
+static int procs = 2;
static void main_task(void *param)
{
unsigned int val;
int pid = *((int *)param);
if (pid % 4 == 0) {
- atomic_store_explicit(&x[0], 1, memory_order_relaxed);
- enqueue(queue, 0);
+ //atomic_store_explicit(&x[0], 1, memory_order_relaxed);
+ enqueue(queue, 0, true);
- succ1 = dequeue(queue, &idx1);
+
+ } else if (pid % 4 == 1) {
+ //atomic_store_explicit(&x[1], 1, memory_order_relaxed);
+ enqueue(queue, 1, false);
+ enqueue(queue, 1, false);
+
+ succ1 = dequeue(queue, &idx1, &reclaimNode1);
if (succ1) {
- atomic_load_explicit(&x[idx1], memory_order_relaxed);
+ //atomic_load_explicit(&x[idx1], memory_order_relaxed);
}
- } else if (pid % 4 == 1) {
- atomic_store_explicit(&x[1], 1, memory_order_relaxed);
- enqueue(queue, 1);
- succ2 = dequeue(queue, &idx2);
+ succ2 = dequeue(queue, &idx2, &reclaimNode2);
if (succ2) {
- atomic_load_explicit(&x[idx2], memory_order_relaxed);
+ //atomic_load_explicit(&x[idx2], memory_order_relaxed);
}
+ simulateRecycledNodeUpdate(queue, reclaimNode1);
+
+
+
} else if (pid % 4 == 2) {
} else if (pid % 4 == 3) {