include ../benchmarks.mk
-TESTNAME = main
+BENCH := queue
-HEADERS = my_queue.h
-OBJECTS = main.o my_queue.o
+NORMAL_TESTS := testcase1
-all: $(TESTNAME)
+WILDCARD_TESTS := $(patsubst %, %_wildcard, $(NORMAL_TESTS))
-$(TESTNAME): $(HEADERS) $(OBJECTS)
- $(CC) -o $@ $(OBJECTS) $(CFLAGS) $(LDFLAGS)
+TESTS := $(NORMAL_TESTS) $(WILDCARD_TESTS)
-%.o: %.c
- $(CC) -c -o $@ $< $(CFLAGS)
+all: $(TESTS)
+
+$(BENCH).o : $(BENCH).c $(BENCH).h
+ $(CC) -o $@ $< $(CFLAGS) -c $(LDFLAGS)
+
+$(BENCH)_wildcard.o : $(BENCH)_wildcard.c $(BENCH).h
+ $(CC) -o $@ $< $(CFLAGS) -c $(LDFLAGS)
+
+$(WILDCARD_TESTS): %_wildcard : %.c $(BENCH)_wildcard.o
+ $(CC) -o $@ $^ $(CFLAGS) $(LDFLAGS)
+
+$(NORMAL_TESTS): % : %.c $(BENCH).o
+ $(CC) -o $@ $^ $(CFLAGS) $(LDFLAGS)
clean:
- rm -f $(TESTNAME) *.o
+ rm -f *.o *.d $(TESTS)
+++ /dev/null
-#include <stdlib.h>
-#include <stdio.h>
-#include <threads.h>
-
-#include "my_queue.h"
-#include "model-assert.h"
-
-static int procs = 2;
-static queue_t *queue;
-static thrd_t *threads;
-static unsigned int *input;
-static unsigned int *output;
-static int num_threads;
-
-int get_thread_num()
-{
- thrd_t curr = thrd_current();
- int i;
- for (i = 0; i < num_threads; i++)
- if (curr.priv == threads[i].priv)
- return i;
- MODEL_ASSERT(0);
- return -1;
-}
-
-bool succ1, succ2;
-
-static void main_task(void *param)
-{
- unsigned int val;
- int pid = *((int *)param);
- if (!pid) {
- input[0] = 17;
- enqueue(queue, input[0]);
- succ1 = dequeue(queue, &output[0]);
- //printf("Dequeue: %d\n", output[0]);
- } else {
- input[1] = 37;
- enqueue(queue, input[1]);
- succ2 = dequeue(queue, &output[1]);
- }
-}
-
-int user_main(int argc, char **argv)
-{
- int i;
- int *param;
- unsigned int in_sum = 0, out_sum = 0;
-
- queue = calloc(1, sizeof(*queue));
- MODEL_ASSERT(queue);
-
- num_threads = procs;
- threads = malloc(num_threads * sizeof(thrd_t));
- param = malloc(num_threads * sizeof(*param));
- input = calloc(num_threads, sizeof(*input));
- output = calloc(num_threads, sizeof(*output));
-
- init_queue(queue, num_threads);
- for (i = 0; i < num_threads; i++) {
- param[i] = i;
-		thrd_create(&threads[i], main_task, &param[i]);
- }
- for (i = 0; i < num_threads; i++)
- thrd_join(threads[i]);
-
- for (i = 0; i < num_threads; i++) {
- in_sum += input[i];
- out_sum += output[i];
- }
- for (i = 0; i < num_threads; i++)
- printf("input[%d] = %u\n", i, input[i]);
- for (i = 0; i < num_threads; i++)
- printf("output[%d] = %u\n", i, output[i]);
- if (succ1 && succ2)
- MODEL_ASSERT(in_sum == out_sum);
- else
- MODEL_ASSERT (false);
-
- free(param);
- free(threads);
- free(queue);
-
- return 0;
-}
+++ /dev/null
-#include <threads.h>
-#include <stdlib.h>
-#include "librace.h"
-#include "model-assert.h"
-
-#include "my_queue.h"
-
-#define relaxed memory_order_relaxed
-#define release memory_order_release
-#define acquire memory_order_acquire
-
-#define MAX_FREELIST 4 /* Each thread can own up to MAX_FREELIST free nodes */
-#define INITIAL_FREE 2 /* Each thread starts with INITIAL_FREE free nodes */
-
-#define POISON_IDX 0x666
-
-static unsigned int (*free_lists)[MAX_FREELIST];
-
-/* Search this thread's free list for a "new" node */
-static unsigned int new_node()
-{
- int i;
- int t = get_thread_num();
- for (i = 0; i < MAX_FREELIST; i++) {
- unsigned int node = load_32(&free_lists[t][i]);
- if (node) {
- store_32(&free_lists[t][i], 0);
- return node;
- }
- }
- /* free_list is empty? */
- MODEL_ASSERT(0);
- return 0;
-}
-
-/* Place this node index back on this thread's free list */
-static void reclaim(unsigned int node)
-{
- int i;
- int t = get_thread_num();
-
- /* Don't reclaim NULL node */
- MODEL_ASSERT(node);
-
- for (i = 0; i < MAX_FREELIST; i++) {
- /* Should never race with our own thread here */
- unsigned int idx = load_32(&free_lists[t][i]);
-
- /* Found empty spot in free list */
- if (idx == 0) {
- store_32(&free_lists[t][i], node);
- return;
- }
- }
- /* free list is full? */
- MODEL_ASSERT(0);
-}
-
-void init_queue(queue_t *q, int num_threads)
-{
- int i, j;
-
- /* Initialize each thread's free list with INITIAL_FREE pointers */
- /* The actual nodes are initialized with poison indexes */
- free_lists = malloc(num_threads * sizeof(*free_lists));
- for (i = 0; i < num_threads; i++) {
- for (j = 0; j < INITIAL_FREE; j++) {
- free_lists[i][j] = 2 + i * MAX_FREELIST + j;
- atomic_init(&q->nodes[free_lists[i][j]].next, MAKE_POINTER(POISON_IDX, 0));
- }
- }
-
- /* initialize queue */
- atomic_init(&q->head, MAKE_POINTER(1, 0));
- atomic_init(&q->tail, MAKE_POINTER(1, 0));
- atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
-}
-
-void enqueue(queue_t *q, unsigned int val)
-{
- int success = 0;
- unsigned int node;
- pointer tail;
- pointer next;
- pointer tmp;
-
- node = new_node();
- store_32(&q->nodes[node].value, val);
- tmp = atomic_load_explicit(&q->nodes[node].next, relaxed);
- set_ptr(&tmp, 0); // NULL
- atomic_store_explicit(&q->nodes[node].next, tmp, relaxed);
-
- while (!success) {
- tail = atomic_load_explicit(&q->tail, acquire);
- next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire);
- if (tail == atomic_load_explicit(&q->tail, relaxed)) {
-
- /* Check for uninitialized 'next' */
- MODEL_ASSERT(get_ptr(next) != POISON_IDX);
-
- if (get_ptr(next) == 0) { // == NULL
- pointer value = MAKE_POINTER(node, get_count(next) + 1);
- success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
- &next, value, release, release);
- }
- if (!success) {
- unsigned int ptr = get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire));
- pointer value = MAKE_POINTER(ptr,
- get_count(tail) + 1);
- atomic_compare_exchange_strong_explicit(&q->tail,
- &tail, value,
- release, release);
- thrd_yield();
- }
- }
- }
- atomic_compare_exchange_strong_explicit(&q->tail,
- &tail,
- MAKE_POINTER(node, get_count(tail) + 1),
- release, release);
-}
-
-bool dequeue(queue_t *q, unsigned int *retVal)
-{
- int success = 0;
- pointer head;
- pointer tail;
- pointer next;
-
- while (!success) {
- head = atomic_load_explicit(&q->head, acquire);
- tail = atomic_load_explicit(&q->tail, relaxed);
- next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, acquire);
- if (atomic_load_explicit(&q->head, relaxed) == head) {
- if (get_ptr(head) == get_ptr(tail)) {
-
- /* Check for uninitialized 'next' */
- MODEL_ASSERT(get_ptr(next) != POISON_IDX);
-
- if (get_ptr(next) == 0) { // NULL
- return false; // NULL
- }
- atomic_compare_exchange_strong_explicit(&q->tail,
- &tail,
- MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
- release, release);
- thrd_yield();
- } else {
- *retVal = load_32(&q->nodes[get_ptr(next)].value);
- success = atomic_compare_exchange_strong_explicit(&q->head,
- &head,
- MAKE_POINTER(get_ptr(next), get_count(head) + 1),
- release, release);
- if (!success)
- thrd_yield();
- }
- }
- }
- reclaim(get_ptr(head));
- return true;
-}
+++ /dev/null
-#include <stdatomic.h>
-
-#define MAX_NODES 0xf
-
-typedef unsigned long long pointer;
-typedef atomic_ullong pointer_t;
-
-#define MAKE_POINTER(ptr, count) ((((pointer)count) << 32) | ptr)
-#define PTR_MASK 0xffffffffLL
-#define COUNT_MASK (0xffffffffLL << 32)
-
-static inline void set_count(pointer *p, unsigned int val) { *p = (*p & ~COUNT_MASK) | ((pointer)val << 32); }
-static inline void set_ptr(pointer *p, unsigned int val) { *p = (*p & ~PTR_MASK) | val; }
-static inline unsigned int get_count(pointer p) { return (p & COUNT_MASK) >> 32; }
-static inline unsigned int get_ptr(pointer p) { return p & PTR_MASK; }
-
-typedef struct node {
- unsigned int value;
- pointer_t next;
-} node_t;
-
-typedef struct {
- pointer_t head;
- pointer_t tail;
- node_t nodes[MAX_NODES + 1];
-} queue_t;
-
-void init_queue(queue_t *q, int num_threads);
-void enqueue(queue_t *q, unsigned int val);
-bool dequeue(queue_t *q, unsigned int *retVal);
-int get_thread_num();
--- /dev/null
+#include <threads.h>
+#include <stdlib.h>
+#include "librace.h"
+#include "model-assert.h"
+
+#include "queue.h"
+
+#define relaxed memory_order_relaxed
+#define release memory_order_release
+#define acquire memory_order_acquire
+
+#define MAX_FREELIST 4 /* Each thread can own up to MAX_FREELIST free nodes */
+#define INITIAL_FREE 2 /* Each thread starts with INITIAL_FREE free nodes */
+
+#define POISON_IDX 0x666
+
+static unsigned int (*free_lists)[MAX_FREELIST];
+
+/* Search this thread's free list for a "new" node */
+static unsigned int new_node()
+{
+ int i;
+ int t = get_thread_num();
+ for (i = 0; i < MAX_FREELIST; i++) {
+ unsigned int node = load_32(&free_lists[t][i]);
+ if (node) {
+ store_32(&free_lists[t][i], 0);
+ return node;
+ }
+ }
+ /* free_list is empty? */
+ MODEL_ASSERT(0);
+ return 0;
+}
+
+/* Place this node index back on this thread's free list */
+static void reclaim(unsigned int node)
+{
+ int i;
+ int t = get_thread_num();
+
+ /* Don't reclaim NULL node */
+ MODEL_ASSERT(node);
+
+ for (i = 0; i < MAX_FREELIST; i++) {
+ /* Should never race with our own thread here */
+ unsigned int idx = load_32(&free_lists[t][i]);
+
+ /* Found empty spot in free list */
+ if (idx == 0) {
+ store_32(&free_lists[t][i], node);
+ return;
+ }
+ }
+ /* free list is full? */
+ MODEL_ASSERT(0);
+}
+
+void init_queue(queue_t *q, int num_threads)
+{
+ int i, j;
+
+ /* Initialize each thread's free list with INITIAL_FREE pointers */
+ /* The actual nodes are initialized with poison indexes */
+ free_lists = malloc(num_threads * sizeof(*free_lists));
+ for (i = 0; i < num_threads; i++) {
+ for (j = 0; j < INITIAL_FREE; j++) {
+ free_lists[i][j] = 2 + i * MAX_FREELIST + j;
+ atomic_init(&q->nodes[free_lists[i][j]].next, MAKE_POINTER(POISON_IDX, 0));
+ }
+ }
+
+ /* initialize queue */
+ atomic_init(&q->head, MAKE_POINTER(1, 0));
+ atomic_init(&q->tail, MAKE_POINTER(1, 0));
+ atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
+}
+
+void enqueue(queue_t *q, unsigned int val)
+{
+ int success = 0;
+ unsigned int node;
+ pointer tail;
+ pointer next;
+ pointer tmp;
+
+ node = new_node();
+ store_32(&q->nodes[node].value, val);
+ tmp = atomic_load_explicit(&q->nodes[node].next, relaxed);
+ set_ptr(&tmp, 0); // NULL
+ atomic_store_explicit(&q->nodes[node].next, tmp, relaxed);
+
+ while (!success) {
+ tail = atomic_load_explicit(&q->tail, acquire);
+ next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire);
+ if (tail == atomic_load_explicit(&q->tail, relaxed)) {
+
+ /* Check for uninitialized 'next' */
+ MODEL_ASSERT(get_ptr(next) != POISON_IDX);
+
+ if (get_ptr(next) == 0) { // == NULL
+ pointer value = MAKE_POINTER(node, get_count(next) + 1);
+ success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
+ &next, value, release, release);
+ }
+ if (!success) {
+ unsigned int ptr = get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire));
+ pointer value = MAKE_POINTER(ptr,
+ get_count(tail) + 1);
+ atomic_compare_exchange_strong_explicit(&q->tail,
+ &tail, value,
+ release, release);
+ thrd_yield();
+ }
+ }
+ }
+ atomic_compare_exchange_strong_explicit(&q->tail,
+ &tail,
+ MAKE_POINTER(node, get_count(tail) + 1),
+ release, release);
+}
+
+bool dequeue(queue_t *q, unsigned int *retVal)
+{
+ int success = 0;
+ pointer head;
+ pointer tail;
+ pointer next;
+
+ while (!success) {
+ head = atomic_load_explicit(&q->head, acquire);
+ tail = atomic_load_explicit(&q->tail, relaxed);
+ next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, acquire);
+ if (atomic_load_explicit(&q->head, relaxed) == head) {
+ if (get_ptr(head) == get_ptr(tail)) {
+
+ /* Check for uninitialized 'next' */
+ MODEL_ASSERT(get_ptr(next) != POISON_IDX);
+
+ if (get_ptr(next) == 0) { // NULL
+ return false; // NULL
+ }
+ atomic_compare_exchange_strong_explicit(&q->tail,
+ &tail,
+ MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
+ release, release);
+ thrd_yield();
+ } else {
+ *retVal = load_32(&q->nodes[get_ptr(next)].value);
+ success = atomic_compare_exchange_strong_explicit(&q->head,
+ &head,
+ MAKE_POINTER(get_ptr(next), get_count(head) + 1),
+ release, release);
+ if (!success)
+ thrd_yield();
+ }
+ }
+ }
+ reclaim(get_ptr(head));
+ return true;
+}
--- /dev/null
+#include <stdatomic.h>
+
+#define MAX_NODES 0xf
+
+typedef unsigned long long pointer;
+typedef atomic_ullong pointer_t;
+
+#define MAKE_POINTER(ptr, count) ((((pointer)count) << 32) | ptr)
+#define PTR_MASK 0xffffffffLL
+#define COUNT_MASK (0xffffffffLL << 32)
+
+static inline void set_count(pointer *p, unsigned int val) { *p = (*p & ~COUNT_MASK) | ((pointer)val << 32); }
+static inline void set_ptr(pointer *p, unsigned int val) { *p = (*p & ~PTR_MASK) | val; }
+static inline unsigned int get_count(pointer p) { return (p & COUNT_MASK) >> 32; }
+static inline unsigned int get_ptr(pointer p) { return p & PTR_MASK; }
+
+typedef struct node {
+ unsigned int value;
+ pointer_t next;
+} node_t;
+
+typedef struct {
+ pointer_t head;
+ pointer_t tail;
+ node_t nodes[MAX_NODES + 1];
+} queue_t;
+
+void init_queue(queue_t *q, int num_threads);
+void enqueue(queue_t *q, unsigned int val);
+bool dequeue(queue_t *q, unsigned int *retVal);
+int get_thread_num();
--- /dev/null
+#include <threads.h>
+#include <stdlib.h>
+#include "librace.h"
+#include "model-assert.h"
+
+#include "queue.h"
+#include "wildcard.h"
+
+#define MAX_FREELIST 4 /* Each thread can own up to MAX_FREELIST free nodes */
+#define INITIAL_FREE 2 /* Each thread starts with INITIAL_FREE free nodes */
+
+#define POISON_IDX 0x666
+
+static unsigned int (*free_lists)[MAX_FREELIST];
+
+/* Search this thread's free list for a "new" node */
+static unsigned int new_node()
+{
+ int i;
+ int t = get_thread_num();
+ for (i = 0; i < MAX_FREELIST; i++) {
+ //unsigned int node = load_32(&free_lists[t][i]);
+ unsigned int node = free_lists[t][i];
+ if (node) {
+ //store_32(&free_lists[t][i], 0);
+ free_lists[t][i] = 0;
+ return node;
+ }
+ }
+ /* free_list is empty? */
+ MODEL_ASSERT(0);
+ return 0;
+}
+
+/* Place this node index back on this thread's free list */
+static void reclaim(unsigned int node)
+{
+ int i;
+ int t = get_thread_num();
+
+ /* Don't reclaim NULL node */
+ //MODEL_ASSERT(node);
+
+ for (i = 0; i < MAX_FREELIST; i++) {
+ /* Should never race with our own thread here */
+ //unsigned int idx = load_32(&free_lists[t][i]);
+ unsigned int idx = free_lists[t][i];
+
+ /* Found empty spot in free list */
+ if (idx == 0) {
+ //store_32(&free_lists[t][i], node);
+ free_lists[t][i] = node;
+ return;
+ }
+ }
+ /* free list is full? */
+ MODEL_ASSERT(0);
+}
+
+void init_queue(queue_t *q, int num_threads)
+{
+ int i, j;
+
+ /* Initialize each thread's free list with INITIAL_FREE pointers */
+ /* The actual nodes are initialized with poison indexes */
+ free_lists = malloc(num_threads * sizeof(*free_lists));
+ for (i = 0; i < num_threads; i++) {
+ for (j = 0; j < INITIAL_FREE; j++) {
+ free_lists[i][j] = 2 + i * MAX_FREELIST + j;
+ atomic_init(&q->nodes[free_lists[i][j]].next, MAKE_POINTER(POISON_IDX, 0));
+ }
+ }
+
+ /* initialize queue */
+ atomic_init(&q->head, MAKE_POINTER(1, 0));
+ atomic_init(&q->tail, MAKE_POINTER(1, 0));
+ atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
+}
+
+void enqueue(queue_t *q, unsigned int val)
+{
+ int success = 0;
+ unsigned int node;
+ pointer tail;
+ pointer next;
+ pointer tmp;
+
+ node = new_node();
+ //store_32(&q->nodes[node].value, val);
+ q->nodes[node].value = val;
+ tmp = atomic_load_explicit(&q->nodes[node].next, wildcard(1)); // relaxed
+ set_ptr(&tmp, 0); // NULL
+ atomic_store_explicit(&q->nodes[node].next, tmp, wildcard(2)); // relaxed
+
+ while (!success) {
+ tail = atomic_load_explicit(&q->tail, wildcard(3)); // acquire
+ // FIXME: SCFence makes this relaxed
+ next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, wildcard(4)); //acquire
+ if (tail == atomic_load_explicit(&q->tail, wildcard(5))) { // relaxed
+
+ /* Check for uninitialized 'next' */
+ //MODEL_ASSERT(get_ptr(next) != POISON_IDX);
+
+ if (get_ptr(next) == 0) { // == NULL
+ pointer value = MAKE_POINTER(node, get_count(next) + 1);
+ success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
+ &next, value, wildcard(6), wildcard(7)); // release & relaxed
+ }
+ if (!success) {
+ unsigned int ptr =
+ get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, wildcard(8))); // acquire
+ pointer value = MAKE_POINTER(ptr,
+ get_count(tail) + 1);
+ atomic_compare_exchange_strong_explicit(&q->tail,
+ &tail, value,
+ wildcard(9), wildcard(10)); // release & relaxed
+ thrd_yield();
+ }
+ }
+ }
+ atomic_compare_exchange_strong_explicit(&q->tail,
+ &tail,
+ MAKE_POINTER(node, get_count(tail) + 1),
+ wildcard(11), wildcard(12)); // release & relaxed
+}
+
+bool dequeue(queue_t *q, unsigned int *retVal)
+{
+ unsigned int value;
+ int success = 0;
+ pointer head;
+ pointer tail;
+ pointer next;
+
+ while (!success) {
+ head = atomic_load_explicit(&q->head, wildcard(13)); // acquire
+ // FIXME: SCFence makes this acquire
+ tail = atomic_load_explicit(&q->tail, wildcard(14)); // relaxed
+ next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, wildcard(15)); // acquire
+ if (atomic_load_explicit(&q->head, wildcard(16)) == head) { // relaxed
+ if (get_ptr(head) == get_ptr(tail)) {
+
+ /* Check for uninitialized 'next' */
+ //MODEL_ASSERT(get_ptr(next) != POISON_IDX);
+
+ if (get_ptr(next) == 0) { // NULL
+ return false; // NULL
+ }
+ atomic_compare_exchange_strong_explicit(&q->tail,
+ &tail,
+ MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
+ wildcard(17), wildcard(18)); // release & relaxed
+ thrd_yield();
+ } else {
+ //value = load_32(&q->nodes[get_ptr(next)].value);
+ value = q->nodes[get_ptr(next)].value;
+ // FIXME: SCFence makes this relaxed
+ success = atomic_compare_exchange_strong_explicit(&q->head,
+ &head, MAKE_POINTER(get_ptr(next), get_count(head) + 1),
+ wildcard(19), wildcard(20)); // release & relaxed
+ if (!success)
+ thrd_yield();
+ }
+ }
+ }
+ reclaim(get_ptr(head));
+ *retVal = value;
+ return true;
+}
--- /dev/null
+#include <stdlib.h>
+#include <stdio.h>
+#include <threads.h>
+
+#include "queue.h"
+#include "model-assert.h"
+
+static int procs = 2;
+static queue_t *queue;
+static thrd_t *threads;
+static unsigned int *input;
+static unsigned int *output;
+static int num_threads;
+
+int get_thread_num()
+{
+ thrd_t curr = thrd_current();
+ int i;
+ for (i = 0; i < num_threads; i++)
+ if (curr.priv == threads[i].priv)
+ return i;
+ MODEL_ASSERT(0);
+ return -1;
+}
+
+bool succ1, succ2;
+
+static void main_task(void *param)
+{
+ unsigned int val;
+ int pid = *((int *)param);
+ if (!pid) {
+ input[0] = 17;
+ enqueue(queue, input[0]);
+ succ1 = dequeue(queue, &output[0]);
+ //printf("Dequeue: %d\n", output[0]);
+ } else {
+ input[1] = 37;
+ enqueue(queue, input[1]);
+ succ2 = dequeue(queue, &output[1]);
+ }
+}
+
+int user_main(int argc, char **argv)
+{
+ int i;
+ int *param;
+ unsigned int in_sum = 0, out_sum = 0;
+
+ queue = calloc(1, sizeof(*queue));
+ MODEL_ASSERT(queue);
+
+ num_threads = procs;
+ threads = malloc(num_threads * sizeof(thrd_t));
+ param = malloc(num_threads * sizeof(*param));
+ input = calloc(num_threads, sizeof(*input));
+ output = calloc(num_threads, sizeof(*output));
+
+ init_queue(queue, num_threads);
+ for (i = 0; i < num_threads; i++) {
+ param[i] = i;
+		thrd_create(&threads[i], main_task, &param[i]);
+ }
+ for (i = 0; i < num_threads; i++)
+ thrd_join(threads[i]);
+
+ for (i = 0; i < num_threads; i++) {
+ in_sum += input[i];
+ out_sum += output[i];
+ }
+ for (i = 0; i < num_threads; i++)
+ printf("input[%d] = %u\n", i, input[i]);
+ for (i = 0; i < num_threads; i++)
+ printf("output[%d] = %u\n", i, output[i]);
+ if (succ1 && succ2)
+ MODEL_ASSERT(in_sum == out_sum);
+ else
+ MODEL_ASSERT (false);
+
+ free(param);
+ free(threads);
+ free(queue);
+
+ return 0;
+}