// Multi-producer / multi-consumer bounded FIFO ring buffer, lock-free via a
// single packed ticket word. NOTE(review): this view of the file is elided
// (the leading original-line numbers jump), so the struct's opening brace,
// access specifiers, and some members/locals are not visible here.
5 template <typename t_element, size_t t_size>
6 struct mpmc_boundq_1_alt
10 // elements should generally be cache-line-size padded :
11 t_element m_array[t_size];
13 // rdwr counts the reads & writes that have started
// Packed 16+16 counter: high 16 bits = reads started, low 16 bits = writes
// started (see the `rdwr>>16`/`&0xFFFF` unpacking and the CAS update forms
// in read_fetch()/write_prepare() below). With 16-bit halves, t_size must be
// well below 0x10000 for the full/empty tests to be meaningful.
14 atomic<unsigned int> m_rdwr;
15 // "read" and "written" count the number completed
16 atomic<unsigned int> m_read;
17 atomic<unsigned int> m_written;
// NOTE(review): Order_queue/spec_queue appear to belong to the CDSSpec
// checking harness (used by the @Commit_point annotations), not to the
// queue's runtime logic — confirm against the harness headers.
31 Order_queue<unsigned int*> spec_queue;
34 //-----------------------------------------------------
// Consumer side: claim the oldest written slot and return a pointer to it.
// The caller presumably consumes *p and then calls a read_consume()-style
// completion elsewhere — the elided lines would confirm; only the claim +
// wait + commit steps are visible here. The empty-queue branch body (orig
// lines 44-47) is elided; it most likely returns NULL — TODO confirm.
36 t_element * read_fetch() {
// Snapshot the packed (reads-started << 16 | writes-started) ticket word.
// wildcard(n) = memory order left symbolic for the model checker to infer.
37 unsigned int rdwr = m_rdwr.load(wildcard(1)); // acquire, but can be relaxed
// Unpack reads-started; the declarations of rd/wr (orig lines 38-39, where
// wr is presumably extracted as rdwr & 0xFFFF) are elided from this view.
40 rd = (rdwr>>16) & 0xFFFF;
41 if ( wr == rd ) { // empty
// Claim one read ticket by bumping the high 16-bit half; on CAS failure
// rdwr is reloaded and the (elided) enclosing loop retries.
48 if ( m_rdwr.compare_exchange_weak(rdwr,rdwr+(1<<16),wildcard(2)) )
// Spin until the producer that owns slot `rd` has published it, i.e. the
// completed-writes counter has advanced past our write ticket.
57 while ( (m_written.load(wildcard(3)) & 0xFFFF) != wr ) {
61 t_element * p = & ( m_array[ rd % t_size ] );
// CDSSpec commit-point annotations: check that the element we return is the
// one at the head of the specification's FIFO order.
64 @Commit_point_Check: true
67 spec_queue.peak() == p
// Mark this read complete so writers waiting on slot reuse can proceed.
73 m_read.fetch_add(1,wildcard(4)); // release
75 @Commit_point_define: true
76 @Label: Read_Consume_Success
84 //-----------------------------------------------------
// Producer side: claim the next free slot and return a pointer to it for the
// caller to fill; mirrors read_fetch(). The publish step (m_written
// increment) is visible at the bottom, though the lines between (orig
// 110-119) are elided, so whether it sits in this function or in a separate
// write_publish() cannot be confirmed from this view.
86 t_element * write_prepare() {
// Snapshot the packed (reads-started << 16 | writes-started) ticket word.
87 unsigned int rdwr = m_rdwr.load(wildcard(5)); // acquire, but can be relaxed
// rd/wr declarations and the wr = rdwr & 0xFFFF unpack are elided (orig 88-89).
90 rd = (rdwr>>16) & 0xFFFF;
// Full when writes-started has lapped reads-started by the capacity
// (mod 2^16 arithmetic on the 16-bit halves).
93 if ( wr == ((rd + t_size)&0xFFFF) ) // full
// Claim one write ticket: keep rd in the high half, bump wr (mod 2^16) in
// the low half; CAS failure retries via the (elided) enclosing loop.
96 if ( m_rdwr.compare_exchange_weak(rdwr,(rd<<16) | ((wr+1)&0xFFFF),wildcard(6)) )
// Spin until the reader that previously occupied slot `wr % t_size` has
// finished, i.e. completed-reads has caught up to our read ticket.
104 while ( (m_read.load(wildcard(7)) & 0xFFFF) != rd ) { // acquire
109 t_element * p = & ( m_array[ wr % t_size ] );
// CDSSpec commit point: record the slot in the spec's FIFO order.
112 @Commit_point_check: ANY
113 @Action: spec_queue.add(p);
// Publish: readers spinning in read_fetch() observe this increment.
120 m_written.fetch_add(1,wildcard(8)); // release
123 //-----------------------------------------------------
128 typedef struct mpmc_boundq_1_alt<int, 4> queue_t;