// Fixed-capacity multi-producer / multi-consumer FIFO ring buffer.
// Coordination scheme (from the members and the bit-twiddling below):
// a single atomic word packs two 16-bit "started" counters, and two
// separate atomics count "completed" operations, so a reserve/complete
// protocol can be run by many readers and writers concurrently.
// NOTE(review): this listing is an excerpt — original line numbers are
// fused into each line and intervening lines are elided (e.g. the struct
// opening brace and the constructor body). It will not compile as-is.
4 template <typename t_element, size_t t_size>
5 struct mpmc_boundq_1_alt
// Slot storage. Elements are plain (nonatomic) payloads; exclusive access
// to a slot is granted by the counter protocol in read_fetch/write_prepare.
9 // elements should generally be cache-line-size padded :
10 nonatomic<t_element> m_array[t_size];
// Packed counters of operations that have STARTED:
//   low 16 bits  = writes started (see "(rd<<16) | ((wr+1)&0xFFFF)" below),
//   high 16 bits = reads started  (see "rd = (rdwr>>16) & 0xFFFF" below).
// Reserving a slot is a compare_exchange on this single word.
12 // rdwr counts the reads & writes that have started
13 atomic<unsigned int> m_rdwr;
// Counters of operations that have COMPLETED; advanced with
// fetch_add(..., mo_release) so the other side's acquire spin-loads
// observe the slot contents written before completion.
14 // "read" and "written" count the number completed
15 atomic<unsigned int> m_read;
16 atomic<unsigned int> m_written;
// All counters start at zero: queue empty, no operations in flight.
// (Constructor body elided in this excerpt.)
20 mpmc_boundq_1_alt() : m_rdwr(0), m_read(0), m_written(0)
24 //-----------------------------------------------------
// Reserve the next readable slot and return a pointer to it.
// Protocol: CAS-bump the reads-started half of m_rdwr to claim a slot,
// then spin until the writer that filled that slot has completed.
// NOTE(review): several lines are elided in this excerpt — the
// declarations of rd/wr, the CAS retry loop, the empty-queue return
// value, the spin-loop body, and the final "return p;". The matching
// completion call (the m_read.fetch_add at the end) most likely belongs
// to a separate consume method whose signature was also elided — confirm
// against the full source.
26 nonatomic<t_element> * read_fetch() {
// Snapshot of the packed started-counters word.
27 unsigned int rdwr = m_rdwr.load(mo_acquire);
// High 16 bits = number of reads started. (wr is presumably extracted
// from the low 16 bits on an elided line — TODO confirm.)
30 rd = (rdwr>>16) & 0xFFFF;
// Empty when every started write has already been claimed by a reader.
33 if ( wr == rd ) // empty
// Claim slot rd by incrementing the reads-started half (+1 in bit 16).
// compare_exchange_weak may fail spuriously, so this presumably sits in
// a retry loop (elided) that reloads rdwr on failure.
36 if ( m_rdwr.compare_exchange_weak(rdwr,rdwr+(1<<16),mo_acq_rel) )
// Wait until writes-completed catches up to our observed write count,
// i.e. the write that filled slot rd has published its data. The acquire
// load pairs with the writer's release fetch_add on m_written.
42 while ( (m_written.load(mo_acquire) & 0xFFFF) != wr ) {
// Map the 16-bit counter onto the ring. NOTE(review): rd wraps at 2^16,
// so indexing is only seamless if t_size divides 65536 — verify this is
// an documented precondition in the full source.
46 nonatomic<t_element> * p = & ( m_array[ rd % t_size ] );
// Completion: release-publish that one more read has finished, allowing
// writers spinning on m_read in write_prepare to reuse the slot.
// (Likely the body of a separate read_consume()-style method — elided.)
52 m_read.fetch_add(1,mo_release);
55 //-----------------------------------------------------
// Reserve the next writable slot and return a pointer to it — the mirror
// image of read_fetch: CAS-bump the writes-started half of m_rdwr, then
// spin until the reader that previously used that slot has completed.
// NOTE(review): as with read_fetch, this excerpt elides the rd/wr
// declarations, the CAS retry loop, the full-queue return, the spin-loop
// body, and the "return p;". The trailing m_written.fetch_add most
// likely belongs to a separate publish method whose signature was
// elided — confirm against the full source.
57 nonatomic<t_element> * write_prepare() {
// Snapshot of the packed started-counters word.
58 unsigned int rdwr = m_rdwr.load(mo_acquire);
// High 16 bits = reads started. (wr presumably comes from the low 16
// bits on an elided line — TODO confirm.)
61 rd = (rdwr>>16) & 0xFFFF;
// Full when writes-started has run a whole capacity ahead of
// reads-started (comparison done modulo 2^16 via the & 0xFFFF).
64 if ( wr == ((rd + t_size)&0xFFFF) ) // full
// Claim slot wr: rebuild the packed word with the write half incremented,
// wrapping explicitly at 16 bits. Weak CAS — presumably retried in an
// elided loop on spurious failure.
67 if ( m_rdwr.compare_exchange_weak(rdwr,(rd<<16) | ((wr+1)&0xFFFF),mo_acq_rel) )
// Wait until reads-completed reaches our observed read count, i.e. the
// previous occupant of this slot has been fully consumed. Acquire pairs
// with the reader's release fetch_add on m_read.
73 while ( (m_read.load(mo_acquire) & 0xFFFF) != rd ) {
// Map the 16-bit counter onto the ring (same t_size-divides-65536 caveat
// as in read_fetch — verify).
77 nonatomic<t_element> * p = & ( m_array[ wr % t_size ] );
// Completion: release-publish that one more write has finished, waking
// readers spinning on m_written in read_fetch.
// (Likely the body of a separate write_publish()-style method — elided.)
84 m_written.fetch_add(1,mo_release);
87 //-----------------------------------------------------