/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_URCU_DETAILS_SH_H
#define CDSLIB_URCU_DETAILS_SH_H

#include <cstring>  // memset
#include <cds/urcu/details/sh_decl.h>

#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#include <signal.h> // sigaction, sigemptyset; pthread_sigmask, pthread_kill (POSIX)
#include <cds/threading/model.h>

//@cond
namespace cds { namespace urcu { namespace details {

    // Inlines

    // sh_thread_gc
    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::sh_thread_gc()
    {
        if ( !cds::threading::Manager::isThreadAttached())
            cds::threading::Manager::attachThread();
    }

    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::~sh_thread_gc()
    {
        cds::threading::Manager::detachThread();
    }

    template <typename RCUtag>
    inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
    {
        return cds::threading::getRCU<RCUtag>();
    }
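
    // How the reader-side lock works (summary of the logic below): each
    // thread keeps a 32-bit control word whose low bits (c_nNestMask) hold
    // the critical-section nesting depth and whose remaining bits carry the
    // current grace-period phase. On the outermost access_lock() the thread
    // copies the singleton's global control word, thereby publishing both
    // "I am inside a critical section" and "I entered during this phase";
    // a writer compares the two words (see check_grace_period() below) to
    // decide whether it must wait for this reader. Nested locks merely bump
    // the counter.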

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_lock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );

        if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
            // Outermost lock: copy the global control word (current phase
            // plus initial nesting count) into the per-thread control word
            pRec->m_nAccessControl.store( sh_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_relaxed),
                atomics::memory_order_relaxed );

#   if CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 30800
            // Clang 3.6-3.7 seems unable to handle the acquire barrier correctly
            CDS_COMPILER_RW_BARRIER;
#   else
            // acquire barrier: reads inside the critical section cannot be
            // hoisted above this point
            pRec->m_nAccessControl.load( atomics::memory_order_acquire );
#   endif
        }
        else {
            // nested lock: just increment the nesting counter
            pRec->m_nAccessControl.store( tmp + 1, atomics::memory_order_relaxed );
        }
    }
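
    // Illustrative usage (a sketch, not part of the library source):
    // application code does not call access_lock()/access_unlock() directly;
    // it uses the RAII guard of the public RCU wrapper. Assuming the
    // signal_buffered flavour declared in <cds/urcu/signal_buffered.h>:
    //
    //      typedef cds::urcu::gc< cds::urcu::signal_buffered<>> rcu_type;
    //
    //      void reader()
    //      {
    //          rcu_type::scoped_lock guard;   // access_lock() in constructor
    //          // ... safely read an RCU-protected container ...
    //      }                                  // access_unlock() in destructor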

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_unlock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
        assert( ( tmp & rcu_class::c_nNestMask ) > 0 );

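        // The decrement is a release store: when this is the outermost unlock
        // (the nesting count drops to zero), everything read or written inside
        // the critical section happens-before a writer's observation that the
        // reader has left it. For a nested unlock, release ordering is
        // stronger than necessary but harmless.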
        pRec->m_nAccessControl.store( tmp - 1, atomics::memory_order_release );
    }

    template <typename RCUtag>
    inline bool sh_thread_gc<RCUtag>::is_locked()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
    }


    // sh_singleton
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::set_signal_handler()
    {
        //TODO: OS-specific code must be moved to cds::OS namespace
        struct sigaction sigact;
        memset( &sigact, 0, sizeof(sigact));
        sigact.sa_sigaction = signal_handler;
        sigact.sa_flags = SA_SIGINFO;
        sigemptyset( &sigact.sa_mask );
        sigaction( m_nSigNo, &sigact, nullptr );

        sigaddset( &sigact.sa_mask, m_nSigNo );
        pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
    }
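
    // The handler is installed process-wide with SA_SIGINFO, and m_nSigNo is
    // then explicitly unblocked for the calling thread so that grace-period
    // signals can actually be delivered to it. The handler itself (declared
    // in sh_decl.h) is expected to issue a full memory fence and clear the
    // per-thread m_bNeedMemBar flag that force_membar_all_threads() polls.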

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::clear_signal_handler()
    {}

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
    {
        pthread_kill( tid, m_nSigNo );
    }
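
    // pthread_kill() delivers the signal to one specific thread rather than
    // to the whole process (unlike kill()), which is what lets
    // force_membar_all_threads() below poke each reader thread individually.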

    template <typename RCUtag>
    template <class Backoff>
    inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        // Send the "need membar" signal to all RCU threads
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
                raise_signal( tid );
            }
        }

        // Wait until all RCU threads have processed the signal
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                bkOff.reset();
                while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
                     && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
                {
                    // Some OS versions can lose signals, so resend
                    raise_signal( tid );
                    bkOff();
                }
            }
        }
    }
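
    // Design note: this routine plays the role of sys_membarrier() in user
    // space, a technique inherited from Desnoyers' userspace-RCU signal
    // flavour. Delivering the signal makes each reader run the handler,
    // which is expected to execute a full fence before resetting
    // m_bNeedMemBar; once every flag reads false, the writer knows every
    // reader has passed through a barrier, so the readers' relaxed stores to
    // their control words are now visible. Threads that detach while we wait
    // (m_idOwner becomes nullThreadId) are skipped.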

    template <typename RCUtag>
    bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
    {
        uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
        return (v & signal_handling_rcu::c_nNestMask)
            && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
    }
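
    // A reader delays the grace period iff both conditions hold: its nesting
    // count is non-zero (it is inside a critical section), and its phase bits
    // (the bits outside c_nNestMask) differ from the current global control
    // word, i.e. it entered the critical section before the writer flipped
    // the phase. Readers that entered after the flip can safely be ignored.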

    template <typename RCUtag>
    template <class Backoff>
    void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire ) != nullThreadId && check_grace_period( pRec ))
                bkOff();
        }
    }
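
    // Writer-side picture (a sketch; the actual synchronize() lives in the
    // sh_singleton/signal_buffered layer, not in this file): a grace period
    // is typically composed from the primitives above, roughly
    //
    //      force_membar_all_threads( bkOff );  // commit readers' control words
    //      // flip the phase bits (~c_nNestMask) of m_nGlobalControl
    //      wait_for_quiescent_state( bkOff );  // wait out pre-flip readers
    //      force_membar_all_threads( bkOff );  // order reclamation after waiting
    //
    // In practice the flip-and-wait step runs twice, as in userspace-RCU, so
    // that both phases drain; after that, memory retired before the grace
    // period began can be reclaimed safely.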

}}} // namespace cds::urcu::details
//@endcond

#endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#endif // #ifndef CDSLIB_URCU_DETAILS_SH_H