2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_URCU_DETAILS_SH_H
32 #define CDSLIB_URCU_DETAILS_SH_H
34 #include <memory.h> //memset
35 #include <cds/urcu/details/sh_decl.h>
37 #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
38 #include <cds/threading/model.h>
41 namespace cds { namespace urcu { namespace details {
46 template <typename RCUtag>
47 inline sh_thread_gc<RCUtag>::sh_thread_gc()
49 if ( !threading::Manager::isThreadAttached())
50 cds::threading::Manager::attachThread();
53 template <typename RCUtag>
54 inline sh_thread_gc<RCUtag>::~sh_thread_gc()
56 cds::threading::Manager::detachThread();
59 template <typename RCUtag>
60 inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
62 return cds::threading::getRCU<RCUtag>();
65 template <typename RCUtag>
66 inline void sh_thread_gc<RCUtag>::access_lock()
68 thread_record * pRec = get_thread_record();
69 assert( pRec != nullptr );
71 uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
72 if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
73 pRec->m_nAccessControl.store(
74 sh_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_acquire),
75 atomics::memory_order_release
79 pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_release );
81 CDS_COMPILER_RW_BARRIER;
84 template <typename RCUtag>
85 inline void sh_thread_gc<RCUtag>::access_unlock()
87 thread_record * pRec = get_thread_record();
88 assert( pRec != nullptr);
90 CDS_COMPILER_RW_BARRIER;
91 pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
94 template <typename RCUtag>
95 inline bool sh_thread_gc<RCUtag>::is_locked()
97 thread_record * pRec = get_thread_record();
98 assert( pRec != nullptr);
100 return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
105 template <typename RCUtag>
106 inline void sh_singleton<RCUtag>::set_signal_handler()
108 //TODO: OS-specific code must be moved to cds::OS namespace
109 struct sigaction sigact;
110 memset( &sigact, 0, sizeof(sigact));
111 sigact.sa_sigaction = signal_handler;
112 sigact.sa_flags = SA_SIGINFO;
113 sigemptyset( &sigact.sa_mask );
114 //sigaddset( &sigact.sa_mask, m_nSigNo );
115 sigaction( m_nSigNo, &sigact, nullptr );
117 sigaddset( &sigact.sa_mask, m_nSigNo );
118 pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
121 template <typename RCUtag>
122 inline void sh_singleton<RCUtag>::clear_signal_handler()
125 template <typename RCUtag>
126 void sh_singleton<RCUtag>::signal_handler( int /*signo*/, siginfo_t * /*sigInfo*/, void * /*context*/ )
128 thread_record * pRec = cds::threading::getRCU<RCUtag>();
130 atomics::atomic_signal_fence( atomics::memory_order_acquire );
131 pRec->m_bNeedMemBar.store( false, atomics::memory_order_relaxed );
132 atomics::atomic_signal_fence( atomics::memory_order_release );
136 template <typename RCUtag>
137 inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
139 pthread_kill( tid, m_nSigNo );
142 template <typename RCUtag>
143 template <class Backoff>
144 inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
146 OS::ThreadId const nullThreadId = OS::c_NullThreadId;
148 // Send "need membar" signal to all RCU threads
149 for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
150 OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire);
151 if ( tid != nullThreadId ) {
152 pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
157 // Wait while all RCU threads process the signal
158 for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
159 OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire);
160 if ( tid != nullThreadId ) {
162 while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
163 && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
165 // Some versions of OSes can lose signals
166 // So, we resend the signal
174 template <typename RCUtag>
175 bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
177 uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
178 return (v & signal_handling_rcu::c_nNestMask)
179 && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
182 template <typename RCUtag>
183 template <class Backoff>
184 void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
186 OS::ThreadId const nullThreadId = OS::c_NullThreadId;
188 for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
189 while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire) != nullThreadId && check_grace_period( pRec ))
}}} // namespace cds::urcu::details
197 #endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
198 #endif // #ifndef CDSLIB_URCU_DETAILS_SH_H