3 #ifndef _CDS_URCU_DETAILS_SH_H
4 #define _CDS_URCU_DETAILS_SH_H
6 #include <memory.h> //memset
7 #include <cds/urcu/details/sh_decl.h>
9 #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
10 #include <cds/threading/model.h>
13 namespace cds { namespace urcu { namespace details {
18 template <typename RCUtag>
19 inline sh_thread_gc<RCUtag>::sh_thread_gc()
21 if ( !threading::Manager::isThreadAttached() )
22 cds::threading::Manager::attachThread();
25 template <typename RCUtag>
26 inline sh_thread_gc<RCUtag>::~sh_thread_gc()
28 cds::threading::Manager::detachThread();
31 template <typename RCUtag>
32 inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
34 return cds::threading::getRCU<RCUtag>();
37 template <typename RCUtag>
38 inline void sh_thread_gc<RCUtag>::access_lock()
40 thread_record * pRec = get_thread_record();
41 assert( pRec != nullptr );
43 uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
44 if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
45 pRec->m_nAccessControl.store(
46 sh_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_acquire),
47 atomics::memory_order_release
51 pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_release );
53 CDS_COMPILER_RW_BARRIER;
56 template <typename RCUtag>
57 inline void sh_thread_gc<RCUtag>::access_unlock()
59 thread_record * pRec = get_thread_record();
60 assert( pRec != nullptr);
62 CDS_COMPILER_RW_BARRIER;
63 pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
66 template <typename RCUtag>
67 inline bool sh_thread_gc<RCUtag>::is_locked()
69 thread_record * pRec = get_thread_record();
70 assert( pRec != nullptr);
72 return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
77 template <typename RCUtag>
78 inline void sh_singleton<RCUtag>::set_signal_handler()
80 //TODO: OS-specific code must be moved to cds::OS namespace
81 struct sigaction sigact;
82 memset( &sigact, 0, sizeof(sigact));
83 sigact.sa_sigaction = signal_handler;
84 sigact.sa_flags = SA_SIGINFO;
85 sigemptyset( &sigact.sa_mask );
86 //sigaddset( &sigact.sa_mask, m_nSigNo );
87 sigaction( m_nSigNo, &sigact, nullptr );
89 sigaddset( &sigact.sa_mask, m_nSigNo );
90 pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
template <typename RCUtag>
inline void sh_singleton<RCUtag>::clear_signal_handler()
{
    // NOTE(review): body not visible in this chunk; appears to be an
    // intentional no-op (handler stays installed until process exit) —
    // confirm against the full source.
}
97 template <typename RCUtag>
98 void sh_singleton<RCUtag>::signal_handler( int /*signo*/, siginfo_t * /*sigInfo*/, void * /*context*/ )
100 thread_record * pRec = cds::threading::getRCU<RCUtag>();
102 atomics::atomic_signal_fence( atomics::memory_order_acquire );
103 pRec->m_bNeedMemBar.store( false, atomics::memory_order_relaxed );
104 atomics::atomic_signal_fence( atomics::memory_order_release );
template <typename RCUtag>
inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
{
    // Deliver the RCU membar signal to the given thread.
    pthread_kill( tid, m_nSigNo );
}
114 template <typename RCUtag>
115 template <class Backoff>
116 inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
118 OS::ThreadId const nullThreadId = OS::c_NullThreadId;
120 // Send "need membar" signal to all RCU threads
121 for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
122 OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire);
123 if ( tid != nullThreadId ) {
124 pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
129 // Wait while all RCU threads process the signal
130 for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
131 OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire);
132 if ( tid != nullThreadId ) {
134 while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
135 && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
137 // Some versions of OSes can lose signals
138 // So, we resend the signal
146 template <typename RCUtag>
147 bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
149 uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
150 return (v & signal_handling_rcu::c_nNestMask)
151 && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
154 template <typename RCUtag>
155 template <class Backoff>
156 void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
158 OS::ThreadId const nullThreadId = OS::c_NullThreadId;
160 for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
161 while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire) != nullThreadId && check_grace_period( pRec ))
166 }}} // namespace cds:urcu::details
169 #endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
170 #endif // #ifndef _CDS_URCU_DETAILS_SH_H