#pragma once

#include <stdint.h>
#include <time.h>

#include <algorithm>
#include <atomic>
#include <thread>

#include "core.h"
#include "macros.h"
#include "spinlock.h"
#include "util.h"
#include "lockguard.h"

class ticker {
public:

#ifdef CHECK_INVARIANTS
  static const uint64_t tick_us = 1 * 1000;  /* 1 ms */
#else
  static const uint64_t tick_us = 40 * 1000; /* 40 ms */
#endif

  ticker()
    : current_tick_(1), last_tick_inclusive_(0)
  {
    std::thread thd(&ticker::tickerloop, this);
    thd.detach(); // the ticker loop runs detached for the life of the process
  }

  inline uint64_t
  global_current_tick() const
  {
    return current_tick_.load(std::memory_order_acquire);
  }

  inline uint64_t
  global_last_tick_inclusive() const
  {
    return last_tick_inclusive_.load(std::memory_order_acquire);
  }

  inline uint64_t
  global_last_tick_exclusive() const
  {
    return global_last_tick_inclusive() + 1;
  }
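
  // Illustrative example (hypothetical numbers, not from the original file):
  // if last_tick_inclusive_ is 7, every thread has completed ticks 1..7, so
  // global_last_tick_exclusive() returns 8 -- tick 8 may still have active
  // readers on some core.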

  // should yield a # >= global_last_tick_exclusive()
  inline uint64_t
  compute_global_last_tick_exclusive() const
  {
    uint64_t e = ticks_[0].current_tick_.load(std::memory_order_acquire);
    for (size_t i = 1; i < ticks_.size(); i++)
      e = std::min(e, ticks_[i].current_tick_.load(std::memory_order_acquire));
    return e;
  }
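
  // e.g. (hypothetical values): per-core current_tick_ of {5, 4, 6} yields
  // e = 4; core 1 may still be reading in tick 4, but every core is done
  // with ticks < 4, so 4 is a safe exclusive bound and is always
  // >= global_last_tick_exclusive().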

  // returns true if a guard is currently active on this core, filling in
  // cur_epoch with the current tick
  inline bool
  is_locally_guarded(uint64_t &cur_epoch) const
  {
    const uint64_t core_id = coreid::core_id();
    const uint64_t current_tick =
      ticks_[core_id].current_tick_.load(std::memory_order_acquire);
    const uint64_t current_depth =
      ticks_[core_id].depth_.load(std::memory_order_acquire);
    if (current_depth)
      cur_epoch = current_tick;
    return current_depth;
  }

  inline bool
  is_locally_guarded() const
  {
    uint64_t c;
    return is_locally_guarded(c);
  }
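
  // Usage sketch (illustrative, not from the original file): code that must
  // run inside a guard can assert the fact and recover the current epoch:
  //
  //   uint64_t e;
  //   INVARIANT(ticker::s_instance.is_locally_guarded(e));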

  inline spinlock &
  lock_for(uint64_t core_id)
  {
    INVARIANT(core_id < ticks_.size());
    return ticks_[core_id].lock_;
  }

  // a guard is re-entrant within a single thread (see the usage sketch
  // following the class)
  class guard {
  public:
    guard(ticker &impl)
      : impl_(&impl), core_(coreid::core_id()), start_us_(0)
    {
      tickinfo &ti = impl_->ticks_[core_];
      // bump the depth first
      const uint64_t prev_depth = util::non_atomic_fetch_add(ti.depth_, 1UL);
      if (!prev_depth) {
        // outermost guard: acquire this core's lock, then
        // read epoch # (try to advance forward)
        ti.lock_.lock();
        tick_ = impl_->global_current_tick();
        INVARIANT(ti.current_tick_.load(std::memory_order_acquire) <= tick_);
        ti.current_tick_.store(tick_, std::memory_order_release);
        start_us_ = util::timer::cur_usec();
        ti.start_us_.store(start_us_, std::memory_order_release);
      } else {
        // nested guard: inherit the outermost guard's tick and start time
        tick_ = ti.current_tick_.load(std::memory_order_acquire);
        start_us_ = ti.start_us_.load(std::memory_order_acquire);
      }
      INVARIANT(ti.lock_.is_locked());
      depth_ = prev_depth + 1;
    }

    guard(guard &&) = default;
    guard(const guard &) = delete;
    guard &operator=(const guard &) = delete;

    ~guard()
    {
      INVARIANT(core_ == coreid::core_id());
      tickinfo &ti = impl_->ticks_[core_];
      INVARIANT(ti.lock_.is_locked());
      INVARIANT(tick_ > impl_->global_last_tick_inclusive());
      const uint64_t prev_depth = util::non_atomic_fetch_sub(ti.depth_, 1UL);
      INVARIANT(prev_depth);
      if (prev_depth == 1) {
        // the outermost guard is exiting: clear the start time and
        // release this core's lock
        ti.start_us_.store(0, std::memory_order_release);
        ti.lock_.unlock();
      }
    }

    inline const ticker &
    impl() const { return *impl_; }

    // refers to the start time of the *outermost* scope
    inline uint64_t
    start_us() const { return start_us_; }

  private:
    ticker *impl_;
    uint64_t core_;
    uint64_t tick_;
    uint64_t depth_;
    uint64_t start_us_;
  };
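
  // Usage sketch (illustrative, not from the original file): guards nest like
  // a re-entrant lock, so a helper may take its own guard even if its caller
  // already holds one on this core:
  //
  //   void reader() {
  //     ticker::guard outer(ticker::s_instance); // depth 0 -> 1: locks core
  //     ticker::guard inner(ticker::s_instance); // depth 1 -> 2: reuses lock
  //   } // inner: depth 2 -> 1 (lock kept); outer: depth 1 -> 0 (lock freed)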

  static ticker s_instance CACHE_ALIGNED; // system-wide ticker

private:

  void
  tickerloop()
  {
    struct timespec t;
    util::timer loop_timer;
    for (;;) {
      // sleep off whatever part of the tick the last iteration left over
      const uint64_t last_loop_usec = loop_timer.lap();
      const uint64_t delay_time_usec = tick_us;
      if (last_loop_usec < delay_time_usec) {
        const uint64_t sleep_ns = (delay_time_usec - last_loop_usec) * 1000;
        t.tv_sec  = sleep_ns / ONE_SECOND_NS;
        t.tv_nsec = sleep_ns % ONE_SECOND_NS;
        nanosleep(&t, nullptr);
        loop_timer.lap(); // since we slept away the lag
      }

      // bump the current tick
      // XXX: ignore overflow
      const uint64_t last_tick = util::non_atomic_fetch_add(current_tick_, 1UL);
      const uint64_t cur_tick = last_tick + 1;

      // wait for all threads to finish the last tick
      for (size_t i = 0; i < ticks_.size(); i++) {
        tickinfo &ti = ticks_[i];
        const uint64_t thread_cur_tick =
          ti.current_tick_.load(std::memory_order_acquire);
        INVARIANT(thread_cur_tick == last_tick ||
                  thread_cur_tick == cur_tick);
        if (thread_cur_tick == cur_tick)
          continue; // this core already advanced itself to cur_tick
        lock_guard<spinlock> lg(ti.lock_);
        ti.current_tick_.store(cur_tick, std::memory_order_release);
      }

      last_tick_inclusive_.store(last_tick, std::memory_order_release);
    }
  }
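
  // Worked example for the sleep computation above (hypothetical numbers):
  // with tick_us = 40 * 1000 and an iteration that took 15,000 us, sleep_ns =
  // (40,000 - 15,000) * 1000 = 25,000,000 ns, so the loop sleeps 25 ms and
  // successive ticks stay roughly 40 ms apart regardless of per-loop cost.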

  struct tickinfo {
    spinlock lock_; // guards current_tick_ and depth_

    std::atomic<uint64_t> current_tick_; // last RCU epoch this thread has seen
                                         // (implies completion through current_tick_ - 1)
    std::atomic<uint64_t> depth_;        // 0 if not in RCU section
    std::atomic<uint64_t> start_us_;     // 0 if not in RCU section

    tickinfo()
      : current_tick_(1), depth_(0), start_us_(0)
    {
      ALWAYS_ASSERT(((uintptr_t)this % CACHELINE_SIZE) == 0);
    }
  };
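
  // One tickinfo per core (below), each cache-line aligned, so a reader
  // bumping its own depth_/current_tick_ never false-shares with other cores.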

  percore<tickinfo> ticks_;

  std::atomic<uint64_t> current_tick_;        // which tick are we currently on?
  std::atomic<uint64_t> last_tick_inclusive_; // all threads have *completed*
                                              // ticks <= last_tick_inclusive_
};