2 * Copyright 2004-present Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include <folly/io/async/HHWheelTimer.h>
18 #include <folly/io/async/Request.h>
20 #include <folly/BitIterator.h>
21 #include <folly/Memory.h>
22 #include <folly/Optional.h>
23 #include <folly/ScopeGuard.h>
25 #include <folly/Bits.h>
29 using std::chrono::milliseconds;
34 * We want to select the default interval carefully.
35 * An interval of 10ms will give us 10ms * WHEEL_SIZE^WHEEL_BUCKETS
36 * for the largest timeout possible, or about 497 days.
38 * For a lower bound, we want a reasonable limit on local IO, 10ms
41 * A shorter interval also has CPU implications, less than 1ms might
42 * start showing up in cpu perf. Also, it might not be possible to set
43 * tick interval less than 10ms on older kernels.
// Default tick interval in milliseconds; the rationale (upper/lower bounds,
// CPU cost, kernel granularity) is in the comment block above.
45 int HHWheelTimer::DEFAULT_TICK_INTERVAL = 10;
// Destructor for a wheel-timer callback.
// NOTE(review): presumably cancels the timeout if still scheduled — confirm
// against the full body.
47 HHWheelTimer::Callback::~Callback() {
// Mark this callback as scheduled on `wheel` and stamp its absolute
// deadline as the current time plus `timeout`.
53 void HHWheelTimer::Callback::setScheduled(HHWheelTimer* wheel,
54 std::chrono::milliseconds timeout) {
// Precondition: not currently associated with any wheel, and expiration_
// still holds its default (unset) value.
55 assert(wheel_ == nullptr);
56 assert(expiration_ == decltype(expiration_){});
60 expiration_ = getCurTime() + timeout;
// Remove this callback from its wheel's bookkeeping after cancellation.
63 void HHWheelTimer::Callback::cancelTimeoutImpl() {
// One fewer outstanding timeout; once none remain, stop the underlying
// AsyncTimeout so the event base no longer ticks for this wheel.
64 if (--wheel_->count_ <= 0) {
65 assert(wheel_->count_ == 0);
66 wheel_->AsyncTimeout::cancelTimeout();
// bucket_ == -1 means this callback was not in a level-0 bucket. If its
// level-0 bucket is now empty, clear that bucket's occupancy bit so the
// next-timeout bitmap scan can skip it.
69 if ((-1 != bucket_) && (wheel_->buckets_[0][bucket_].empty())) {
70 auto bi = makeBitIterator(wheel_->bitmap_.begin());
71 *(bi + bucket_) = false;
// Construct a hashed hierarchical wheel timer driven by `timeoutMananger`
// (parameter spelling "Mananger" is as in the original), ticking every
// `intervalMS`. `defaultTimeoutMS` backs the no-timeout scheduleTimeout
// overload.
78 HHWheelTimer::HHWheelTimer(
79 folly::TimeoutManager* timeoutMananger,
80 std::chrono::milliseconds intervalMS,
81 AsyncTimeout::InternalEnum internal,
82 std::chrono::milliseconds defaultTimeoutMS)
83 : AsyncTimeout(timeoutMananger, internal),
84 interval_(intervalMS),
85 defaultTimeout_(defaultTimeoutMS),
// startTime_ anchors the tick arithmetic in calcNextTick().
89 startTime_(getCurTime()),
90 processingCallbacksGuard_(nullptr) {
// One occupancy bit per level-0 bucket: WHEEL_SIZE bits packed into
// uint64_t words (sizeof(uint64_t) * 8 bits per word).
91 bitmap_.resize((WHEEL_SIZE / sizeof(uint64_t)) / 8, 0);
// Destructor: publish destruction to any in-flight timeoutExpired() via
// processingCallbacksGuard_, and notify still-queued callbacks that they
// were canceled rather than fired.
94 HHWheelTimer::~HHWheelTimer() {
95 // Ensure this gets done, but right before destruction finishes.
96 auto destructionPublisherGuard = folly::makeGuard([&] {
97 // Inform the subscriber that this instance is doomed.
98 if (processingCallbacksGuard_) {
99 *processingCallbacksGuard_ = true;
// Drain the dispatch list, telling each callback it was canceled.
102 while (!timeouts.empty()) {
103 auto* cb = &timeouts.front();
104 timeouts.pop_front();
106 cb->callbackCanceled();
// Place `callback` into the wheel bucket matching `timeout`. The wheel is
// hierarchical: level 0 covers the next WHEEL_SIZE ticks, and each higher
// level covers WHEEL_BITS more bits of tick range.
111 void HHWheelTimer::scheduleTimeoutImpl(Callback* callback,
112 std::chrono::milliseconds timeout) {
113 auto nextTick = calcNextTick();
// Absolute due tick, and its distance from the next tick to fire.
114 int64_t due = timeToWheelTicks(timeout) + nextTick;
115 int64_t diff = due - nextTick;
118 auto bi = makeBitIterator(bitmap_.begin());
// Due immediately: use the current level-0 bucket, mark its occupancy
// bit, and remember the bucket index for cancellation bookkeeping.
121 list = &buckets_[0][nextTick & WHEEL_MASK];
122 *(bi + (nextTick & WHEEL_MASK)) = true;
123 callback->bucket_ = nextTick & WHEEL_MASK;
// Due within one wheel revolution: level-0 bucket keyed by the due tick.
124 } else if (diff < WHEEL_SIZE) {
125 list = &buckets_[0][due & WHEEL_MASK];
126 *(bi + (due & WHEEL_MASK)) = true;
127 callback->bucket_ = due & WHEEL_MASK;
// Farther out: higher-level buckets indexed by higher bits of `due`.
// These are cascaded down by cascadeTimers() as the wheel turns; no
// occupancy bit is kept for them.
128 } else if (diff < 1 << (2 * WHEEL_BITS)) {
129 list = &buckets_[1][(due >> WHEEL_BITS) & WHEEL_MASK];
130 } else if (diff < 1 << (3 * WHEEL_BITS)) {
131 list = &buckets_[2][(due >> 2 * WHEEL_BITS) & WHEEL_MASK];
133 /* in largest slot */
// NOTE(review): diff is presumably clamped to LARGEST_SLOT before `due`
// is recomputed here — confirm against the full body.
134 if (diff > LARGEST_SLOT) {
136 due = diff + nextTick;
138 list = &buckets_[3][(due >> 3 * WHEEL_BITS) & WHEEL_MASK];
// Intrusive insert; the callback's hook links it into the bucket list.
140 list->push_back(*callback);
// Public scheduling entry point: (re)schedule `callback` to fire after
// `timeout`, capturing the caller's RequestContext for dispatch time.
143 void HHWheelTimer::scheduleTimeout(Callback* callback,
144 std::chrono::milliseconds timeout) {
145 // Cancel the callback if it happens to be scheduled already.
146 callback->cancelTimeout();
// Saved here, restored around the callback in timeoutExpired().
148 callback->context_ = RequestContext::saveContext();
152 callback->setScheduled(this, timeout);
153 scheduleTimeoutImpl(callback, timeout);
155 /* If we're calling callbacks, timer will be reset after all
156 * callbacks are called.
// Only re-arm the underlying timer when not inside timeoutExpired();
// that path re-arms once after dispatching everything.
158 if (!processingCallbacksGuard_) {
159 scheduleNextTimeout();
// Convenience overload using the wheel's default timeout; fatal if no
// default was supplied at construction (sentinel is -1 ms).
163 void HHWheelTimer::scheduleTimeout(Callback* callback) {
164 CHECK(std::chrono::milliseconds(-1) != defaultTimeout_)
165 << "Default timeout was not initialized";
166 scheduleTimeout(callback, defaultTimeout_);
// Move every callback in buckets_[bucket][tick] down the hierarchy by
// rescheduling it with its remaining time-to-expiration.
169 bool HHWheelTimer::cascadeTimers(int bucket, int tick) {
// Swap the bucket's list out first so rescheduling inserts into fresh
// buckets instead of the one being drained.
171 cbs.swap(buckets_[bucket][tick]);
172 while (!cbs.empty()) {
173 auto* cb = &cbs.front();
175 scheduleTimeoutImpl(cb, cb->getTimeRemaining(getCurTime()));
178 // If tick is zero, timeoutExpired will cascade the next bucket.
// Fired by the event base when the armed interval elapses: drain every
// level-0 bucket due up to the current tick, cascade the upper wheels on
// wrap, then dispatch the collected callbacks.
182 void HHWheelTimer::timeoutExpired() noexcept {
183 auto nextTick = calcNextTick();
185 // If the last smart pointer for "this" is reset inside the callback's
186 // timeoutExpired(), then the guard will detect that it is time to bail from
188 auto isDestroyed = false;
189 // If scheduleTimeout is called from a callback in this function, it may
190 // cause inconsistencies in the state of this object. As such, we need
191 // to treat these calls slightly differently.
192 CHECK(!processingCallbacksGuard_);
193 processingCallbacksGuard_ = &isDestroyed;
// Guard clears the re-entrancy flag on every exit path.
194 auto reEntryGuard = folly::makeGuard([&] {
196 processingCallbacksGuard_ = nullptr;
200 // timeoutExpired() can only be invoked directly from the event base loop.
201 // It should never be invoked recursively.
// Walk every tick from the last armed expiry up to the current tick,
// draining each due level-0 bucket into the dispatch list.
203 lastTick_ = expireTick_;
204 while (lastTick_ < nextTick) {
205 int idx = lastTick_ & WHEEL_MASK;
// Bit iterator over the occupancy bitmap for this bucket's bit.
207 auto bi = makeBitIterator(bitmap_.begin());
211 CallbackList* cbs = &buckets_[0][idx];
212 while (!cbs->empty()) {
213 auto* cb = &cbs->front();
215 timeouts.push_back(*cb);
// On wheel wrap, cascade level 1; the && chain cascades level 2 (and
// then level 3) only when the lower level's index also wrapped — see
// the note at the end of cascadeTimers().
220 if (cascadeTimers(1, (lastTick_ >> WHEEL_BITS) & WHEEL_MASK) &&
221 cascadeTimers(2, (lastTick_ >> (2 * WHEEL_BITS)) & WHEEL_MASK)) {
222 cascadeTimers(3, (lastTick_ >> (3 * WHEEL_BITS)) & WHEEL_MASK);
// Dispatch: detach each callback from the wheel, restore the context
// saved at scheduling time, and invoke its timeoutExpired().
227 while (!timeouts.empty()) {
228 auto* cb = &timeouts.front();
229 timeouts.pop_front();
231 cb->wheel_ = nullptr;
232 cb->expiration_ = {};
233 RequestContextScopeGuard rctx(cb->context_);
234 cb->timeoutExpired();
236 // The HHWheelTimer itself has been destroyed. The other callbacks
237 // will have been cancelled from the destructor. Bail before causing
// Re-arm the underlying timer for the next occupied bucket.
242 scheduleNextTimeout();
// Cancel every scheduled callback on this wheel.
245 size_t HHWheelTimer::cancelAll() {
// First move all non-empty bucket lists into a local array, so that the
// callbackCanceled() handlers below can safely touch this wheel without
// invalidating the bucket iteration.
249 const uint64_t numElements = WHEEL_BUCKETS * WHEEL_SIZE;
// At most count_ callbacks exist, so never more than count_ non-empty
// buckets need moving.
250 auto maxBuckets = std::min(numElements, count_);
251 auto buckets = std::make_unique<CallbackList[]>(maxBuckets);
252 size_t countBuckets = 0;
253 for (auto& tick : buckets_) {
254 for (auto& bucket : tick) {
255 if (bucket.empty()) {
258 count += bucket.size();
259 std::swap(bucket, buckets[countBuckets++]);
// Stop scanning once every outstanding callback has been collected.
260 if (count >= count_) {
// Second pass: notify each collected callback of its cancellation.
266 for (size_t i = 0; i < countBuckets; ++i) {
267 auto& bucket = buckets[i];
268 while (!bucket.empty()) {
269 auto& cb = bucket.front();
271 cb.callbackCanceled();
// Arm the underlying AsyncTimeout to fire at the next tick that has work.
279 void HHWheelTimer::scheduleNextTimeout() {
280 auto nextTick = calcNextTick();
// Mid-revolution: scan the occupancy bitmap for the next non-empty
// level-0 bucket at or after the current position.
283 if (nextTick & WHEEL_MASK) {
284 auto bi = makeBitIterator(bitmap_.begin());
285 auto bi_end = makeBitIterator(bitmap_.end());
286 auto it = folly::findFirstSet(bi + (nextTick & WHEEL_MASK), bi_end);
// No occupied bucket before the wheel wraps: wake at the wrap point so
// timeoutExpired() can cascade the upper wheels.
288 tick = WHEEL_SIZE - ((nextTick - 1) & WHEEL_MASK);
290 tick = std::distance(bi + (nextTick & WHEEL_MASK), it) + 1;
// Only (re)arm if nothing is armed yet, or the new deadline is earlier
// than the currently armed expireTick_.
295 if (!this->AsyncTimeout::isScheduled() ||
296 (expireTick_ > tick + nextTick - 1)) {
297 this->AsyncTimeout::scheduleTimeout(interval_ * tick);
298 expireTick_ = tick + nextTick - 1;
// NOTE(review): presumably reached when no timeouts remain (count_ == 0)
// — confirm the enclosing branch against the full body.
301 this->AsyncTimeout::cancelTimeout();
305 int64_t HHWheelTimer::calcNextTick() {
306 auto intervals = (getCurTime() - startTime_) / interval_;
307 // Slow eventbases will have skew between the actual time and the
308 // callback time. To avoid racing the next scheduleNextTimeout()
309 // call, always schedule new timeouts against the actual
310 // timeoutExpired() time.
311 if (!processingCallbacksGuard_) {