2 * Copyright 2016 Facebook, Inc.
4 * Licensed to the Apache Software Foundation (ASF) under one
5 * or more contributor license agreements. See the NOTICE file
6 * distributed with this work for additional information
7 * regarding copyright ownership. The ASF licenses this file
8 * to you under the Apache License, Version 2.0 (the
9 * "License"); you may not use this file except in compliance
10 * with the License. You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing,
15 * software distributed under the License is distributed on an
16 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
17 * KIND, either express or implied. See the License for the
18 * specific language governing permissions and limitations
21 #include <folly/io/async/HHWheelTimer.h>
22 #include <folly/io/async/Request.h>
24 #include <folly/Memory.h>
25 #include <folly/Optional.h>
26 #include <folly/ScopeGuard.h>
28 #include <folly/Bits.h>
32 using std::chrono::milliseconds;
/*
 * We want to select the default interval carefully.
 * An interval of 10ms will give us 10ms * WHEEL_SIZE^WHEEL_BUCKETS
 * for the largest timeout possible, or about 497 days.
 *
 * For a lower bound, we want a reasonable limit on local IO, 10ms
 * seems short enough.
 *
 * A shorter interval also has CPU implications, less than 1ms might
 * start showing up in cpu perf. Also, it might not be possible to set
 * tick interval less than 10ms on older kernels.
 */
// Default tick granularity in milliseconds (see the rationale comment above).
int HHWheelTimer::DEFAULT_TICK_INTERVAL = 10;
50 HHWheelTimer::Callback::~Callback() {
56 void HHWheelTimer::Callback::setScheduled(HHWheelTimer* wheel,
57 std::chrono::milliseconds timeout) {
58 assert(wheel_ == nullptr);
59 assert(expiration_ == milliseconds(0));
63 expiration_ = getCurTime() + timeout;
66 void HHWheelTimer::Callback::cancelTimeoutImpl() {
67 if (--wheel_->count_ <= 0) {
68 assert(wheel_->count_ == 0);
69 wheel_->AsyncTimeout::cancelTimeout();
72 if ((-1 != bucket_) && (wheel_->buckets_[0][bucket_].empty())) {
73 auto bi = makeBitIterator(wheel_->bitmap_.begin());
74 *(bi + bucket_) = false;
78 expiration_ = milliseconds(0);
81 HHWheelTimer::HHWheelTimer(
82 folly::TimeoutManager* timeoutMananger,
83 std::chrono::milliseconds intervalMS,
84 AsyncTimeout::InternalEnum internal,
85 std::chrono::milliseconds defaultTimeoutMS)
86 : AsyncTimeout(timeoutMananger, internal),
87 interval_(intervalMS),
88 defaultTimeout_(defaultTimeoutMS),
92 startTime_(getCurTime()),
93 processingCallbacksGuard_(nullptr) {
94 bitmap_.resize((WHEEL_SIZE / sizeof(uint64_t)) / 8, 0);
97 HHWheelTimer::~HHWheelTimer() {
98 // Ensure this gets done, but right before destruction finishes.
99 auto destructionPublisherGuard = folly::makeGuard([&] {
100 // Inform the subscriber that this instance is doomed.
101 if (processingCallbacksGuard_) {
102 *processingCallbacksGuard_ = true;
105 while (!timeouts.empty()) {
106 auto* cb = &timeouts.front();
107 timeouts.pop_front();
109 cb->callbackCanceled();
114 void HHWheelTimer::scheduleTimeoutImpl(Callback* callback,
115 std::chrono::milliseconds timeout) {
116 auto nextTick = calcNextTick();
117 int64_t due = timeToWheelTicks(timeout) + nextTick;
118 int64_t diff = due - nextTick;
121 auto bi = makeBitIterator(bitmap_.begin());
124 list = &buckets_[0][nextTick & WHEEL_MASK];
125 *(bi + (nextTick & WHEEL_MASK)) = true;
126 callback->bucket_ = nextTick & WHEEL_MASK;
127 } else if (diff < WHEEL_SIZE) {
128 list = &buckets_[0][due & WHEEL_MASK];
129 *(bi + (due & WHEEL_MASK)) = true;
130 callback->bucket_ = due & WHEEL_MASK;
131 } else if (diff < 1 << (2 * WHEEL_BITS)) {
132 list = &buckets_[1][(due >> WHEEL_BITS) & WHEEL_MASK];
133 } else if (diff < 1 << (3 * WHEEL_BITS)) {
134 list = &buckets_[2][(due >> 2 * WHEEL_BITS) & WHEEL_MASK];
136 /* in largest slot */
137 if (diff > LARGEST_SLOT) {
139 due = diff + nextTick;
141 list = &buckets_[3][(due >> 3 * WHEEL_BITS) & WHEEL_MASK];
143 list->push_back(*callback);
146 void HHWheelTimer::scheduleTimeout(Callback* callback,
147 std::chrono::milliseconds timeout) {
148 // Cancel the callback if it happens to be scheduled already.
149 callback->cancelTimeout();
151 callback->context_ = RequestContext::saveContext();
153 uint64_t prev = count_;
156 callback->setScheduled(this, timeout);
157 scheduleTimeoutImpl(callback, timeout);
159 /* If we're calling callbacks, timer will be reset after all
160 * callbacks are called.
162 if (!processingCallbacksGuard_) {
163 scheduleNextTimeout();
167 void HHWheelTimer::scheduleTimeout(Callback* callback) {
168 CHECK(std::chrono::milliseconds(-1) != defaultTimeout_)
169 << "Default timeout was not initialized";
170 scheduleTimeout(callback, defaultTimeout_);
173 bool HHWheelTimer::cascadeTimers(int bucket, int tick) {
175 cbs.swap(buckets_[bucket][tick]);
176 while (!cbs.empty()) {
177 auto* cb = &cbs.front();
179 scheduleTimeoutImpl(cb, cb->getTimeRemaining(getCurTime()));
182 // If tick is zero, timeoutExpired will cascade the next bucket.
186 void HHWheelTimer::timeoutExpired() noexcept {
187 auto nextTick = calcNextTick();
189 // If the last smart pointer for "this" is reset inside the callback's
190 // timeoutExpired(), then the guard will detect that it is time to bail from
192 auto isDestroyed = false;
193 // If scheduleTimeout is called from a callback in this function, it may
194 // cause inconsistencies in the state of this object. As such, we need
195 // to treat these calls slightly differently.
196 CHECK(!processingCallbacksGuard_);
197 processingCallbacksGuard_ = &isDestroyed;
198 auto reEntryGuard = folly::makeGuard([&] {
200 processingCallbacksGuard_ = nullptr;
204 // timeoutExpired() can only be invoked directly from the event base loop.
205 // It should never be invoked recursively.
207 lastTick_ = expireTick_;
208 while (lastTick_ < nextTick) {
209 int idx = lastTick_ & WHEEL_MASK;
211 auto bi = makeBitIterator(bitmap_.begin());
215 CallbackList* cbs = &buckets_[0][idx];
216 while (!cbs->empty()) {
217 auto* cb = &cbs->front();
219 timeouts.push_back(*cb);
224 if (cascadeTimers(1, (lastTick_ >> WHEEL_BITS) & WHEEL_MASK) &&
225 cascadeTimers(2, (lastTick_ >> (2 * WHEEL_BITS)) & WHEEL_MASK)) {
226 cascadeTimers(3, (lastTick_ >> (3 * WHEEL_BITS)) & WHEEL_MASK);
231 while (!timeouts.empty()) {
232 auto* cb = &timeouts.front();
233 timeouts.pop_front();
235 cb->wheel_ = nullptr;
236 cb->expiration_ = milliseconds(0);
237 RequestContextScopeGuard rctx(cb->context_);
238 cb->timeoutExpired();
240 // The HHWheelTimer itself has been destroyed. The other callbacks
241 // will have been cancelled from the destructor. Bail before causing
246 scheduleNextTimeout();
249 size_t HHWheelTimer::cancelAll() {
253 const uint64_t numElements = WHEEL_BUCKETS * WHEEL_SIZE;
254 auto maxBuckets = std::min(numElements, count_);
255 auto buckets = folly::make_unique<CallbackList[]>(maxBuckets);
256 size_t countBuckets = 0;
257 for (auto& tick : buckets_) {
258 for (auto& bucket : tick) {
259 if (bucket.empty()) {
262 count += bucket.size();
263 std::swap(bucket, buckets[countBuckets++]);
264 if (count >= count_) {
270 for (size_t i = 0; i < countBuckets; ++i) {
271 auto& bucket = buckets[i];
272 while (!bucket.empty()) {
273 auto& cb = bucket.front();
275 cb.callbackCanceled();
283 void HHWheelTimer::scheduleNextTimeout() {
284 auto nextTick = calcNextTick();
287 if (nextTick & WHEEL_MASK) {
288 auto bi = makeBitIterator(bitmap_.begin());
289 auto bi_end = makeBitIterator(bitmap_.end());
290 auto it = folly::findFirstSet(bi + (nextTick & WHEEL_MASK), bi_end);
292 tick = WHEEL_SIZE - ((nextTick - 1) & WHEEL_MASK);
294 tick = std::distance(bi + (nextTick & WHEEL_MASK), it) + 1;
299 if (!this->AsyncTimeout::isScheduled() ||
300 (expireTick_ > tick + nextTick - 1)) {
301 this->AsyncTimeout::scheduleTimeout(interval_ * tick);
302 expireTick_ = tick + nextTick - 1;
305 this->AsyncTimeout::cancelTimeout();
309 int64_t HHWheelTimer::calcNextTick() {
311 (getCurTime().count() - startTime_.count()) / interval_.count();
312 // Slow eventbases will have skew between the actual time and the
313 // callback time. To avoid racing the next scheduleNextTimeout()
314 // call, always schedule new timeouts against the actual
315 // timeoutExpired() time.
316 if (!processingCallbacksGuard_) {