/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#include <folly/io/async/HHWheelTimer.h>

#include <folly/Optional.h>
#include <folly/ScopeGuard.h>
#include <folly/io/async/Request.h>

using std::chrono::milliseconds;
34 * We want to select the default interval carefully.
35 * An interval of 10ms will give us 10ms * WHEEL_SIZE^WHEEL_BUCKETS
36 * for the largest timeout possible, or about 497 days.
38 * For a lower bound, we want a reasonable limit on local IO, 10ms
41 * A shorter interval also has CPU implications, less than 1ms might
42 * start showing up in cpu perf. Also, it might not be possible to set
43 * tick interval less than 10ms on older kernels.
45 int HHWheelTimer::DEFAULT_TICK_INTERVAL = 10;
47 HHWheelTimer::Callback::~Callback() {
53 void HHWheelTimer::Callback::setScheduled(HHWheelTimer* wheel,
54 std::chrono::milliseconds timeout) {
55 assert(wheel_ == nullptr);
56 assert(expiration_ == milliseconds(0));
58 wheelGuard_ = DestructorGuard(wheel);
61 // Only update the now_ time if we're not in a timeout expired callback
62 if (wheel_->count_ == 0 && !wheel_->processingCallbacksGuard_) {
63 wheel_->now_ = getCurTime();
66 expiration_ = wheel_->now_ + timeout;
69 void HHWheelTimer::Callback::cancelTimeoutImpl() {
70 if (--wheel_->count_ <= 0) {
71 assert(wheel_->count_ == 0);
72 wheel_->AsyncTimeout::cancelTimeout();
77 wheelGuard_ = folly::none;
78 expiration_ = milliseconds(0);
81 HHWheelTimer::HHWheelTimer(folly::TimeoutManager* timeoutMananger,
82 std::chrono::milliseconds intervalMS,
83 AsyncTimeout::InternalEnum internal,
84 std::chrono::milliseconds defaultTimeoutMS)
85 : AsyncTimeout(timeoutMananger, internal),
86 interval_(intervalMS),
87 defaultTimeout_(defaultTimeoutMS),
90 catchupEveryN_(DEFAULT_CATCHUP_EVERY_N),
91 expirationsSinceCatchup_(0),
92 processingCallbacksGuard_(false) {}
94 HHWheelTimer::~HHWheelTimer() {
98 void HHWheelTimer::destroy() {
99 if (getDestructorGuardCount() == count_) {
100 // Every callback holds a DestructorGuard. In this simple case,
101 // All timeouts should already be gone.
104 // Clean them up in opt builds and move on
107 // else, we are only marking pending destruction, but one or more
108 // HHWheelTimer::SharedPtr's (and possibly their timeouts) are still holding
109 // this HHWheelTimer. We cannot assert that all timeouts have been cancelled,
110 // and will just have to wait for them all to complete on their own.
111 DelayedDestruction::destroy();
114 void HHWheelTimer::scheduleTimeoutImpl(Callback* callback,
115 std::chrono::milliseconds timeout) {
116 int64_t due = timeToWheelTicks(timeout) + nextTick_;
117 int64_t diff = due - nextTick_;
121 list = &buckets_[0][nextTick_ & WHEEL_MASK];
122 } else if (diff < WHEEL_SIZE) {
123 list = &buckets_[0][due & WHEEL_MASK];
124 } else if (diff < 1 << (2 * WHEEL_BITS)) {
125 list = &buckets_[1][(due >> WHEEL_BITS) & WHEEL_MASK];
126 } else if (diff < 1 << (3 * WHEEL_BITS)) {
127 list = &buckets_[2][(due >> 2 * WHEEL_BITS) & WHEEL_MASK];
129 /* in largest slot */
130 if (diff > LARGEST_SLOT) {
132 due = diff + nextTick_;
134 list = &buckets_[3][(due >> 3 * WHEEL_BITS) & WHEEL_MASK];
136 list->push_back(*callback);
139 void HHWheelTimer::scheduleTimeout(Callback* callback,
140 std::chrono::milliseconds timeout) {
141 // Cancel the callback if it happens to be scheduled already.
142 callback->cancelTimeout();
144 callback->context_ = RequestContext::saveContext();
146 if (count_ == 0 && !processingCallbacksGuard_) {
147 this->AsyncTimeout::scheduleTimeout(interval_.count());
150 callback->setScheduled(this, timeout);
151 scheduleTimeoutImpl(callback, timeout);
155 void HHWheelTimer::scheduleTimeout(Callback* callback) {
156 CHECK(std::chrono::milliseconds(-1) != defaultTimeout_)
157 << "Default timeout was not initialized";
158 scheduleTimeout(callback, defaultTimeout_);
161 bool HHWheelTimer::cascadeTimers(int bucket, int tick) {
163 cbs.swap(buckets_[bucket][tick]);
164 while (!cbs.empty()) {
165 auto* cb = &cbs.front();
167 scheduleTimeoutImpl(cb, cb->getTimeRemaining(now_));
170 // If tick is zero, timeoutExpired will cascade the next bucket.
174 void HHWheelTimer::timeoutExpired() noexcept {
175 // If destroy() is called inside timeoutExpired(), delay actual destruction
176 // until timeoutExpired() returns
177 DestructorGuard dg(this);
178 // If scheduleTimeout is called from a callback in this function, it may
179 // cause inconsistencies in the state of this object. As such, we need
180 // to treat these calls slightly differently.
181 processingCallbacksGuard_ = true;
182 auto reEntryGuard = folly::makeGuard([&] {
183 processingCallbacksGuard_ = false;
186 // timeoutExpired() can only be invoked directly from the event base loop.
187 // It should never be invoked recursively.
189 milliseconds catchup = now_ + interval_;
190 // If catchup is enabled, we may have missed multiple intervals, use
191 // currentTime() to check exactly.
192 if (++expirationsSinceCatchup_ >= catchupEveryN_) {
193 catchup = std::chrono::duration_cast<milliseconds>(
194 std::chrono::steady_clock::now().time_since_epoch());
195 expirationsSinceCatchup_ = 0;
197 while (now_ < catchup) {
200 int idx = nextTick_ & WHEEL_MASK;
203 if (cascadeTimers(1, (nextTick_ >> WHEEL_BITS) & WHEEL_MASK) &&
204 cascadeTimers(2, (nextTick_ >> (2 * WHEEL_BITS)) & WHEEL_MASK)) {
205 cascadeTimers(3, (nextTick_ >> (3 * WHEEL_BITS)) & WHEEL_MASK);
210 CallbackList* cbs = &buckets_[0][idx];
211 while (!cbs->empty()) {
212 auto* cb = &cbs->front();
215 cb->wheel_ = nullptr;
216 cb->expiration_ = milliseconds(0);
217 RequestContextScopeGuard rctx(cb->context_);
218 cb->timeoutExpired();
222 this->AsyncTimeout::scheduleTimeout(interval_.count());
226 size_t HHWheelTimer::cancelAll() {
230 const uint64_t numElements = WHEEL_BUCKETS * WHEEL_SIZE;
231 auto maxBuckets = std::min(numElements, count_);
232 auto buckets = folly::make_unique<CallbackList[]>(maxBuckets);
233 size_t countBuckets = 0;
234 for (auto& tick : buckets_) {
235 for (auto& bucket : tick) {
236 if (bucket.empty()) {
239 for (auto& cb : bucket) {
242 std::swap(bucket, buckets[countBuckets++]);
243 if (count >= count_) {
249 for (size_t i = 0; i < countBuckets; ++i) {
250 auto& bucket = buckets[i];
251 while (!bucket.empty()) {
252 auto& cb = bucket.front();
254 cb.callbackCanceled();