2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include <folly/ThreadLocal.h>
19 #include <folly/experimental/AsymmetricMemoryBarrier.h>
// NOTE(review): excerpt is elided — the constructor signature line
// (presumably `TLRefCount() :`) is not visible in this chunk.
// localCount_: factory lambda lazily creates one LocalRefCount per thread,
// bound to this TLRefCount instance.
28 : localCount_([&]() { return new LocalRefCount(*this); }),
// collectGuard_: a shared_ptr used purely as a liveness token — the no-op
// deleter means it owns nothing; its expiry (observed via weak_ptr in
// useGlobal()) signals that all per-thread counts have been collected.
29 collectGuard_(this, [](void*) {}) {}
// Destructor: only sanity checks. The object must already have dropped to a
// zero count and completed the transition to global mode (see useGlobal())
// before being destroyed.
31 ~TLRefCount() noexcept {
32 assert(globalCount_.load() == 0);
33 assert(state_.load() == State::GLOBAL);
// NOTE(review): the destructor's closing brace is elided in this excerpt.
36 // This can't increment from 0.
37 Int operator++() noexcept {
38 auto& localCount = *localCount_;
// NOTE(review): the LOCAL-state fast path (original lines 39-43) is elided
// here — presumably it delegates the +1 to localCount.update().
// GLOBAL_TRANSITION means useGlobal() is mid-flight while holding
// globalMutex_; acquiring the mutex here simply waits the transition out.
44 if (state_.load() == State::GLOBAL_TRANSITION) {
45 std::lock_guard<std::mutex> lg(globalMutex_);
// Once past the transition we must be in GLOBAL mode.
48 assert(state_.load() == State::GLOBAL);
50 auto value = globalCount_.load();
// Global path: CAS-increment loop on the shared atomic counter.
// NOTE(review): the `do {` opener, the increment-from-zero guard, and the
// return statement (original lines 51-54, 56-59) are elided in this excerpt.
55 } while (!globalCount_.compare_exchange_weak(value, value+1));
// Decrement; in global mode returns the count value after the decrement.
60 Int operator--() noexcept {
61 auto& localCount = *localCount_;
// NOTE(review): the LOCAL-state fast path (original lines 62-66) is elided
// here — presumably it delegates the -1 to localCount.update().
// Wait out an in-flight local->global transition; useGlobal() holds
// globalMutex_ for the whole transition.
67 if (state_.load() == State::GLOBAL_TRANSITION) {
68 std::lock_guard<std::mutex> lg(globalMutex_);
71 assert(state_.load() == State::GLOBAL);
// Atomic post-decrement; `- 1` yields the post-decrement value.
73 return globalCount_-- - 1;
// Read the current count. Only meaningful in GLOBAL mode — while counts are
// still distributed across threads there is no single authoritative value.
76 Int operator*() const {
77 if (state_ != State::GLOBAL) {
// NOTE(review): the non-GLOBAL branch body (original lines 78-79) is elided
// in this excerpt; presumably it returns a sentinel, since the exact count
// cannot be computed while per-thread counters are live.
80 return globalCount_.load();
// Migrate from per-thread counting to a single shared atomic counter.
// Holds globalMutex_ for the entire transition, which is what makes
// operator++/-- block while state_ is GLOBAL_TRANSITION.
83 void useGlobal() noexcept {
84 std::lock_guard<std::mutex> lg(globalMutex_);
// Publish the transition state to all threads.
86 state_ = State::GLOBAL_TRANSITION;
// Heavy side of the asymmetric barrier: pairs with the
// asymmetricLightBarrier() in LocalRefCount::update() so every thread's
// subsequent state_ check observes the transition.
88 asymmetricHeavyBarrier();
// Keep a weak observer of the guard before dropping our own reference.
90 std::weak_ptr<void> collectGuardWeak = collectGuard_;
92 // Make sure we can't create new LocalRefCounts
93 collectGuard_.reset();
// Loop until every per-thread LocalRefCount has released its copy of the
// guard, i.e. every local count has been folded into globalCount_.
95 while (!collectGuardWeak.expired()) {
96 auto accessor = localCount_.accessAllThreads();
97 for (auto& count : accessor) {
// NOTE(review): the per-thread collect call and loop closers (original
// lines 98-101) are elided in this excerpt.
102 state_ = State::GLOBAL;
// Convenience alias for the atomic counter type used by this class.
106 using AtomicInt = std::atomic<Int>;
// Per-thread shard of the reference count. Each thread accumulates deltas in
// its own counter (single-writer, so relaxed load/store suffices) until the
// owning TLRefCount collects them into the shared global counter.
114 class LocalRefCount {
116 explicit LocalRefCount(TLRefCount& refCount) :
117 refCount_(refCount) {
// Registration is serialized against useGlobal() by globalMutex_: we either
// obtain a live collectGuard_ copy here, or observe it already reset.
118 std::lock_guard<std::mutex> lg(refCount.globalMutex_);
120 collectGuard_ = refCount.collectGuard_;
// NOTE(review): the enclosing method's signature (presumably the collect
// routine invoked from useGlobal(), original lines 121-127) is elided in
// this excerpt. The body below folds this thread's count into the global
// counter exactly once, guarded by collectMutex_.
128 std::lock_guard<std::mutex> lg(collectMutex_);
// A null guard means this shard was already collected — elided lines
// presumably return early here.
130 if (!collectGuard_) {
// Snapshot the local count, push it into the global counter, and remember
// the collected value so a racing update() can reconcile against it.
134 collectCount_ = count_.load();
135 refCount_.globalCount_.fetch_add(collectCount_);
// Releasing our guard copy lets useGlobal()'s weak_ptr expire once every
// thread has been collected.
136 collectGuard_.reset();
// Apply a delta to this thread's counter. Returns bool — presumably false
// once the count is no longer handled locally (TODO confirm; the LOCAL
// fast-path return and slow-path bodies are elided).
148 bool update(Int delta) {
// Early bail to the slow path if a transition already started.
149 if (UNLIKELY(refCount_.state_.load() != State::LOCAL)) {
153 // This is equivalent to atomic fetch_add. We know that this operation
154 // is always performed from a single thread. asymmetricLightBarrier()
155 // makes things faster than atomic fetch_add on platforms with native
157 auto count = count_.load(std::memory_order_relaxed) + delta;
158 count_.store(count, std::memory_order_relaxed);
// Light side of the asymmetric barrier — pairs with the
// asymmetricHeavyBarrier() in TLRefCount::useGlobal().
160 asymmetricLightBarrier();
// Re-check after the barrier: if a transition raced with the store above,
// fall back to the locked path so this delta is not lost by collection.
162 if (UNLIKELY(refCount_.state_.load() != State::LOCAL)) {
163 std::lock_guard<std::mutex> lg(collectMutex_);
// If the collected snapshot differs from our current count, the elided code
// (original lines 169-176) presumably reconciles the difference against the
// global counter — TODO confirm against the full source.
168 if (collectCount_ != count) {
// --- data members ---
177 TLRefCount& refCount_;
// Guards collection vs. the update() slow path.
179 std::mutex collectMutex_;
// Value of count_ captured at the moment this shard was collected.
180 Int collectCount_{0};
// NOTE(review): the count_ member declaration (original line ~178) is
// elided in this excerpt.
// Copy of TLRefCount::collectGuard_; reset exactly once on collection.
181 std::shared_ptr<void> collectGuard_;
// Current mode: starts LOCAL (per-thread counting); useGlobal() moves it
// through GLOBAL_TRANSITION to GLOBAL.
184 std::atomic<State> state_{State::LOCAL};
// One LocalRefCount shard per thread (second template argument is
// presumably the folly::ThreadLocal tag type — confirm against folly docs).
185 folly::ThreadLocal<LocalRefCount, TLRefCount> localCount_;
// Shared counter, initialized to 1; authoritative once state_ is GLOBAL.
186 std::atomic<int64_t> globalCount_{1};
// Serializes useGlobal() with LocalRefCount registration, and makes
// operator++/-- wait out an in-flight transition.
187 std::mutex globalMutex_;
// Liveness token distributed to every LocalRefCount; see useGlobal().
188 std::shared_ptr<void> collectGuard_;