*/
#pragma once
+#include <folly/Baton.h>
#include <folly/ThreadLocal.h>
namespace folly {
// NOTE(review): this span is a mangled unified-diff excerpt ('+' marks added
// lines) with hunk context elided — it is not compilable as-is. Comments
// describe only what the visible lines show.
//
// Constructor: localCount_ lazily builds one LocalRefCount per thread, bound
// to this owner.
TLRefCount() :
localCount_([&]() {
return new LocalRefCount(*this);
+ }),
// Added: collectGuard_ is a shared_ptr whose custom deleter posts
// collectBaton_; the baton therefore fires only once *every* copy of the
// guard (presumably one per live LocalRefCount, plus this original) has been
// released — TODO confirm against the full file.
+ collectGuard_(&collectBaton_, [](void* p) {
+ auto baton = reinterpret_cast<folly::Baton<>*>(p);
+ baton->post();
}) {
}
// Tail of useGlobal() (its head is elided by the diff): after asking each
// thread-local count to collect(), drop our own guard reference and block on
// the baton until all LocalRefCounts have dropped theirs, then flip to
// GLOBAL counting mode.
count.collect();
}
+ collectGuard_.reset();
+ collectBaton_.wait();
+
state_ = State::GLOBAL;
}
// NOTE(review): mangled diff excerpt — '-' lines are the removed old code,
// '+' lines the replacement; ~LocalRefCount and operator++ are cut off where
// the diff elided context.
//
// Per-thread counter. The change replaces the plain `bool collectDone_` flag
// with a copy of the owner's collectGuard_: an empty guard now means "this
// thread's count has already been folded into globalCount_", and releasing
// the guard is what lets useGlobal()'s baton eventually fire.
class LocalRefCount {
public:
// Added: take a copy of the owner's guard under globalMutex_, presumably so
// this cannot race with useGlobal() resetting collectGuard_ — TODO confirm.
explicit LocalRefCount(TLRefCount& refCount) :
- refCount_(refCount) {}
+ refCount_(refCount) {
+ std::lock_guard<std::mutex> lg(refCount.globalMutex_);
+
+ collectGuard_ = refCount.collectGuard_;
+ }
// Destructor flushes this thread's count (body truncated in this excerpt).
~LocalRefCount() {
collect();
// Fold count_ into the owner's globalCount_ exactly once per thread;
// resetting collectGuard_ both marks collection done and releases this
// thread's reference on the shared guard.
void collect() {
std::lock_guard<std::mutex> lg(collectMutex_);
- if (collectDone_) {
+ if (!collectGuard_) {
return;
}
collectCount_ = count_;
refCount_.globalCount_ += collectCount_;
- collectDone_ = true;
+ collectGuard_.reset();
}
// Slow path of ++: once the owner is no longer LOCAL, a still-live
// collectGuard_ is the new spelling of the old `!collectDone_` check.
// Remainder of the body is truncated by the diff.
bool operator++() {
if (UNLIKELY(refCount_.state_.load() != State::LOCAL)) {
std::lock_guard<std::mutex> lg(collectMutex_);
- if (collectGuard_) {
+ if (collectGuard_) {
return true;
}
if (collectCount_ != count) {
std::mutex collectMutex_;
Int collectCount_{0};
// Replaces `bool collectDone_{false}`: empty == already collected.
- bool collectDone_{false};
+ std::shared_ptr<void> collectGuard_;
};
// Trailing data members of TLRefCount (the class head is elided by the diff).
std::atomic<State> state_{State::LOCAL};
folly::ThreadLocal<LocalRefCount, TLRefCount> localCount_;
// Starts at 1 — presumably the initially-held reference; verify in full file.
std::atomic<int64_t> globalCount_{1};
std::mutex globalMutex_;
// Added: baton posted by collectGuard_'s deleter once every outstanding copy
// of the guard (this member's plus each LocalRefCount's) has been released.
+ folly::Baton<> collectBaton_;
+ std::shared_ptr<void> collectGuard_;
};
}
// Test-body fragment (the enclosing TEST and its head/tail are elided).
// The change adds one baton per worker thread: each worker posts its baton
// after its first successful increment, and the main thread waits on all of
// them before calling useGlobal() — presumably so useGlobal() cannot race a
// thread's very first ++ on its LocalRefCount; TODO confirm intent.
folly::Baton<> b;
std::vector<std::thread> ts;
+ folly::Baton<> threadBatons[numThreads];
for (size_t t = 0; t < numThreads; ++t) {
- ts.emplace_back([&count, &b, &got0, numIters, t]() {
+ ts.emplace_back([&count, &b, &got0, numIters, t, &threadBatons]() {
for (size_t i = 0; i < numIters; ++i) {
auto ret = ++count;
EXPECT_TRUE(ret > 1);
// Signal the main thread after this worker's first increment only.
+ if (i == 0) {
+ threadBatons[t].post();
+ }
}
if (t == 0) {
});
}
// Rendezvous: every worker has incremented at least once past this point.
+ for (size_t t = 0; t < numThreads; ++t) {
+ threadBatons[t].wait();
+ }
+
b.wait();
count.useGlobal();
// Changed: the decrement's return value is no longer asserted > 0 — other
// threads may still be mutating the count here, presumably making the old
// assertion racy; TODO confirm against the commit message.
- EXPECT_TRUE(--count > 0);
+ --count;
for (auto& t: ts) {
t.join();