struct hazptr_tc {
hazptr_tc_entry entry_[HAZPTR_TC_SIZE];
size_t count_;
-#ifndef NDEBUG
- bool local_;
-#endif
+ bool local_; // debug only: true while a hazptr_local owns this cache
public:
hazptr_tc_entry& operator[](size_t i);
domain.objRetire(this);
}
+/**
+ * hazptr_obj_base_refcounted
+ */
+
+template <typename T, typename D>
+inline void hazptr_obj_base_refcounted<T, D>::retire(
+ hazptr_domain& domain,
+ D deleter) {
+ DEBUG_PRINT(this << " " << &domain);
+ deleter_ = std::move(deleter);
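+  /* The reclaim function runs once no hazard pointer protects the object;
+   * it deletes the object only if the reference count has already
+   * dropped to zero. */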
+ reclaim_ = [](hazptr_obj* p) {
+ auto hrobp = static_cast<hazptr_obj_base_refcounted*>(p);
+ if (hrobp->release_ref()) {
+ auto obj = static_cast<T*>(hrobp);
+ hrobp->deleter_(obj);
+ }
+ };
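+  /* Prefer the thread's private retired list when retiring to the default
+   * domain; otherwise hand the object to the given domain. */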
+ if (HAZPTR_PRIV &&
+ (HAZPTR_ONE_DOMAIN || (&domain == &default_hazptr_domain()))) {
+ if (hazptr_priv_try_retire(this)) {
+ return;
+ }
+ }
+ domain.objRetire(this);
+}
+
+template <typename T, typename D>
+inline void hazptr_obj_base_refcounted<T, D>::acquire_ref() {
+ DEBUG_PRINT(this);
+ auto oldval = refcount_.fetch_add(1);
+ DCHECK_NE(oldval, UINT32_MAX); // unsigned count: catch the debug poison value
+}
+
+template <typename T, typename D>
+inline void hazptr_obj_base_refcounted<T, D>::acquire_ref_safe() {
+ DEBUG_PRINT(this);
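+  /* Caller guarantees exclusive access: a plain load/store pair replaces
+   * the atomic read-modify-write. */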
+ auto oldval = refcount_.load(std::memory_order_acquire);
+ DCHECK_NE(oldval, UINT32_MAX);
+ refcount_.store(oldval + 1, std::memory_order_release);
+}
+
+template <typename T, typename D>
+inline bool hazptr_obj_base_refcounted<T, D>::release_ref() {
+ DEBUG_PRINT(this);
+ auto oldval = refcount_.load(std::memory_order_acquire);
+ if (oldval > 0) {
+ oldval = refcount_.fetch_sub(1);
+ } else {
+ if (kIsDebug) {
+   refcount_.store(UINT32_MAX); // poison the unsigned count
+ }
+ }
+ DEBUG_PRINT(this << " " << oldval);
+ DCHECK_NE(oldval, UINT32_MAX); // catches a double release in debug builds
+ return oldval == 0;
+}
+
/**
* hazptr_rec
*/
auto& tc = *ptc;
auto count = tc.count();
if (M <= count) {
-#ifndef NDEBUG
- DCHECK(!tc.local_);
- tc.local_ = true;
-#endif
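+    /* Use cached hazptr records directly; in debug builds, flag the cache
+     * so overlapping hazptr_local lifetimes on this thread are caught. */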
+ if (kIsDebug) {
+ DCHECK(!tc.local_);
+ tc.local_ = true;
+ }
// Fast path
for (size_t i = 0; i < M; ++i) {
auto hprec = tc[i].hprec_;
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_local<M>::~hazptr_local() {
if (LIKELY(!need_destruct_)) {
-#ifndef NDEBUG
- auto ptc = hazptr_tc_tls();
- DCHECK(ptc != nullptr);
- auto& tc = *ptc;
- DCHECK(tc.local_);
- tc.local_ = false;
-#endif
+ if (kIsDebug) {
+ auto ptc = hazptr_tc_tls();
+ DCHECK(ptc != nullptr);
+ auto& tc = *ptc;
+ DCHECK(tc.local_);
+ tc.local_ = false;
+ }
return;
}
// Slow path
while (retired) {
for (auto p = retired; p; p = next) {
next = p->next_;
+ DEBUG_PRINT(this << " " << p << " " << p->reclaim_);
(*(p->reclaim_))(p);
}
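+    /* Reclaim functions may retire more objects; pick them up and repeat. */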
retired = retired_.exchange(nullptr);
auto& tc = tls_tc_data_;
DEBUG_PRINT(&tc);
tc.count_ = 0;
-#ifndef NDEBUG
- tc.local_ = false;
-#endif
+ if (kIsDebug) {
+ tc.local_ = false;
+ }
}
inline void hazptr_tc_shutdown() {
template <typename T, typename Deleter>
class hazptr_obj_base;
+/** hazptr_obj_base_refcounted:
+ * Base template for reference-counted objects protected by hazard pointers.
+ */
+template <typename T, typename Deleter>
+class hazptr_obj_base_refcounted;
+
/** hazptr_local: Optimized template for bulk construction and destruction of
* hazard pointers */
template <size_t M>
friend class hazptr_holder;
template <typename, typename>
friend class hazptr_obj_base;
+ template <typename, typename>
+ friend class hazptr_obj_base_refcounted;
friend struct hazptr_priv;
memory_resource* mr_;
friend class hazptr_domain;
template <typename, typename>
friend class hazptr_obj_base;
+ template <typename, typename>
+ friend class hazptr_obj_base_refcounted;
friend struct hazptr_priv;
void (*reclaim_)(hazptr_obj*);
hazptr_obj* next_;
+
const void* getObjPtr() const;
};
D deleter_;
};
+/** Definition of hazptr_obj_base_refcounted */
+template <typename T, typename D = std::default_delete<T>>
+class hazptr_obj_base_refcounted : public hazptr_obj {
+ public:
+ /* Retire a removed object and pass the responsibility for
+ * reclaiming it to the hazptr library */
+  void retire(hazptr_domain& domain = default_hazptr_domain(), D deleter = {});
+
+  /* acquire_ref() increments the reference count.
+ *
+ * acquire_ref_safe() is the same as acquire_ref() except that in
+ * addition the caller guarantees that the call is made in a
+ * thread-safe context, e.g., the object is not yet shared. This is
+ * just an optimization to save an atomic operation.
+ *
+ * release_ref() decrements the reference count and returns true if
+   * the object is safe to reclaim. (A usage sketch follows this class.)
+ */
+ void acquire_ref();
+ void acquire_ref_safe();
+ bool release_ref();
+
+ private:
+ std::atomic<uint32_t> refcount_{0};
+ D deleter_;
+};
+
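+/** Usage sketch (illustrative only; Node, head, and the reader protocol
+ *  are hypothetical, not part of the library):
+ *
+ *    struct Node : hazptr_obj_base_refcounted<Node> {};
+ *    std::atomic<Node*> head{nullptr};
+ *
+ *    Node* node = new Node;
+ *    node->acquire_ref_safe(); // count the link; node is not shared yet
+ *    head.store(node);
+ *    // ... readers protect node via hazptr_holder::get_protected(head) ...
+ *    head.store(nullptr);      // unlink; no new readers can find node
+ *    node->release_ref();      // drop the link's reference (1 -> 0)
+ *    node->retire();           // deleted once no hazard pointer protects it
+ */
+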
/** hazptr_holder: Class for automatic acquisition and release of
* hazard pointers, and interface for hazard pointer operations. */
class hazptr_holder {
TEST_F(HazptrTest, Test1) {
DEBUG_PRINT("");
Node1* node0 = (Node1*)malloc(sizeof(Node1));
- DEBUG_PRINT("=== new node0 " << node0 << " " << sizeof(*node0));
+ node0 = new (node0) Node1;
+ DEBUG_PRINT("=== malloc node0 " << node0 << " " << sizeof(*node0));
Node1* node1 = (Node1*)malloc(sizeof(Node1));
+ node1 = new (node1) Node1;
DEBUG_PRINT("=== malloc node1 " << node1 << " " << sizeof(*node1));
Node1* node2 = (Node1*)malloc(sizeof(Node1));
+ node2 = new (node2) Node1;
DEBUG_PRINT("=== malloc node2 " << node2 << " " << sizeof(*node2));
Node1* node3 = (Node1*)malloc(sizeof(Node1));
+ node3 = new (node3) Node1;
DEBUG_PRINT("=== malloc node3 " << node3 << " " << sizeof(*node3));
DEBUG_PRINT("");
Node1* n2 = shared2.load();
Node1* n3 = shared3.load();
- if (hptr0.try_protect(n0, shared0)) {}
- if (hptr1.try_protect(n1, shared1)) {}
+ CHECK(hptr0.try_protect(n0, shared0));
+ CHECK(hptr1.try_protect(n1, shared1));
hptr1.reset();
hptr1.reset(nullptr);
hptr1.reset(n2);
- if (hptr2.try_protect(n3, shared3)) {}
+ CHECK(hptr2.try_protect(n3, shared3));
swap(hptr1, hptr2);
hptr3.reset();
Node2* node0 = new Node2;
DEBUG_PRINT("=== new node0 " << node0 << " " << sizeof(*node0));
Node2* node1 = (Node2*)malloc(sizeof(Node2));
+ node1 = new (node1) Node2;
DEBUG_PRINT("=== malloc node1 " << node1 << " " << sizeof(*node1));
Node2* node2 = (Node2*)malloc(sizeof(Node2));
+ node2 = new (node2) Node2;
DEBUG_PRINT("=== malloc node2 " << node2 << " " << sizeof(*node2));
Node2* node3 = (Node2*)malloc(sizeof(Node2));
+ node3 = new (node3) Node2;
DEBUG_PRINT("=== malloc node3 " << node3 << " " << sizeof(*node3));
DEBUG_PRINT("");
Node2* n2 = shared2.load();
Node2* n3 = shared3.load();
- if (hptr0.try_protect(n0, shared0)) {}
- if (hptr1.try_protect(n1, shared1)) {}
+ CHECK(hptr0.try_protect(n0, shared0));
+ CHECK(hptr1.try_protect(n1, shared1));
hptr1.reset();
hptr1.reset(n2);
- if (hptr2.try_protect(n3, shared3)) {}
+ CHECK(hptr2.try_protect(n3, shared3));
swap(hptr1, hptr2);
hptr3.reset();
for (int j = tid; j < FLAGS_num_ops; j += FLAGS_num_threads) {
s.push(j);
T res;
- while (!s.pop(res)) {}
+ while (!s.pop(res)) {
+ /* keep trying */
+ }
}
});
}
hazptr_local<HAZPTR_TC_SIZE + 1> h;
}
}
+
+/* Test ref counting */
+
+std::atomic<int> constructed;
+std::atomic<int> destroyed;
+
+struct Foo : hazptr_obj_base_refcounted<Foo> {
+ int val_;
+ bool marked_;
+ Foo* next_;
+ Foo(int v, Foo* n) : val_(v), marked_(false), next_(n) {
+ DEBUG_PRINT("");
+ ++constructed;
+ }
+ ~Foo() {
+ DEBUG_PRINT("");
+ ++destroyed;
+ if (marked_) {
+ return;
+ }
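+    /* Cascade destruction down the list without recursion: release the
+     * successor's count and delete it when no references remain; marked_
+     * keeps the deleted node from re-walking the chain. */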
+ auto next = next_;
+ while (next) {
+ if (!next->release_ref()) {
+ return;
+ }
+ auto p = next;
+ next = p->next_;
+ p->marked_ = true;
+ delete p;
+ }
+ }
+};
+
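+/* Plain retired object; retiring a batch of these pushes the domain past
+ * its reclamation threshold so retired Foo objects get reclaimed. */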
+struct Dummy : hazptr_obj_base<Dummy> {};
+
+TEST_F(HazptrTest, basic_refcount) {
+ constructed.store(0);
+ destroyed.store(0);
+
+ Foo* p = nullptr;
+ int num = 20;
+ for (int i = 0; i < num; ++i) {
+ p = new Foo(i, p);
+ if (i & 1) {
+ p->acquire_ref_safe();
+ } else {
+ p->acquire_ref();
+ }
+ }
+ hazptr_holder hptr;
+ hptr.reset(p);
+ for (auto q = p->next_; q; q = q->next_) {
+ q->retire();
+ }
+ int v = num;
+ for (auto q = p; q; q = q->next_) {
+ CHECK_GT(v, 0);
+ --v;
+ CHECK_EQ(q->val_, v);
+ }
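+  /* Drop the head's counted reference; release_ref() returns false because
+   * the count was nonzero before the decrement, so p is not reclaimable yet. */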
+ CHECK(!p->release_ref());
+ CHECK_EQ(constructed.load(), num);
+ CHECK_EQ(destroyed.load(), 0);
+ p->retire();
+ CHECK_EQ(constructed.load(), num);
+ CHECK_EQ(destroyed.load(), 0);
+ hptr.reset();
+
+ /* retire enough objects to guarantee reclamation of Foo objects */
+ for (int i = 0; i < 100; ++i) {
+ auto a = new Dummy;
+ a->retire();
+ }
+
+ CHECK_EQ(constructed.load(), num);
+ CHECK_EQ(destroyed.load(), num);
+}
+
+TEST_F(HazptrTest, mt_refcount) {
+ constructed.store(0);
+ destroyed.store(0);
+
+ std::atomic<bool> ready(false);
+ std::atomic<int> setHazptrs(0);
+  std::atomic<Foo*> head{nullptr};
+
+ int num = 20;
+ int nthr = 10;
+ std::vector<std::thread> thr(nthr);
+ for (int i = 0; i < nthr; ++i) {
+ thr[i] = std::thread([&] {
+ while (!ready.load()) {
+ /* spin */
+ }
+ hazptr_holder hptr;
+ auto p = hptr.get_protected(head);
+ ++setHazptrs;
+ /* Concurrent with removal */
+ int v = num;
+ for (auto q = p; q; q = q->next_) {
+ CHECK_GT(v, 0);
+ --v;
+ CHECK_EQ(q->val_, v);
+ }
+ CHECK_EQ(v, 0);
+ });
+ }
+
+ Foo* p = nullptr;
+ for (int i = 0; i < num; ++i) {
+ p = new Foo(i, p);
+ p->acquire_ref_safe();
+ }
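+  /* Each node carries one counted reference, matching the single link
+   * that points to it. */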
+ head.store(p);
+
+ ready.store(true);
+
+ while (setHazptrs.load() < nthr) {
+ /* spin */
+ }
+
+ /* this is concurrent with traversal by reader */
+ head.store(nullptr);
+ for (auto q = p; q; q = q->next_) {
+ q->retire();
+ }
+ DEBUG_PRINT("Foo should not be destroyed");
+ CHECK_EQ(constructed.load(), num);
+ CHECK_EQ(destroyed.load(), 0);
+
+ DEBUG_PRINT("Foo may be destroyed after releasing the last reference");
+ if (p->release_ref()) {
+ delete p;
+ }
+
+ /* retire enough objects to guarantee reclamation of Foo objects */
+ for (int i = 0; i < 100; ++i) {
+ auto a = new Dummy;
+ a->retire();
+ }
+
+ for (int i = 0; i < nthr; ++i) {
+ thr[i].join();
+ }
+
+ CHECK_EQ(constructed.load(), num);
+ CHECK_EQ(destroyed.load(), num);
+}