From 2d80d4e0d0ea2f473639de5adfd24d22577a3a33 Mon Sep 17 00:00:00 2001
From: Maged Michael
Date: Wed, 10 Jan 2018 12:43:40 -0800
Subject: [PATCH] UnboundedQueue: Use hazptr_obj_batch

Summary: Manage retirement of removed segments using hazptr_obj_batch
in order to reduce the chance of fragmenting related segments across
the thread-local retired-object lists of many threads, which could
lead to unnecessarily high memory usage.

Reviewed By: djwatson

Differential Revision: D6686697

fbshipit-source-id: 0d786c0f9e0bac2c44183ed3da21619e1feb3d52
---
 folly/concurrency/UnboundedQueue.h            | 28 ++++++++++++++++++-
 .../test/DynamicBoundedQueueTest.cpp          |  2 +-
 2 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/folly/concurrency/UnboundedQueue.h b/folly/concurrency/UnboundedQueue.h
index 22e94e07..1947cdae 100644
--- a/folly/concurrency/UnboundedQueue.h
+++ b/folly/concurrency/UnboundedQueue.h
@@ -219,6 +219,7 @@ class UnboundedQueue {
   struct Consumer {
     Atom<Segment*> head;
     Atom<Ticket> ticket;
+    folly::hazptr::hazptr_obj_batch batch;
   };
   struct Producer {
     Atom<Segment*> tail;
@@ -524,8 +525,22 @@
       // segment may incorrectly set head back.
       asm_volatile_pause();
     }
+    /* ***IMPORTANT*** prepReclaimSegment() must be called after
+     * confirming that head() is up-to-date and before calling
+     * setHead() to be thread-safe. */
+    /* ***IMPORTANT*** Segment s cannot be retired before the call to
+     * setHead(next). This is why prep_retire_refcounted(), which is
+     * called by prepReclaimSegment(), does not retire objects; it
+     * merely adds the object to the batch and returns a private batch
+     * structure, a list of objects that can be retired later if there
+     * are enough objects to amortize the cost of updating the domain
+     * structure. */
+    auto res = prepReclaimSegment(s);
     setHead(next);
-    reclaimSegment(s);
+    /* Now it is safe to retire s. */
+    /* ***IMPORTANT*** The destructor of res automatically calls
+     * retire_all(), which retires to the domain any objects moved to
+     * res from the batch in the call to prepReclaimSegment(). */
   }
 
   /** reclaimSegment */
@@ -537,6 +552,17 @@
     }
   }
 
+  /** prepReclaimSegment */
+  folly::hazptr::hazptr_obj_batch prepReclaimSegment(Segment* s) noexcept {
+    if (SPSC) {
+      delete s;
+      /* Return an empty result; nothing more to do for this segment. */
+      return folly::hazptr::hazptr_obj_batch();
+    } else {
+      return c_.batch.prep_retire_refcounted(s);
+    }
+  }
+
   FOLLY_ALWAYS_INLINE size_t index(Ticket t) const noexcept {
     return (t * Stride) & (SegmentSize - 1);
   }

diff --git a/folly/concurrency/test/DynamicBoundedQueueTest.cpp b/folly/concurrency/test/DynamicBoundedQueueTest.cpp
index bce25ec0..290df3da 100644
--- a/folly/concurrency/test/DynamicBoundedQueueTest.cpp
+++ b/folly/concurrency/test/DynamicBoundedQueueTest.cpp
@@ -100,7 +100,7 @@ TEST(DynamicBoundedQueue, size) {
   }
   {
     folly::DynamicBoundedQueue q(10);
-    ASSERT_EQ(sizeof(q), 80);
+    ASSERT_EQ(sizeof(q), 80 + sizeof(folly::hazptr::hazptr_obj_batch));
   }
 }
 
-- 
2.34.1
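
Note on the pattern: the prepare/publish/retire sequence in advanceHead() can be
sketched outside folly. Below is a minimal, self-contained C++ model of batched
retirement. It is a sketch under assumed names (RetiredNode, ObjBatch,
PrivateBatch, prep_retire, kThreshold are all hypothetical, not folly's hazptr
API), and reclamation is modeled as plain delete where real hazard-pointer code
would first verify that no thread still protects each node.

#include <cstddef>
#include <utility>

// Minimal model of a retirable object. All names here are hypothetical.
struct RetiredNode {
  RetiredNode* next{nullptr};
  virtual ~RetiredNode() = default;
};

// PrivateBatch: a detached list of removed nodes. Its destructor is the
// analogue of retire_all(): it hands every node to reclamation, modeled
// here as plain delete.
class PrivateBatch {
 public:
  PrivateBatch() = default;
  explicit PrivateBatch(RetiredNode* head) : head_(head) {}
  PrivateBatch(PrivateBatch&& other) noexcept
      : head_(std::exchange(other.head_, nullptr)) {}
  PrivateBatch& operator=(PrivateBatch&& other) noexcept {
    retireAll();
    head_ = std::exchange(other.head_, nullptr);
    return *this;
  }
  ~PrivateBatch() { retireAll(); }

 private:
  void retireAll() noexcept {
    while (head_ != nullptr) {
      // Unlink the head node and free it.
      delete std::exchange(head_, head_->next);
    }
  }
  RetiredNode* head_{nullptr};
};

// ObjBatch: accumulates removed objects so that related objects are
// retired together from one place instead of being scattered across
// many threads' thread-local retired lists.
class ObjBatch {
 public:
  // Add n to the batch WITHOUT retiring it. Once enough nodes have
  // accumulated to amortize the cost of pushing to the reclamation
  // domain, detach them as a PrivateBatch. The caller destroys that
  // PrivateBatch only after publication (the setHead(next) step),
  // which is the point where retirement actually becomes safe.
  PrivateBatch prep_retire(RetiredNode* n) noexcept {
    n->next = std::exchange(head_, n);
    if (++count_ < kThreshold) {
      return PrivateBatch{}; // keep accumulating; nothing to retire yet
    }
    count_ = 0;
    return PrivateBatch(std::exchange(head_, nullptr));
  }

 private:
  static constexpr std::size_t kThreshold = 8;
  RetiredNode* head_{nullptr};
  std::size_t count_{0};
};

// Usage in the shape of advanceHead():
//   auto res = batch.prep_retire(s); // s queued; not yet retired
//   setHead(next);                   // s no longer reachable via head
//   // res goes out of scope here; its destructor retires the batch

In the patch's terms, prep_retire(s) plays the role of prepReclaimSegment(s)
and runs before setHead(next); the returned PrivateBatch is destroyed only
afterwards, so nodes reach reclamation only once no consumer can reach s
through head.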