From: Maged Michael
Date: Fri, 10 Mar 2017 23:34:07 +0000 (-0800)
Subject: Fix data race in IndexedMemPool
X-Git-Tag: v2017.03.13.00~1
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=95310177db919718d72f966da6f2a746f09e63dc;p=folly.git

Fix data race in IndexedMemPool

Summary:
IndexedMemPool uses regular memory for the global and local next links.
There can be concurrent reads and writes of such links. In order to avoid
C++ undefined behavior due to the data races, these links should be
atomic. Relaxed loads and stores are sufficient for correctness.

Depends on D4680286

Reviewed By: nbronson

Differential Revision: D4680317

fbshipit-source-id: 27c0d888e08bd5d862aee4c6dc6ee393033b32e3
---

diff --git a/folly/IndexedMemPool.h b/folly/IndexedMemPool.h
index 1fa43082..06cd44a3 100644
--- a/folly/IndexedMemPool.h
+++ b/folly/IndexedMemPool.h
@@ -230,7 +230,7 @@ struct IndexedMemPool : boost::noncopyable {
 
   /// Returns true iff idx has been alloc()ed and not recycleIndex()ed
   bool isAllocated(uint32_t idx) const {
-    return slot(idx).localNext == uint32_t(-1);
+    return slot(idx).localNext.load(std::memory_order_relaxed) == uint32_t(-1);
   }
 
@@ -239,8 +239,8 @@ struct IndexedMemPool : boost::noncopyable {
 
   struct Slot {
     T elem;
-    uint32_t localNext;
-    uint32_t globalNext;
+    Atom<uint32_t> localNext;
+    Atom<uint32_t> globalNext;
 
     Slot() : localNext{}, globalNext{} {}
   };
@@ -346,7 +346,7 @@ struct IndexedMemPool : boost::noncopyable {
   void globalPush(Slot& s, uint32_t localHead) {
     while (true) {
       TaggedPtr gh = globalHead_.load(std::memory_order_acquire);
-      s.globalNext = gh.idx;
+      s.globalNext.store(gh.idx, std::memory_order_relaxed);
       if (globalHead_.compare_exchange_strong(gh, gh.withIdx(localHead))) {
         // success
         return;
@@ -359,7 +359,7 @@ struct IndexedMemPool : boost::noncopyable {
     Slot& s = slot(idx);
     TaggedPtr h = head.load(std::memory_order_acquire);
     while (true) {
-      s.localNext = h.idx;
+      s.localNext.store(h.idx, std::memory_order_relaxed);
 
       if (h.size() == LocalListLimit) {
         // push will overflow local list, steal it instead
@@ -383,8 +383,11 @@ struct IndexedMemPool : boost::noncopyable {
   uint32_t globalPop() {
     while (true) {
       TaggedPtr gh = globalHead_.load(std::memory_order_acquire);
-      if (gh.idx == 0 || globalHead_.compare_exchange_strong(
-                             gh, gh.withIdx(slot(gh.idx).globalNext))) {
+      if (gh.idx == 0 ||
+          globalHead_.compare_exchange_strong(
+              gh,
+              gh.withIdx(
+                  slot(gh.idx).globalNext.load(std::memory_order_relaxed)))) {
         // global list is empty, or pop was successful
         return gh.idx;
       }
@@ -398,10 +401,10 @@ struct IndexedMemPool : boost::noncopyable {
       if (h.idx != 0) {
         // local list is non-empty, try to pop
         Slot& s = slot(h.idx);
-        if (head.compare_exchange_strong(
-                h, h.withIdx(s.localNext).withSizeDecr())) {
+        auto next = s.localNext.load(std::memory_order_relaxed);
+        if (head.compare_exchange_strong(h, h.withIdx(next).withSizeDecr())) {
           // success
-          s.localNext = uint32_t(-1);
+          s.localNext.store(uint32_t(-1), std::memory_order_relaxed);
           return h.idx;
         }
         continue;
@@ -421,15 +424,16 @@ struct IndexedMemPool : boost::noncopyable {
           T* ptr = &slot(idx).elem;
           new (ptr) T();
         }
-        slot(idx).localNext = uint32_t(-1);
+        slot(idx).localNext.store(uint32_t(-1), std::memory_order_relaxed);
         return idx;
       }
 
       Slot& s = slot(idx);
+      auto next = s.localNext.load(std::memory_order_relaxed);
       if (head.compare_exchange_strong(
-              h, h.withIdx(s.localNext).withSize(LocalListLimit))) {
+              h, h.withIdx(next).withSize(LocalListLimit))) {
         // global list moved to local list, keep head for us
-        s.localNext = uint32_t(-1);
+        s.localNext.store(uint32_t(-1), std::memory_order_relaxed);
         return idx;
       }
       // local bulk push failed, return idx to the global list and try again
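
For readers outside folly, here is a minimal standalone sketch of the pattern
this patch relies on: next links held in std::atomic<uint32_t> (the role
Atom<uint32_t> plays above), accessed only with relaxed loads and stores,
while the acquire load and release CAS on the list head do all the ordering.
The race the commit fixes is that a stalled pop can read a link at the same
moment a new owner rewrites it; an atomic link makes that a defined race
whose stale value the head CAS then rejects. The sketch is not folly code —
MiniIndexPool and its members are hypothetical names — and it omits the tag
folly's TaggedPtr head carries, so unlike IndexedMemPool it is not ABA-safe;
treat it as an illustration of the memory-ordering choice only.

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical Treiber-style index free list (index 0 means "empty").
    struct MiniIndexPool {
      struct Node {
        // The link lives in ordinary memory but is atomic, so a concurrent
        // stale read and owner write are a defined data race, not UB.
        std::atomic<uint32_t> next{0};
      };

      explicit MiniIndexPool(uint32_t cap) : nodes_(cap + 1) {
        // Build the initial free list 1 -> 2 -> ... -> cap -> 0.
        for (uint32_t i = 1; i < cap; ++i) {
          nodes_[i].next.store(i + 1, std::memory_order_relaxed);
        }
        head_.store(1, std::memory_order_release);
      }

      uint32_t pop() {
        uint32_t h = head_.load(std::memory_order_acquire);
        while (h != 0) {
          // Relaxed suffices: the acquire on head_ synchronizes with the
          // release CAS in push() that published this link, and if the value
          // read here is stale the CAS below fails and we retry.
          uint32_t next = nodes_[h].next.load(std::memory_order_relaxed);
          if (head_.compare_exchange_weak(
                  h, next,
                  std::memory_order_acquire, std::memory_order_acquire)) {
            return h;  // caller now exclusively owns index h
          }
        }
        return 0;  // list empty
      }

      void push(uint32_t idx) {
        uint32_t h = head_.load(std::memory_order_relaxed);
        do {
          // We own idx here, so this relaxed store cannot conflict with a
          // reader that legitimately observes the node on the list; the
          // release CAS below is what publishes the link.
          nodes_[idx].next.store(h, std::memory_order_relaxed);
        } while (!head_.compare_exchange_weak(
            h, idx, std::memory_order_release, std::memory_order_relaxed));
      }

      std::vector<Node> nodes_;
      std::atomic<uint32_t> head_{0};
    };

    int main() {
      MiniIndexPool pool(4);
      uint32_t a = pool.pop();
      uint32_t b = pool.pop();
      assert(a != 0 && b != 0 && a != b);
      pool.push(a);
      assert(pool.pop() == a);  // LIFO reuse, like the pool's local lists
    }

The invariant mirrors the one the commit depends on: a link is only written
by the thread that currently owns the slot (before the CAS publishes it or
after the CAS wins it), so the atomics on the links need only make the
accesses race-free — the head's acquire/release provides the ordering.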