#include <array>
#include <memory>
-#include <folly/Enumerate.h>
-#include <folly/detail/CacheLocality.h>
+#include <folly/concurrency/AtomicSharedPtr.h>
+#include <folly/concurrency/CacheLocality.h>
+#include <folly/container/Enumerate.h>
+#include <folly/experimental/hazptr/hazptr.h>
namespace folly {
}
void reset(const std::shared_ptr<T>& p = nullptr) {
- for (auto& slot : slots_) {
- auto holder = std::make_shared<Holder>(p);
- slot = std::shared_ptr<T>(holder, p.get());
+ // Allocate each Holder in a different CoreAllocator stripe to
+ // prevent false sharing. Their control blocks will be adjacent
+ // thanks to allocate_shared().
+ for (auto slot : folly::enumerate(slots_)) {
+ auto alloc = getCoreAllocatorStl<Holder, kNumSlots>(slot.index);
+ auto holder = std::allocate_shared<Holder>(alloc, p);
+ // Aliasing constructor: the slot shares ownership with the
+ // per-stripe Holder (keeping its control block hot-local), but
+ // points directly at p's managed object.
+ *slot = std::shared_ptr<T>(holder, p.get());
}
}
std::shared_ptr<T> get() const {
- return slots_[detail::AccessSpreader<>::current(kNumSlots)];
+ // Copy from the slot assigned to the current core/stripe, so the
+ // refcount bump lands on a cache line local to this core.
+ return slots_[AccessSpreader<>::current(kNumSlots)];
}
private:
+ // Holder only exists to give each stripe its own control block;
+ // slots_ entries alias it via the aliasing constructor in reset().
+ using Holder = std::shared_ptr<T>;
+
template <class, size_t>
friend class CoreCachedWeakPtr;
- // Space the Holders by a cache line, so their control blocks (which
- // are adjacent to the slots thanks to make_shared()) will also be
- // spaced.
- struct FOLLY_ALIGN_TO_AVOID_FALSE_SHARING Holder {
- explicit Holder(std::shared_ptr<T> p) : ptr(std::move(p)) {}
- std::shared_ptr<T> ptr;
- };
-
std::array<std::shared_ptr<T>, kNumSlots> slots_;
};
}
std::weak_ptr<T> get() const {
- return slots_[detail::AccessSpreader<>::current(kNumSlots)];
+ // Same striping scheme as CoreCachedSharedPtr::get(): pick the
+ // weak_ptr for the current core to avoid cross-core refcounting.
+ return slots_[AccessSpreader<>::current(kNumSlots)];
}
private:
std::array<std::weak_ptr<T>, kNumSlots> slots_;
};
+/**
+ * This class creates core-local caches for a given shared_ptr, to
+ * mitigate contention when acquiring/releasing it.
+ *
+ * All methods are threadsafe. Hazard pointers are used to avoid
+ * use-after-free for concurrent reset() and get() operations.
+ *
+ * Concurrent reset()s are sequenced with respect to each other: the
+ * sharded shared_ptrs will always all be set to the same value.
+ * get()s will never see a newer pointer on one core, and an older
+ * pointer on another after a subsequent thread migration.
+ */
+template <class T, size_t kNumSlots = 64>
+class AtomicCoreCachedSharedPtr {
+ public:
+ explicit AtomicCoreCachedSharedPtr(const std::shared_ptr<T>& p = nullptr) {
+ reset(p);
+ }
+
+ ~AtomicCoreCachedSharedPtr() {
+ auto slots = slots_.load(std::memory_order_acquire);
+ // Delete of AtomicCoreCachedSharedPtr must be synchronized, no
+ // need for slots->retire().
+ if (slots) {
+ delete slots;
+ }
+ }
+
+ void reset(const std::shared_ptr<T>& p = nullptr) {
+ auto newslots = folly::make_unique<Slots>();
+ // Allocate each Holder in a different CoreAllocator stripe to
+ // prevent false sharing. Their control blocks will be adjacent
+ // thanks to allocate_shared().
+ for (auto slot : folly::enumerate(newslots->slots_)) {
+ auto alloc = getCoreAllocatorStl<Holder, kNumSlots>(slot.index);
+ auto holder = std::allocate_shared<Holder>(alloc, p);
+ // Aliasing constructor: share ownership with the per-stripe
+ // Holder, but point at p's managed object.
+ *slot = std::shared_ptr<T>(holder, p.get());
+ }
+
+ // Publish the fully-built Slots with a single atomic exchange, then
+ // retire (not delete) the old one: a concurrent get() may still be
+ // reading it under a hazard pointer.
+ auto oldslots = slots_.exchange(newslots.release());
+ if (oldslots) {
+ oldslots->retire();
+ }
+ }
+
+ std::shared_ptr<T> get() const {
+ // Protect slots_ with a hazard pointer so a concurrent reset()
+ // cannot reclaim the Slots object while we copy out a shared_ptr.
+ folly::hazptr::hazptr_holder hazptr;
+ auto slots = hazptr.get_protected(slots_);
+ if (!slots) {
+ return nullptr;
+ }
+ return (slots->slots_)[AccessSpreader<>::current(kNumSlots)];
+ }
+
+ private:
+ using Holder = std::shared_ptr<T>;
+ // Slots is hazptr-managed so that reset() can retire the old array
+ // while readers may still hold a hazard pointer to it.
+ struct Slots : folly::hazptr::hazptr_obj_base<Slots> {
+ std::array<std::shared_ptr<T>, kNumSlots> slots_;
+ };
+ std::atomic<Slots*> slots_{nullptr};
+};
+
} // namespace