/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// @author: Xin Liu <xliux@fb.com>

#pragma once

#include <atomic>
#include <cmath>
#include <limits>
#include <memory>
#include <mutex>
#include <type_traits>
#include <vector>
#include <boost/noncopyable.hpp>
#include <boost/random.hpp>
#include <boost/type_traits.hpp>
#include <glog/logging.h>

#include <folly/Memory.h>
#include <folly/MicroSpinLock.h>
#include <folly/ThreadLocal.h>
namespace folly { namespace detail {

template <typename ValT, typename NodeT> class csl_iterator;
template <typename T>
class SkipListNode : private boost::noncopyable {
  enum {
    IS_HEAD_NODE = 1,
    MARKED_FOR_REMOVAL = (1 << 1),
    FULLY_LINKED = (1 << 2),
  };

 public:
  typedef T value_type;

  template <typename NodeAlloc, typename U,
      typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
  static SkipListNode* create(
      NodeAlloc& alloc, int height, U&& data, bool isHead = false) {
    DCHECK(height >= 1 && height < 64) << height;

    size_t size = sizeof(SkipListNode) +
        height * sizeof(std::atomic<SkipListNode*>);
    auto* node = static_cast<SkipListNode*>(alloc.allocate(size));
    // do placement new
    new (node) SkipListNode(uint8_t(height), std::forward<U>(data), isHead);
    return node;
  }
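  // Layout note (descriptive, not normative): a node is one variable-sized
  // allocation. The fixed-size header (flags_, height_, spinLock_, data_)
  // is followed in the same block by `height` std::atomic<SkipListNode*>
  // slots, addressed through the trailing skip_[] flexible array below.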
  template <typename NodeAlloc>
  static void destroy(NodeAlloc& alloc, SkipListNode* node) {
    node->~SkipListNode();
    alloc.deallocate(node);
  }
  template <typename NodeAlloc>
  struct DestroyIsNoOp : std::integral_constant<bool,
      IsArenaAllocator<NodeAlloc>::value &&
      boost::has_trivial_destructor<SkipListNode>::value> { };
  // copy the head node to a new head node assuming lock acquired
  SkipListNode* copyHead(SkipListNode* node) {
    DCHECK(node != nullptr && height_ > node->height_);
    setFlags(node->getFlags());
    for (uint8_t i = 0; i < node->height_; ++i) {
      setSkip(i, node->skip(i));
    }
    return this;
  }
  inline SkipListNode* skip(int layer) const {
    DCHECK_LT(layer, height_);
    return skip_[layer].load(std::memory_order_consume);
  }
  // next valid node as in the linked list
  SkipListNode* next() {
    SkipListNode* node;
    for (node = skip(0);
         (node != nullptr && node->markedForRemoval());
         node = node->skip(0)) {}
    return node;
  }
  void setSkip(uint8_t h, SkipListNode* next) {
    DCHECK_LT(h, height_);
    skip_[h].store(next, std::memory_order_release);
  }
  value_type& data() { return data_; }
  const value_type& data() const { return data_; }
  int maxLayer() const { return height_ - 1; }
  int height() const { return height_; }
  std::unique_lock<MicroSpinLock> acquireGuard() {
    return std::unique_lock<MicroSpinLock>(spinLock_);
  }
  bool fullyLinked() const { return getFlags() & FULLY_LINKED; }
  bool markedForRemoval() const { return getFlags() & MARKED_FOR_REMOVAL; }
  bool isHeadNode() const { return getFlags() & IS_HEAD_NODE; }
  void setIsHeadNode() {
    setFlags(uint16_t(getFlags() | IS_HEAD_NODE));
  }
  void setFullyLinked() {
    setFlags(uint16_t(getFlags() | FULLY_LINKED));
  }
  void setMarkedForRemoval() {
    setFlags(uint16_t(getFlags() | MARKED_FOR_REMOVAL));
  }
 private:
  // Note! this can only be called from create() as a placement new.
  template <typename U>
  SkipListNode(uint8_t height, U&& data, bool isHead) :
      height_(height), data_(std::forward<U>(data)) {
    spinLock_.init();
    setFlags(0);
    if (isHead) {
      setIsHeadNode();
    }
    // need to explicitly init the dynamic atomic pointer array
    for (uint8_t i = 0; i < height_; ++i) {
      new (&skip_[i]) std::atomic<SkipListNode*>(nullptr);
    }
  }
  ~SkipListNode() {
    for (uint8_t i = 0; i < height_; ++i) {
      skip_[i].~atomic();
    }
  }
  uint16_t getFlags() const {
    return flags_.load(std::memory_order_consume);
  }
  void setFlags(uint16_t flags) {
    flags_.store(flags, std::memory_order_release);
  }
  // TODO(xliu): on x86_64, it's possible to squeeze these into
  // skip_[0] to maybe save 8 bytes depending on the data alignments.
  // NOTE: currently this is x86_64 only anyway, due to the
  // MicroSpinLock.
  std::atomic<uint16_t> flags_;
  const uint8_t height_;
  MicroSpinLock spinLock_;

  T data_;

  std::atomic<SkipListNode*> skip_[0];
};
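
// Illustrative sketch (not part of this header): exercising the node
// lifecycle above, assuming a folly::SysAlloc-style allocator whose
// allocate(size)/deallocate(ptr) signatures match what create()/destroy()
// expect:
//
//   folly::SysAlloc alloc;
//   auto* node = SkipListNode<int>::create(alloc, /* height */ 3, 42);
//   node->setFullyLinked();
//   DCHECK(node->fullyLinked());
//   SkipListNode<int>::destroy(alloc, node);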
class SkipListRandomHeight {
  enum { kMaxHeight = 64 };

 public:
  // make it a singleton.
  static SkipListRandomHeight* instance() {
    static SkipListRandomHeight instance_;
    return &instance_;
  }
  int getHeight(int maxHeight) const {
    DCHECK_LE(maxHeight, kMaxHeight) << "max height too big!";
    double p = randomProb();
    for (int i = 0; i < maxHeight; ++i) {
      if (p < lookupTable_[i]) {
        return i + 1;
      }
    }
    return maxHeight;
  }
  size_t getSizeLimit(int height) const {
    DCHECK_LT(height, kMaxHeight);
    return sizeLimitTable_[height];
  }
 private:
  SkipListRandomHeight() { initLookupTable(); }
  void initLookupTable() {
    // set skip prob = 1/E
    static const double kProbInv = exp(1);
    static const double kProb = 1.0 / kProbInv;
    static const size_t kMaxSizeLimit = std::numeric_limits<size_t>::max();

    double sizeLimit = 1;
    double p = lookupTable_[0] = (1 - kProb);
    sizeLimitTable_[0] = 1;
    for (int i = 1; i < kMaxHeight - 1; ++i) {
      p *= kProb;
      sizeLimit *= kProbInv;
      lookupTable_[i] = lookupTable_[i - 1] + p;
      sizeLimitTable_[i] = sizeLimit > kMaxSizeLimit ?
          kMaxSizeLimit :
          static_cast<size_t>(sizeLimit);
    }
    lookupTable_[kMaxHeight - 1] = 1;
    sizeLimitTable_[kMaxHeight - 1] = kMaxSizeLimit;
  }
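  // Worked example of the tables above (values approximate): with
  // kProb = 1/e ~= 0.368, lookupTable_ is the CDF of a geometric height
  // distribution: lookupTable_[0] ~= 0.632, [1] ~= 0.865, [2] ~= 0.950,
  // so getHeight() returns 1 about 63% of the time, 2 about 23%, and so
  // on. Correspondingly, sizeLimitTable_[h] ~= e^h gives the list size at
  // which a head of height h becomes worth growing.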
  static double randomProb() {
    static ThreadLocal<boost::lagged_fibonacci2281> rng_;
    return (*rng_)();
  }
  double lookupTable_[kMaxHeight];
  size_t sizeLimitTable_[kMaxHeight];
};
template <typename NodeType, typename NodeAlloc, typename = void>
class NodeRecycler;
template <typename NodeType, typename NodeAlloc>
class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
    !NodeType::template DestroyIsNoOp<NodeAlloc>::value>::type> {
 public:
  explicit NodeRecycler(const NodeAlloc& alloc)
      : refs_(0), dirty_(false), alloc_(alloc) { lock_.init(); }

  explicit NodeRecycler() : refs_(0), dirty_(false) { lock_.init(); }
  ~NodeRecycler() {
    CHECK_EQ(refs(), 0);
    if (nodes_) {
      for (auto& node : *nodes_) {
        NodeType::destroy(alloc_, node);
      }
    }
  }
  void add(NodeType* node) {
    std::lock_guard<MicroSpinLock> g(lock_);
    if (nodes_.get() == nullptr) {
      nodes_ = std::make_unique<std::vector<NodeType*>>(1, node);
    } else {
      nodes_->push_back(node);
    }
    DCHECK_GT(refs(), 0);
    dirty_.store(true, std::memory_order_relaxed);
  }
  int addRef() {
    return refs_.fetch_add(1, std::memory_order_relaxed);
  }
  int releaseRef() {
    // We don't expect to clean the recycler immediately every time it is OK
    // to do so. Here, it is possible that multiple accessors all release at
    // the same time but nobody would clean the recycler here. If this
    // happens, the recycler will usually still get cleaned when
    // such a race doesn't happen. The worst case is the recycler will
    // eventually get deleted along with the skiplist.
    if (LIKELY(!dirty_.load(std::memory_order_relaxed) || refs() > 1)) {
      return refs_.fetch_add(-1, std::memory_order_relaxed);
    }
    std::unique_ptr<std::vector<NodeType*>> newNodes;
    {
      std::lock_guard<MicroSpinLock> g(lock_);
      if (nodes_.get() == nullptr || refs() > 1) {
        return refs_.fetch_add(-1, std::memory_order_relaxed);
      }
      // once refs_ reaches 1 and there is no other accessor, it is safe to
      // remove all the current nodes in the recycler, as we already acquired
      // the lock here so no more new nodes can be added, even though new
      // accessors may be added after that.
      newNodes.swap(nodes_);
      dirty_.store(false, std::memory_order_relaxed);
    }
    // TODO(xliu) should we spawn a thread to do this when there are a large
    // number of nodes in the recycler?
    for (auto& node : *newNodes) {
      NodeType::destroy(alloc_, node);
    }

    // decrease the ref count at the very end, to minimize the
    // chance of other threads acquiring lock_ to clear the deleted
    // nodes again.
    return refs_.fetch_add(-1, std::memory_order_relaxed);
  }
  NodeAlloc& alloc() { return alloc_; }

 private:
  int refs() const {
    return refs_.load(std::memory_order_relaxed);
  }
  std::unique_ptr<std::vector<NodeType*>> nodes_;
  std::atomic<int32_t> refs_; // current number of visitors to the list
  std::atomic<bool> dirty_; // whether *nodes_ is non-empty
  MicroSpinLock lock_; // protects access to *nodes_
  NodeAlloc alloc_;
};
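
// Illustrative sketch (not part of this header) of the intended accessor
// protocol for the recycler above; `recycler`, `node`, and the traversal
// are hypothetical stand-ins:
//
//   recycler.addRef();     // pin: nodes added from now on stay alive
//                          // until every accessor has released
//   // ... traverse the list; a remover unlinks a node and hands it to
//   // recycler.add(node) instead of freeing it immediately ...
//   recycler.releaseRef(); // the last releaser that sees dirty_ destroys
//                          // the batch outside the spinlock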
// In case of arena allocator, no recycling is necessary, and it's possible
// to save on ConcurrentSkipList size.
template <typename NodeType, typename NodeAlloc>
class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
    NodeType::template DestroyIsNoOp<NodeAlloc>::value>::type> {
 public:
  explicit NodeRecycler(const NodeAlloc& alloc) : alloc_(alloc) { }

  void addRef() { }
  void releaseRef() { }
  void add(NodeType* /* node */) {}

  NodeAlloc& alloc() { return alloc_; }

 private:
  NodeAlloc alloc_;
};

} // namespace detail
} // namespace folly
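
// Illustrative usage sketch (not part of this header; assumes the public
// folly::ConcurrentSkipList API that is built on these detail classes):
//
//   #include <folly/ConcurrentSkipList.h>
//
//   typedef folly::ConcurrentSkipList<int> SkipListT;
//   auto sl = SkipListT::createInstance(/* initial head height */ 10);
//   {
//     SkipListT::Accessor accessor(sl); // addRef() on the recycler
//     accessor.add(23);
//     accessor.remove(15);
//   }                                   // releaseRef() may reclaim nodes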