/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// @author: Xin Liu <xliux@fb.com>

#pragma once

#include <atomic>
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <mutex>
#include <type_traits>
#include <vector>

#include <boost/noncopyable.hpp>
#include <boost/random.hpp>
#include <boost/type_traits.hpp>
#include <glog/logging.h>

#include <folly/Memory.h>
#include <folly/MicroSpinLock.h>
#include <folly/ThreadLocal.h>

namespace folly { namespace detail {

template<typename ValT, typename NodeT> class csl_iterator;

template<typename T>
class SkipListNode : private boost::noncopyable {
  enum : uint16_t {
    IS_HEAD_NODE = 1,
    MARKED_FOR_REMOVAL = (1 << 1),
    FULLY_LINKED = (1 << 2),
  };

 public:
  typedef T value_type;

  template<typename NodeAlloc, typename U,
      typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
  static SkipListNode* create(
      NodeAlloc& alloc, int height, U&& data, bool isHead = false) {
    DCHECK(height >= 1 && height < 64) << height;

    // space for the node itself plus its variable-height skip pointer array
    size_t size = sizeof(SkipListNode) +
        height * sizeof(std::atomic<SkipListNode*>);
    auto* node = static_cast<SkipListNode*>(alloc.allocate(size));
    // do placement new
    new (node) SkipListNode(height, std::forward<U>(data), isHead);
    return node;
  }

  template<typename NodeAlloc>
  static void destroy(NodeAlloc& alloc, SkipListNode* node) {
    node->~SkipListNode();
    alloc.deallocate(node);
  }
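
  // Usage sketch (illustrative, not part of this header): with a simple
  // malloc-backed allocator exposing allocate(size)/deallocate(ptr), such
  // as folly's SysAlloc, a node can be built and torn down like this; the
  // height value is an arbitrary example.
  //
  //   SysAlloc alloc;
  //   auto* node = SkipListNode<int>::create(alloc, /*height=*/3, 42);
  //   ... link the node into the list ...
  //   SkipListNode<int>::destroy(alloc, node);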

  // true if the destroy() function is a no-op for the given allocator
  template<typename NodeAlloc>
  struct DestroyIsNoOp : std::integral_constant<bool,
      IsArenaAllocator<NodeAlloc>::value &&
      boost::has_trivial_destructor<SkipListNode>::value> { };
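
  // Explanatory note (not in the original): destruction is a no-op only
  // when the memory comes from an arena, which frees everything wholesale
  // on its own destruction, and ~SkipListNode() itself has nothing to do.
  // The NodeRecycler specializations at the bottom of this file key off
  // this trait.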

  // Copy the head node to a new head node; assumes the lock is acquired.
  SkipListNode* copyHead(SkipListNode* node) {
    DCHECK(node != nullptr && height_ > node->height_);
    setFlags(node->getFlags());
    for (int i = 0; i < node->height_; ++i) {
      setSkip(i, node->skip(i));
    }
    return this;
  }

  inline SkipListNode* skip(int layer) const {
    DCHECK_LT(layer, height_);
    return skip_[layer].load(std::memory_order_consume);
  }

  // the next valid node as in the linked list
  SkipListNode* next() {
    SkipListNode* node;
    for (node = skip(0);
        (node != nullptr && node->markedForRemoval());
        node = node->skip(0)) {}
    return node;
  }

  void setSkip(uint8_t h, SkipListNode* next) {
    DCHECK_LT(h, height_);
    skip_[h].store(next, std::memory_order_release);
  }
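
  // Explanatory note (not in the original): setSkip() publishes with
  // memory_order_release while skip() loads with memory_order_consume,
  // so a reader that follows a freshly linked pointer is dependency-ordered
  // after the writes that initialized the pointed-to node.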

  value_type& data() { return data_; }
  const value_type& data() const { return data_; }
  int maxLayer() const { return height_ - 1; }
  int height() const { return height_; }

  std::unique_lock<MicroSpinLock> acquireGuard() {
    return std::unique_lock<MicroSpinLock>(spinLock_);
  }

  bool fullyLinked() const { return getFlags() & FULLY_LINKED; }
  bool markedForRemoval() const { return getFlags() & MARKED_FOR_REMOVAL; }
  bool isHeadNode() const { return getFlags() & IS_HEAD_NODE; }

  void setIsHeadNode() {
    setFlags(getFlags() | IS_HEAD_NODE);
  }
  void setFullyLinked() {
    setFlags(getFlags() | FULLY_LINKED);
  }
  void setMarkedForRemoval() {
    setFlags(getFlags() | MARKED_FOR_REMOVAL);
  }

 private:
  // Note! this can only be called from create() as a placement new.
  template<typename U>
  SkipListNode(uint8_t height, U&& data, bool isHead) :
      height_(height), data_(std::forward<U>(data)) {
    spinLock_.init();
    setFlags(0);
    if (isHead) setIsHeadNode();
    // need to explicitly init the dynamic atomic pointer array
    for (uint8_t i = 0; i < height_; ++i) {
      new (&skip_[i]) std::atomic<SkipListNode*>(nullptr);
    }
  }

  ~SkipListNode() {
    // get rid of the dynamic atomic pointer array
    for (uint8_t i = 0; i < height_; ++i) {
      skip_[i].~atomic();
    }
  }

  uint16_t getFlags() const {
    return flags_.load(std::memory_order_consume);
  }
  void setFlags(uint16_t flags) {
    flags_.store(flags, std::memory_order_release);
  }

  // TODO(xliu): on x86_64, it's possible to squeeze these into
  // skip_[0] to maybe save 8 bytes depending on the data alignments.
  // NOTE: currently this is x86_64 only anyway, due to the
  // MicroSpinLock.
  std::atomic<uint16_t> flags_;
  const uint8_t height_;
  MicroSpinLock spinLock_;

  value_type data_;

  std::atomic<SkipListNode*> skip_[0];  // must be the last field
};
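
// Layout note (explanatory, not in the original): skip_[0] is a zero-length
// trailing array, so a node of height h occupies
// sizeof(SkipListNode) + h * sizeof(std::atomic<SkipListNode*>) contiguous
// bytes, with skip_[0..h-1] living directly past the fixed fields. This is
// why nodes must be constructed via create()'s placement new and freed via
// destroy().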

class SkipListRandomHeight {
  enum { kMaxHeight = 64 };

 public:
  // make it a singleton.
  static SkipListRandomHeight* instance() {
    static SkipListRandomHeight instance_;
    return &instance_;
  }

  int getHeight(int maxHeight) const {
    DCHECK_LE(maxHeight, kMaxHeight) << "max height too big!";
    double p = randomProb();
    for (int i = 0; i < maxHeight; ++i) {
      if (p < lookupTable_[i]) {
        return i + 1;
      }
    }
    return maxHeight;
  }
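
  // Worked example (explanatory, not in the original): with skip
  // probability kProb = 1/e, lookupTable_[i] holds P(height <= i + 1), so
  // getHeight() returns 1 with probability 1 - 1/e (~0.63), 2 with
  // probability (1 - 1/e) / e (~0.23), and in general h with probability
  // (1 - 1/e) * e^-(h-1).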

  size_t getSizeLimit(int height) const {
    DCHECK_LT(height, kMaxHeight);
    return sizeLimitTable_[height];
  }

 private:
  SkipListRandomHeight() { initLookupTable(); }

  void initLookupTable() {
    // set skip prob = 1/E
    static const double kProbInv = exp(1);
    static const double kProb = 1.0 / kProbInv;
    static const size_t kMaxSizeLimit = std::numeric_limits<size_t>::max();

    double sizeLimit = 1;
    double p = lookupTable_[0] = (1 - kProb);
    sizeLimitTable_[0] = 1;
    for (int i = 1; i < kMaxHeight - 1; ++i) {
      p *= kProb;
      sizeLimit *= kProbInv;
      lookupTable_[i] = lookupTable_[i - 1] + p;
      sizeLimitTable_[i] = sizeLimit > kMaxSizeLimit ?
          kMaxSizeLimit :
          static_cast<size_t>(sizeLimit);
    }
    lookupTable_[kMaxHeight - 1] = 1;
    sizeLimitTable_[kMaxHeight - 1] = kMaxSizeLimit;
  }
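
  // Explanatory note (not in the original): sizeLimitTable_[h] is roughly
  // e^h, the list size at which a head node of height h + 1 stops being
  // tall enough, and callers consult getSizeLimit() when deciding whether
  // to grow the head. This is a reading of how the table is consumed, not
  // something stated in this file.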

  static double randomProb() {
    static ThreadLocal<boost::lagged_fibonacci2281> rng_;
    return (*rng_)();
  }

  double lookupTable_[kMaxHeight];
  size_t sizeLimitTable_[kMaxHeight];
};

template<typename NodeType, typename NodeAlloc, typename = void>
class NodeRecycler;

template<typename NodeType, typename NodeAlloc>
class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
    !NodeType::template DestroyIsNoOp<NodeAlloc>::value>::type> {
 public:
  explicit NodeRecycler(const NodeAlloc& alloc)
    : refs_(0), dirty_(false), alloc_(alloc) { lock_.init(); }

  explicit NodeRecycler() : refs_(0), dirty_(false) { lock_.init(); }

  ~NodeRecycler() {
    CHECK_EQ(refs(), 0);
    if (nodes_) {
      for (auto& node : *nodes_) {
        NodeType::destroy(alloc_, node);
      }
    }
  }

  void add(NodeType* node) {
    std::lock_guard<MicroSpinLock> g(lock_);
    if (nodes_.get() == nullptr) {
      nodes_.reset(new std::vector<NodeType*>(1, node));
    } else {
      nodes_->push_back(node);
    }
    DCHECK_GT(refs(), 0);
    dirty_.store(true, std::memory_order_relaxed);
  }

  int addRef() {
    return refs_.fetch_add(1, std::memory_order_relaxed);
  }

  int releaseRef() {
    // We don't expect to clean the recycler immediately every time it is OK
    // to do so. Here, it is possible that multiple accessors all release at
    // the same time but nobody would clean the recycler here. If this
    // happens, the recycler will usually still get cleaned when
    // such a race doesn't happen. The worst case is the recycler will
    // eventually get deleted along with the skiplist.
    if (LIKELY(!dirty_.load(std::memory_order_relaxed) || refs() > 1)) {
      return refs_.fetch_add(-1, std::memory_order_relaxed);
    }

    std::unique_ptr<std::vector<NodeType*>> newNodes;
    {
      std::lock_guard<MicroSpinLock> g(lock_);
      if (nodes_.get() == nullptr || refs() > 1) {
        return refs_.fetch_add(-1, std::memory_order_relaxed);
      }
      // once refs_ reaches 1 and there is no other accessor, it is safe to
      // remove all the current nodes in the recycler, as we already acquired
      // the lock here so no more new nodes can be added, even though new
      // accessors may be added after that.
      newNodes.swap(nodes_);
      dirty_.store(false, std::memory_order_relaxed);
    }

    // TODO(xliu) should we spawn a thread to do this when there are large
    // number of nodes in the recycler?
    for (auto& node : *newNodes) {
      NodeType::destroy(alloc_, node);
    }

    // decrease the ref count at the very end, to minimize the
    // chance of other threads acquiring lock_ to clear the deleted
    // nodes again.
    return refs_.fetch_add(-1, std::memory_order_relaxed);
  }
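
  // Protocol sketch (explanatory, not in the original): accessors call
  // addRef() before reading the list and releaseRef() when done; removed
  // nodes are parked via add() rather than freed immediately. Only a
  // releaseRef() that observes refs() == 1 with a dirty list destroys the
  // parked nodes, so no concurrent reader can still hold a pointer into
  // reclaimed memory.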

  NodeAlloc& alloc() { return alloc_; }

 private:
  int refs() const {
    return refs_.load(std::memory_order_relaxed);
  }

  std::unique_ptr<std::vector<NodeType*>> nodes_;
  std::atomic<int32_t> refs_; // current number of visitors to the list
  std::atomic<bool> dirty_; // whether *nodes_ is non-empty
  MicroSpinLock lock_; // protects access to *nodes_
  NodeAlloc alloc_;
};

// In case of arena allocator, no recycling is necessary, and it's possible
// to save on ConcurrentSkipList size.
template<typename NodeType, typename NodeAlloc>
class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
    NodeType::template DestroyIsNoOp<NodeAlloc>::value>::type> {
 public:
  explicit NodeRecycler(const NodeAlloc& alloc) : alloc_(alloc) { }
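
  // Explanatory note (not in the original): this specialization is selected
  // when DestroyIsNoOp is true, i.e. the allocator is an arena and
  // SkipListNode is trivially destructible. Ref counting and node tracking
  // then cost nothing, and all node memory is reclaimed at once when the
  // arena is destroyed.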

  void addRef() { }
  void releaseRef() { }

  void add(NodeType* /* node */) {}

  NodeAlloc& alloc() { return alloc_; }

 private:
  NodeAlloc alloc_;
};

}}  // namespaces