/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// @author: Xin Liu <xliux@fb.com>

#pragma once

#include <atomic>
#include <cmath>
#include <limits>
#include <memory>
#include <mutex>
#include <type_traits>
#include <vector>

#include <boost/noncopyable.hpp>
#include <boost/random.hpp>
#include <boost/type_traits.hpp>
#include <glog/logging.h>

#include <folly/Memory.h>
#include <folly/MicroSpinLock.h>
#include <folly/ThreadLocal.h>

namespace folly { namespace detail {

template<typename ValT, typename NodeT> class csl_iterator;

template<typename T>
class SkipListNode : private boost::noncopyable {
  enum {
    IS_HEAD_NODE = 1,
    MARKED_FOR_REMOVAL = (1 << 1),
    FULLY_LINKED = (1 << 2),
  };

 public:
  typedef T value_type;
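
  // Creates a node with `height` forward-pointer levels. The node is
  // allocated as one variable-sized block: sizeof(SkipListNode) plus one
  // std::atomic<SkipListNode*> per level for the trailing skip_[] array,
  // then constructed in place.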
  template<typename NodeAlloc, typename U,
    typename=typename std::enable_if<std::is_convertible<U, T>::value>::type>
  static SkipListNode* create(
      NodeAlloc& alloc, int height, U&& data, bool isHead = false) {
    DCHECK(height >= 1 && height < 64) << height;

    size_t size = sizeof(SkipListNode) +
      height * sizeof(std::atomic<SkipListNode*>);
    auto* node = static_cast<SkipListNode*>(alloc.allocate(size));
    // do placement new
    new (node) SkipListNode(height, std::forward<U>(data), isHead);
    return node;
  }

  template<typename NodeAlloc>
  static void destroy(NodeAlloc& alloc, SkipListNode* node) {
    node->~SkipListNode();
    alloc.deallocate(node);
  }
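
  // True when nodes need no explicit destruction: the allocator is an arena
  // allocator (memory is reclaimed in bulk) and SkipListNode is trivially
  // destructible. The NodeRecycler specialization below keys off this.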
  template<typename NodeAlloc>
  static constexpr bool destroyIsNoOp() {
    return IsArenaAllocator<NodeAlloc>::value &&
           boost::has_trivial_destructor<SkipListNode>::value;
  }

  // copy the head node to a new head node assuming lock acquired
  SkipListNode* copyHead(SkipListNode* node) {
    DCHECK(node != nullptr && height_ > node->height_);
    setFlags(node->getFlags());
    for (int i = 0; i < node->height_; ++i) {
      setSkip(i, node->skip(i));
    }
    return this;
  }

  inline SkipListNode* skip(int layer) const {
    DCHECK_LT(layer, height_);
    return skip_[layer].load(std::memory_order_consume);
  }

  // next valid node as in the linked list
  SkipListNode* next() {
    SkipListNode* node;
    for (node = skip(0);
         (node != nullptr && node->markedForRemoval());
         node = node->skip(0)) {}
    return node;
  }

  void setSkip(uint8_t h, SkipListNode* next) {
    DCHECK_LT(h, height_);
    skip_[h].store(next, std::memory_order_release);
  }

  value_type& data() { return data_; }
  const value_type& data() const { return data_; }
  int maxLayer() const { return height_ - 1; }
  int height() const { return height_; }
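
  // Returns an RAII guard over this node's spin lock, held while the node's
  // links are being updated.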
  std::unique_lock<MicroSpinLock> acquireGuard() {
    return std::unique_lock<MicroSpinLock>(spinLock_);
  }

  bool fullyLinked() const { return getFlags() & FULLY_LINKED; }
  bool markedForRemoval() const { return getFlags() & MARKED_FOR_REMOVAL; }
  bool isHeadNode() const { return getFlags() & IS_HEAD_NODE; }

  void setIsHeadNode() {
    setFlags(getFlags() | IS_HEAD_NODE);
  }
  void setFullyLinked() {
    setFlags(getFlags() | FULLY_LINKED);
  }
  void setMarkedForRemoval() {
    setFlags(getFlags() | MARKED_FOR_REMOVAL);
  }

 private:
  // Note! this can only be called from create() as a placement new.
  template<typename U>
  SkipListNode(uint8_t height, U&& data, bool isHead) :
      height_(height), data_(std::forward<U>(data)) {
    spinLock_.init();
    setFlags(0);
    if (isHead) setIsHeadNode();
    // need to explicitly init the dynamic atomic pointer array
    for (uint8_t i = 0; i < height_; ++i) {
      new (&skip_[i]) std::atomic<SkipListNode*>(nullptr);
    }
  }

  ~SkipListNode() {
    for (uint8_t i = 0; i < height_; ++i) {
      skip_[i].~atomic();
    }
  }

  uint16_t getFlags() const {
    return flags_.load(std::memory_order_consume);
  }
  void setFlags(uint16_t flags) {
    flags_.store(flags, std::memory_order_release);
  }

  // TODO(xliu): on x86_64, it's possible to squeeze these into
  // skip_[0] to maybe save 8 bytes depending on the data alignments.
  // NOTE: currently this is x86_64 only anyway, due to the
  // MicroSpinLock.
  std::atomic<uint16_t> flags_;
  const uint8_t height_;
  MicroSpinLock spinLock_;

  value_type data_;

  std::atomic<SkipListNode*> skip_[0];
};
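
// Generates random heights for new nodes using a geometric distribution with
// skip probability 1/e, and precomputes the per-height size limits used by
// getSizeLimit() below.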
class SkipListRandomHeight {
  enum { kMaxHeight = 64 };

 public:
  // make it a singleton.
  static SkipListRandomHeight *instance() {
    static SkipListRandomHeight instance_;
    return &instance_;
  }

  int getHeight(int maxHeight) const {
    DCHECK_LE(maxHeight, kMaxHeight) << "max height too big!";
    double p = randomProb();
    for (int i = 0; i < maxHeight; ++i) {
      if (p < lookupTable_[i]) {
        return i + 1;
      }
    }
    return maxHeight;
  }
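
  // Size limit associated with a given height; callers use it to decide
  // when the skip list should grow its head height.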
  size_t getSizeLimit(int height) const {
    DCHECK_LT(height, kMaxHeight);
    return sizeLimitTable_[height];
  }

 private:
  SkipListRandomHeight() { initLookupTable(); }
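
  // With skip probability 1/e, a node gets height h with probability
  // (1 - 1/e) * (1/e)^(h - 1). lookupTable_[i] holds the cumulative
  // probability that a height is at most i + 1, and sizeLimitTable_[i]
  // grows roughly as e^i; both are filled in by initLookupTable() below.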
  void initLookupTable() {
    // set skip prob = 1/E
    static const double kProbInv = exp(1);
    static const double kProb = 1.0 / kProbInv;
    static const size_t kMaxSizeLimit = std::numeric_limits<size_t>::max();

    double sizeLimit = 1;
    double p = lookupTable_[0] = (1 - kProb);
    sizeLimitTable_[0] = 1;
    for (int i = 1; i < kMaxHeight - 1; ++i) {
      p *= kProb;
      sizeLimit *= kProbInv;
      lookupTable_[i] = lookupTable_[i - 1] + p;
      sizeLimitTable_[i] = sizeLimit > kMaxSizeLimit ?
        kMaxSizeLimit :
        static_cast<size_t>(sizeLimit);
    }
    lookupTable_[kMaxHeight - 1] = 1;
    sizeLimitTable_[kMaxHeight - 1] = kMaxSizeLimit;
  }

  static double randomProb() {
    static ThreadLocal<boost::lagged_fibonacci2281> rng_;
    return (*rng_)();
  }

  double lookupTable_[kMaxHeight];
  size_t sizeLimitTable_[kMaxHeight];
};
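
// NodeRecycler defers reclamation of removed skip list nodes: accessors
// bracket their use of the list with addRef()/releaseRef(), removed nodes
// are parked via add(), and they are destroyed only once the last accessor
// releases its reference. The primary template is specialized on whether
// NodeType::destroyIsNoOp<NodeAlloc>() holds.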
template<typename NodeType, typename NodeAlloc, typename = void>
class NodeRecycler;

template<typename NodeType, typename NodeAlloc>
class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
  !NodeType::template destroyIsNoOp<NodeAlloc>()>::type> {
 public:
  explicit NodeRecycler(const NodeAlloc& alloc)
    : refs_(0), dirty_(false), alloc_(alloc) { lock_.init(); }

  explicit NodeRecycler() : refs_(0), dirty_(false) { lock_.init(); }

  ~NodeRecycler() {
    CHECK_EQ(refs(), 0);
    if (nodes_) {
      for (auto& node : *nodes_) {
        NodeType::destroy(alloc_, node);
      }
    }
  }

  void add(NodeType* node) {
    std::lock_guard<MicroSpinLock> g(lock_);
    if (nodes_.get() == nullptr) {
      nodes_.reset(new std::vector<NodeType*>(1, node));
    } else {
      nodes_->push_back(node);
    }
    DCHECK_GT(refs(), 0);
    dirty_.store(true, std::memory_order_relaxed);
  }

  int addRef() {
    return refs_.fetch_add(1, std::memory_order_relaxed);
  }

  int releaseRef() {
    // We don't expect to clean the recycler immediately every time it is OK
    // to do so. Here, it is possible that multiple accessors all release at
    // the same time but nobody would clean the recycler here. If this
    // happens, the recycler will usually still get cleaned when
    // such a race doesn't happen. The worst case is the recycler will
    // eventually get deleted along with the skiplist.
    if (LIKELY(!dirty_.load(std::memory_order_relaxed) || refs() > 1)) {
      return refs_.fetch_add(-1, std::memory_order_relaxed);
    }

    std::unique_ptr<std::vector<NodeType*>> newNodes;
    {
      std::lock_guard<MicroSpinLock> g(lock_);
      if (nodes_.get() == nullptr || refs() > 1) {
        return refs_.fetch_add(-1, std::memory_order_relaxed);
      }
      // once refs_ reaches 1 and there is no other accessor, it is safe to
      // remove all the current nodes in the recycler, as we already acquired
      // the lock here so no more new nodes can be added, even though new
      // accessors may be added after that.
      newNodes.swap(nodes_);
      dirty_.store(false, std::memory_order_relaxed);
    }

    // TODO(xliu) should we spawn a thread to do this when there is a large
    // number of nodes in the recycler?
    for (auto& node : *newNodes) {
      NodeType::destroy(alloc_, node);
    }

    // decrease the ref count at the very end, to minimize the
    // chance of other threads acquiring lock_ to clear the deleted
    // nodes again.
    return refs_.fetch_add(-1, std::memory_order_relaxed);
  }

  NodeAlloc& alloc() { return alloc_; }

 private:
  int refs() const {
    return refs_.load(std::memory_order_relaxed);
  }

  std::unique_ptr<std::vector<NodeType*>> nodes_;
  std::atomic<int32_t> refs_; // current number of visitors to the list
  std::atomic<bool> dirty_; // whether *nodes_ is non-empty
  MicroSpinLock lock_; // protects access to *nodes_
  NodeAlloc alloc_;
};

// In case of arena allocator, no recycling is necessary, and it's possible
// to save on ConcurrentSkipList size.
template<typename NodeType, typename NodeAlloc>
class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
  NodeType::template destroyIsNoOp<NodeAlloc>()>::type> {
 public:
  explicit NodeRecycler(const NodeAlloc& alloc) : alloc_(alloc) { }

  void addRef() { }
  void releaseRef() { }

  void add(NodeType* /* node */) { }

  NodeAlloc& alloc() { return alloc_; }

 private:
  NodeAlloc alloc_;
};

}}  // namespaces folly::detail