2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include <folly/experimental/hazptr/debug.h>
19 #include <folly/experimental/hazptr/hazptr.h>
24 /** Set implemented as an ordered singly-linked list.
26 * Multiple writers may add or remove elements. Multiple reader
27 * threads may search the set concurrently with each other and with
28 * the writers' operations.
32 class Node : public hazptr_obj_base<Node> {
// Intrusive list node holding one element. Inherits hazptr_obj_base so a
// retired node is reclaimed only after no hazard pointer protects it.
// NOTE(review): this excerpt is elided — several lines of this class
// (release()/acquire() signatures, destructor, elem_ declaration, closing
// brace) are not visible here.
35   std::atomic<uint64_t> refcount_{1};
36   std::atomic<Node*> next_{nullptr};
// refcount_ starts at 1: the list itself holds one reference.
// next_ carries a mark in its low bit (see getDeleted/getPtr below):
// bit set == this node is logically deleted.
38   // Node must be refcounted for wait-free access: A deleted node
39   // may have hazptrs pointing at it, so the rest of the list (or at
40   // least, what existed at the time of the hazptr load) must still
// be reachable through it (continuation of the comment is elided).
// Presumably release(): drop one reference; last owner retires the node.
43     if (refcount_.fetch_sub(1) == 1) {
48       // Optimization in the case that we know there are no hazptrs pointing
// at this node: tear down the successor chain eagerly instead of
// retiring one node at a time — TODO(review) confirm against full source.
51       if (refcount_.load(std::memory_order_relaxed) == 1) {
52         auto next = getPtr(next_.load(std::memory_order_relaxed));
// Clear next_ so the destructor does not touch the successor again.
55         next_.store(nullptr, std::memory_order_relaxed);
// Presumably acquire(): take an additional reference. Caller must already
// hold a reference (or hazptr protection), hence the nonzero check.
62     DCHECK(refcount_.load() != 0);
63     refcount_.fetch_add(1);
// Constructor: copies the element in; refcount_/next_ use their
// in-class initializers.
67   explicit Node(T e) : elem_(e) {
68     DEBUG_PRINT(this << " " << e);
// Destructor fragment: reads the (unmarked) successor — presumably to
// release it; surrounding lines are elided.
73     auto next = getPtr(next_.load(std::memory_order_relaxed));
// True iff the low "deleted" mark bit is set in a next_ value.
80   static bool getDeleted(Node* ptr) {
81     return uintptr_t(ptr) & 1;
// Strips the mark bit, yielding the real node address.
84   static Node* getPtr(Node* ptr) {
85     return (Node*)(uintptr_t(ptr) & ~1UL);
// Head of the singly-linked list; mutable so const members (contains)
// can still traverse via hazard-pointer loads.
88   mutable std::atomic<Node*> head_ = {nullptr};
90   // Remove a single deleted item.
91   // Although it doesn't have to be our item.
93   // Note that standard lock-free Michael linked lists put this in the
94   // contains() path, while this implementation leaves it only in
95   // remove(), such that contains() is wait-free.
// NOTE(review): the fixlist(...) signature line itself is elided; visible
// parameters: two hazptr_holders used hand-over-hand, prev (in/out: address
// of the link being examined), and an elided curr out-parameter.
97       hazptr_holder& hptr_prev,
98       hazptr_holder& hptr_curr,
99       std::atomic<Node*>*& prev,
// Protect the current node before dereferencing it.
103     curr = hptr_curr.get_protected(*prev, getPtr);
104     while (getPtr(curr)) {
105       auto next = getPtr(curr)->next_.load(std::memory_order_acquire);
// curr is logically deleted (mark bit set on its next_): try to unlink it.
106       if (getDeleted(next)) {
107         auto nextp = getPtr(next);
112         auto curr_no_mark = getPtr(curr);
// CAS *prev from curr to curr's successor; on success drop the list's
// reference so the node can be retired.
113         if (prev->compare_exchange_weak(curr_no_mark, nextp)) {
115           curr_no_mark->release();
// Advance hand-over-hand: prev moves to curr's next_ link, the trailing
// hazptr protects the new curr, then the holders swap roles so the
// freed one can protect the next node.
124       prev = &(getPtr(curr)->next_);
125       curr = hptr_prev.get_protected(getPtr(curr)->next_, getPtr);
127       swap(hptr_curr, hptr_prev);
// Postcondition (elided context suggests): a live current node was found.
129     DCHECK(getPtr(curr));
133   /* wait-free set search */
// NOTE(review): the find(...) signature line is elided; visible parameters
// mirror fixlist: two hazptr_holders for hand-over-hand protection, prev
// (out: address of the link preceding curr), plus elided val/curr params.
// Wait-free because it never helps unlink deleted nodes (see the comment
// above fixlist); it simply skips entries whose next_ carries the mark bit.
136       hazptr_holder& hptr_prev,
137       hazptr_holder& hptr_curr,
138       std::atomic<Node*>*& prev,
// Protect the first node before dereferencing.
141     curr = hptr_curr.get_protected(*prev, getPtr);
142     while (getPtr(curr)) {
143       auto next = getPtr(curr)->next_.load(std::memory_order_acquire);
// Only consider nodes that are not logically deleted.
144       if (!getDeleted(next)) {
145         if (getPtr(curr)->elem_ == val) {
// (elided: presumably "return true" here)
147         } else if (!(getPtr(curr)->elem_ < val)) {
148           break; // Because the list is sorted.
// Hand-over-hand advance, identical in structure to fixlist.
151       prev = &(getPtr(curr)->next_);
152       curr = hptr_prev.get_protected(getPtr(curr)->next_, getPtr);
153       /* Swap does not change the values of the owned hazard
154        * pointers themselves. After the swap, The hazard pointer
155        * owned by hptr_prev continues to protect the node that
156        * contains the pointer *prev. The hazard pointer owned by
157        * hptr_curr will continue to protect the node that contains
158        * the old *prev (unless the old prev was &head), which no
159        * longer needs protection, so hptr_curr's hazard pointer is
160        * now free to protect *curr in the next iteration (if curr !=
// (comment continuation elided)
163       swap(hptr_curr, hptr_prev);
// Default-construct an empty set (head_ is nullptr via its initializer).
170   explicit MWMRListSet() {}
// Destructor fragment: walks from head_ — presumably releasing each node;
// surrounding lines are elided.
173     Node* next = head_.load();
// add(v) fragment (signature elided): insert v in sorted order.
// Returns false if v is already present — TODO(review) confirm.
180     hazptr_holder hptr_prev;
181     hazptr_holder hptr_curr;
182     std::atomic<Node*>* prev;
// NOTE(review): folly::make_unique is pre-C++14 legacy; std::make_unique
// is the modern equivalent (not changed here — surrounding lines elided).
185     auto newnode = folly::make_unique<Node>(v);
// find() leaves prev pointing at the link where v belongs and cur at the
// first node not less than v.
188     if (find(v, hptr_prev, hptr_curr, prev, cur)) {
// Link the new node in front of cur, then publish it with a CAS on *prev.
191     newnode->next_.store(cur, std::memory_order_relaxed);
192     auto cur_no_mark = getPtr(cur);
193     if (prev->compare_exchange_weak(cur_no_mark, newnode.get())) {
// CAS failed (elided retry/cleanup path): detach next_ before the
// unique_ptr destroys newnode, so cur is not torn down with it.
197     // Ensure ~Node() destructor doesn't destroy next_
198     newnode->next_.store(nullptr, std::memory_order_relaxed);
// Remove v from the set. Returns false if v is not present.
// Deletion is two steps: (1) CAS the mark bit into the node's next_
// (logical delete — only one deleter can win), then (2) CAS the node out
// of its predecessor's link (physical unlink).
202   bool remove(const T& v) {
203     hazptr_holder hptr_prev;
204     hazptr_holder hptr_curr;
205     std::atomic<Node*>* prev;
// Locate the node; find() protects curr and sets prev to the preceding link.
209     if (!find(v, hptr_prev, hptr_curr, prev, curr)) {
212     auto next = getPtr(curr)->next_.load(std::memory_order_acquire);
213     auto next_no_mark = getPtr(next); // Ensure only one deleter wins
// Step 1: set the mark bit on curr->next_. Failure (elided branch) means
// another deleter marked it first.
215     if (!getPtr(curr)->next_.compare_exchange_weak(
216             next_no_mark, (Node*)(uintptr_t(next_no_mark) | 1))) {
// Step 2: swing *prev past curr. Using the unmarked curr as expected value
// guarantees prev itself was not concurrently deleted.
224     auto curr_no_mark = getPtr(curr); /* ensure not deleted */
225     if (prev->compare_exchange_weak(curr_no_mark, next)) {
// (elided: presumably release() of the unlinked node on success)
234       // Someone else modified prev. Call fixlist
235       // to unlink deleted element by re-walking list.
236       fixlist(hptr_prev, hptr_curr, prev, curr);
// Wait-free membership test: delegates entirely to find(), which never
// modifies the list (no unlinking of deleted nodes on this path).
240   bool contains(const T& v) const {
241     hazptr_holder hptr_prev;
242     hazptr_holder hptr_curr;
243     std::atomic<Node*>* prev;
// (curr declaration elided)
246     return find(v, hptr_prev, hptr_curr, prev, curr);
250 } // namespace hazptr