--- /dev/null
+/*
+ * Copyright 2016 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <atomic>
+#include <cassert>
+
+namespace folly {
+
+/**
+ * A very simple atomic singly-linked list primitive.
+ *
+ * Usage:
+ *
+ * class MyClass {
+ * AtomicIntrusiveLinkedListHook<MyClass> hook_;
+ * };
+ *
+ * AtomicIntrusiveLinkedList<MyClass, &MyClass::hook_> list;
+ * list.insertHead(&a);
+ * list.sweep([] (MyClass* c) { doSomething(c); });
+ */
+template <class T>
+struct AtomicIntrusiveLinkedListHook {
+ T* next{nullptr};
+};
+
+template <class T, AtomicIntrusiveLinkedListHook<T> T::*HookMember>
+class AtomicIntrusiveLinkedList {
+ public:
+ AtomicIntrusiveLinkedList() {}
+ AtomicIntrusiveLinkedList(const AtomicIntrusiveLinkedList&) = delete;
+ AtomicIntrusiveLinkedList& operator=(const AtomicIntrusiveLinkedList&) =
+ delete;
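+ /* Move operations are implemented as a swap of the two heads and are
+    not thread-safe: neither list may be accessed concurrently during
+    the move. Note that move-assignment swaps, so any elements held by
+    this list end up in `other`. */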
+ AtomicIntrusiveLinkedList(AtomicIntrusiveLinkedList&& other) noexcept {
+ auto tmp = other.head_.load();
+ other.head_ = head_.load();
+ head_ = tmp;
+ }
+ AtomicIntrusiveLinkedList& operator=(
+ AtomicIntrusiveLinkedList&& other) noexcept {
+ auto tmp = other.head_.load();
+ other.head_ = head_.load();
+ head_ = tmp;
+
+ return *this;
+ }
+
+ /**
+ * Note: list must be empty on destruction.
+ */
+ ~AtomicIntrusiveLinkedList() {
+ assert(empty());
+ }
+
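+ /* Note: The result is only a snapshot; it may be stale as soon as it
+    is returned if other threads are inserting concurrently. */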
+ bool empty() const {
+ return head_ == nullptr;
+ }
+
+ /**
+ * Atomically insert t at the head of the list.
+ * @return True if the inserted element is the only one in the list
+ * after the call.
+ */
+ bool insertHead(T* t) {
+ assert(next(t) == nullptr);
+
+ auto oldHead = head_.load(std::memory_order_relaxed);
+ do {
+ next(t) = oldHead;
+ /* oldHead is updated by the call below.
+
+ NOTE: we don't use next(t) instead of oldHead directly due to
+ compiler bugs (GCC prior to 4.8.3 (bug 60272), clang (bug 18899),
+ MSVC (bug 819819); source:
+ http://en.cppreference.com/w/cpp/atomic/atomic/compare_exchange */
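+ /* memory_order_release on success publishes the next(t) store above
+    to the thread that later takes the list via exchange() in sweep(). */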
+ } while (!head_.compare_exchange_weak(oldHead, t,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+
+ return oldHead == nullptr;
+ }
+
+ /**
+ * Repeatedly replaces the head with nullptr,
+ * and calls func() on the removed elements in order from tail to head
+ * (i.e. insertion order). Stops when the list is empty.
+ */
+ template <typename F>
+ void sweep(F&& func) {
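+ // exchange() atomically detaches the entire list; reverse() restores
+ // insertion order. Elements inserted while func() is running are
+ // picked up by the next iteration of the outer loop.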
+ while (auto head = head_.exchange(nullptr)) {
+ auto rhead = reverse(head);
+ while (rhead != nullptr) {
+ auto t = rhead;
+ rhead = next(t);
+ next(t) = nullptr;
+ func(t);
+ }
+ }
+ }
+
+ private:
+ std::atomic<T*> head_{nullptr};
+
+ static T*& next(T* t) {
+ return (t->*HookMember).next;
+ }
+
+ /* Reverses a linked list, returning the pointer to the new head
+ (old tail) */
+ static T* reverse(T* head) {
+ T* rhead = nullptr;
+ while (head != nullptr) {
+ auto t = head;
+ head = next(t);
+ next(t) = rhead;
+ rhead = t;
+ }
+ return rhead;
+ }
+};
+
+} // namespace folly
#pragma once
-#include <atomic>
-#include <cassert>
+#include <folly/AtomicIntrusiveLinkedList.h>
+#include <folly/Memory.h>
namespace folly {
*
* Usage:
*
- * class MyClass {
- * AtomicLinkedListHook<MyClass> hook_;
- * }
- *
- * AtomicLinkedList<MyClass, &MyClass::hook_> list;
- * list.insert(&a);
- * list.sweep([] (MyClass* c) { doSomething(c); }
+ * AtomicLinkedList<MyClass> list;
+ * list.insertHead(a);
+ * list.sweep([] (MyClass&& c) { doSomething(std::move(c)); });
*/
-template <class T>
-struct AtomicLinkedListHook {
- T* next{nullptr};
-};
-template <class T, AtomicLinkedListHook<T> T::* HookMember>
+template <class T>
class AtomicLinkedList {
public:
AtomicLinkedList() {}
AtomicLinkedList(const AtomicLinkedList&) = delete;
AtomicLinkedList& operator=(const AtomicLinkedList&) = delete;
- AtomicLinkedList(AtomicLinkedList&& other) noexcept {
- auto tmp = other.head_.load();
- other.head_ = head_.load();
- head_ = tmp;
- }
- AtomicLinkedList& operator=(AtomicLinkedList&& other) noexcept {
- auto tmp = other.head_.load();
- other.head_ = head_.load();
- head_ = tmp;
-
- return *this;
- }
+ AtomicLinkedList(AtomicLinkedList&& other) noexcept = default;
+ AtomicLinkedList& operator=(AtomicLinkedList&& other) = default;
- /**
- * Note: list must be empty on destruction.
- */
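+ /**
+ * Note: Unlike the intrusive variant, the list does not have to be
+ * empty on destruction; any remaining elements are destroyed here.
+ */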
~AtomicLinkedList() {
- assert(empty());
+ sweep([](T&&) {});
}
bool empty() const {
- return head_ == nullptr;
+ return list_.empty();
}
/**
* @return True if the inserted element is the only one in the list
* after the call.
*/
- bool insertHead(T* t) {
- assert(next(t) == nullptr);
-
- auto oldHead = head_.load(std::memory_order_relaxed);
- do {
- next(t) = oldHead;
- /* oldHead is updated by the call below.
+ bool insertHead(T t) {
+ auto wrapper = folly::make_unique<Wrapper>(std::move(t));
- NOTE: we don't use next(t) instead of oldHead directly due to
- compiler bugs (GCC prior to 4.8.3 (bug 60272), clang (bug 18899),
- MSVC (bug 819819); source:
- http://en.cppreference.com/w/cpp/atomic/atomic/compare_exchange */
- } while (!head_.compare_exchange_weak(oldHead, t,
- std::memory_order_release,
- std::memory_order_relaxed));
-
- return oldHead == nullptr;
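+ // Ownership of the wrapper passes to the list here; it is reclaimed
+ // in sweep() (or by the destructor, which sweeps leftover elements).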
+ return list_.insertHead(wrapper.release());
}
/**
- * Repeatedly replaces the head with nullptr,
+ * Repeatedly pops elements from the head,
* and calls func() on the removed elements in the order from tail to head.
* Stops when the list is empty.
*/
template <typename F>
void sweep(F&& func) {
- while (auto head = head_.exchange(nullptr)) {
- auto rhead = reverse(head);
- while (rhead != nullptr) {
- auto t = rhead;
- rhead = next(t);
- next(t) = nullptr;
- func(t);
- }
- }
+ list_.sweep([&](Wrapper* wrapperPtr) mutable {
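+ // Take back ownership of the heap-allocated wrapper, so that it is
+ // freed once its payload has been handed to func().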
+ std::unique_ptr<Wrapper> wrapper(wrapperPtr);
+
+ func(std::move(wrapper->data));
+ });
}
private:
- std::atomic<T*> head_{nullptr};
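+ // Inserted values are moved into heap-allocated wrappers, which carry
+ // the intrusive hook required by the underlying list.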
+ struct Wrapper {
+ explicit Wrapper(T&& t) : data(std::move(t)) {}
- static T*& next(T* t) {
- return (t->*HookMember).next;
- }
-
- /* Reverses a linked list, returning the pointer to the new head
- (old tail) */
- static T* reverse(T* head) {
- T* rhead = nullptr;
- while (head != nullptr) {
- auto t = head;
- head = next(t);
- next(t) = rhead;
- rhead = t;
- }
- return rhead;
- }
+ AtomicIntrusiveLinkedListHook<Wrapper> hook;
+ T data;
+ };
+ AtomicIntrusiveLinkedList<Wrapper, &Wrapper::hook> list_;
};
} // namespace folly
AtomicHashArray-inl.h \
AtomicHashMap.h \
AtomicHashMap-inl.h \
+ AtomicIntrusiveLinkedList.h \
AtomicLinkedList.h \
AtomicStruct.h \
AtomicUnorderedMap.h \
#include <functional>
#include <typeinfo>
-#include <boost/context/all.hpp>
-#include <boost/version.hpp>
-#include <folly/AtomicLinkedList.h>
+#include <folly/AtomicIntrusiveLinkedList.h>
#include <folly/CPortability.h>
#include <folly/Function.h>
#include <folly/IntrusiveList.h>
+#include <folly/Portability.h>
#include <folly/experimental/fibers/BoostContextCompatibility.h>
#include <folly/io/async/Request.h>
-#include <folly/Portability.h>
+#include <boost/context/all.hpp>
+#include <boost/version.hpp>
namespace folly { namespace fibers {
/**
* Points to next fiber in remote ready list
*/
- folly::AtomicLinkedListHook<Fiber> nextRemoteReady_;
+ folly::AtomicIntrusiveLinkedListHook<Fiber> nextRemoteReady_;
static constexpr size_t kUserBufferSize = 256;
std::aligned_storage<kUserBufferSize>::type userBuffer_;
#include <unordered_set>
#include <vector>
-#include <folly/AtomicLinkedList.h>
+#include <folly/AtomicIntrusiveLinkedList.h>
#include <folly/Executor.h>
-#include <folly/Likely.h>
#include <folly/IntrusiveList.h>
-#include <folly/io/async/Request.h>
+#include <folly/Likely.h>
#include <folly/futures/Try.h>
+#include <folly/io/async/Request.h>
#include <folly/experimental/ExecutionObserver.h>
#include <folly/experimental/fibers/BoostContextCompatibility.h>
folly::Function<void()> func;
std::unique_ptr<Fiber::LocalData> localData;
std::shared_ptr<RequestContext> rcontext;
- AtomicLinkedListHook<RemoteTask> nextRemoteTask;
+ AtomicIntrusiveLinkedListHook<RemoteTask> nextRemoteTask;
};
intptr_t activateFiber(Fiber* fiber);
ExceptionCallback exceptionCallback_; /**< task exception callback */
- folly::AtomicLinkedList<Fiber, &Fiber::nextRemoteReady_> remoteReadyQueue_;
+ folly::AtomicIntrusiveLinkedList<Fiber, &Fiber::nextRemoteReady_>
+ remoteReadyQueue_;
- folly::AtomicLinkedList<RemoteTask, &RemoteTask::nextRemoteTask>
+ folly::AtomicIntrusiveLinkedList<RemoteTask, &RemoteTask::nextRemoteTask>
remoteTaskQueue_;
std::shared_ptr<TimeoutController> timeoutManager_;
#include <memory>
#include <unordered_map>
-#include <folly/AtomicLinkedList.h>
#include <folly/ThreadLocal.h>
#include <folly/Synchronized.h>
--- /dev/null
+/*
+ * Copyright 2016 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <memory>
+#include <thread>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include <folly/AtomicIntrusiveLinkedList.h>
+#include <folly/AtomicLinkedList.h>
+
+class TestIntrusiveObject {
+ public:
+ explicit TestIntrusiveObject(size_t id__) : id_(id__) {}
+ size_t id() {
+ return id_;
+ }
+
+ private:
+ folly::AtomicIntrusiveLinkedListHook<TestIntrusiveObject> hook_;
+ size_t id_;
+
+ public:
+ using List = folly::AtomicIntrusiveLinkedList<
+ TestIntrusiveObject,
+ &TestIntrusiveObject::hook_>;
+};
+
+TEST(AtomicIntrusiveLinkedList, Basic) {
+ TestIntrusiveObject a(1), b(2), c(3);
+
+ TestIntrusiveObject::List list;
+
+ EXPECT_TRUE(list.empty());
+
+ {
+ EXPECT_TRUE(list.insertHead(&a));
+ EXPECT_FALSE(list.insertHead(&b));
+
+ EXPECT_FALSE(list.empty());
+
+ size_t id = 0;
+ list.sweep(
+ [&](TestIntrusiveObject* obj) mutable { EXPECT_EQ(++id, obj->id()); });
+
+ EXPECT_TRUE(list.empty());
+ }
+
+ // Try re-inserting the same item (b) and a new item (c)
+ {
+ EXPECT_TRUE(list.insertHead(&b));
+ EXPECT_FALSE(list.insertHead(&c));
+
+ EXPECT_FALSE(list.empty());
+
+ size_t id = 1;
+ list.sweep(
+ [&](TestIntrusiveObject* obj) mutable { EXPECT_EQ(++id, obj->id()); });
+
+ EXPECT_TRUE(list.empty());
+ }
+
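+ // Moving an empty list should also be valid.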
+ TestIntrusiveObject::List movedList = std::move(list);
+}
+
+TEST(AtomicIntrusiveLinkedList, Move) {
+ TestIntrusiveObject a(1), b(2);
+
+ TestIntrusiveObject::List list1;
+
+ EXPECT_TRUE(list1.insertHead(&a));
+ EXPECT_FALSE(list1.insertHead(&b));
+
+ EXPECT_FALSE(list1.empty());
+
+ TestIntrusiveObject::List list2(std::move(list1));
+
+ EXPECT_TRUE(list1.empty());
+ EXPECT_FALSE(list2.empty());
+
+ TestIntrusiveObject::List list3;
+
+ EXPECT_TRUE(list3.empty());
+
+ list3 = std::move(list2);
+
+ EXPECT_TRUE(list2.empty());
+ EXPECT_FALSE(list3.empty());
+
+ size_t id = 0;
+ list3.sweep(
+ [&](TestIntrusiveObject* obj) mutable { EXPECT_EQ(++id, obj->id()); });
+}
+
+TEST(AtomicIntrusiveLinkedList, Stress) {
+ constexpr size_t kNumThreads = 32;
+ constexpr size_t kNumElements = 100000;
+
+ std::vector<TestIntrusiveObject> elements;
+ for (size_t i = 0; i < kNumThreads * kNumElements; ++i) {
+ elements.emplace_back(i);
+ }
+
+ TestIntrusiveObject::List list;
+
+ std::vector<std::thread> threads;
+ for (size_t threadId = 0; threadId < kNumThreads; ++threadId) {
+ threads.emplace_back(
+ [threadId, kNumThreads, kNumElements, &list, &elements]() {
+ for (size_t id = 0; id < kNumElements; ++id) {
+ list.insertHead(&elements[threadId + kNumThreads * id]);
+ }
+ });
+ }
+
+ std::vector<size_t> ids;
+ TestIntrusiveObject* prev{nullptr};
+
+ while (ids.size() < kNumThreads * kNumElements) {
+ list.sweep([&](TestIntrusiveObject* current) {
+ ids.push_back(current->id());
+
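+ // sweep() visits elements in insertion order, so two adjacent elements
+ // inserted by the same thread (equal id modulo kNumThreads) must be
+ // exactly kNumThreads apart.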
+ if (prev && prev->id() % kNumThreads == current->id() % kNumThreads) {
+ EXPECT_EQ(prev->id() + kNumThreads, current->id());
+ }
+
+ prev = current;
+ });
+ }
+
+ std::sort(ids.begin(), ids.end());
+
+ for (size_t i = 0; i < kNumThreads * kNumElements; ++i) {
+ EXPECT_EQ(i, ids[i]);
+ }
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+}
+
+class TestObject {
+ public:
+ TestObject(size_t id__, std::shared_ptr<void> ptr) : id_(id__), ptr_(ptr) {}
+
+ size_t id() {
+ return id_;
+ }
+
+ private:
+ size_t id_;
+ std::shared_ptr<void> ptr_;
+};
+
+TEST(AtomicLinkedList, Basic) {
+ constexpr size_t kNumElements = 10;
+
+ using List = folly::AtomicLinkedList<TestObject>;
+ List list;
+
+ std::shared_ptr<void> ptr = std::make_shared<int>(42);
+
+ for (size_t id = 0; id < kNumElements; ++id) {
+ list.insertHead({id, ptr});
+ }
+
+ size_t counter = 0;
+
+ list.sweep([&](TestObject object) {
+ EXPECT_EQ(counter, object.id());
+
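+ // One reference is held by the local `ptr`, one by `object` (just
+ // moved out of its wrapper), and one by each element still in the list.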
+ EXPECT_EQ(1 + kNumElements - counter, ptr.use_count());
+
+ ++counter;
+ });
+
+ EXPECT_TRUE(ptr.unique());
+}