/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * This module implements a Synchronized abstraction useful in
 * mutex-based concurrency.
 *
 * @author: Andrei Alexandrescu (andrei.alexandrescu@fb.com)
 */
#include <mutex>
#include <type_traits>
#include <utility>

#include <boost/thread.hpp>

#include <folly/Preprocessor.h>
#include <folly/SharedMutex.h>
#include <folly/Traits.h>
36 enum InternalDoNotUse {};
39 * Free function adaptors for std:: and boost::
42 // Android, OSX, and Cygwin don't have timed mutexes
43 #if defined(ANDROID) || defined(__ANDROID__) || \
44 defined(__APPLE__) || defined(__CYGWIN__)
45 # define FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES 0
47 # define FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES 1
51 * Yields true iff T has .lock() and .unlock() member functions. This
52 * is done by simply enumerating the mutexes with this interface in
56 struct HasLockUnlock {
57 enum { value = IsOneOf<T
59 , std::recursive_mutex
61 , boost::recursive_mutex
63 #if FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES
65 , std::recursive_timed_mutex
67 , boost::recursive_timed_mutex
73 * Yields true iff T has .lock_shared() and .unlock_shared() member functions.
74 * This is done by simply enumerating the mutexes with this interface.
77 struct HasLockSharedUnlockShared {
78 enum { value = IsOneOf<T
84 * Acquires a mutex for reading by calling .lock().
86 * This variant is not appropriate for shared mutexes.
89 typename std::enable_if<
90 HasLockUnlock<T>::value && !HasLockSharedUnlockShared<T>::value>::type
91 acquireRead(T& mutex) {
96 * Acquires a mutex for reading by calling .lock_shared().
98 * This variant is not appropriate for nonshared mutexes.
101 typename std::enable_if<HasLockSharedUnlockShared<T>::value>::type
102 acquireRead(T& mutex) {
107 * Acquires a mutex for reading and writing by calling .lock().
110 typename std::enable_if<HasLockUnlock<T>::value>::type
111 acquireReadWrite(T& mutex) {
115 #if FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES
117 * Acquires a mutex for reading by calling .try_lock_shared_for(). This applies
118 * to boost::shared_mutex.
121 typename std::enable_if<
123 , boost::shared_mutex
124 >::value, bool>::type
125 acquireRead(T& mutex,
126 unsigned int milliseconds) {
127 return mutex.try_lock_shared_for(boost::chrono::milliseconds(milliseconds));
131 * Acquires a mutex for reading and writing with timeout by calling
132 * .try_lock_for(). This applies to two of the std mutex classes as
136 typename std::enable_if<
139 , std::recursive_timed_mutex
140 >::value, bool>::type
141 acquireReadWrite(T& mutex,
142 unsigned int milliseconds) {
143 // work around try_lock_for bug in some gcc versions, see
144 // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=54562
145 // TODO: Fixed in gcc-4.9.0.
146 return mutex.try_lock()
147 || (milliseconds > 0 &&
148 mutex.try_lock_until(std::chrono::system_clock::now() +
149 std::chrono::milliseconds(milliseconds)));
153 * Acquires a mutex for reading and writing with timeout by calling
154 * .try_lock_for(). This applies to three of the boost mutex classes as
158 typename std::enable_if<
160 , boost::shared_mutex
162 , boost::recursive_timed_mutex
163 >::value, bool>::type
164 acquireReadWrite(T& mutex,
165 unsigned int milliseconds) {
166 return mutex.try_lock_for(boost::chrono::milliseconds(milliseconds));
168 #endif // FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES
171 * Releases a mutex previously acquired for reading by calling
172 * .unlock(). The exception is boost::shared_mutex, which has a
173 * special primitive called .unlock_shared().
176 typename std::enable_if<
177 HasLockUnlock<T>::value && !HasLockSharedUnlockShared<T>::value>::type
178 releaseRead(T& mutex) {
183 * Special case for boost::shared_mutex.
186 typename std::enable_if<HasLockSharedUnlockShared<T>::value>::type
187 releaseRead(T& mutex) {
188 mutex.unlock_shared();
192 * Releases a mutex previously acquired for reading-writing by calling
196 typename std::enable_if<HasLockUnlock<T>::value>::type
197 releaseReadWrite(T& mutex) {
201 } // namespace detail
/**
 * Synchronized<T> encapsulates an object of type T (a "datum") paired
 * with a mutex. The only way to access the datum is while the mutex
 * is locked, and Synchronized makes it virtually impossible to do
 * otherwise. The code that would access the datum in unsafe ways
 * would look odd and convoluted, thus readily alerting the human
 * reviewer. In contrast, the code that uses Synchronized<T> correctly
 * looks simple and intuitive.
 *
 * The second parameter must be a mutex type. Supported mutexes are
 * std::mutex, std::recursive_mutex, std::timed_mutex,
 * std::recursive_timed_mutex, boost::mutex, boost::recursive_mutex,
 * boost::shared_mutex, boost::timed_mutex,
 * boost::recursive_timed_mutex, and the folly/RWSpinLock.h classes.
 *
 * You may define Synchronized support by defining 4-6 primitives in
 * the same namespace as the mutex class (found via ADL). The
 * primitives are: acquireRead, acquireReadWrite, releaseRead, and
 * releaseReadWrite. Two optional primitives for timeout operations are
 * overloads of acquireRead and acquireReadWrite. For signatures,
 * refer to the namespace detail below, which implements the
 * primitives for mutexes in std and boost.
 */
227 template <class T, class Mutex = SharedMutex>
228 struct Synchronized {
230 * Default constructor leaves both members call their own default
233 Synchronized() = default;
236 static constexpr bool nxCopyCtor{
237 std::is_nothrow_copy_constructible<T>::value};
238 static constexpr bool nxMoveCtor{
239 std::is_nothrow_move_constructible<T>::value};
242 * Helper constructors to enable Synchronized for
243 * non-default constructible types T.
244 * Guards are created in actual public constructors and are alive
245 * for the time required to construct the object
247 template <typename Guard>
248 Synchronized(const Synchronized& rhs,
249 const Guard& /*guard*/) noexcept(nxCopyCtor)
250 : datum_(rhs.datum_) {}
252 template <typename Guard>
253 Synchronized(Synchronized&& rhs, const Guard& /*guard*/) noexcept(nxMoveCtor)
254 : datum_(std::move(rhs.datum_)) {}
258 * Copy constructor copies the data (with locking the source and
259 * all) but does NOT copy the mutex. Doing so would result in
262 Synchronized(const Synchronized& rhs) noexcept(nxCopyCtor)
263 : Synchronized(rhs, rhs.operator->()) {}
266 * Move constructor moves the data (with locking the source and all)
267 * but does not move the mutex.
269 Synchronized(Synchronized&& rhs) noexcept(nxMoveCtor)
270 : Synchronized(std::move(rhs), rhs.operator->()) {}
273 * Constructor taking a datum as argument copies it. There is no
274 * need to lock the constructing object.
276 explicit Synchronized(const T& rhs) noexcept(nxCopyCtor) : datum_(rhs) {}
279 * Constructor taking a datum rvalue as argument moves it. Again,
280 * there is no need to lock the constructing object.
282 explicit Synchronized(T&& rhs) noexcept(nxMoveCtor)
283 : datum_(std::move(rhs)) {}
286 * Lets you construct non-movable types in-place. Use the constexpr
287 * instance `construct_in_place` as the first argument.
289 template <typename... Args>
290 explicit Synchronized(construct_in_place_t, Args&&... args)
291 : datum_(std::forward<Args>(args)...) {}
294 * The canonical assignment operator only assigns the data, NOT the
295 * mutex. It locks the two objects in ascending order of their
298 Synchronized& operator=(const Synchronized& rhs) {
300 // Self-assignment, pass.
301 } else if (this < &rhs) {
302 auto guard1 = operator->();
303 auto guard2 = rhs.operator->();
306 auto guard1 = rhs.operator->();
307 auto guard2 = operator->();
314 * Move assignment operator, only assigns the data, NOT the
315 * mutex. It locks the two objects in ascending order of their
318 Synchronized& operator=(Synchronized&& rhs) {
320 // Self-assignment, pass.
321 } else if (this < &rhs) {
322 auto guard1 = operator->();
323 auto guard2 = rhs.operator->();
324 datum_ = std::move(rhs.datum_);
326 auto guard1 = rhs.operator->();
327 auto guard2 = operator->();
328 datum_ = std::move(rhs.datum_);
334 * Lock object, assign datum.
336 Synchronized& operator=(const T& rhs) {
337 auto guard = operator->();
343 * Lock object, move-assign datum.
345 Synchronized& operator=(T&& rhs) {
346 auto guard = operator->();
347 datum_ = std::move(rhs);
352 * A LockedPtr lp keeps a modifiable (i.e. non-const)
353 * Synchronized<T> object locked for the duration of lp's
354 * existence. Because of this, you get to access the datum's methods
355 * directly by using lp->fun().
359 * Found no reason to leave this hanging.
361 LockedPtr() = delete;
364 * Takes a Synchronized and locks it.
366 explicit LockedPtr(Synchronized* parent) : parent_(parent) {
371 * Takes a Synchronized and attempts to lock it for some
372 * milliseconds. If not, the LockedPtr will be subsequently null.
374 LockedPtr(Synchronized* parent, unsigned int milliseconds) {
375 using namespace detail;
376 if (acquireReadWrite(parent->mutex_, milliseconds)) {
380 // Could not acquire the resource, pointer is null
385 * This is used ONLY inside SYNCHRONIZED_DUAL. It initializes
386 * everything properly, but does not lock the parent because it
387 * "knows" someone else will lock it. Please do not use.
389 LockedPtr(Synchronized* parent, detail::InternalDoNotUse)
394 * Copy ctor adds one lock.
396 LockedPtr(const LockedPtr& rhs) : parent_(rhs.parent_) {
401 * Assigning from another LockedPtr results in freeing the former
402 * lock and acquiring the new one. The method works with
403 * self-assignment (does nothing).
405 LockedPtr& operator=(const LockedPtr& rhs) {
406 if (parent_ != rhs.parent_) {
407 if (parent_) parent_->mutex_.unlock();
408 parent_ = rhs.parent_;
415 * Destructor releases.
418 using namespace detail;
419 if (parent_) releaseReadWrite(parent_->mutex_);
423 * Safe to access the data. Don't save the obtained pointer by
424 * invoking lp.operator->() by hand. Also, if the method returns a
425 * handle stored inside the datum, don't use this idiom - use
426 * SYNCHRONIZED below.
429 return parent_ ? &parent_->datum_ : nullptr;
433 * This class temporarily unlocks a LockedPtr in a scoped
434 * manner. It is used inside of the UNSYNCHRONIZED macro.
436 struct Unsynchronizer {
437 explicit Unsynchronizer(LockedPtr* p) : parent_(p) {
438 using namespace detail;
439 releaseReadWrite(parent_->parent_->mutex_);
441 Unsynchronizer(const Unsynchronizer&) = delete;
442 Unsynchronizer& operator=(const Unsynchronizer&) = delete;
446 LockedPtr* operator->() const {
452 friend struct Unsynchronizer;
453 Unsynchronizer typeHackDoNotUse();
455 template <class P1, class P2>
456 friend void lockInOrder(P1& p1, P2& p2);
460 using namespace detail;
461 if (parent_) acquireReadWrite(parent_->mutex_);
464 // This is the entire state of LockedPtr.
465 Synchronized* parent_;
469 * ConstLockedPtr does exactly what LockedPtr does, but for const
470 * Synchronized objects. Of interest is that ConstLockedPtr only
471 * uses a read lock, which is faster but more restrictive - you only
472 * get to call const methods of the datum.
474 * Much of the code between LockedPtr and
475 * ConstLockedPtr is identical and could be factor out, but there
476 * are enough nagging little differences to not justify the trouble.
478 struct ConstLockedPtr {
479 ConstLockedPtr() = delete;
480 explicit ConstLockedPtr(const Synchronized* parent) : parent_(parent) {
483 ConstLockedPtr(const Synchronized* parent, detail::InternalDoNotUse)
486 ConstLockedPtr(const ConstLockedPtr& rhs) : parent_(rhs.parent_) {
489 explicit ConstLockedPtr(const LockedPtr& rhs) : parent_(rhs.parent_) {
492 ConstLockedPtr(const Synchronized* parent, unsigned int milliseconds) {
493 using namespace detail;
500 // Could not acquire the resource, pointer is null
504 ConstLockedPtr& operator=(const ConstLockedPtr& rhs) {
505 if (parent_ != rhs.parent_) {
506 if (parent_) parent_->mutex_.unlock_shared();
507 parent_ = rhs.parent_;
512 using namespace detail;
513 if (parent_) releaseRead(parent_->mutex_);
516 const T* operator->() const {
517 return parent_ ? &parent_->datum_ : nullptr;
520 struct Unsynchronizer {
521 explicit Unsynchronizer(ConstLockedPtr* p) : parent_(p) {
522 using namespace detail;
523 releaseRead(parent_->parent_->mutex_);
525 Unsynchronizer(const Unsynchronizer&) = delete;
526 Unsynchronizer& operator=(const Unsynchronizer&) = delete;
528 using namespace detail;
529 acquireRead(parent_->parent_->mutex_);
531 ConstLockedPtr* operator->() const {
535 ConstLockedPtr* parent_;
537 friend struct Unsynchronizer;
538 Unsynchronizer typeHackDoNotUse();
540 template <class P1, class P2>
541 friend void lockInOrder(P1& p1, P2& p2);
545 using namespace detail;
546 if (parent_) acquireRead(parent_->mutex_);
549 const Synchronized* parent_;
553 * This accessor offers a LockedPtr. In turn. LockedPtr offers
554 * operator-> returning a pointer to T. The operator-> keeps
555 * expanding until it reaches a pointer, so syncobj->foo() will lock
556 * the object and call foo() against it.
558 LockedPtr operator->() {
559 return LockedPtr(this);
563 * Same, for constant objects. You will be able to invoke only const
566 ConstLockedPtr operator->() const {
567 return ConstLockedPtr(this);
571 * Attempts to acquire for a given number of milliseconds. If
572 * acquisition is unsuccessful, the returned LockedPtr is NULL.
574 LockedPtr timedAcquire(unsigned int milliseconds) {
575 return LockedPtr(this, milliseconds);
579 * As above, for a constant object.
581 ConstLockedPtr timedAcquire(unsigned int milliseconds) const {
582 return ConstLockedPtr(this, milliseconds);
586 * Used by SYNCHRONIZED_DUAL.
588 LockedPtr internalDoNotUse() {
589 return LockedPtr(this, detail::InternalDoNotUse());
595 ConstLockedPtr internalDoNotUse() const {
596 return ConstLockedPtr(this, detail::InternalDoNotUse());
600 * Sometimes, although you have a mutable object, you only want to
601 * call a const method against it. The most efficient way to achieve
602 * that is by using a read lock. You get to do so by using
603 * obj.asConst()->method() instead of obj->method().
605 const Synchronized& asConst() const {
610 * Swaps with another Synchronized. Protected against
611 * self-swap. Only data is swapped. Locks are acquired in increasing
614 void swap(Synchronized& rhs) {
619 return rhs.swap(*this);
621 auto guard1 = operator->();
622 auto guard2 = rhs.operator->();
625 swap(datum_, rhs.datum_);
629 * Swap with another datum. Recommended because it keeps the mutex
633 LockedPtr guard = operator->();
640 * Copies datum to a given target.
642 void copy(T* target) const {
643 ConstLockedPtr guard = operator->();
648 * Returns a fresh copy of the datum.
651 ConstLockedPtr guard = operator->();
657 mutable Mutex mutex_;
660 // Non-member swap primitive
661 template <class T, class M>
662 void swap(Synchronized<T, M>& lhs, Synchronized<T, M>& rhs) {
/**
 * SYNCHRONIZED is the main facility that makes Synchronized<T>
 * helpful. It is a pseudo-statement that introduces a scope where the
 * object is locked. Inside that scope you get to access the unadorned
 * datum.
 *
 * Example:
 *
 * Synchronized<vector<int>> svector;
 * ...
 * SYNCHRONIZED (svector) { ... use svector as a vector<int> ... }
 * or
 * SYNCHRONIZED (v, svector) { ... use v as a vector<int> ... }
 *
 * Refer to folly/docs/Synchronized.md for a detailed explanation and more
 * examples.
 */
#define SYNCHRONIZED(...)                                             \
  FOLLY_PUSH_WARNING                                                  \
  FOLLY_GCC_DISABLE_WARNING(shadow)                                   \
  if (bool SYNCHRONIZED_state = false) {                              \
  } else                                                              \
    for (auto SYNCHRONIZED_lockedPtr =                                \
             (FB_VA_GLUE(FB_ARG_2_OR_1, (__VA_ARGS__))).operator->(); \
         !SYNCHRONIZED_state;                                         \
         SYNCHRONIZED_state = true)                                   \
      for (auto& FB_VA_GLUE(FB_ARG_1, (__VA_ARGS__)) =                \
               *SYNCHRONIZED_lockedPtr.operator->();                  \
           !SYNCHRONIZED_state;                                       \
           SYNCHRONIZED_state = true)                                 \
  FOLLY_POP_WARNING
/**
 * Like SYNCHRONIZED, but uses timedAcquire; the introduced name is a
 * LockedPtr that is null if acquisition timed out.
 */
#define TIMED_SYNCHRONIZED(timeout, ...)                                       \
  if (bool SYNCHRONIZED_state = false) {                                       \
  } else                                                                       \
    for (auto SYNCHRONIZED_lockedPtr =                                         \
             (FB_VA_GLUE(FB_ARG_2_OR_1, (__VA_ARGS__))).timedAcquire(timeout); \
         !SYNCHRONIZED_state;                                                  \
         SYNCHRONIZED_state = true)                                            \
      for (auto FB_VA_GLUE(FB_ARG_1, (__VA_ARGS__)) =                          \
               SYNCHRONIZED_lockedPtr.operator->();                            \
           !SYNCHRONIZED_state;                                                \
           SYNCHRONIZED_state = true)
/**
 * Similar to SYNCHRONIZED, but only uses a read lock.
 */
#define SYNCHRONIZED_CONST(...)            \
  SYNCHRONIZED(                            \
      FB_VA_GLUE(FB_ARG_1, (__VA_ARGS__)), \
      (FB_VA_GLUE(FB_ARG_2_OR_1, (__VA_ARGS__))).asConst())
/**
 * Similar to TIMED_SYNCHRONIZED, but only uses a read lock.
 */
#define TIMED_SYNCHRONIZED_CONST(timeout, ...) \
  TIMED_SYNCHRONIZED(                          \
      timeout,                                 \
      FB_VA_GLUE(FB_ARG_1, (__VA_ARGS__)),     \
      (FB_VA_GLUE(FB_ARG_2_OR_1, (__VA_ARGS__))).asConst())
/**
 * Temporarily disables synchronization inside a SYNCHRONIZED block.
 * Relies on the Unsynchronizer RAII helper declared on the LockedPtr
 * types, so it is only valid inside SYNCHRONIZED.
 */
#define UNSYNCHRONIZED(name)                                    \
  for (decltype(SYNCHRONIZED_lockedPtr.typeHackDoNotUse())      \
         SYNCHRONIZED_state3(&SYNCHRONIZED_lockedPtr);          \
       !SYNCHRONIZED_state; SYNCHRONIZED_state = true)          \
    for (auto& name = *SYNCHRONIZED_state3.operator->();        \
         !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
/**
 * Locks two objects in increasing order of their addresses.
 * P1/P2 are LockedPtr-like guards created unlocked (via the
 * InternalDoNotUse constructors); this acquires both.
 */
template <class P1, class P2>
void lockInOrder(P1& p1, P2& p2) {
  if (static_cast<const void*>(p1.operator->()) >
      static_cast<const void*>(p2.operator->())) {
    p2.acquire();
    p1.acquire();
  } else {
    p1.acquire();
    p2.acquire();
  }
}
/**
 * Synchronizes two Synchronized objects (they may encapsulate
 * different data). Synchronization is done in increasing address of
 * object order, so there is no deadlock risk.
 */
#define SYNCHRONIZED_DUAL(n1, e1, n2, e2)                          \
  if (bool SYNCHRONIZED_state = false) {} else                     \
    for (auto SYNCHRONIZED_lp1 = (e1).internalDoNotUse();          \
         !SYNCHRONIZED_state; SYNCHRONIZED_state = true)           \
      for (auto& n1 = *SYNCHRONIZED_lp1.operator->();              \
           !SYNCHRONIZED_state; SYNCHRONIZED_state = true)         \
        for (auto SYNCHRONIZED_lp2 = (e2).internalDoNotUse();      \
             !SYNCHRONIZED_state; SYNCHRONIZED_state = true)       \
          for (auto& n2 = *SYNCHRONIZED_lp2.operator->();          \
               !SYNCHRONIZED_state; SYNCHRONIZED_state = true)     \
            if ((::folly::lockInOrder(                             \
                   SYNCHRONIZED_lp1, SYNCHRONIZED_lp2),            \
                 false)) {                                         \
            } else
772 } /* namespace folly */