/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * This module implements a Synchronized abstraction useful in
 * mutex-based concurrency.
 *
 * @author: Andrei Alexandrescu (andrei.alexandrescu@fb.com)
 */
24 #ifndef SYNCHRONIZED_H_
25 #define SYNCHRONIZED_H_
#include <chrono>
#include <mutex>
#include <type_traits>

#include <boost/thread.hpp>

#include <folly/Preprocessor.h>
#include <folly/Traits.h>
// Tag type used to select the "already locked by someone else" constructors
// of LockedPtr/ConstLockedPtr (see SYNCHRONIZED_DUAL). Not for general use.
enum InternalDoNotUse {};
/**
 * Free function adaptors for std:: and boost::
 */
// Android, OSX, and Cygwin don't have timed mutexes
#if defined(ANDROID) || defined(__ANDROID__) || \
    defined(__APPLE__) || defined(__CYGWIN__)
# define FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES 0
#else
# define FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES 1
#endif
51 * Yields true iff T has .lock() and .unlock() member functions. This
52 * is done by simply enumerating the mutexes with this interface in
56 struct HasLockUnlock {
57 enum { value = IsOneOf<T
59 , std::recursive_mutex
61 , boost::recursive_mutex
63 #if FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES
65 , std::recursive_timed_mutex
67 , boost::recursive_timed_mutex
73 * Yields true iff T has .lock_shared() and .unlock_shared() member functions.
74 * This is done by simply enumerating the mutexes with this interface.
77 struct HasLockSharedUnlockShared {
78 enum { value = IsOneOf<T
84 * Acquires a mutex for reading by calling .lock().
86 * This variant is not appropriate for shared mutexes.
89 typename std::enable_if<
90 HasLockUnlock<T>::value && !HasLockSharedUnlockShared<T>::value>::type
91 acquireRead(T& mutex) {
96 * Acquires a mutex for reading by calling .lock_shared().
98 * This variant is not appropriate for nonshared mutexes.
101 typename std::enable_if<HasLockSharedUnlockShared<T>::value>::type
102 acquireRead(T& mutex) {
107 * Acquires a mutex for reading and writing by calling .lock().
110 typename std::enable_if<HasLockUnlock<T>::value>::type
111 acquireReadWrite(T& mutex) {
115 #if FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES
117 * Acquires a mutex for reading by calling .try_lock_shared_for(). This applies
118 * to boost::shared_mutex.
121 typename std::enable_if<
123 , boost::shared_mutex
124 >::value, bool>::type
125 acquireRead(T& mutex,
126 unsigned int milliseconds) {
127 return mutex.try_lock_shared_for(boost::chrono::milliseconds(milliseconds));
131 * Acquires a mutex for reading and writing with timeout by calling
132 * .try_lock_for(). This applies to two of the std mutex classes as
136 typename std::enable_if<
139 , std::recursive_timed_mutex
140 >::value, bool>::type
141 acquireReadWrite(T& mutex,
142 unsigned int milliseconds) {
143 // work around try_lock_for bug in some gcc versions, see
144 // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=54562
145 // TODO: Fixed in gcc-4.9.0.
146 return mutex.try_lock()
147 || (milliseconds > 0 &&
148 mutex.try_lock_until(std::chrono::system_clock::now() +
149 std::chrono::milliseconds(milliseconds)));
153 * Acquires a mutex for reading and writing with timeout by calling
154 * .try_lock_for(). This applies to three of the boost mutex classes as
158 typename std::enable_if<
160 , boost::shared_mutex
162 , boost::recursive_timed_mutex
163 >::value, bool>::type
164 acquireReadWrite(T& mutex,
165 unsigned int milliseconds) {
166 return mutex.try_lock_for(boost::chrono::milliseconds(milliseconds));
168 #endif // FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES
171 * Releases a mutex previously acquired for reading by calling
172 * .unlock(). The exception is boost::shared_mutex, which has a
173 * special primitive called .unlock_shared().
176 typename std::enable_if<
177 HasLockUnlock<T>::value && !HasLockSharedUnlockShared<T>::value>::type
178 releaseRead(T& mutex) {
183 * Special case for boost::shared_mutex.
186 typename std::enable_if<HasLockSharedUnlockShared<T>::value>::type
187 releaseRead(T& mutex) {
188 mutex.unlock_shared();
192 * Releases a mutex previously acquired for reading-writing by calling
196 typename std::enable_if<HasLockUnlock<T>::value>::type
197 releaseReadWrite(T& mutex) {
201 } // namespace detail
/**
 * Synchronized<T> encapsulates an object of type T (a "datum") paired
 * with a mutex. The only way to access the datum is while the mutex
 * is locked, and Synchronized makes it virtually impossible to do
 * otherwise. The code that would access the datum in unsafe ways
 * would look odd and convoluted, thus readily alerting the human
 * reviewer. In contrast, the code that uses Synchronized<T> correctly
 * looks simple and intuitive.
 *
 * The second parameter must be a mutex type. Supported mutexes are
 * std::mutex, std::recursive_mutex, std::timed_mutex,
 * std::recursive_timed_mutex, boost::mutex, boost::recursive_mutex,
 * boost::shared_mutex, boost::timed_mutex,
 * boost::recursive_timed_mutex, and the folly/RWSpinLock.h
 * classes.
 *
 * You may define Synchronized support by defining 4-6 primitives in
 * the same namespace as the mutex class (found via ADL). The
 * primitives are: acquireRead, acquireReadWrite, releaseRead, and
 * releaseReadWrite. Two optional primitives for timeout operations are
 * overloads of acquireRead and acquireReadWrite. For signatures,
 * refer to the namespace detail below, which implements the
 * primitives for mutexes in std and boost.
 */
227 template <class T, class Mutex = boost::shared_mutex>
228 struct Synchronized {
230 * Default constructor leaves both members call their own default
233 Synchronized() = default;
236 static constexpr bool nxCopyCtor{
237 std::is_nothrow_copy_constructible<T>::value};
238 static constexpr bool nxMoveCtor{
239 std::is_nothrow_move_constructible<T>::value};
242 * Helper constructors to enable Synchronized for
243 * non-default constructible types T.
244 * Guards are created in actual public constructors and are alive
245 * for the time required to construct the object
247 template <typename Guard>
248 Synchronized(const Synchronized& rhs,
249 const Guard& /*guard*/) noexcept(nxCopyCtor)
250 : datum_(rhs.datum_) {}
252 template <typename Guard>
253 Synchronized(Synchronized&& rhs, const Guard& /*guard*/) noexcept(nxMoveCtor)
254 : datum_(std::move(rhs.datum_)) {}
258 * Copy constructor copies the data (with locking the source and
259 * all) but does NOT copy the mutex. Doing so would result in
262 Synchronized(const Synchronized& rhs) noexcept(nxCopyCtor)
263 : Synchronized(rhs, rhs.operator->()) {}
266 * Move constructor moves the data (with locking the source and all)
267 * but does not move the mutex.
269 Synchronized(Synchronized&& rhs) noexcept(nxMoveCtor)
270 : Synchronized(std::move(rhs), rhs.operator->()) {}
273 * Constructor taking a datum as argument copies it. There is no
274 * need to lock the constructing object.
276 explicit Synchronized(const T& rhs) noexcept(nxCopyCtor) : datum_(rhs) {}
279 * Constructor taking a datum rvalue as argument moves it. Again,
280 * there is no need to lock the constructing object.
282 explicit Synchronized(T&& rhs) noexcept(nxMoveCtor)
283 : datum_(std::move(rhs)) {}
286 * The canonical assignment operator only assigns the data, NOT the
287 * mutex. It locks the two objects in ascending order of their
290 Synchronized& operator=(const Synchronized& rhs) {
292 // Self-assignment, pass.
293 } else if (this < &rhs) {
294 auto guard1 = operator->();
295 auto guard2 = rhs.operator->();
298 auto guard1 = rhs.operator->();
299 auto guard2 = operator->();
306 * Move assignment operator, only assigns the data, NOT the
307 * mutex. It locks the two objects in ascending order of their
310 Synchronized& operator=(Synchronized&& rhs) {
312 // Self-assignment, pass.
313 } else if (this < &rhs) {
314 auto guard1 = operator->();
315 auto guard2 = rhs.operator->();
316 datum_ = std::move(rhs.datum_);
318 auto guard1 = rhs.operator->();
319 auto guard2 = operator->();
320 datum_ = std::move(rhs.datum_);
326 * Lock object, assign datum.
328 Synchronized& operator=(const T& rhs) {
329 auto guard = operator->();
335 * Lock object, move-assign datum.
337 Synchronized& operator=(T&& rhs) {
338 auto guard = operator->();
339 datum_ = std::move(rhs);
344 * A LockedPtr lp keeps a modifiable (i.e. non-const)
345 * Synchronized<T> object locked for the duration of lp's
346 * existence. Because of this, you get to access the datum's methods
347 * directly by using lp->fun().
351 * Found no reason to leave this hanging.
353 LockedPtr() = delete;
356 * Takes a Synchronized and locks it.
358 explicit LockedPtr(Synchronized* parent) : parent_(parent) {
363 * Takes a Synchronized and attempts to lock it for some
364 * milliseconds. If not, the LockedPtr will be subsequently null.
366 LockedPtr(Synchronized* parent, unsigned int milliseconds) {
367 using namespace detail;
368 if (acquireReadWrite(parent->mutex_, milliseconds)) {
372 // Could not acquire the resource, pointer is null
377 * This is used ONLY inside SYNCHRONIZED_DUAL. It initializes
378 * everything properly, but does not lock the parent because it
379 * "knows" someone else will lock it. Please do not use.
381 LockedPtr(Synchronized* parent, detail::InternalDoNotUse)
386 * Copy ctor adds one lock.
388 LockedPtr(const LockedPtr& rhs) : parent_(rhs.parent_) {
393 * Assigning from another LockedPtr results in freeing the former
394 * lock and acquiring the new one. The method works with
395 * self-assignment (does nothing).
397 LockedPtr& operator=(const LockedPtr& rhs) {
398 if (parent_ != rhs.parent_) {
399 if (parent_) parent_->mutex_.unlock();
400 parent_ = rhs.parent_;
407 * Destructor releases.
410 using namespace detail;
411 if (parent_) releaseReadWrite(parent_->mutex_);
415 * Safe to access the data. Don't save the obtained pointer by
416 * invoking lp.operator->() by hand. Also, if the method returns a
417 * handle stored inside the datum, don't use this idiom - use
418 * SYNCHRONIZED below.
421 return parent_ ? &parent_->datum_ : nullptr;
425 * This class temporarily unlocks a LockedPtr in a scoped
426 * manner. It is used inside of the UNSYNCHRONIZED macro.
428 struct Unsynchronizer {
429 explicit Unsynchronizer(LockedPtr* p) : parent_(p) {
430 using namespace detail;
431 releaseReadWrite(parent_->parent_->mutex_);
433 Unsynchronizer(const Unsynchronizer&) = delete;
434 Unsynchronizer& operator=(const Unsynchronizer&) = delete;
438 LockedPtr* operator->() const {
444 friend struct Unsynchronizer;
445 Unsynchronizer typeHackDoNotUse();
447 template <class P1, class P2>
448 friend void lockInOrder(P1& p1, P2& p2);
452 using namespace detail;
453 if (parent_) acquireReadWrite(parent_->mutex_);
456 // This is the entire state of LockedPtr.
457 Synchronized* parent_;
461 * ConstLockedPtr does exactly what LockedPtr does, but for const
462 * Synchronized objects. Of interest is that ConstLockedPtr only
463 * uses a read lock, which is faster but more restrictive - you only
464 * get to call const methods of the datum.
466 * Much of the code between LockedPtr and
467 * ConstLockedPtr is identical and could be factor out, but there
468 * are enough nagging little differences to not justify the trouble.
470 struct ConstLockedPtr {
471 ConstLockedPtr() = delete;
472 explicit ConstLockedPtr(const Synchronized* parent) : parent_(parent) {
475 ConstLockedPtr(const Synchronized* parent, detail::InternalDoNotUse)
478 ConstLockedPtr(const ConstLockedPtr& rhs) : parent_(rhs.parent_) {
481 explicit ConstLockedPtr(const LockedPtr& rhs) : parent_(rhs.parent_) {
484 ConstLockedPtr(const Synchronized* parent, unsigned int milliseconds) {
485 using namespace detail;
492 // Could not acquire the resource, pointer is null
496 ConstLockedPtr& operator=(const ConstLockedPtr& rhs) {
497 if (parent_ != rhs.parent_) {
498 if (parent_) parent_->mutex_.unlock_shared();
499 parent_ = rhs.parent_;
504 using namespace detail;
505 if (parent_) releaseRead(parent_->mutex_);
508 const T* operator->() const {
509 return parent_ ? &parent_->datum_ : nullptr;
512 struct Unsynchronizer {
513 explicit Unsynchronizer(ConstLockedPtr* p) : parent_(p) {
514 using namespace detail;
515 releaseRead(parent_->parent_->mutex_);
517 Unsynchronizer(const Unsynchronizer&) = delete;
518 Unsynchronizer& operator=(const Unsynchronizer&) = delete;
520 using namespace detail;
521 acquireRead(parent_->parent_->mutex_);
523 ConstLockedPtr* operator->() const {
527 ConstLockedPtr* parent_;
529 friend struct Unsynchronizer;
530 Unsynchronizer typeHackDoNotUse();
532 template <class P1, class P2>
533 friend void lockInOrder(P1& p1, P2& p2);
537 using namespace detail;
538 if (parent_) acquireRead(parent_->mutex_);
541 const Synchronized* parent_;
545 * This accessor offers a LockedPtr. In turn. LockedPtr offers
546 * operator-> returning a pointer to T. The operator-> keeps
547 * expanding until it reaches a pointer, so syncobj->foo() will lock
548 * the object and call foo() against it.
550 LockedPtr operator->() {
551 return LockedPtr(this);
555 * Same, for constant objects. You will be able to invoke only const
558 ConstLockedPtr operator->() const {
559 return ConstLockedPtr(this);
563 * Attempts to acquire for a given number of milliseconds. If
564 * acquisition is unsuccessful, the returned LockedPtr is NULL.
566 LockedPtr timedAcquire(unsigned int milliseconds) {
567 return LockedPtr(this, milliseconds);
571 * As above, for a constant object.
573 ConstLockedPtr timedAcquire(unsigned int milliseconds) const {
574 return ConstLockedPtr(this, milliseconds);
578 * Used by SYNCHRONIZED_DUAL.
580 LockedPtr internalDoNotUse() {
581 return LockedPtr(this, detail::InternalDoNotUse());
587 ConstLockedPtr internalDoNotUse() const {
588 return ConstLockedPtr(this, detail::InternalDoNotUse());
592 * Sometimes, although you have a mutable object, you only want to
593 * call a const method against it. The most efficient way to achieve
594 * that is by using a read lock. You get to do so by using
595 * obj.asConst()->method() instead of obj->method().
597 const Synchronized& asConst() const {
602 * Swaps with another Synchronized. Protected against
603 * self-swap. Only data is swapped. Locks are acquired in increasing
606 void swap(Synchronized& rhs) {
611 return rhs.swap(*this);
613 auto guard1 = operator->();
614 auto guard2 = rhs.operator->();
617 swap(datum_, rhs.datum_);
621 * Swap with another datum. Recommended because it keeps the mutex
625 LockedPtr guard = operator->();
632 * Copies datum to a given target.
634 void copy(T* target) const {
635 ConstLockedPtr guard = operator->();
640 * Returns a fresh copy of the datum.
643 ConstLockedPtr guard = operator->();
649 mutable Mutex mutex_;
652 // Non-member swap primitive
653 template <class T, class M>
654 void swap(Synchronized<T, M>& lhs, Synchronized<T, M>& rhs) {
/**
 * SYNCHRONIZED is the main facility that makes Synchronized<T>
 * helpful. It is a pseudo-statement that introduces a scope where the
 * object is locked. Inside that scope you get to access the unadorned
 * datum.
 *
 * Example:
 *
 * Synchronized<vector<int>> svector;
 * ...
 * SYNCHRONIZED (svector) { ... use svector as a vector<int> ... }
 * or
 * SYNCHRONIZED (v, svector) { ... use v as a vector<int> ... }
 *
 * Refer to folly/docs/Synchronized.md for a detailed explanation and more
 * examples.
 */
#define SYNCHRONIZED(...)                                       \
  if (bool SYNCHRONIZED_state = false) {} else                  \
    for (auto SYNCHRONIZED_lockedPtr =                          \
           (FB_ARG_2_OR_1(__VA_ARGS__)).operator->();           \
         !SYNCHRONIZED_state; SYNCHRONIZED_state = true)        \
      for (auto& FB_ARG_1(__VA_ARGS__) =                        \
             *SYNCHRONIZED_lockedPtr.operator->();              \
           !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
/**
 * Same pseudo-statement as SYNCHRONIZED, but acquisition uses
 * timedAcquire(timeout). The introduced name binds to the raw pointer
 * returned by the locked pointer's operator-> — it is null when the
 * acquisition timed out, so the body must check it.
 */
#define TIMED_SYNCHRONIZED(timeout, ...)                        \
  if (bool SYNCHRONIZED_state = false) {} else                  \
    for (auto SYNCHRONIZED_lockedPtr =                          \
           (FB_ARG_2_OR_1(__VA_ARGS__)).timedAcquire(timeout);  \
         !SYNCHRONIZED_state; SYNCHRONIZED_state = true)        \
      for (auto FB_ARG_1(__VA_ARGS__) =                         \
             SYNCHRONIZED_lockedPtr.operator->();               \
           !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
/**
 * Similar to SYNCHRONIZED, but only uses a read lock
 * (routes the expression through asConst()).
 */
#define SYNCHRONIZED_CONST(...)                         \
  SYNCHRONIZED(FB_ARG_1(__VA_ARGS__),                   \
               (FB_ARG_2_OR_1(__VA_ARGS__)).asConst())
/**
 * Similar to TIMED_SYNCHRONIZED, but only uses a read lock
 * (routes the expression through asConst()).
 */
#define TIMED_SYNCHRONIZED_CONST(timeout, ...)                  \
  TIMED_SYNCHRONIZED(timeout, FB_ARG_1(__VA_ARGS__),            \
                     (FB_ARG_2_OR_1(__VA_ARGS__)).asConst())
/**
 * Temporarily disables synchronization inside a SYNCHRONIZED block.
 * Creates an Unsynchronizer (which releases the enclosing lock for
 * the duration of the scope) from the enclosing macro's
 * SYNCHRONIZED_lockedPtr, then binds `name` for use in the body.
 */
#define UNSYNCHRONIZED(name)                                    \
  for (decltype(SYNCHRONIZED_lockedPtr.typeHackDoNotUse())      \
         SYNCHRONIZED_state3(&SYNCHRONIZED_lockedPtr);          \
       !SYNCHRONIZED_state; SYNCHRONIZED_state = true)          \
    for (auto name = *SYNCHRONIZED_state3.operator->();         \
         !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
/**
 * Locks two objects in increasing order of their addresses.
 * p1/p2 are (Const)LockedPtr-like objects created unlocked via
 * internalDoNotUse(); this is the locking step of SYNCHRONIZED_DUAL.
 */
template <class P1, class P2>
void lockInOrder(P1& p1, P2& p2) {
  if (static_cast<const void*>(p1.operator->()) >
      static_cast<const void*>(p2.operator->())) {
    // p2 lives at the lower address; lock it first.
    p2.acquire();
    p1.acquire();
  } else {
    p1.acquire();
    p2.acquire();
  }
}
/**
 * Synchronizes two Synchronized objects (they may encapsulate
 * different data). Synchronization is done in increasing address of
 * object order, so there is no deadlock risk.
 */
#define SYNCHRONIZED_DUAL(n1, e1, n2, e2)                       \
  if (bool SYNCHRONIZED_state = false) {} else                  \
    for (auto SYNCHRONIZED_lp1 = (e1).internalDoNotUse();       \
         !SYNCHRONIZED_state; SYNCHRONIZED_state = true)        \
      for (auto& n1 = *SYNCHRONIZED_lp1.operator->();           \
           !SYNCHRONIZED_state; SYNCHRONIZED_state = true)      \
        for (auto SYNCHRONIZED_lp2 = (e2).internalDoNotUse();   \
             !SYNCHRONIZED_state; SYNCHRONIZED_state = true)    \
          for (auto& n2 = *SYNCHRONIZED_lp2.operator->();       \
               !SYNCHRONIZED_state; SYNCHRONIZED_state = true)  \
            if ((::folly::lockInOrder(                          \
                   SYNCHRONIZED_lp1, SYNCHRONIZED_lp2),         \
                 false)) {}                                     \
            else
752 } /* namespace folly */
754 #endif // SYNCHRONIZED_H_