2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * This module implements a Synchronized abstraction useful in
19 * mutex-based concurrency.
21 * @author: Andrei Alexandrescu (andrei.alexandrescu@fb.com)
24 #ifndef SYNCHRONIZED_H_
25 #define SYNCHRONIZED_H_
27 #include <type_traits>
29 #include <boost/thread.hpp>
30 #include <folly/Preprocessor.h>
31 #include <folly/SharedMutex.h>
32 #include <folly/Traits.h>
// Tag type used to select the "do not actually lock" constructor overloads
// of LockedPtr/ConstLockedPtr below (see SYNCHRONIZED_DUAL); never intended
// for client code.
37 enum InternalDoNotUse {};
40 * Free function adaptors for std:: and boost::
43 // Android, OSX, and Cygwin don't have timed mutexes
44 #if defined(ANDROID) || defined(__ANDROID__) || \
45 defined(__APPLE__) || defined(__CYGWIN__)
46 # define FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES 0
// NOTE(review): the `#else`/`#endif` lines of this conditional are not
// visible in this excerpt; the `1` definition below is presumably the
// #else branch - confirm against the full file.
48 # define FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES 1
// Trait: true iff T is one of the whitelisted exclusive-lock mutex types
// (those providing .lock()/.unlock()). Detection is by explicit enumeration
// through IsOneOf (folly/Traits.h), not by probing for member functions.
52 * Yields true iff T has .lock() and .unlock() member functions. This
53 * is done by simply enumerating the mutexes with this interface in
57 struct HasLockUnlock {
58 enum { value = IsOneOf<T
60 , std::recursive_mutex
62 , boost::recursive_mutex
// The timed variants only participate on platforms that provide them.
64 #if FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES
66 , std::recursive_timed_mutex
68 , boost::recursive_timed_mutex
// Trait: true iff T is one of the enumerated shared-capable mutex types
// (those providing .lock_shared()/.unlock_shared()).
74 * Yields true iff T has .lock_shared() and .unlock_shared() member functions.
75 * This is done by simply enumerating the mutexes with this interface.
78 struct HasLockSharedUnlockShared {
79 enum { value = IsOneOf<T
// Read-acquire for exclusive-only mutexes: with no shared mode available,
// readers take the full lock. SFINAE-disabled for shared mutexes, which
// use the .lock_shared() overload below instead.
85 * Acquires a mutex for reading by calling .lock().
87 * This variant is not appropriate for shared mutexes.
90 typename std::enable_if<
91 HasLockUnlock<T>::value && !HasLockSharedUnlockShared<T>::value>::type
92 acquireRead(T& mutex) {
// Read-acquire for shared mutexes: takes the lock in shared mode so that
// multiple concurrent readers may hold it simultaneously.
97 * Acquires a mutex for reading by calling .lock_shared().
99 * This variant is not appropriate for nonshared mutexes.
102 typename std::enable_if<HasLockSharedUnlockShared<T>::value>::type
103 acquireRead(T& mutex) {
// Write-acquire: always takes the lock exclusively via .lock(), for both
// shared and non-shared mutex types.
108 * Acquires a mutex for reading and writing by calling .lock().
111 typename std::enable_if<HasLockUnlock<T>::value>::type
112 acquireReadWrite(T& mutex) {
116 #if FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES
// Timed read-acquire for boost::shared_mutex. Returns true on success,
// false if the shared lock could not be obtained within `milliseconds`.
118 * Acquires a mutex for reading by calling .try_lock_shared_for(). This applies
119 * to boost::shared_mutex.
122 typename std::enable_if<
124 , boost::shared_mutex
125 >::value, bool>::type
126 acquireRead(T& mutex,
127 unsigned int milliseconds) {
128 return mutex.try_lock_shared_for(boost::chrono::milliseconds(milliseconds));
// Timed write-acquire for the std timed mutexes. Returns true on success,
// false if the lock was not obtained within `milliseconds`.
132 * Acquires a mutex for reading and writing with timeout by calling
133 * .try_lock_for(). This applies to two of the std mutex classes as
137 typename std::enable_if<
140 , std::recursive_timed_mutex
141 >::value, bool>::type
142 acquireReadWrite(T& mutex,
143 unsigned int milliseconds) {
144 // work around try_lock_for bug in some gcc versions, see
145 // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=54562
146 // TODO: Fixed in gcc-4.9.0.
// Try an immediate non-blocking lock first; only for a positive timeout
// fall back to waiting on an absolute system_clock deadline (try_lock_until
// rather than try_lock_for, per the gcc bug referenced above).
147 return mutex.try_lock()
148 || (milliseconds > 0 &&
149 mutex.try_lock_until(std::chrono::system_clock::now() +
150 std::chrono::milliseconds(milliseconds)));
// Timed write-acquire for the boost timed mutexes (including the exclusive
// mode of boost::shared_mutex). Returns true on success.
154 * Acquires a mutex for reading and writing with timeout by calling
155 * .try_lock_for(). This applies to three of the boost mutex classes as
159 typename std::enable_if<
161 , boost::shared_mutex
163 , boost::recursive_timed_mutex
164 >::value, bool>::type
165 acquireReadWrite(T& mutex,
166 unsigned int milliseconds) {
167 return mutex.try_lock_for(boost::chrono::milliseconds(milliseconds));
169 #endif // FOLLY_SYNCHRONIZED_HAVE_TIMED_MUTEXES
// Read-release for exclusive-only mutexes: mirror of the .lock()-based
// acquireRead above.
172 * Releases a mutex previously acquired for reading by calling
173 * .unlock(). The exception is boost::shared_mutex, which has a
174 * special primitive called .unlock_shared().
177 typename std::enable_if<
178 HasLockUnlock<T>::value && !HasLockSharedUnlockShared<T>::value>::type
179 releaseRead(T& mutex) {
// Read-release for shared mutexes: must use .unlock_shared() to pair with
// the .lock_shared() performed by the matching acquireRead overload.
184 * Special case for boost::shared_mutex.
187 typename std::enable_if<HasLockSharedUnlockShared<T>::value>::type
188 releaseRead(T& mutex) {
189 mutex.unlock_shared();
// Write-release: plain .unlock(), pairing with acquireReadWrite.
193 * Releases a mutex previously acquired for reading-writing by calling
197 typename std::enable_if<HasLockUnlock<T>::value>::type
198 releaseReadWrite(T& mutex) {
202 } // namespace detail
205 * Synchronized<T> encapsulates an object of type T (a "datum") paired
206 * with a mutex. The only way to access the datum is while the mutex
207 * is locked, and Synchronized makes it virtually impossible to do
208 * otherwise. The code that would access the datum in unsafe ways
209 * would look odd and convoluted, thus readily alerting the human
210 * reviewer. In contrast, the code that uses Synchronized<T> correctly
211 * looks simple and intuitive.
213 * The second parameter must be a mutex type. Supported mutexes are
214 * std::mutex, std::recursive_mutex, std::timed_mutex,
215 * std::recursive_timed_mutex, boost::mutex, boost::recursive_mutex,
216 * boost::shared_mutex, boost::timed_mutex,
217 * boost::recursive_timed_mutex, and the folly/RWSpinLock.h
220 * You may define Synchronized support by defining 4-6 primitives in
221 * the same namespace as the mutex class (found via ADL). The
222 * primitives are: acquireRead, acquireReadWrite, releaseRead, and
223 * releaseReadWrite. Two optional primitives for timeout operations are
224 * overloads of acquireRead and acquireReadWrite. For signatures,
225 * refer to the namespace detail below, which implements the
226 * primitives for mutexes in std and boost.
228 template <class T, class Mutex = SharedMutex>
229 struct Synchronized {
231 * Default constructor: each member (datum and mutex) is default-constructed.
234 Synchronized() = default;
// Noexcept-ness of the copy/move constructors is exactly that of T's own
// copy/move constructors; cached here for use in the noexcept() clauses.
237 static constexpr bool nxCopyCtor{
238 std::is_nothrow_copy_constructible<T>::value};
239 static constexpr bool nxMoveCtor{
240 std::is_nothrow_move_constructible<T>::value};
243 * Helper constructors to enable Synchronized for
244 * non-default constructible types T.
245 * Guards are created in actual public constructors and are alive
246 * for the time required to construct the object
// Delegation targets: `guard` is a live locked-pointer on `rhs`, so
// rhs.datum_ is copied/moved while rhs's mutex is held by the caller.
248 template <typename Guard>
249 Synchronized(const Synchronized& rhs,
250 const Guard& /*guard*/) noexcept(nxCopyCtor)
251 : datum_(rhs.datum_) {}
253 template <typename Guard>
254 Synchronized(Synchronized&& rhs, const Guard& /*guard*/) noexcept(nxMoveCtor)
255 : datum_(std::move(rhs.datum_)) {}
259 * Copy constructor copies the data (with locking the source and
260 * all) but does NOT copy the mutex. Doing so would result in
// rhs.operator->() produces a guard held for the duration of the delegated
// constructor call, so the source datum is read under rhs's mutex; this
// object's own mutex starts out freshly default-constructed.
263 Synchronized(const Synchronized& rhs) noexcept(nxCopyCtor)
264 : Synchronized(rhs, rhs.operator->()) {}
267 * Move constructor moves the data (with locking the source and all)
268 * but does not move the mutex.
270 Synchronized(Synchronized&& rhs) noexcept(nxMoveCtor)
271 : Synchronized(std::move(rhs), rhs.operator->()) {}
274 * Constructor taking a datum as argument copies it. There is no
275 * need to lock the constructing object.
277 explicit Synchronized(const T& rhs) noexcept(nxCopyCtor) : datum_(rhs) {}
280 * Constructor taking a datum rvalue as argument moves it. Again,
281 * there is no need to lock the constructing object.
283 explicit Synchronized(T&& rhs) noexcept(nxMoveCtor)
284 : datum_(std::move(rhs)) {}
287 * Lets you construct non-movable types in-place. Use the constexpr
288 * instance `construct_in_place` as the first argument.
290 template <typename... Args>
291 explicit Synchronized(construct_in_place_t, Args&&... args)
292 : datum_(std::forward<Args>(args)...) {}
295 * The canonical assignment operator only assigns the data, NOT the
296 * mutex. It locks the two objects in ascending order of their
// Address-ordered locking (guard1 before guard2) makes concurrent `a = b`
// and `b = a` deadlock-free: both threads lock the lower-addressed object
// first. The self-assignment branch takes no locks at all.
299 Synchronized& operator=(const Synchronized& rhs) {
301 // Self-assignment, pass.
302 } else if (this < &rhs) {
303 auto guard1 = operator->();
304 auto guard2 = rhs.operator->();
307 auto guard1 = rhs.operator->();
308 auto guard2 = operator->();
315 * Move assignment operator, only assigns the data, NOT the
316 * mutex. It locks the two objects in ascending order of their
// Same address-ordered locking discipline as copy assignment; only the
// datum is moved, never the mutex.
319 Synchronized& operator=(Synchronized&& rhs) {
321 // Self-assignment, pass.
322 } else if (this < &rhs) {
323 auto guard1 = operator->();
324 auto guard2 = rhs.operator->();
325 datum_ = std::move(rhs.datum_);
327 auto guard1 = rhs.operator->();
328 auto guard2 = operator->();
329 datum_ = std::move(rhs.datum_);
335 * Lock object, assign datum.
337 Synchronized& operator=(const T& rhs) {
338 auto guard = operator->();
344 * Lock object, move-assign datum.
346 Synchronized& operator=(T&& rhs) {
347 auto guard = operator->();
348 datum_ = std::move(rhs);
353 * A LockedPtr lp keeps a modifiable (i.e. non-const)
354 * Synchronized<T> object locked for the duration of lp's
355 * existence. Because of this, you get to access the datum's methods
356 * directly by using lp->fun().
360 * Found no reason to leave this hanging.
362 LockedPtr() = delete;
365 * Takes a Synchronized and locks it.
// NOTE(review): the acquiring statement of this constructor is not visible
// in this excerpt; presumably it write-locks parent's mutex - confirm.
367 explicit LockedPtr(Synchronized* parent) : parent_(parent) {
372 * Takes a Synchronized and attempts to lock it for some
373 * milliseconds. If not, the LockedPtr will be subsequently null.
// On timeout the pointer is left null, so operator-> below yields nullptr;
// callers of timedAcquire() must check for that.
375 LockedPtr(Synchronized* parent, unsigned int milliseconds) {
376 using namespace detail;
377 if (acquireReadWrite(parent->mutex_, milliseconds)) {
381 // Could not acquire the resource, pointer is null
386 * This is used ONLY inside SYNCHRONIZED_DUAL. It initializes
387 * everything properly, but does not lock the parent because it
388 * "knows" someone else will lock it. Please do not use.
390 LockedPtr(Synchronized* parent, detail::InternalDoNotUse)
395 * Copy ctor adds one lock.
397 LockedPtr(const LockedPtr& rhs) : parent_(rhs.parent_) {
402 * Assigning from another LockedPtr results in freeing the former
403 * lock and acquiring the new one. The method works with
404 * self-assignment (does nothing).
// NOTE(review): this releases via mutex_.unlock() directly, while the
// destructor goes through detail::releaseReadWrite - equivalent for the
// enumerated mutexes, but worth confirming for custom mutex types.
406 LockedPtr& operator=(const LockedPtr& rhs) {
407 if (parent_ != rhs.parent_) {
408 if (parent_) parent_->mutex_.unlock();
409 parent_ = rhs.parent_;
416 * Destructor releases.
// parent_ may be null after a timed-out acquisition; release only if held.
419 using namespace detail;
420 if (parent_) releaseReadWrite(parent_->mutex_);
424 * Safe to access the data. Don't save the obtained pointer by
425 * invoking lp.operator->() by hand. Also, if the method returns a
426 * handle stored inside the datum, don't use this idiom - use
427 * SYNCHRONIZED below.
// Returns a pointer to the protected datum, or nullptr if no lock is held.
430 return parent_ ? &parent_->datum_ : nullptr;
434 * This class temporarily unlocks a LockedPtr in a scoped
435 * manner. It is used inside of the UNSYNCHRONIZED macro.
// Constructor releases the enclosing LockedPtr's write lock; the matching
// re-acquire is only partially visible in this excerpt (see the
// acquireReadWrite call near the end of this class).
437 struct Unsynchronizer {
438 explicit Unsynchronizer(LockedPtr* p) : parent_(p) {
439 using namespace detail;
440 releaseReadWrite(parent_->parent_->mutex_);
442 Unsynchronizer(const Unsynchronizer&) = delete;
443 Unsynchronizer& operator=(const Unsynchronizer&) = delete;
447 LockedPtr* operator->() const {
// Declared but never defined: exists only so UNSYNCHRONIZED can recover
// the Unsynchronizer type via decltype.
453 friend struct Unsynchronizer;
454 Unsynchronizer typeHackDoNotUse();
456 template <class P1, class P2>
457 friend void lockInOrder(P1& p1, P2& p2);
461 using namespace detail;
462 if (parent_) acquireReadWrite(parent_->mutex_);
465 // This is the entire state of LockedPtr.
466 Synchronized* parent_;
470 * ConstLockedPtr does exactly what LockedPtr does, but for const
471 * Synchronized objects. Of interest is that ConstLockedPtr only
472 * uses a read lock, which is faster but more restrictive - you only
473 * get to call const methods of the datum.
475 * Much of the code between LockedPtr and
476 * ConstLockedPtr is identical and could be factored out, but there
477 * are enough nagging little differences to not justify the trouble.
479 struct ConstLockedPtr {
480 ConstLockedPtr() = delete;
481 explicit ConstLockedPtr(const Synchronized* parent) : parent_(parent) {
// Tag-dispatched variant: does NOT lock; used only by SYNCHRONIZED_DUAL.
484 ConstLockedPtr(const Synchronized* parent, detail::InternalDoNotUse)
487 ConstLockedPtr(const ConstLockedPtr& rhs) : parent_(rhs.parent_) {
// Conversion from a (write-locking) LockedPtr to a read-locking guard.
490 explicit ConstLockedPtr(const LockedPtr& rhs) : parent_(rhs.parent_) {
// Timed variant: on timeout parent_ is left null (see comment below).
493 ConstLockedPtr(const Synchronized* parent, unsigned int milliseconds) {
494 using namespace detail;
501 // Could not acquire the resource, pointer is null
// NOTE(review): releases via mutex_.unlock_shared() directly while the
// destructor uses detail::releaseRead - same asymmetry as in LockedPtr.
505 ConstLockedPtr& operator=(const ConstLockedPtr& rhs) {
506 if (parent_ != rhs.parent_) {
507 if (parent_) parent_->mutex_.unlock_shared();
508 parent_ = rhs.parent_;
// Destructor body: release the read lock only if one is actually held.
513 using namespace detail;
514 if (parent_) releaseRead(parent_->mutex_);
// Read-only access: const T* to the datum, or nullptr if no lock is held.
517 const T* operator->() const {
518 return parent_ ? &parent_->datum_ : nullptr;
// Scoped unlock helper for UNSYNCHRONIZED: releases the read lock on
// construction and re-acquires it (acquireRead below) when it goes away.
521 struct Unsynchronizer {
522 explicit Unsynchronizer(ConstLockedPtr* p) : parent_(p) {
523 using namespace detail;
524 releaseRead(parent_->parent_->mutex_);
526 Unsynchronizer(const Unsynchronizer&) = delete;
527 Unsynchronizer& operator=(const Unsynchronizer&) = delete;
529 using namespace detail;
530 acquireRead(parent_->parent_->mutex_);
532 ConstLockedPtr* operator->() const {
536 ConstLockedPtr* parent_;
// Declared but never defined: decltype fodder for UNSYNCHRONIZED.
538 friend struct Unsynchronizer;
539 Unsynchronizer typeHackDoNotUse();
541 template <class P1, class P2>
542 friend void lockInOrder(P1& p1, P2& p2);
546 using namespace detail;
547 if (parent_) acquireRead(parent_->mutex_);
550 const Synchronized* parent_;
554 * This accessor offers a LockedPtr. In turn, LockedPtr offers
555 * operator-> returning a pointer to T. The operator-> keeps
556 * expanding until it reaches a pointer, so syncobj->foo() will lock
557 * the object and call foo() against it.
559 LockedPtr operator->() {
560 return LockedPtr(this);
564 * Same, for constant objects. You will be able to invoke only const
// Const overload yields a ConstLockedPtr, i.e. a read lock.
567 ConstLockedPtr operator->() const {
568 return ConstLockedPtr(this);
572 * Attempts to acquire for a given number of milliseconds. If
573 * acquisition is unsuccessful, the returned LockedPtr is NULL.
575 LockedPtr timedAcquire(unsigned int milliseconds) {
576 return LockedPtr(this, milliseconds);
580 * As above, for a constant object.
582 ConstLockedPtr timedAcquire(unsigned int milliseconds) const {
583 return ConstLockedPtr(this, milliseconds);
587 * Used by SYNCHRONIZED_DUAL.
// Returns a guard that does NOT lock (InternalDoNotUse tag); the caller
// (SYNCHRONIZED_DUAL via lockInOrder) is responsible for locking.
589 LockedPtr internalDoNotUse() {
590 return LockedPtr(this, detail::InternalDoNotUse());
596 ConstLockedPtr internalDoNotUse() const {
597 return ConstLockedPtr(this, detail::InternalDoNotUse());
601 * Sometimes, although you have a mutable object, you only want to
602 * call a const method against it. The most efficient way to achieve
603 * that is by using a read lock. You get to do so by using
604 * obj.asConst()->method() instead of obj->method().
606 const Synchronized& asConst() const {
611 * Swaps with another Synchronized. Protected against
612 * self-swap. Only data is swapped. Locks are acquired in increasing
// The `return rhs.swap(*this)` delegation normalizes the two operands so
// that locking always proceeds in a single canonical (address) order.
615 void swap(Synchronized& rhs) {
620 return rhs.swap(*this);
622 auto guard1 = operator->();
623 auto guard2 = rhs.operator->();
// Unqualified call: picks up std::swap / ADL swap for the datum type.
626 swap(datum_, rhs.datum_);
630 * Swap with another datum. Recommended because it keeps the mutex
634 LockedPtr guard = operator->();
641 * Copies datum to a given target.
643 void copy(T* target) const {
644 ConstLockedPtr guard = operator->();
649 * Returns a fresh copy of the datum.
652 ConstLockedPtr guard = operator->();
// mutable so that const accessors (read-lock paths) can still lock it.
658 mutable Mutex mutex_;
661 // Non-member swap primitive
// ADL-visible free swap for Synchronized; defers to the member swap, which
// handles self-swap and locks both operands.
662 template <class T, class M>
663 void swap(Synchronized<T, M>& lhs, Synchronized<T, M>& rhs) {
668 * SYNCHRONIZED is the main facility that makes Synchronized<T>
669 * helpful. It is a pseudo-statement that introduces a scope where the
670 * object is locked. Inside that scope you get to access the unadorned
675 * Synchronized<vector<int>> svector;
677 * SYNCHRONIZED (svector) { ... use svector as a vector<int> ... }
679 * SYNCHRONIZED (v, svector) { ... use v as a vector<int> ... }
681 * Refer to folly/docs/Synchronized.md for a detailed explanation and more
// Expands to a chain of single-iteration for-statements: the first holds a
// LockedPtr guard alive for the user's block, the second binds the requested
// name to a reference to the locked datum. The SYNCHRONIZED_state flag makes
// each loop execute its body exactly once. FB_ARG_2_OR_1 lets the macro
// accept either SYNCHRONIZED(obj) or SYNCHRONIZED(name, obj).
// (No comments inside the macro: they would break the `\` continuations.)
684 #define SYNCHRONIZED(...) \
686 FOLLY_GCC_DISABLE_WARNING(shadow) \
687 if (bool SYNCHRONIZED_state = false) {} else \
688 for (auto SYNCHRONIZED_lockedPtr = \
689 (FB_ARG_2_OR_1(__VA_ARGS__)).operator->(); \
690 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
691 for (auto& FB_ARG_1(__VA_ARGS__) = \
692 *SYNCHRONIZED_lockedPtr.operator->(); \
693 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
// Like SYNCHRONIZED but acquires via timedAcquire(timeout). Note the bound
// name is the raw datum POINTER produced by LockedPtr::operator->() - it is
// nullptr when acquisition timed out, so the block must null-check it.
696 #define TIMED_SYNCHRONIZED(timeout, ...) \
697 if (bool SYNCHRONIZED_state = false) {} else \
698 for (auto SYNCHRONIZED_lockedPtr = \
699 (FB_ARG_2_OR_1(__VA_ARGS__)).timedAcquire(timeout); \
700 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
701 for (auto FB_ARG_1(__VA_ARGS__) = \
702 SYNCHRONIZED_lockedPtr.operator->(); \
703 !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
706 * Similar to SYNCHRONIZED, but only uses a read lock.
// Routes the object through .asConst() so the const operator-> overload
// (ConstLockedPtr, i.e. a shared/read lock) is selected.
708 #define SYNCHRONIZED_CONST(...) \
709 SYNCHRONIZED(FB_ARG_1(__VA_ARGS__), \
710 (FB_ARG_2_OR_1(__VA_ARGS__)).asConst())
713 * Similar to TIMED_SYNCHRONIZED, but only uses a read lock.
// Same .asConst() trick applied to the timed variant.
715 #define TIMED_SYNCHRONIZED_CONST(timeout, ...) \
716 TIMED_SYNCHRONIZED(timeout, FB_ARG_1(__VA_ARGS__), \
717 (FB_ARG_2_OR_1(__VA_ARGS__)).asConst())
720 * Temporarily disables synchronization inside a SYNCHRONIZED block.
// Declares an Unsynchronizer (whose constructor releases the enclosing
// SYNCHRONIZED's lock) and rebinds `name` for the nested scope. The
// decltype(...typeHackDoNotUse()) trick recovers the enclosing LockedPtr's
// nested Unsynchronizer type without having to name it directly.
722 #define UNSYNCHRONIZED(name) \
723 for (decltype(SYNCHRONIZED_lockedPtr.typeHackDoNotUse()) \
724 SYNCHRONIZED_state3(&SYNCHRONIZED_lockedPtr); \
725 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
726 for (auto& name = *SYNCHRONIZED_state3.operator->(); \
727 !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
730 * Locks two objects in increasing order of their addresses.
// Takes two guard objects (LockedPtr/ConstLockedPtr); compares the datum
// addresses via operator-> and, when they are in descending order, reorders
// the acquisitions (the reordering body is not visible in this excerpt -
// presumably release/re-acquire so the lower address locks first; confirm).
732 template <class P1, class P2>
733 void lockInOrder(P1& p1, P2& p2) {
734 if (static_cast<const void*>(p1.operator->()) >
735 static_cast<const void*>(p2.operator->())) {
745 * Synchronizes two Synchronized objects (they may encapsulate
746 * different data). Synchronization is done in increasing address of
747 * object order, so there is no deadlock risk.
// Both guards are created via internalDoNotUse(), which does NOT lock (see
// the InternalDoNotUse constructors); the actual locking is performed once,
// in address order, by the trailing ::folly::lockInOrder(...) call - this
// is what eliminates lock-order inversion between the pair.
749 #define SYNCHRONIZED_DUAL(n1, e1, n2, e2) \
750 if (bool SYNCHRONIZED_state = false) {} else \
751 for (auto SYNCHRONIZED_lp1 = (e1).internalDoNotUse(); \
752 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
753 for (auto& n1 = *SYNCHRONIZED_lp1.operator->(); \
754 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
755 for (auto SYNCHRONIZED_lp2 = (e2).internalDoNotUse(); \
756 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
757 for (auto& n2 = *SYNCHRONIZED_lp2.operator->(); \
758 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
759 if ((::folly::lockInOrder( \
760 SYNCHRONIZED_lp1, SYNCHRONIZED_lp2), \
764 } /* namespace folly */
766 #endif // SYNCHRONIZED_H_