2 * Copyright 2013 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * This module implements a Synchronized abstraction useful in
19 * mutex-based concurrency.
21 * @author: Andrei Alexandrescu (andrei.alexandrescu@fb.com)
24 #ifndef SYNCHRONIZED_H_
25 #define SYNCHRONIZED_H_
27 #include <type_traits>
29 #include <boost/thread.hpp>
30 #include "folly/Preprocessor.h"
31 #include "folly/Traits.h"
// Tag type passed to the LockedPtr/ConstLockedPtr constructors used by
// SYNCHRONIZED_DUAL: it requests a guard that does NOT lock its parent,
// because the macro later locks both objects itself via lockInOrder().
36 enum InternalDoNotUse {};
39 * Free function adaptors for std:: and boost::
43 * Yields true iff T has .lock() and .unlock() member functions. This
44 * is done by simply enumerating the mutexes with this interface in
// Whitelist-style trait: 'value' is nonzero only for the std/boost mutex
// types listed below.  NOTE(review): the enum terminator and the closing
// '};' of this struct are not visible in this chunk of the file.
48 struct HasLockUnlock {
49 enum { value = IsOneOf<T,
50 std::mutex, std::recursive_mutex,
51 boost::mutex, boost::recursive_mutex, boost::shared_mutex
52 #ifndef __APPLE__ // OSX doesn't have timed mutexes
53 ,std::timed_mutex, std::recursive_timed_mutex,
54 boost::timed_mutex, boost::recursive_timed_mutex
60 * Acquires a mutex for reading by calling .lock(). The exception is
61 * boost::shared_mutex, which has a special read-lock primitive called
// Generic overload: any whitelisted mutex except boost::shared_mutex.
// NOTE(review): the 'template <class T>' headers and the function bodies
// (presumably mutex.lock() / mutex.lock_shared()) are missing from this
// chunk.
65 typename std::enable_if<
66 HasLockUnlock<T>::value && !std::is_same<T, boost::shared_mutex>::value>::type
67 acquireRead(T& mutex) {
72 * Special case for boost::shared_mutex.
75 typename std::enable_if<std::is_same<T, boost::shared_mutex>::value>::type
76 acquireRead(T& mutex) {
81 * Acquires a mutex for reading with timeout by calling .timed_lock(). This
82 * applies to three of the boost mutex classes as enumerated below.
// NOTE(review): the comment above is stale — this overload is constrained
// to boost::shared_mutex only and calls .timed_lock_shared(), returning
// true iff the SHARED lock was obtained within 'milliseconds'.  The
// declaration line carrying the function name ('acquireRead(T& mutex,')
// is missing from this chunk.
85 typename std::enable_if<std::is_same<T, boost::shared_mutex>::value, bool>::type
87 unsigned int milliseconds) {
88 return mutex.timed_lock_shared(boost::posix_time::milliseconds(milliseconds));
92 * Acquires a mutex for reading and writing by calling .lock().
// Exclusive acquisition for any whitelisted mutex.  NOTE(review): the
// body (presumably mutex.lock()) is not visible in this chunk.
95 typename std::enable_if<HasLockUnlock<T>::value>::type
96 acquireReadWrite(T& mutex) {
100 #ifndef __APPLE__ // OSX doesn't have timed mutexes
102 * Acquires a mutex for reading and writing with timeout by calling
103 * .try_lock_for(). This applies to two of the std mutex classes as
// Returns true iff the exclusive lock was obtained within 'milliseconds'.
// NOTE(review): the closing '}' of this function is missing from this
// chunk.
107 typename std::enable_if<
108 IsOneOf<T, std::timed_mutex, std::recursive_timed_mutex>::value, bool>::type
109 acquireReadWrite(T& mutex,
110 unsigned int milliseconds) {
111 return mutex.try_lock_for(std::chrono::milliseconds(milliseconds));
115 * Acquires a mutex for reading and writing with timeout by calling
116 * .timed_lock(). This applies to three of the boost mutex classes as
// boost counterpart of the std overload above.  Note that for
// boost::shared_mutex, .timed_lock() takes the EXCLUSIVE (writer) lock.
120 typename std::enable_if<
121 IsOneOf<T, boost::shared_mutex, boost::timed_mutex,
122 boost::recursive_timed_mutex>::value, bool>::type
123 acquireReadWrite(T& mutex,
124 unsigned int milliseconds) {
125 return mutex.timed_lock(boost::posix_time::milliseconds(milliseconds));
130 * Releases a mutex previously acquired for reading by calling
131 * .unlock(). The exception is boost::shared_mutex, which has a
132 * special primitive called .unlock_shared().
// Generic overload.  NOTE(review): the body (presumably mutex.unlock())
// is not visible in this chunk.
135 typename std::enable_if<
136 HasLockUnlock<T>::value && !std::is_same<T, boost::shared_mutex>::value>::type
137 releaseRead(T& mutex) {
142 * Special case for boost::shared_mutex.
145 typename std::enable_if<std::is_same<T, boost::shared_mutex>::value>::type
146 releaseRead(T& mutex) {
147 mutex.unlock_shared();
151 * Releases a mutex previously acquired for reading-writing by calling
// Applies to every whitelisted mutex.  NOTE(review): the body (presumably
// mutex.unlock()) is not visible in this chunk.
155 typename std::enable_if<HasLockUnlock<T>::value>::type
156 releaseReadWrite(T& mutex) {
160 } // namespace detail
163 * Synchronized<T> encapsulates an object of type T (a "datum") paired
164 * with a mutex. The only way to access the datum is while the mutex
165 * is locked, and Synchronized makes it virtually impossible to do
166 * otherwise. The code that would access the datum in unsafe ways
167 * would look odd and convoluted, thus readily alerting the human
168 * reviewer. In contrast, the code that uses Synchronized<T> correctly
169 * looks simple and intuitive.
171 * The second parameter must be a mutex type. Supported mutexes are
172 * std::mutex, std::recursive_mutex, std::timed_mutex,
173 * std::recursive_timed_mutex, boost::mutex, boost::recursive_mutex,
174 * boost::shared_mutex, boost::timed_mutex,
175 * boost::recursive_timed_mutex, and the folly/RWSpinLock.h
178 * You may define Synchronized support by defining 4-6 primitives in
179 * the same namespace as the mutex class (found via ADL). The
180 * primitives are: acquireRead, acquireReadWrite, releaseRead, and
181 * releaseReadWrite. Two optional primitives for timeout operations are
182 * overloads of acquireRead and acquireReadWrite. For signatures,
183 * refer to the namespace detail below, which implements the
184 * primitives for mutexes in std and boost.
186 template <class T, class Mutex = boost::shared_mutex>
187 struct Synchronized {
// Default constructor: both datum_ and mutex_ are default-constructed.
189 * Default constructor leaves both members call their own default
192 Synchronized() = default;
195 * Copy constructor copies the data (with locking the source and
196 * all) but does NOT copy the mutex. Doing so would result in
// rhs is const, so rhs.operator->() yields a ConstLockedPtr, read-locking
// the source for the copy.  NOTE(review): the 'datum_ = rhs.datum_' line
// is not visible in this chunk.
199 Synchronized(const Synchronized& rhs) {
200 auto guard = rhs.operator->();
205 * Move constructor moves the data (with locking the source and all)
206 * but does not move the mutex.
// rhs is non-const here, so operator->() takes its WRITE lock before the
// datum is moved out.
208 Synchronized(Synchronized&& rhs) {
209 auto guard = rhs.operator->();
210 datum_ = std::move(rhs.datum_);
214 * Constructor taking a datum as argument copies it. There is no
215 * need to lock the constructing object.
217 explicit Synchronized(const T& rhs) : datum_(rhs) {}
220 * Constructor taking a datum rvalue as argument moves it. Again,
221 * there is no need to lock the constructing object.
223 explicit Synchronized(T&& rhs) : datum_(std::move(rhs)) {}
226 * The canonical assignment operator only assigns the data, NOT the
227 * mutex. It locks the two objects in ascending order of their
// addresses, so two threads assigning in opposite directions cannot
// deadlock.  NOTE(review): the 'if (this == &rhs) {' guard line and the
// 'datum_ = rhs.datum_' / closing lines are missing from this chunk.
230 Synchronized& operator=(const Synchronized& rhs) {
232 // Self-assignment, pass.
233 } else if (this < &rhs) {
234 auto guard1 = operator->();
235 auto guard2 = rhs.operator->();
238 auto guard1 = rhs.operator->();
239 auto guard2 = operator->();
246 * Move assignment operator, only assigns the data, NOT the
247 * mutex. It locks the two objects in ascending order of their
// Same address-ordered locking discipline as copy assignment; the datum
// is moved rather than copied.
250 Synchronized& operator=(Synchronized&& rhs) {
252 // Self-assignment, pass.
253 } else if (this < &rhs) {
254 auto guard1 = operator->();
255 auto guard2 = rhs.operator->();
256 datum_ = std::move(rhs.datum_);
258 auto guard1 = rhs.operator->();
259 auto guard2 = operator->();
260 datum_ = std::move(rhs.datum_);
266 * Lock object, assign datum.
// Write-locks *this for the duration of the assignment.  NOTE(review):
// the 'datum_ = rhs;' line is not visible in this chunk.
268 Synchronized& operator=(const T& rhs) {
269 auto guard = operator->();
275 * Lock object, move-assign datum.
277 Synchronized& operator=(T&& rhs) {
278 auto guard = operator->();
279 datum_ = std::move(rhs);
284 * A LockedPtr lp keeps a modifiable (i.e. non-const)
285 * Synchronized<T> object locked for the duration of lp's
286 * existence. Because of this, you get to access the datum's methods
287 * directly by using lp->fun().
291 * Found no reason to leave this hanging.
293 LockedPtr() = delete;
296 * Takes a Synchronized and locks it.
// NOTE(review): the body (presumably acquireReadWrite(parent->mutex_))
// is not visible in this chunk.
298 explicit LockedPtr(Synchronized* parent) : parent_(parent) {
303 * Takes a Synchronized and attempts to lock it for some
304 * milliseconds. If not, the LockedPtr will be subsequently null.
306 LockedPtr(Synchronized* parent, unsigned int milliseconds) {
307 using namespace detail;
308 if (acquireReadWrite(parent->mutex_, milliseconds)) {
312 // Could not acquire the resource, pointer is null
317 * This is used ONLY inside SYNCHRONIZED_DUAL. It initializes
318 * everything properly, but does not lock the parent because it
319 * "knows" someone else will lock it. Please do not use.
321 LockedPtr(Synchronized* parent, detail::InternalDoNotUse)
326 * Copy ctor adds one lock.
328 LockedPtr(const LockedPtr& rhs) : parent_(rhs.parent_) {
333 * Assigning from another LockedPtr results in freeing the former
334 * lock and acquiring the new one. The method works with
335 * self-assignment (does nothing).
// NOTE(review): this releases via mutex_.unlock() directly rather than
// detail::releaseReadWrite() as the destructor does — confirm the two
// paths are equivalent for every supported mutex.
337 LockedPtr& operator=(const LockedPtr& rhs) {
338 if (parent_ != rhs.parent_) {
339 if (parent_) parent_->mutex_.unlock();
340 parent_ = rhs.parent_;
347 * Destructor releases.
// These two lines are the destructor body; its '~LockedPtr() {' line is
// not visible in this chunk.
350 using namespace detail;
351 if (parent_) releaseReadWrite(parent_->mutex_);
355 * Safe to access the data. Don't save the obtained pointer by
356 * invoking lp.operator->() by hand. Also, if the method returns a
357 * handle stored inside the datum, don't use this idiom - use
358 * SYNCHRONIZED below.
// Null when timed acquisition failed; callers must check before use.
361 return parent_ ? &parent_->datum_ : NULL;
365 * This class temporarily unlocks a LockedPtr in a scoped
366 * manner. It is used inside of the UNSYNCHRONIZED macro.
368 struct Unsynchronizer {
369 explicit Unsynchronizer(LockedPtr* p) : parent_(p) {
370 using namespace detail;
371 releaseReadWrite(parent_->parent_->mutex_);
373 Unsynchronizer(const Unsynchronizer&) = delete;
374 Unsynchronizer& operator=(const Unsynchronizer&) = delete;
378 LockedPtr* operator->() const {
384 friend struct Unsynchronizer;
385 Unsynchronizer typeHackDoNotUse();
387 template <class P1, class P2>
388 friend void lockInOrder(P1& p1, P2& p2);
// NOTE(review): the two lines below appear to be the body of a private
// re-lock helper used by lockInOrder; its declaration line is missing
// from this chunk.
392 using namespace detail;
393 if (parent_) acquireReadWrite(parent_->mutex_);
396 // This is the entire state of LockedPtr.
397 Synchronized* parent_;
401 * ConstLockedPtr does exactly what LockedPtr does, but for const
402 * Synchronized objects. Of interest is that ConstLockedPtr only
403 * uses a read lock, which is faster but more restrictive - you only
404 * get to call const methods of the datum.
406 * Much of the code between LockedPtr and
407 * ConstLockedPtr is identical and could be factored out, but there
408 * are enough nagging little differences to not justify the trouble.
410 struct ConstLockedPtr {
411 ConstLockedPtr() = delete;
412 explicit ConstLockedPtr(const Synchronized* parent) : parent_(parent) {
415 ConstLockedPtr(const Synchronized* parent, detail::InternalDoNotUse)
418 ConstLockedPtr(const ConstLockedPtr& rhs) : parent_(rhs.parent_) {
421 explicit ConstLockedPtr(const LockedPtr& rhs) : parent_(rhs.parent_) {
// NOTE(review): this timed constructor calls .timed_lock() (the
// EXCLUSIVE boost primitive), yet every release path in this class uses
// the shared primitives (.unlock_shared() below, releaseRead() in the
// destructor).  That is a lock/unlock mismatch; the timed overload of
// detail::acquireRead uses .timed_lock_shared() — confirm this should
// call .timed_lock_shared() as well.
424 ConstLockedPtr(const Synchronized* parent, unsigned int milliseconds) {
425 if (parent->mutex_.timed_lock(
426 boost::posix_time::milliseconds(milliseconds))) {
430 // Could not acquire the resource, pointer is null
434 ConstLockedPtr& operator=(const ConstLockedPtr& rhs) {
435 if (parent_ != rhs.parent_) {
436 if (parent_) parent_->mutex_.unlock_shared();
437 parent_ = rhs.parent_;
// Destructor body: releases the read lock; the '~ConstLockedPtr() {'
// line is not visible in this chunk.
442 using namespace detail;
443 if (parent_) releaseRead(parent_->mutex_);
// Returns null when timed acquisition failed; callers must check.
446 const T* operator->() const {
447 return parent_ ? &parent_->datum_ : NULL;
450 struct Unsynchronizer {
451 explicit Unsynchronizer(ConstLockedPtr* p) : parent_(p) {
452 using namespace detail;
453 releaseRead(parent_->parent_->mutex_);
455 Unsynchronizer(const Unsynchronizer&) = delete;
456 Unsynchronizer& operator=(const Unsynchronizer&) = delete;
// Re-acquires the read lock; the destructor's declaration line is not
// visible in this chunk.
458 using namespace detail;
459 acquireRead(parent_->parent_->mutex_);
461 ConstLockedPtr* operator->() const {
465 ConstLockedPtr* parent_;
467 friend struct Unsynchronizer;
468 Unsynchronizer typeHackDoNotUse();
470 template <class P1, class P2>
471 friend void lockInOrder(P1& p1, P2& p2);
// NOTE(review): the two lines below appear to be the body of a private
// re-lock helper used by lockInOrder; its declaration line is missing
// from this chunk.
475 using namespace detail;
476 if (parent_) acquireRead(parent_->mutex_);
479 const Synchronized* parent_;
483 * This accessor offers a LockedPtr. In turn, LockedPtr offers
484 * operator-> returning a pointer to T. The operator-> keeps
485 * expanding until it reaches a pointer, so syncobj->foo() will lock
486 * the object and call foo() against it.
488 LockedPtr operator->() {
489 return LockedPtr(this);
493 * Same, for constant objects. You will be able to invoke only const
496 ConstLockedPtr operator->() const {
497 return ConstLockedPtr(this);
501 * Attempts to acquire for a given number of milliseconds. If
502 * acquisition is unsuccessful, the returned LockedPtr is NULL.
504 LockedPtr timedAcquire(unsigned int milliseconds) {
505 return LockedPtr(this, milliseconds);
509 * As above, for a constant object.
511 ConstLockedPtr timedAcquire(unsigned int milliseconds) const {
512 return ConstLockedPtr(this, milliseconds);
516 * Used by SYNCHRONIZED_DUAL.
// Returns a guard that does NOT lock; SYNCHRONIZED_DUAL locks both
// guards itself in address order.
518 LockedPtr internalDoNotUse() {
519 return LockedPtr(this, detail::InternalDoNotUse());
// Const counterpart of the above, yielding a ConstLockedPtr.
525 ConstLockedPtr internalDoNotUse() const {
526 return ConstLockedPtr(this, detail::InternalDoNotUse());
530 * Sometimes, although you have a mutable object, you only want to
531 * call a const method against it. The most efficient way to achieve
532 * that is by using a read lock. You get to do so by using
533 * obj.asConst()->method() instead of obj->method().
535 const Synchronized& asConst() const {
540 * Swaps with another Synchronized. Protected against
541 * self-swap. Only data is swapped. Locks are acquired in increasing
// address order (the 'this > &rhs' delegation below re-enters with the
// operands reversed).  NOTE(review): the comparison lines and closing
// braces are missing from this chunk.
544 void swap(Synchronized& rhs) {
549 return rhs.swap(*this);
551 auto guard1 = operator->();
552 auto guard2 = rhs.operator->();
// Unqualified call: picks up ADL/std swap for the datum type.
555 swap(datum_, rhs.datum_);
559 * Swap with another datum. Recommended because it keeps the mutex
// held only while the two datums are exchanged; the swap line itself is
// not visible in this chunk.
563 LockedPtr guard = operator->();
570 * Copies datum to a given target.
572 void copy(T* target) const {
573 ConstLockedPtr guard = operator->();
578 * Returns a fresh copy of the datum.
581 ConstLockedPtr guard = operator->();
// The mutex; mutable so that const accessors (read-locking) can lock it.
587 mutable Mutex mutex_;
590 // Non-member swap primitive
// Enables ADL swap of two Synchronized objects; presumably delegates to
// the member swap (the body is not visible in this chunk).
591 template <class T, class M>
592 void swap(Synchronized<T, M>& lhs, Synchronized<T, M>& rhs) {
597 * SYNCHRONIZED is the main facility that makes Synchronized<T>
598 * helpful. It is a pseudo-statement that introduces a scope where the
599 * object is locked. Inside that scope you get to access the unadorned
604 * Synchronized<vector<int>> svector;
606 * SYNCHRONIZED (svector) { ... use svector as a vector<int> ... }
608 * SYNCHRONIZED (v, svector) { ... use v as a vector<int> ... }
610 * Refer to folly/docs/Synchronized.md for a detailed explanation and more
// Pseudo-statement: expands to an if/for cascade that holds a write-lock
// guard (operator->()'s LockedPtr) for the scope that follows.
// SYNCHRONIZED(obj) binds 'obj' to the locked datum; SYNCHRONIZED(name,
// expr) locks 'expr' and binds 'name'.  Each 'for' runs exactly once,
// gated by the SYNCHRONIZED_state flag.
613 #define SYNCHRONIZED(...) \
614 if (bool SYNCHRONIZED_state = false) {} else \
615 for (auto SYNCHRONIZED_lockedPtr = \
616 (FB_ARG_2_OR_1(__VA_ARGS__)).operator->(); \
617 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
618 for (auto& FB_ARG_1(__VA_ARGS__) = \
619 *SYNCHRONIZED_lockedPtr.operator->(); \
620 !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
// Like SYNCHRONIZED, but acquires via timedAcquire(timeout).  Unlike
// SYNCHRONIZED, the bound name is the raw datum POINTER returned by
// operator->() — it is null when acquisition timed out, so the body must
// check it before dereferencing.
622 #define TIMED_SYNCHRONIZED(timeout, ...) \
623 if (bool SYNCHRONIZED_state = false) {} else \
624 for (auto SYNCHRONIZED_lockedPtr = \
625 (FB_ARG_2_OR_1(__VA_ARGS__)).timedAcquire(timeout); \
626 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
627 for (auto FB_ARG_1(__VA_ARGS__) = \
628 SYNCHRONIZED_lockedPtr.operator->(); \
629 !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
632 * Similar to SYNCHRONIZED, but only uses a read lock.
// Delegates to SYNCHRONIZED on asConst(), so the guard is a
// ConstLockedPtr holding only the read lock.
634 #define SYNCHRONIZED_CONST(...) \
635 SYNCHRONIZED(FB_ARG_1(__VA_ARGS__), \
636 (FB_ARG_2_OR_1(__VA_ARGS__)).asConst())
639 * Similar to TIMED_SYNCHRONIZED, but only uses a read lock.
// Delegates to TIMED_SYNCHRONIZED on asConst(): read lock with timeout;
// the bound name is a (possibly null) pointer to const datum.
641 #define TIMED_SYNCHRONIZED_CONST(timeout, ...) \
642 TIMED_SYNCHRONIZED(timeout, FB_ARG_1(__VA_ARGS__), \
643 (FB_ARG_2_OR_1(__VA_ARGS__)).asConst())
646 * Temporarily disables synchronization inside a SYNCHRONIZED block.
// Usable only inside SYNCHRONIZED: declares an Unsynchronizer
// (SYNCHRONIZED_state3) that releases the lock on entry to this scope
// and re-acquires it when the scope ends.  'name' is bound from the
// Unsynchronizer's stored guard via its operator->().
648 #define UNSYNCHRONIZED(name) \
649 for (decltype(SYNCHRONIZED_lockedPtr.typeHackDoNotUse()) \
650 SYNCHRONIZED_state3(&SYNCHRONIZED_lockedPtr); \
651 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
652 for (auto name = *SYNCHRONIZED_state3.operator->(); \
653 !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
656 * Locks two objects in increasing order of their addresses.
// Compares the two guards by the address of their datum (via
// operator->()) and, when p1's datum sits higher, re-locks so that the
// lower address is locked first.  NOTE(review): the unlock/re-lock body
// and closing braces are missing from this chunk.
658 template <class P1, class P2>
659 void lockInOrder(P1& p1, P2& p2) {
660 if (static_cast<const void*>(p1.operator->()) >
661 static_cast<const void*>(p2.operator->())) {
671 * Synchronizes two Synchronized objects (they may encapsulate
672 * different data). Synchronization is done in increasing address of
673 * object order, so there is no deadlock risk.
// Locks two Synchronized objects for one scope without deadlock risk:
// both guards are created via internalDoNotUse() (which does not lock on
// construction), then ::folly::lockInOrder locks them in increasing
// address order.  NOTE(review): the macro's final continuation lines are
// missing from this chunk.
675 #define SYNCHRONIZED_DUAL(n1, e1, n2, e2) \
676 if (bool SYNCHRONIZED_state = false) {} else \
677 for (auto SYNCHRONIZED_lp1 = (e1).internalDoNotUse(); \
678 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
679 for (auto& n1 = *SYNCHRONIZED_lp1.operator->(); \
680 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
681 for (auto SYNCHRONIZED_lp2 = (e2).internalDoNotUse(); \
682 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
683 for (auto& n2 = *SYNCHRONIZED_lp2.operator->(); \
684 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
685 if ((::folly::lockInOrder( \
686 SYNCHRONIZED_lp1, SYNCHRONIZED_lp2), \
690 } /* namespace folly */
692 #endif // SYNCHRONIZED_H_