/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
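
/*
 * Initialization usage sketch (illustrative only; my_dev and my_dev_setup
 * are made-up names). A mutex may be defined statically or initialized at
 * runtime, but never memset() to 0:
 *
 *	static DEFINE_MUTEX(global_lock);	// statically defined
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);		// dynamic initialization
 *	}
 */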
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
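
/*
 * Usage sketch (illustrative only; struct counter and counter_inc are
 * made-up names): a minimal critical section. mutex_lock() may sleep,
 * so it must not be called from interrupt context.
 *
 *	struct counter {
 *		struct mutex lock;
 *		unsigned long value;
 *	};
 *
 *	static void counter_inc(struct counter *c)
 *	{
 *		mutex_lock(&c->lock);
 *		c->value++;			// protected by c->lock
 *		mutex_unlock(&c->lock);		// same task must unlock
 *	}
 */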
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}
/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			       struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up.
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}
/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking
	 * that lock->owner still matches owner. If that fails, owner might
	 * point to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}
/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}

/*
 * Atomically try to take the lock when it is available
 */
static inline bool mutex_try_to_acquire(struct mutex *lock)
{
	return !mutex_is_locked(lock) &&
		(atomic_cmpxchg(&lock->count, 1, 0) == 1);
}
/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * Since this needs the lock owner, and this mutex implementation
 * doesn't track the owner atomically in the lock field, we need to
 * track it non-atomically.
 *
 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 * to serialize everything.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	/*
	 * In order to avoid a stampede of mutex spinners trying to
	 * acquire the mutex all at once, the spinners need to take a
	 * MCS (queued) lock first before spinning on the owner field.
	 */
	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (mutex_try_to_acquire(lock)) {
			lock_acquired(&lock->dep_map, ip);

			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif
__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
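
/*
 * Wound/wait usage sketch (illustrative only; buf_ww_class, lock_both and
 * the buffer names are made up). The canonical pattern: take the locks
 * under one acquire context and, on -EDEADLK, drop everything and wait on
 * the contended lock before retrying. The first lock under a fresh context
 * cannot return -EDEADLK, since nothing is held yet.
 *
 *	static DEFINE_WW_CLASS(buf_ww_class);
 *
 *	static void lock_both(struct ww_mutex *first, struct ww_mutex *second)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &buf_ww_class);
 *		ww_mutex_lock(first, &ctx);
 *		while (ww_mutex_lock(second, &ctx) == -EDEADLK) {
 *			// we are the younger context: back off and wait
 *			ww_mutex_unlock(first);
 *			ww_mutex_lock_slow(second, &ctx);
 *			swap(first, second);	// 'first' is now the held lock
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		// ... both locks held ...
 *
 *		ww_mutex_unlock(first);
 *		ww_mutex_unlock(second);
 *		ww_acquire_fini(&ctx);
 *	}
 */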
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
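
/*
 * A worked illustration of the stamp check above (made-up 8-bit stamps
 * for brevity): stamps come from an incrementing per-class counter set
 * up by ww_acquire_init() and may wrap, so "ctx is younger than hold_ctx"
 * is tested with wrap-safe unsigned arithmetic. With hold_ctx->stamp ==
 * 250 and ctx->stamp == 2, the difference 2 - 250 == 8 (mod 256) is
 * small, so ctx counts as the younger context and must back off with
 * -EDEADLK (wait/die).
 */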
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);

	/*
	 * Once more, try to acquire the lock. Only try-lock the mutex if
	 * it is unlocked to reduce unnecessary xchg() operations.
	 */
	if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		ww_mutex_set_context_slowpath(ww, ww_ctx);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
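
/*
 * Nesting usage sketch (illustrative only; struct my_obj and double_lock
 * are made-up names). When two locks of the same lockdep class must be
 * held at once, the second acquisition needs a distinct subclass so that
 * lockdep does not flag a false self-deadlock:
 *
 *	static void double_lock(struct my_obj *a, struct my_obj *b)
 *	{
 *		if (a > b)			// impose a stable order,
 *			swap(a, b);		// e.g. by address
 *		mutex_lock(&a->lock);
 *		mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	}
 */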
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
	unsigned long flags;

	/*
	 * As a performance optimization, release the lock before doing the
	 * wakeup related duties to follow. This allows other tasks to acquire
	 * the lock sooner, while still handling cleanups in past unlock calls.
	 * This can be done as we do not enforce strict equivalence between the
	 * mutex counter and wait_list.
	 *
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}
/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_unlock_common_slowpath(lock, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
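
/*
 * Usage sketch (illustrative only; my_dev and my_dev_ioctl are made-up
 * names): interruptible acquisition in a syscall-like path, where a
 * pending signal should abort the wait rather than block forever.
 *
 *	static long my_dev_ioctl(struct my_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;	// interrupted by a signal
 *		// ... critical section ...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */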
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	/* No need to trylock if the mutex is locked. */
	if (mutex_is_locked(lock))
		return 0;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
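
/*
 * Usage sketch (illustrative only; dev, do_quick_work and defer_work are
 * made-up names). Note the inverted return convention relative to
 * down_trylock(): nonzero means the lock was acquired.
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		do_quick_work(dev);	// got the lock without sleeping
 *		mutex_unlock(&dev->lock);
 *	} else {
 *		defer_work(dev);	// contended: try again later
 *	}
 */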
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we decremented to 0; return false
 * otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
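
/*
 * Usage sketch (illustrative only; my_obj, my_obj_put and obj_list_lock
 * are made-up names): the classic refcounted-teardown pattern, where the
 * final reference must drop under the list lock so no other task can
 * re-find the object while it is being freed.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *			list_del(&obj->node);	// now invisible to lookups
 *			mutex_unlock(&obj_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */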