/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/timer.h>

#include "rtmutex_common.h"
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may be
 * NULL in this small window, hence this can be a transitional state.
 *
 * (**) There is a small window in which bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
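
/*
 * Illustration only (not used by the code below): given the encoding
 * above, the owner task and the waiters bit can be decoded from the
 * raw lock->owner word roughly like this; owner_of() is a made-up
 * name for this sketch, the real accessors live in rtmutex_common.h
 * (rt_mutex_owner() and friends):
 *
 *	static struct task_struct *owner_of(struct rt_mutex *lock)
 *	{
 *		unsigned long val = (unsigned long)lock->owner;
 *
 *		return (struct task_struct *)(val & ~RT_MUTEX_HAS_WAITERS);
 *	}
 */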
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}
static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}
/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
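
/*
 * Illustration only: with the cmpxchg variant above, an uncontended
 * acquire and release reduce to a single atomic operation each:
 *
 *	if (rt_mutex_cmpxchg(lock, NULL, current))
 *		...	// got the lock, wait_lock never taken
 *
 *	if (rt_mutex_cmpxchg(lock, current, NULL))
 *		...	// released, nobody was waiting
 *
 * Both fail as soon as RT_MUTEX_HAS_WAITERS is set in lock->owner,
 * which forces the caller into the slow path under lock->wait_lock.
 */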
/*
 * Calculate task priority from the waiter list priority.
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting.
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}
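
/*
 * Worked example (illustrative numbers): a SCHED_NORMAL task with
 * normal_prio 120 that owns a lock whose top pi-waiter is a SCHED_FIFO
 * task of prio 50 gets an effective prio of min(50, 120) = 50, i.e. it
 * runs boosted at the waiter's priority until it releases the lock.
 * Remember that lower prio values mean higher priority here.
 */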
/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}
/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;
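
/*
 * This limit is exposed as a sysctl (kernel.max_lock_depth, wired up
 * in kernel/sysctl.c), so an administrator can raise it at runtime:
 *
 *	# sysctl -w kernel.max_lock_depth=2048
 *
 * Sketch only; the default of 1024 is ample for sane lock nesting.
 */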
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage count by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);
	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task() before!
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (!task_has_pi_waiters(task))
		goto out_unlock_pi;

	/*
	 * If deadlock detection is off, we stop here if we
	 * are not the top pi waiter of the task.
	 */
	if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off then we check, if further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;
	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/*
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);
	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */
		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		goto out_put_task;
	}
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
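
/*
 * Illustration only, a typical chain walk: task A (prio 50) blocks on
 * lock L1 owned by B (prio 70), which is itself blocked on L2 owned by
 * C (prio 90). Enqueueing A boosts B to 50; since B->pi_blocked_on is
 * set, the walk above runs on B: it requeues B's waiter on L2 with the
 * new prio, boosts C to 50 as well, and stops because C is not blocked
 * on anything. (Lower prio values mean higher priority.)
 */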
/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * It will get the lock because of one of these conditions:
	 * 1) there is no waiter
	 * 2) higher priority than waiters
	 * 3) it is top waiter
	 */
	if (rt_mutex_has_waiters(lock)) {
		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
			if (!waiter || waiter != rt_mutex_top_waiter(lock))
				return 0;
		}
	}

	if (waiter || rt_mutex_has_waiters(lock)) {
		unsigned long flags;
		struct rt_mutex_waiter *top;

		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/* remove the queued waiter. */
		if (waiter) {
			plist_del(&waiter->list_entry, &lock->wait_list);
			task->pi_blocked_on = NULL;
		}

		/*
		 * We have to enqueue the top waiter (if it exists) into
		 * task->pi_waiters list.
		 */
		if (rt_mutex_has_waiters(lock)) {
			top = rt_mutex_top_waiter(lock);
			top->pi_list_entry.prio = top->list_entry.prio;
			plist_add(&top->pi_list_entry, &task->pi_waiters);
		}
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}
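
/*
 * Illustration only: with two queued waiters of prio 50 and 70, a third
 * task of prio 60 calling try_to_take_rt_mutex() fails the
 * "task->prio >= top waiter prio" test against 50 and returns 0, while
 * the prio-50 top waiter itself (passed in as @waiter) is allowed to
 * take the lock and is dequeued from the wait list above.
 */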
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLOCK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (detect_deadlock && owner == task)
		return -EDEADLK;
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, task->prio);
	plist_node_init(&waiter->pi_list_entry, task->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;
	if (waiter == rt_mutex_top_waiter(lock)) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);

	rt_mutex_set_owner(lock, NULL);

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	wake_up_process(waiter->task);
}
/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and
 * have just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (!owner)
		return;

	if (first) {

		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
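
/*
 * Illustration only: if the scheduler policy of a task p that is
 * currently blocked on an rt_mutex is changed, e.g. in-kernel via
 *
 *	struct sched_param sp = { .sched_priority = 80 };
 *
 *	sched_setscheduler(p, SCHED_FIFO, &sp);
 *
 * the scheduler ends up calling rt_mutex_adjust_pi(p), which requeues
 * p's waiter at the new priority and walks the chain to propagate the
 * change to the lock owner(s).
 */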
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	the rt_mutex to take
 * @state:	the state the task should block in (TASK_INTERRUPTIBLE
 *		or TASK_UNINTERRUPTIBLE)
 * @timeout:	the pre-initialized and started timer, or NULL for none
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock, current, NULL);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}
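
/*
 * Illustration only: if a prio-120 task was boosted to 50 while holding
 * the lock, the sequence above wakes the prio-50 top waiter first and
 * only then calls rt_mutex_adjust_prio(current), which drops the
 * releasing task back to 120 outside of lock->wait_lock.
 */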
/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}
static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}
static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			 the timeout structure is provided
 *			 by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
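
/*
 * Illustration only, not part of this file: the canonical usage of the
 * API above, assuming a hypothetical statically defined my_lock:
 *
 *	static DEFINE_RT_MUTEX(my_lock);	// from <linux/rtmutex.h>
 *
 *	rt_mutex_lock(&my_lock);
 *	...	// critical section; owner is PI-boosted by waiters
 *	rt_mutex_unlock(&my_lock);
 *
 *	if (rt_mutex_trylock(&my_lock)) {	// non-blocking attempt
 *		...
 *		rt_mutex_unlock(&my_lock);
 *	}
 */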
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);
/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
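
/*
 * Illustration only: dynamically allocated locks are normally set up
 * via the rt_mutex_init() wrapper from <linux/rtmutex.h>, which ends
 * up here and supplies a name for the debug case, roughly:
 *
 *	struct rt_mutex *lock = kmalloc(sizeof(*lock), GFP_KERNEL);
 *
 *	if (lock)
 *		rt_mutex_init(lock);	// calls __rt_mutex_init()
 */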
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}
/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}
/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}
/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * within the wait_lock.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}
/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter,
			       int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}