2 * Fast Userspace Mutexes (which I call "Futexes!").
3 * (C) Rusty Russell, IBM 2002
5 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8 * Removed page pinning, fix privately mapped COW pages and other cleanups
9 * (C) Copyright 2003, 2004 Jamie Lokier
11 * Robust futex support started by Ingo Molnar
12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15 * PI-futex support started by Ingo Molnar and Thomas Gleixner
16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19 * PRIVATE futexes by Eric Dumazet
20 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23 * Copyright (C) IBM Corporation, 2009
24 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
26 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27 * enough at me, Linus for the original (flawed) idea, Matthew
28 * Kirkwood for proof-of-concept implementation.
30 * "The futexes are also cursed."
31 * "But they come in a choice of three flavours!"
33 * This program is free software; you can redistribute it and/or modify
34 * it under the terms of the GNU General Public License as published by
35 * the Free Software Foundation; either version 2 of the License, or
36 * (at your option) any later version.
38 * This program is distributed in the hope that it will be useful,
39 * but WITHOUT ANY WARRANTY; without even the implied warranty of
40 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 * GNU General Public License for more details.
43 * You should have received a copy of the GNU General Public License
44 * along with this program; if not, write to the Free Software
45 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
47 #include <linux/slab.h>
48 #include <linux/poll.h>
50 #include <linux/file.h>
51 #include <linux/jhash.h>
52 #include <linux/init.h>
53 #include <linux/futex.h>
54 #include <linux/mount.h>
55 #include <linux/pagemap.h>
56 #include <linux/syscalls.h>
57 #include <linux/signal.h>
58 #include <linux/module.h>
59 #include <linux/magic.h>
60 #include <linux/pid.h>
61 #include <linux/nsproxy.h>
63 #include <asm/futex.h>
65 #include "rtmutex_common.h"
67 int __read_mostly futex_cmpxchg_enabled;
69 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
72 * Futex flags used to encode options to functions and preserve them across
73 * restarts.
75 #define FLAGS_SHARED 0x01
76 #define FLAGS_CLOCKRT 0x02
77 #define FLAGS_HAS_TIMEOUT 0x04
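/*
 * Editor's illustration (not in the original source): do_futex() below
 * derives these flags from the futex op word, so a plain FUTEX_WAIT
 * (i.e. without FUTEX_PRIVATE_FLAG) that is later set up for syscall
 * restart with a timeout ends up carrying:
 *
 *	unsigned int flags = FLAGS_SHARED | FLAGS_HAS_TIMEOUT;
 */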
80 * Priority Inheritance state:
82 struct futex_pi_state {
84 * list of 'owned' pi_state instances - these have to be
85 * cleaned up in do_exit() if the task exits prematurely:
87 struct list_head list;
92 struct rt_mutex pi_mutex;
94 struct task_struct *owner;
101 * struct futex_q - The hashed futex queue entry, one per waiting task
102 * @list: priority-sorted list of tasks waiting on this futex
103 * @task: the task waiting on the futex
104 * @lock_ptr: the hash bucket lock
105 * @key: the key the futex is hashed on
106 * @pi_state: optional priority inheritance state
107 * @rt_waiter: rt_waiter storage for use with requeue_pi
108 * @requeue_pi_key: the requeue_pi target futex key
109 * @bitset: bitset for the optional bitmasked wakeup
111 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
112 * we can wake only the relevant ones (hashed queues may be shared).
114 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
115 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
116 * The order of wakeup is always to make the first condition true, then
117 * the second.
119 * PI futexes are typically woken before they are removed from the hash list via
120 * the rt_mutex code. See unqueue_me_pi().
123 struct plist_node list;
125 struct task_struct *task;
126 spinlock_t *lock_ptr;
128 struct futex_pi_state *pi_state;
129 struct rt_mutex_waiter *rt_waiter;
130 union futex_key *requeue_pi_key;
134 static const struct futex_q futex_q_init = {
135 /* list gets initialized in queue_me() */
136 .key = FUTEX_KEY_INIT,
137 .bitset = FUTEX_BITSET_MATCH_ANY
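/*
 * Editor's sketch of the "woken" test described above, as a helper one
 * might write under the stated invariant (illustrative only; real
 * callers such as futex_wait_queue_me() check plist_node_empty() alone,
 * under the proper ordering rules):
 *
 *	static inline int futex_q_woken(struct futex_q *q)
 *	{
 *		return plist_node_empty(&q->list) || q->lock_ptr == NULL;
 *	}
 */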
141 * Hash buckets are shared by all the futex_keys that hash to the same
142 * location. Each key may have multiple futex_q structures, one for each task
143 * waiting on a futex.
145 struct futex_hash_bucket {
147 struct plist_head chain;
150 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
153 * We hash on the keys returned from get_futex_key (see below).
155 static struct futex_hash_bucket *hash_futex(union futex_key *key)
157 u32 hash = jhash2((u32*)&key->both.word,
158 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
159 key->both.offset);
160 return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
164 * Return 1 if two futex_keys are equal, 0 otherwise.
166 static inline int match_futex(union futex_key *key1, union futex_key *key2)
168 return (key1 && key2
169 && key1->both.word == key2->both.word
170 && key1->both.ptr == key2->both.ptr
171 && key1->both.offset == key2->both.offset);
175 * Take a reference to the resource addressed by a key.
176 * Can be called while holding spinlocks.
179 static void get_futex_key_refs(union futex_key *key)
184 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
186 ihold(key->shared.inode);
188 case FUT_OFF_MMSHARED:
189 atomic_inc(&key->private.mm->mm_count);
195 * Drop a reference to the resource addressed by a key.
196 * The hash bucket spinlock must not be held.
198 static void drop_futex_key_refs(union futex_key *key)
200 if (!key->both.ptr) {
201 /* If we're here then we tried to put a key we failed to get */
206 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
208 iput(key->shared.inode);
210 case FUT_OFF_MMSHARED:
211 mmdrop(key->private.mm);
217 * get_futex_key() - Get parameters which are the keys for a futex
218 * @uaddr: virtual address of the futex
219 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
220 * @key: address where result is stored.
222 * Returns a negative error code or 0
223 * The key words are stored in *key on success.
225 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
226 * offset_within_page). For private mappings, it's (uaddr, current->mm).
227 * We can usually work out the index without swapping in the page.
229 * lock_page() might sleep, the caller should not hold a spinlock.
232 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
234 unsigned long address = (unsigned long)uaddr;
235 struct mm_struct *mm = current->mm;
236 struct page *page, *page_head;
240 * The futex address must be "naturally" aligned.
242 key->both.offset = address % PAGE_SIZE;
243 if (unlikely((address % sizeof(u32)) != 0))
245 address -= key->both.offset;
248 * PROCESS_PRIVATE futexes are fast.
249 * As the mm cannot disappear under us and the 'key' only needs
250 * virtual address, we don't even have to find the underlying vma.
251 * Note: We do have to check 'uaddr' is a valid user address,
252 * but access_ok() should be faster than find_vma()
255 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
257 key->private.mm = mm;
258 key->private.address = address;
259 get_futex_key_refs(key);
264 err = get_user_pages_fast(address, 1, 1, &page);
268 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
270 if (unlikely(PageTail(page))) {
272 /* serialize against __split_huge_page_splitting() */
274 if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
275 page_head = compound_head(page);
277 * page_head is valid pointer but we must pin
278 * it before taking the PG_lock and/or
279 * PG_compound_lock. The moment we re-enable
280 * irqs __split_huge_page_splitting() can
281 * return and the head page can be freed from
282 * under us. We can't take the PG_lock and/or
283 * PG_compound_lock on a page that could be
284 * freed from under us.
286 if (page != page_head) {
297 page_head = compound_head(page);
298 if (page != page_head) {
304 lock_page(page_head);
305 if (!page_head->mapping) {
306 unlock_page(page_head);
312 * Private mappings are handled in a simple way.
314 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
315 * it's a read-only handle, it's expected that futexes attach to
316 * the object, not the particular process.
318 if (PageAnon(page_head)) {
319 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
320 key->private.mm = mm;
321 key->private.address = address;
323 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
324 key->shared.inode = page_head->mapping->host;
325 key->shared.pgoff = page_head->index;
328 get_futex_key_refs(key);
330 unlock_page(page_head);
335 static inline void put_futex_key(union futex_key *key)
337 drop_futex_key_refs(key);
341 * fault_in_user_writeable() - Fault in user address and verify RW access
342 * @uaddr: pointer to faulting user space address
344 * Slow path to fixup the fault we just took in the atomic write
345 * access to @uaddr.
347 * We have no generic implementation of a non-destructive write to the
348 * user address. We know that we faulted in the atomic pagefault
349 * disabled section so we can as well avoid the #PF overhead by
350 * calling get_user_pages() right away.
352 static int fault_in_user_writeable(u32 __user *uaddr)
354 struct mm_struct *mm = current->mm;
357 down_read(&mm->mmap_sem);
358 ret = get_user_pages(current, mm, (unsigned long)uaddr,
359 1, 1, 0, NULL, NULL);
360 up_read(&mm->mmap_sem);
362 return ret < 0 ? ret : 0;
366 * futex_top_waiter() - Return the highest priority waiter on a futex
367 * @hb: the hash bucket the futex_q's reside in
368 * @key: the futex key (to distinguish it from other futex futex_q's)
370 * Must be called with the hb lock held.
372 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
373 union futex_key *key)
375 struct futex_q *this;
377 plist_for_each_entry(this, &hb->chain, list) {
378 if (match_futex(&this->key, key))
379 return this;
384 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
389 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
395 static int get_futex_value_locked(u32 *dest, u32 __user *from)
400 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
403 return ret ? -EFAULT : 0;
410 static int refill_pi_state_cache(void)
412 struct futex_pi_state *pi_state;
414 if (likely(current->pi_state_cache))
417 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
422 INIT_LIST_HEAD(&pi_state->list);
423 /* pi_mutex gets initialized later */
424 pi_state->owner = NULL;
425 atomic_set(&pi_state->refcount, 1);
426 pi_state->key = FUTEX_KEY_INIT;
428 current->pi_state_cache = pi_state;
433 static struct futex_pi_state * alloc_pi_state(void)
435 struct futex_pi_state *pi_state = current->pi_state_cache;
438 current->pi_state_cache = NULL;
443 static void free_pi_state(struct futex_pi_state *pi_state)
445 if (!atomic_dec_and_test(&pi_state->refcount))
449 * If pi_state->owner is NULL, the owner is most probably dying
450 * and has cleaned up the pi_state already
452 if (pi_state->owner) {
453 raw_spin_lock_irq(&pi_state->owner->pi_lock);
454 list_del_init(&pi_state->list);
455 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
457 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
460 if (current->pi_state_cache)
464 * pi_state->list is already empty.
465 * clear pi_state->owner.
466 * refcount is at 0 - put it back to 1.
468 pi_state->owner = NULL;
469 atomic_set(&pi_state->refcount, 1);
470 current->pi_state_cache = pi_state;
475 * Look up the task based on what TID userspace gave us.
478 static struct task_struct * futex_find_get_task(pid_t pid)
480 struct task_struct *p;
483 p = find_task_by_vpid(pid);
493 * This task is holding PI mutexes at exit time => bad.
494 * Kernel cleans up PI-state, but userspace is likely hosed.
495 * (Robust-futex cleanup is separate and might save the day for userspace.)
497 void exit_pi_state_list(struct task_struct *curr)
499 struct list_head *next, *head = &curr->pi_state_list;
500 struct futex_pi_state *pi_state;
501 struct futex_hash_bucket *hb;
502 union futex_key key = FUTEX_KEY_INIT;
504 if (!futex_cmpxchg_enabled)
507 * We are a ZOMBIE and nobody can enqueue itself on
508 * pi_state_list anymore, but we have to be careful
509 * versus waiters unqueueing themselves:
511 raw_spin_lock_irq(&curr->pi_lock);
512 while (!list_empty(head)) {
515 pi_state = list_entry(next, struct futex_pi_state, list);
517 hb = hash_futex(&key);
518 raw_spin_unlock_irq(&curr->pi_lock);
520 spin_lock(&hb->lock);
522 raw_spin_lock_irq(&curr->pi_lock);
524 * We dropped the pi-lock, so re-check whether this
525 * task still owns the PI-state:
527 if (head->next != next) {
528 spin_unlock(&hb->lock);
532 WARN_ON(pi_state->owner != curr);
533 WARN_ON(list_empty(&pi_state->list));
534 list_del_init(&pi_state->list);
535 pi_state->owner = NULL;
536 raw_spin_unlock_irq(&curr->pi_lock);
538 rt_mutex_unlock(&pi_state->pi_mutex);
540 spin_unlock(&hb->lock);
542 raw_spin_lock_irq(&curr->pi_lock);
544 raw_spin_unlock_irq(&curr->pi_lock);
548 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
549 union futex_key *key, struct futex_pi_state **ps)
551 struct futex_pi_state *pi_state = NULL;
552 struct futex_q *this, *next;
553 struct plist_head *head;
554 struct task_struct *p;
555 pid_t pid = uval & FUTEX_TID_MASK;
559 plist_for_each_entry_safe(this, next, head, list) {
560 if (match_futex(&this->key, key)) {
562 * Another waiter already exists - bump up
563 * the refcount and return its pi_state:
565 pi_state = this->pi_state;
567 * Userspace might have messed up non-PI and PI futexes
569 if (unlikely(!pi_state))
572 WARN_ON(!atomic_read(&pi_state->refcount));
575 * When pi_state->owner is NULL then the owner died
576 * and another waiter is on the fly. pi_state->owner
577 * is fixed up by the task which acquires
578 * pi_state->rt_mutex.
580 * We do not check for pid == 0 which can happen when
581 * the owner died and robust_list_exit() cleared the
582 * TID.
584 if (pid && pi_state->owner) {
586 * Bail out if user space manipulated the
587 * futex value.
589 if (pid != task_pid_vnr(pi_state->owner))
593 atomic_inc(&pi_state->refcount);
601 * We are the first waiter - try to look up the real owner and attach
602 * the new pi_state to it, but bail out when TID = 0
606 p = futex_find_get_task(pid);
611 * We need to look at the task state flags to figure out
612 * whether the task is exiting. To protect against the do_exit
613 * change of the task flags, we do this protected by
614 * p->pi_lock:
616 raw_spin_lock_irq(&p->pi_lock);
617 if (unlikely(p->flags & PF_EXITING)) {
619 * The task is on the way out. When PF_EXITPIDONE is
620 * set, we know that the task has finished the
621 * cleanup:
623 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
625 raw_spin_unlock_irq(&p->pi_lock);
630 pi_state = alloc_pi_state();
633 * Initialize the pi_mutex in locked state and make 'p'
634 * the owner of it:
636 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
638 /* Store the key for possible exit cleanups: */
639 pi_state->key = *key;
641 WARN_ON(!list_empty(&pi_state->list));
642 list_add(&pi_state->list, &p->pi_state_list);
644 raw_spin_unlock_irq(&p->pi_lock);
654 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
655 * @uaddr: the pi futex user address
656 * @hb: the pi futex hash bucket
657 * @key: the futex key associated with uaddr and hb
658 * @ps: the pi_state pointer where we store the result of the
659 * lookup
660 * @task: the task to perform the atomic lock work for. This will
661 * be "current" except in the case of requeue pi.
662 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
664 * Returns:
665 * 0 - ready to wait
666 * 1 - acquired the lock
667 * <0 - error
669 * The hb->lock and futex_key refs shall be held by the caller.
671 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
672 union futex_key *key,
673 struct futex_pi_state **ps,
674 struct task_struct *task, int set_waiters)
676 int lock_taken, ret, ownerdied = 0;
677 u32 uval, newval, curval;
680 ret = lock_taken = 0;
683 * To avoid races, we attempt to take the lock here again
684 * (by doing a 0 -> TID atomic cmpxchg), while holding all
685 * the locks. It will most likely not succeed.
687 newval = task_pid_vnr(task);
688 if (set_waiters)
689 newval |= FUTEX_WAITERS;
691 curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
693 if (unlikely(curval == -EFAULT))
699 if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task)))
703 * Surprise - we got the lock. Just return to userspace:
705 if (unlikely(!curval))
711 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
712 * to wake at the next unlock.
714 newval = curval | FUTEX_WAITERS;
717 * There are two cases where a futex might have no owner (the
718 * owner TID is 0): OWNER_DIED. We take over the futex in this
719 * case. We also do an unconditional takeover when the owner
720 * of the futex died.
722 * This is safe as we are protected by the hash bucket lock!
724 if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
725 /* Keep the OWNER_DIED bit */
726 newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
731 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
733 if (unlikely(curval == -EFAULT))
735 if (unlikely(curval != uval))
739 * We took the lock due to owner-died takeover.
741 if (unlikely(lock_taken))
745 * We don't have the lock. Look up the PI state (or create it if
746 * we are the first waiter):
748 ret = lookup_pi_state(uval, hb, key, ps);
754 * No owner found for this futex. Check if the
755 * OWNER_DIED bit is set to figure out whether
756 * this is a robust futex or not.
758 if (get_futex_value_locked(&curval, uaddr))
762 * We simply start over in case of a robust
763 * futex. The code above will take the futex
764 * and return happy.
766 if (curval & FUTEX_OWNER_DIED) {
779 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
780 * @q: The futex_q to unqueue
782 * The q->lock_ptr must not be NULL and must be held by the caller.
784 static void __unqueue_futex(struct futex_q *q)
786 struct futex_hash_bucket *hb;
788 if (WARN_ON(!q->lock_ptr || !spin_is_locked(q->lock_ptr)
789 || plist_node_empty(&q->list)))
792 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
793 plist_del(&q->list, &hb->chain);
797 * The hash bucket lock must be held when this is called.
798 * Afterwards, the futex_q must not be accessed.
800 static void wake_futex(struct futex_q *q)
802 struct task_struct *p = q->task;
805 * We set q->lock_ptr = NULL _before_ we wake up the task. If
806 * a non-futex wake up happens on another CPU then the task
807 * might exit and p would dereference a non-existing task
808 * struct. Prevent this by holding a reference on p across the
809 * wake up.
815 * The waiting task can free the futex_q as soon as
816 * q->lock_ptr = NULL is written, without taking any locks. A
817 * memory barrier is required here to prevent the following
818 * store to lock_ptr from getting ahead of the plist_del.
823 wake_up_state(p, TASK_NORMAL);
827 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
829 struct task_struct *new_owner;
830 struct futex_pi_state *pi_state = this->pi_state;
837 * If current does not own the pi_state then the futex is
838 * inconsistent and user space fiddled with the futex value.
840 if (pi_state->owner != current)
843 raw_spin_lock(&pi_state->pi_mutex.wait_lock);
844 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
847 * It is possible that the next waiter (the one that brought
848 * this owner to the kernel) timed out and is no longer
849 * waiting on the lock.
852 new_owner = this->task;
855 * We pass it to the next owner. (The WAITERS bit is always
856 * kept enabled while there is PI state around. We must also
857 * preserve the owner died bit.)
859 if (!(uval & FUTEX_OWNER_DIED)) {
862 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
864 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
866 if (curval == -EFAULT)
868 else if (curval != uval)
871 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
876 raw_spin_lock_irq(&pi_state->owner->pi_lock);
877 WARN_ON(list_empty(&pi_state->list));
878 list_del_init(&pi_state->list);
879 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
881 raw_spin_lock_irq(&new_owner->pi_lock);
882 WARN_ON(!list_empty(&pi_state->list));
883 list_add(&pi_state->list, &new_owner->pi_state_list);
884 pi_state->owner = new_owner;
885 raw_spin_unlock_irq(&new_owner->pi_lock);
887 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
888 rt_mutex_unlock(&pi_state->pi_mutex);
893 static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
898 * There is no waiter, so we unlock the futex. The owner died
899 * bit need not be preserved here. We are the owner:
901 oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
903 if (oldval == -EFAULT)
912 * Express the locking dependencies for lockdep:
915 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
917 if (hb1 <= hb2) {
918 spin_lock(&hb1->lock);
919 if (hb1 < hb2)
920 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
921 } else { /* hb1 > hb2 */
922 spin_lock(&hb2->lock);
923 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
928 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
930 spin_unlock(&hb1->lock);
931 if (hb1 != hb2)
932 spin_unlock(&hb2->lock);
936 * Wake up waiters matching bitset queued on this futex (uaddr).
939 futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
941 struct futex_hash_bucket *hb;
942 struct futex_q *this, *next;
943 struct plist_head *head;
944 union futex_key key = FUTEX_KEY_INIT;
950 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
951 if (unlikely(ret != 0))
954 hb = hash_futex(&key);
955 spin_lock(&hb->lock);
958 plist_for_each_entry_safe(this, next, head, list) {
959 if (match_futex(&this->key, &key)) {
960 if (this->pi_state || this->rt_waiter) {
965 /* Check if one of the bits is set in both bitsets */
966 if (!(this->bitset & bitset))
970 if (++ret >= nr_wake)
975 spin_unlock(&hb->lock);
982 * Wake up all waiters hashed on the physical page that is mapped
983 * to this virtual address:
986 futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
987 int nr_wake, int nr_wake2, int op)
989 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
990 struct futex_hash_bucket *hb1, *hb2;
991 struct plist_head *head;
992 struct futex_q *this, *next;
996 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
997 if (unlikely(ret != 0))
999 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
1000 if (unlikely(ret != 0))
1003 hb1 = hash_futex(&key1);
1004 hb2 = hash_futex(&key2);
1007 double_lock_hb(hb1, hb2);
1008 op_ret = futex_atomic_op_inuser(op, uaddr2);
1009 if (unlikely(op_ret < 0)) {
1011 double_unlock_hb(hb1, hb2);
1015 * we don't get EFAULT from MMU faults if we don't have an MMU,
1016 * but we might get them from range checking
1022 if (unlikely(op_ret != -EFAULT)) {
1027 ret = fault_in_user_writeable(uaddr2);
1031 if (!(flags & FLAGS_SHARED))
1034 put_futex_key(&key2);
1035 put_futex_key(&key1);
1041 plist_for_each_entry_safe(this, next, head, list) {
1042 if (match_futex(&this->key, &key1)) {
1044 if (++ret >= nr_wake)
1053 plist_for_each_entry_safe(this, next, head, list) {
1054 if (match_futex(&this->key, &key2)) {
1056 if (++op_ret >= nr_wake2)
1063 double_unlock_hb(hb1, hb2);
1065 put_futex_key(&key2);
1067 put_futex_key(&key1);
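/*
 * Editor's usage sketch for the wake-op above (w1/w2 are hypothetical
 * userspace futex words): atomically set *w2 = 0, wake up to one waiter
 * on w1, and wake up to one waiter on w2 if the old value of *w2 was
 * nonzero. Note that nr_wake2 travels in the timeout argument slot:
 *
 *	int op = FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_NE, 0);
 *	syscall(SYS_futex, &w1, FUTEX_WAKE_OP, 1, (void *)1UL, &w2, op);
 */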
1073 * requeue_futex() - Requeue a futex_q from one hb to another
1074 * @q: the futex_q to requeue
1075 * @hb1: the source hash_bucket
1076 * @hb2: the target hash_bucket
1077 * @key2: the new key for the requeued futex_q
1080 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1081 struct futex_hash_bucket *hb2, union futex_key *key2)
1085 * If key1 and key2 hash to the same bucket, no need to
1086 * requeue.
1088 if (likely(&hb1->chain != &hb2->chain)) {
1089 plist_del(&q->list, &hb1->chain);
1090 plist_add(&q->list, &hb2->chain);
1091 q->lock_ptr = &hb2->lock;
1092 #ifdef CONFIG_DEBUG_PI_LIST
1093 q->list.plist.spinlock = &hb2->lock;
1096 get_futex_key_refs(key2);
1101 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1103 * @key: the key of the requeue target futex
1104 * @hb: the hash_bucket of the requeue target futex
1106 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1107 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1108 * to the requeue target futex so the waiter can detect the wakeup on the right
1109 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1110 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1111 * to protect access to the pi_state to fixup the owner later. Must be called
1112 * with both q->lock_ptr and hb->lock held.
1115 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1116 struct futex_hash_bucket *hb)
1118 get_futex_key_refs(key);
1123 WARN_ON(!q->rt_waiter);
1124 q->rt_waiter = NULL;
1126 q->lock_ptr = &hb->lock;
1127 #ifdef CONFIG_DEBUG_PI_LIST
1128 q->list.plist.spinlock = &hb->lock;
1131 wake_up_state(q->task, TASK_NORMAL);
1135 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1136 * @pifutex: the user address of the to futex
1137 * @hb1: the from futex hash bucket, must be locked by the caller
1138 * @hb2: the to futex hash bucket, must be locked by the caller
1139 * @key1: the from futex key
1140 * @key2: the to futex key
1141 * @ps: address to store the pi_state pointer
1142 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1144 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1145 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1146 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1147 * hb1 and hb2 must be held by the caller.
1149 * Returns:
1150 * 0 - failed to acquire the lock atomically
1151 * 1 - acquired the lock
1152 * <0 - error
1154 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1155 struct futex_hash_bucket *hb1,
1156 struct futex_hash_bucket *hb2,
1157 union futex_key *key1, union futex_key *key2,
1158 struct futex_pi_state **ps, int set_waiters)
1160 struct futex_q *top_waiter = NULL;
1164 if (get_futex_value_locked(&curval, pifutex))
1168 * Find the top_waiter and determine if there are additional waiters.
1169 * If the caller intends to requeue more than 1 waiter to pifutex,
1170 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1171 * as we have means to handle the possible fault. If not, don't set
1172 * the bit unnecessarily as it will force the subsequent unlock to enter
1173 * the kernel.
1175 top_waiter = futex_top_waiter(hb1, key1);
1177 /* There are no waiters, nothing for us to do. */
1181 /* Ensure we requeue to the expected futex. */
1182 if (!match_futex(top_waiter->requeue_pi_key, key2))
1186 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1187 * the contended case or if set_waiters is 1. The pi_state is returned
1188 * in ps in contended cases.
1190 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1191 set_waiters);
1192 if (ret == 1)
1193 requeue_pi_wake_futex(top_waiter, key2, hb2);
1199 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1200 * @uaddr1: source futex user address
1201 * @flags: futex flags (FLAGS_SHARED, etc.)
1202 * @uaddr2: target futex user address
1203 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1204 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1205 * @cmpval: @uaddr1 expected value (or %NULL)
1206 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1207 * pi futex (pi to pi requeue is not supported)
1209 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1210 * uaddr2 atomically on behalf of the top waiter.
1212 * Returns:
1213 * >=0 - on success, the number of tasks requeued or woken
1214 * <0 - on error
1216 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1217 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1218 u32 *cmpval, int requeue_pi)
1220 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1221 int drop_count = 0, task_count = 0, ret;
1222 struct futex_pi_state *pi_state = NULL;
1223 struct futex_hash_bucket *hb1, *hb2;
1224 struct plist_head *head1;
1225 struct futex_q *this, *next;
1230 * requeue_pi requires a pi_state, try to allocate it now
1231 * without any locks in case it fails.
1233 if (refill_pi_state_cache())
1236 * requeue_pi must wake as many tasks as it can, up to nr_wake
1237 * + nr_requeue, since it acquires the rt_mutex prior to
1238 * returning to userspace, so as to not leave the rt_mutex with
1239 * waiters and no owner. However, second and third wake-ups
1240 * cannot be predicted as they involve race conditions with the
1241 * first wake and a fault while looking up the pi_state. Both
1242 * pthread_cond_signal() and pthread_cond_broadcast() should
1243 * use nr_wake=1.
1250 if (pi_state != NULL) {
1252 * We will have to look up the pi_state again, so free this one
1253 * to keep the accounting correct.
1255 free_pi_state(pi_state);
1259 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
1260 if (unlikely(ret != 0))
1262 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
1263 if (unlikely(ret != 0))
1266 hb1 = hash_futex(&key1);
1267 hb2 = hash_futex(&key2);
1270 double_lock_hb(hb1, hb2);
1272 if (likely(cmpval != NULL)) {
1275 ret = get_futex_value_locked(&curval, uaddr1);
1277 if (unlikely(ret)) {
1278 double_unlock_hb(hb1, hb2);
1280 ret = get_user(curval, uaddr1);
1284 if (!(flags & FLAGS_SHARED))
1287 put_futex_key(&key2);
1288 put_futex_key(&key1);
1291 if (curval != *cmpval) {
1297 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1299 * Attempt to acquire uaddr2 and wake the top waiter. If we
1300 * intend to requeue waiters, force setting the FUTEX_WAITERS
1301 * bit. We force this here where we are able to easily handle
1302 * faults rather than in the requeue loop below.
1304 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1305 &key2, &pi_state, nr_requeue);
1308 * At this point the top_waiter has either taken uaddr2 or is
1309 * waiting on it. If the former, then the pi_state will not
1310 * exist yet, look it up one more time to ensure we have a
1311 * reference to it.
1317 ret = get_futex_value_locked(&curval2, uaddr2);
1318 if (!ret)
1319 ret = lookup_pi_state(curval2, hb2, &key2,
1320 &pi_state);
1327 double_unlock_hb(hb1, hb2);
1328 put_futex_key(&key2);
1329 put_futex_key(&key1);
1330 ret = fault_in_user_writeable(uaddr2);
1335 /* The owner was exiting, try again. */
1336 double_unlock_hb(hb1, hb2);
1337 put_futex_key(&key2);
1338 put_futex_key(&key1);
1346 head1 = &hb1->chain;
1347 plist_for_each_entry_safe(this, next, head1, list) {
1348 if (task_count - nr_wake >= nr_requeue)
1351 if (!match_futex(&this->key, &key1))
1355 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1356 * be paired with each other and no other futex ops.
1358 if ((requeue_pi && !this->rt_waiter) ||
1359 (!requeue_pi && this->rt_waiter)) {
1365 * Wake nr_wake waiters. For requeue_pi, if we acquired the
1366 * lock, we already woke the top_waiter. If not, it will be
1367 * woken by futex_unlock_pi().
1369 if (++task_count <= nr_wake && !requeue_pi) {
1374 /* Ensure we requeue to the expected futex for requeue_pi. */
1375 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1381 * Requeue nr_requeue waiters and possibly one more in the case
1382 * of requeue_pi if we couldn't acquire the lock atomically.
1385 /* Prepare the waiter to take the rt_mutex. */
1386 atomic_inc(&pi_state->refcount);
1387 this->pi_state = pi_state;
1388 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1389 this->rt_waiter,
1390 this->task, 1);
1391 if (ret == 1) {
1392 /* We got the lock. */
1393 requeue_pi_wake_futex(this, &key2, hb2);
1398 this->pi_state = NULL;
1399 free_pi_state(pi_state);
1403 requeue_futex(this, hb1, hb2, &key2);
1408 double_unlock_hb(hb1, hb2);
1411 * drop_futex_key_refs() must be called outside the spinlocks. During
1412 * the requeue we moved futex_q's from the hash bucket at key1 to the
1413 * one at key2 and updated their key pointer. We no longer need to
1414 * hold the references to key1.
1416 while (--drop_count >= 0)
1417 drop_futex_key_refs(&key1);
1420 put_futex_key(&key2);
1422 put_futex_key(&key1);
1424 if (pi_state != NULL)
1425 free_pi_state(pi_state);
1426 return ret ? ret : task_count;
1429 /* The key must be already stored in q->key. */
1430 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1431 __acquires(&hb->lock)
1433 struct futex_hash_bucket *hb;
1435 hb = hash_futex(&q->key);
1436 q->lock_ptr = &hb->lock;
1438 spin_lock(&hb->lock);
1443 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1444 __releases(&hb->lock)
1446 spin_unlock(&hb->lock);
1450 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1451 * @q: The futex_q to enqueue
1452 * @hb: The destination hash bucket
1454 * The hb->lock must be held by the caller, and is released here. A call to
1455 * queue_me() is typically paired with exactly one call to unqueue_me(). The
1456 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1457 * or nothing if the unqueue is done as part of the wake process and the unqueue
1458 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
1459 * an example).
1461 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1462 __releases(&hb->lock)
1467 * The priority used to register this element is
1468 * - either the real thread-priority for the real-time threads
1469 * (i.e. threads with a priority lower than MAX_RT_PRIO)
1470 * - or MAX_RT_PRIO for non-RT threads.
1471 * Thus, all RT-threads are woken first in priority order, and
1472 * the others are woken last, in FIFO order.
1474 prio = min(current->normal_prio, MAX_RT_PRIO);
1476 plist_node_init(&q->list, prio);
1477 #ifdef CONFIG_DEBUG_PI_LIST
1478 q->list.plist.spinlock = &hb->lock;
1480 plist_add(&q->list, &hb->chain);
1482 spin_unlock(&hb->lock);
1486 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1487 * @q: The futex_q to unqueue
1489 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1490 * be paired with exactly one earlier call to queue_me().
1493 * 1 - if the futex_q was still queued (and we unqueued it)
1494 * 0 - if the futex_q was already removed by the waking thread
1496 static int unqueue_me(struct futex_q *q)
1498 spinlock_t *lock_ptr;
1501 /* In the common case we don't take the spinlock, which is nice. */
1503 lock_ptr = q->lock_ptr;
1505 if (lock_ptr != NULL) {
1506 spin_lock(lock_ptr);
1508 * q->lock_ptr can change between reading it and
1509 * spin_lock(), causing us to take the wrong lock. This
1510 * corrects the race condition.
1512 * Reasoning goes like this: if we have the wrong lock,
1513 * q->lock_ptr must have changed (maybe several times)
1514 * between reading it and the spin_lock(). It can
1515 * change again after the spin_lock() but only if it was
1516 * already changed before the spin_lock(). It cannot,
1517 * however, change back to the original value. Therefore
1518 * we can detect whether we acquired the correct lock.
1520 if (unlikely(lock_ptr != q->lock_ptr)) {
1521 spin_unlock(lock_ptr);
1526 BUG_ON(q->pi_state);
1528 spin_unlock(lock_ptr);
1532 drop_futex_key_refs(&q->key);
1537 * PI futexes cannot be requeued and must remove themselves from the
1538 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1539 * and dropped here.
1541 static void unqueue_me_pi(struct futex_q *q)
1542 __releases(q->lock_ptr)
1546 BUG_ON(!q->pi_state);
1547 free_pi_state(q->pi_state);
1550 spin_unlock(q->lock_ptr);
1554 * Fixup the pi_state owner with the new owner.
1556 * Must be called with hash bucket lock held and mm->sem held for non
1557 * private futexes.
1559 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1560 struct task_struct *newowner)
1562 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1563 struct futex_pi_state *pi_state = q->pi_state;
1564 struct task_struct *oldowner = pi_state->owner;
1565 u32 uval, curval, newval;
1569 if (!pi_state->owner)
1570 newtid |= FUTEX_OWNER_DIED;
1573 * We are here either because we stole the rtmutex from the
1574 * pending owner or we are the pending owner which failed to
1575 * get the rtmutex. We have to replace the pending owner TID
1576 * in the user space variable. This must be atomic as we have
1577 * to preserve the owner died bit here.
1579 * Note: We write the user space value _before_ changing the pi_state
1580 * because we can fault here. Imagine swapped out pages or a fork
1581 * that marked all the anonymous memory readonly for cow.
1583 * Modifying pi_state _before_ the user space value would
1584 * leave the pi_state in an inconsistent state when we fault
1585 * here, because we need to drop the hash bucket lock to
1586 * handle the fault. This might be observed in the PID check
1587 * in lookup_pi_state.
1590 if (get_futex_value_locked(&uval, uaddr))
1594 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1596 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1598 if (curval == -EFAULT)
1606 * We fixed up user space. Now we need to fix the pi_state
1607 * itself.
1609 if (pi_state->owner != NULL) {
1610 raw_spin_lock_irq(&pi_state->owner->pi_lock);
1611 WARN_ON(list_empty(&pi_state->list));
1612 list_del_init(&pi_state->list);
1613 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1616 pi_state->owner = newowner;
1618 raw_spin_lock_irq(&newowner->pi_lock);
1619 WARN_ON(!list_empty(&pi_state->list));
1620 list_add(&pi_state->list, &newowner->pi_state_list);
1621 raw_spin_unlock_irq(&newowner->pi_lock);
1625 * To handle the page fault we need to drop the hash bucket
1626 * lock here. That gives the other task (either the pending
1627 * owner itself or the task which stole the rtmutex) the
1628 * chance to try the fixup of the pi_state. So once we are
1629 * back from handling the fault we need to check the pi_state
1630 * after reacquiring the hash bucket lock and before trying to
1631 * do another fixup. When the fixup has been done already we
1632 * simply return.
1635 spin_unlock(q->lock_ptr);
1637 ret = fault_in_user_writeable(uaddr);
1639 spin_lock(q->lock_ptr);
1642 * Check if someone else fixed it for us:
1644 if (pi_state->owner != oldowner)
1653 static long futex_wait_restart(struct restart_block *restart);
1656 * fixup_owner() - Post lock pi_state and corner case management
1657 * @uaddr: user address of the futex
1658 * @q: futex_q (contains pi_state and access to the rt_mutex)
1659 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
1661 * After attempting to lock an rt_mutex, this function is called to cleanup
1662 * the pi_state owner as well as handle race conditions that may allow us to
1663 * acquire the lock. Must be called with the hb lock held.
1665 * Returns:
1666 * 1 - success, lock taken
1667 * 0 - success, lock not taken
1668 * <0 - on error (-EFAULT)
1670 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
1672 struct task_struct *owner;
1677 * Got the lock. We might not be the anticipated owner if we
1678 * did a lock-steal - fix up the PI-state in that case:
1680 if (q->pi_state->owner != current)
1681 ret = fixup_pi_state_owner(uaddr, q, current);
1686 * Catch the rare case where the lock was released when we were on the
1687 * way back before we locked the hash bucket.
1689 if (q->pi_state->owner == current) {
1691 * Try to get the rt_mutex now. This might fail as some other
1692 * task acquired the rt_mutex after we removed ourselves from the
1693 * rt_mutex waiters list.
1695 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1701 * pi_state is incorrect, some other task did a lock steal and
1702 * we returned due to timeout or signal without taking the
1703 * rt_mutex. Too late. We can access the rt_mutex_owner without
1704 * locking, as the other task is now blocked on the hash bucket
1705 * lock. Fix the state up.
1707 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1708 ret = fixup_pi_state_owner(uaddr, q, owner);
1713 * Paranoia check. If we did not take the lock, then we should not be
1714 * the owner, nor the pending owner, of the rt_mutex.
1716 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1717 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1718 "pi-state %p\n", ret,
1719 q->pi_state->pi_mutex.owner,
1720 q->pi_state->owner);
1723 return ret ? ret : locked;
1727 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1728 * @hb: the futex hash bucket, must be locked by the caller
1729 * @q: the futex_q to queue up on
1730 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
1732 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1733 struct hrtimer_sleeper *timeout)
1736 * The task state is guaranteed to be set before another task can
1737 * wake it. set_current_state() is implemented using set_mb() and
1738 * queue_me() calls spin_unlock() upon completion, both serializing
1739 * access to the hash list and forcing another memory barrier.
1741 set_current_state(TASK_INTERRUPTIBLE);
1746 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1747 if (!hrtimer_active(&timeout->timer))
1748 timeout->task = NULL;
1752 * If we have been removed from the hash list, then another task
1753 * has tried to wake us, and we can skip the call to schedule().
1755 if (likely(!plist_node_empty(&q->list))) {
1757 * If the timer has already expired, current will already be
1758 * flagged for rescheduling. Only call schedule if there
1759 * is no timeout, or if it has yet to expire.
1761 if (!timeout || timeout->task)
1762 schedule();
1764 __set_current_state(TASK_RUNNING);
1768 * futex_wait_setup() - Prepare to wait on a futex
1769 * @uaddr: the futex userspace address
1770 * @val: the expected value
1771 * @flags: futex flags (FLAGS_SHARED, etc.)
1772 * @q: the associated futex_q
1773 * @hb: storage for hash_bucket pointer to be returned to caller
1775 * Set up the futex_q and locate the hash_bucket. Get the futex value and
1776 * compare it with the expected value. Handle atomic faults internally.
1777 * Return with the hb lock held and a q.key reference on success, and unlocked
1778 * with no q.key reference on failure.
1780 * Returns:
1781 * 0 - uaddr contains val and hb has been locked
1782 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1784 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
1785 struct futex_q *q, struct futex_hash_bucket **hb)
1791 * Access the page AFTER the hash-bucket is locked.
1792 * Order is important:
1794 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1795 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
1797 * The basic logical guarantee of a futex is that it blocks ONLY
1798 * if cond(var) is known to be true at the time of blocking, for
1799 * any cond. If we queued after testing *uaddr, that would open
1800 * a race condition where we could block indefinitely with
1801 * cond(var) false, which would violate the guarantee.
1803 * A consequence is that futex_wait() can return zero and absorb
1804 * a wakeup when *uaddr != val on entry to the syscall. This is
1805 * rare, but normal.
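 *
 * Editor's expanded sketch of that userspace pattern (illustrative
 * only; 'var' and 'cond' as in the lines above, gcc/clang atomic
 * builtins assumed):
 *
 *	uint32_t val = __atomic_load_n(&var, __ATOMIC_ACQUIRE);
 *	while (cond(val)) {
 *		// the kernel re-reads *uaddr under the hb lock below and
 *		// returns -EWOULDBLOCK instead of sleeping if it changed
 *		syscall(SYS_futex, &var, FUTEX_WAIT, val, NULL, NULL, 0);
 *		val = __atomic_load_n(&var, __ATOMIC_ACQUIRE);
 *	}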
1808 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key);
1809 if (unlikely(ret != 0))
1813 *hb = queue_lock(q);
1815 ret = get_futex_value_locked(&uval, uaddr);
1818 queue_unlock(q, *hb);
1820 ret = get_user(uval, uaddr);
1824 if (!(flags & FLAGS_SHARED))
1827 put_futex_key(&q->key);
1832 queue_unlock(q, *hb);
1838 put_futex_key(&q->key);
1842 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
1843 ktime_t *abs_time, u32 bitset)
1845 struct hrtimer_sleeper timeout, *to = NULL;
1846 struct restart_block *restart;
1847 struct futex_hash_bucket *hb;
1848 struct futex_q q = futex_q_init;
1858 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
1859 CLOCK_REALTIME : CLOCK_MONOTONIC,
1860 HRTIMER_MODE_ABS);
1861 hrtimer_init_sleeper(to, current);
1862 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1863 current->timer_slack_ns);
1868 * Prepare to wait on uaddr. On success, holds hb lock and increments
1869 * q.key refs.
1871 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
1875 /* queue_me and wait for wakeup, timeout, or a signal. */
1876 futex_wait_queue_me(hb, &q, to);
1878 /* If we were woken (and unqueued), we succeeded, whatever. */
1880 /* unqueue_me() drops q.key ref */
1881 if (!unqueue_me(&q))
1884 if (to && !to->task)
1888 * We expect signal_pending(current), but we might be the
1889 * victim of a spurious wakeup as well.
1891 if (!signal_pending(current))
1898 restart = &current_thread_info()->restart_block;
1899 restart->fn = futex_wait_restart;
1900 restart->futex.uaddr = uaddr;
1901 restart->futex.val = val;
1902 restart->futex.time = abs_time->tv64;
1903 restart->futex.bitset = bitset;
1904 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
1906 ret = -ERESTART_RESTARTBLOCK;
1910 hrtimer_cancel(&to->timer);
1911 destroy_hrtimer_on_stack(&to->timer);
1917 static long futex_wait_restart(struct restart_block *restart)
1919 u32 __user *uaddr = restart->futex.uaddr;
1920 ktime_t t, *tp = NULL;
1922 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1923 t.tv64 = restart->futex.time;
1924 tp = &t;
1926 restart->fn = do_no_restart_syscall;
1928 return (long)futex_wait(uaddr, restart->futex.flags,
1929 restart->futex.val, tp, restart->futex.bitset);
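/*
 * Editor's sketch of the userspace fast path whose failure enters
 * futex_lock_pi() below (hypothetical names; glibc's PI mutexes do the
 * equivalent):
 *
 *	uint32_t zero = 0;
 *	// uncontended 0 -> TID transition, no syscall needed
 *	if (!__atomic_compare_exchange_n(&lock, &zero, gettid(), 0,
 *					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		// contended: let the kernel queue us and boost the owner
 *		syscall(SYS_futex, &lock, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 */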
1934 * Userspace tried a 0 -> TID atomic transition of the futex value
1935 * and failed. The kernel side here does the whole locking operation:
1936 * if there are waiters then it will block, it does PI, etc. (Due to
1937 * races the kernel might see a 0 value of the futex too.)
1939 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
1940 ktime_t *time, int trylock)
1942 struct hrtimer_sleeper timeout, *to = NULL;
1943 struct futex_hash_bucket *hb;
1944 struct futex_q q = futex_q_init;
1947 if (refill_pi_state_cache())
1952 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1953 HRTIMER_MODE_ABS);
1954 hrtimer_init_sleeper(to, current);
1955 hrtimer_set_expires(&to->timer, *time);
1959 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key);
1960 if (unlikely(ret != 0))
1964 hb = queue_lock(&q);
1966 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
1967 if (unlikely(ret)) {
1970 /* We got the lock. */
1971 ret = 0;
1972 goto out_unlock_put_key;
1977 * Task is exiting and we just wait for the
1978 * exit to complete.
1980 queue_unlock(&q, hb);
1981 put_futex_key(&q.key);
1985 goto out_unlock_put_key;
1990 * Only actually queue now that the atomic ops are done:
1994 WARN_ON(!q.pi_state);
1996 * Block on the PI mutex:
1998 if (!trylock)
1999 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
2000 else {
2001 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2002 /* Fixup the trylock return value: */
2003 ret = ret ? 0 : -EWOULDBLOCK;
2006 spin_lock(q.lock_ptr);
2008 * Fixup the pi_state owner and possibly acquire the lock if we
2009 * haven't already.
2011 res = fixup_owner(uaddr, &q, !ret);
2013 * If fixup_owner() returned an error, propagate that. If it acquired
2014 * the lock, clear our -ETIMEDOUT or -EINTR.
2017 ret = (res < 0) ? res : 0;
2020 * If fixup_owner() faulted and was unable to handle the fault, unlock
2021 * it and return the fault to userspace.
2023 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2024 rt_mutex_unlock(&q.pi_state->pi_mutex);
2026 /* Unqueue and drop the lock */
2032 queue_unlock(&q, hb);
2035 put_futex_key(&q.key);
2038 destroy_hrtimer_on_stack(&to->timer);
2039 return ret != -EINTR ? ret : -ERESTARTNOINTR;
2042 queue_unlock(&q, hb);
2044 ret = fault_in_user_writeable(uaddr);
2048 if (!(flags & FLAGS_SHARED))
2051 put_futex_key(&q.key);
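/*
 * Editor's sketch of the matching userspace unlock fast path whose
 * failure enters futex_unlock_pi() below (hypothetical names):
 *
 *	uint32_t tid = gettid();
 *	// only succeeds if we own it and no WAITERS/OWNER_DIED bits are set
 *	if (!__atomic_compare_exchange_n(&lock, &tid, 0, 0,
 *					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &lock, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */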
2056 * Userspace attempted a TID -> 0 atomic transition, and failed.
2057 * This is the in-kernel slowpath: we look up the PI state (if any),
2058 * and do the rt-mutex unlock.
2060 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2062 struct futex_hash_bucket *hb;
2063 struct futex_q *this, *next;
2065 struct plist_head *head;
2066 union futex_key key = FUTEX_KEY_INIT;
2070 if (get_user(uval, uaddr))
2073 * We release only a lock we actually own:
2075 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
2078 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
2079 if (unlikely(ret != 0))
2082 hb = hash_futex(&key);
2083 spin_lock(&hb->lock);
2086 * To avoid races, try to do the TID -> 0 atomic transition
2087 * again. If it succeeds then we can return without waking
2088 * anyone else up:
2090 if (!(uval & FUTEX_OWNER_DIED))
2091 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
2094 if (unlikely(uval == -EFAULT))
2097 * Rare case: we managed to release the lock atomically,
2098 * no need to wake anyone else up:
2100 if (unlikely(uval == task_pid_vnr(current)))
2104 * Ok, other tasks may need to be woken up - check waiters
2105 * and do the wakeup if necessary:
2109 plist_for_each_entry_safe(this, next, head, list) {
2110 if (!match_futex(&this->key, &key))
2112 ret = wake_futex_pi(uaddr, uval, this);
2114 * The atomic access to the futex value
2115 * generated a pagefault, so retry the
2116 * user-access and the wakeup:
2123 * No waiters - kernel unlocks the futex:
2125 if (!(uval & FUTEX_OWNER_DIED)) {
2126 ret = unlock_futex_pi(uaddr, uval);
2132 spin_unlock(&hb->lock);
2133 put_futex_key(&key);
2139 spin_unlock(&hb->lock);
2140 put_futex_key(&key);
2142 ret = fault_in_user_writeable(uaddr);
2150 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2151 * @hb: the hash_bucket futex_q was originally enqueued on
2152 * @q: the futex_q woken while waiting to be requeued
2153 * @key2: the futex_key of the requeue target futex
2154 * @timeout: the timeout associated with the wait (NULL if none)
2156 * Detect if the task was woken on the initial futex as opposed to the requeue
2157 * target futex. If so, determine if it was a timeout or a signal that caused
2158 * the wakeup and return the appropriate error code to the caller. Must be
2159 * called with the hb lock held.
2161 * Returns:
2162 * 0 - no early wakeup detected
2163 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2166 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2167 struct futex_q *q, union futex_key *key2,
2168 struct hrtimer_sleeper *timeout)
2173 * With the hb lock held, we avoid races while we process the wakeup.
2174 * We only need to hold hb (and not hb2) to ensure atomicity as the
2175 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2176 * It can't be requeued from uaddr2 to something else since we don't
2177 * support a PI aware source futex for requeue.
2179 if (!match_futex(&q->key, key2)) {
2180 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2182 * We were woken prior to requeue by a timeout or a signal.
2183 * Unqueue the futex_q and determine which it was.
2185 plist_del(&q->list, &hb->chain);
2187 /* Handle spurious wakeups gracefully */
2189 if (timeout && !timeout->task)
2190 ret = -ETIMEDOUT;
2191 else if (signal_pending(current))
2192 ret = -ERESTARTNOINTR;
2198 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2199 * @uaddr: the futex we initially wait on (non-pi)
2200 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2201 * the same type, no requeueing from private to shared, etc.
2202 * @val: the expected value of uaddr
2203 * @abs_time: absolute timeout
2204 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
2206 * @uaddr2: the pi futex we will take prior to returning to user-space
2208 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2209 * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
2210 * complete the acquisition of the rt_mutex prior to returning to userspace.
2211 * This ensures the rt_mutex maintains an owner when it has waiters; without
2212 * one, the pi logic wouldn't know which task to boost/deboost, if there was a
2213 * need to.
2215 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2216 * via the following:
2217 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2218 * 2) wakeup on uaddr2 after a requeue
2219 * 3) signal
2220 * 4) timeout
2222 * If 3, cleanup and return -ERESTARTNOINTR.
2224 * If 2, we may then block on trying to take the rt_mutex and return via:
2225 * 5) successful lock
2226 * 6) signal
2227 * 7) timeout
2228 * 8) other lock acquisition failure
2230 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2232 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2238 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2239 u32 val, ktime_t *abs_time, u32 bitset,
2242 struct hrtimer_sleeper timeout, *to = NULL;
2243 struct rt_mutex_waiter rt_waiter;
2244 struct rt_mutex *pi_mutex = NULL;
2245 struct futex_hash_bucket *hb;
2246 union futex_key key2 = FUTEX_KEY_INIT;
2247 struct futex_q q = futex_q_init;
2255 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2256 CLOCK_REALTIME : CLOCK_MONOTONIC,
2257 HRTIMER_MODE_ABS);
2258 hrtimer_init_sleeper(to, current);
2259 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2260 current->timer_slack_ns);
2264 * The waiter is allocated on our stack, manipulated by the requeue
2265 * code while we sleep on uaddr.
2267 debug_rt_mutex_init_waiter(&rt_waiter);
2268 rt_waiter.task = NULL;
2270 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
2271 if (unlikely(ret != 0))
2275 q.rt_waiter = &rt_waiter;
2276 q.requeue_pi_key = &key2;
2279 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2280 * count.
2282 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2286 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2287 futex_wait_queue_me(hb, &q, to);
2289 spin_lock(&hb->lock);
2290 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2291 spin_unlock(&hb->lock);
2296 * In order for us to be here, we know our q.key == key2, and since
2297 * we took the hb->lock above, we also know that futex_requeue() has
2298 * completed and we no longer have to concern ourselves with a wakeup
2299 * race with the atomic proxy lock acquisition by the requeue code. The
2300 * futex_requeue dropped our key1 reference and incremented our key2
2301 * reference count.
2304 /* Check if the requeue code acquired the second futex for us. */
2307 * Got the lock. We might not be the anticipated owner if we
2308 * did a lock-steal - fix up the PI-state in that case.
2310 if (q.pi_state && (q.pi_state->owner != current)) {
2311 spin_lock(q.lock_ptr);
2312 ret = fixup_pi_state_owner(uaddr2, &q, current);
2313 spin_unlock(q.lock_ptr);
2317 * We have been woken up by futex_unlock_pi(), a timeout, or a
2318 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2319 * the pi_state.
2321 WARN_ON(!q.pi_state);
2322 pi_mutex = &q.pi_state->pi_mutex;
2323 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2324 debug_rt_mutex_free_waiter(&rt_waiter);
2326 spin_lock(q.lock_ptr);
2328 * Fixup the pi_state owner and possibly acquire the lock if we
2331 res = fixup_owner(uaddr2, &q, !ret);
2333 * If fixup_owner() returned an error, propagate that. If it
2334 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2337 ret = (res < 0) ? res : 0;
2339 /* Unqueue and drop the lock. */
2344 * If fixup_pi_state_owner() faulted and was unable to handle the
2345 * fault, unlock the rt_mutex and return the fault to userspace.
2347 if (ret == -EFAULT) {
2348 if (rt_mutex_owner(pi_mutex) == current)
2349 rt_mutex_unlock(pi_mutex);
2350 } else if (ret == -EINTR) {
2352 * We've already been requeued, but cannot restart by calling
2353 * futex_lock_pi() directly. We could restart this syscall, but
2354 * it would detect that the user space "val" changed and return
2355 * -EWOULDBLOCK. Save the overhead of the restart and return
2356 * -EWOULDBLOCK directly.
2362 put_futex_key(&q.key);
2364 put_futex_key(&key2);
2368 hrtimer_cancel(&to->timer);
2369 destroy_hrtimer_on_stack(&to->timer);
2375 * Support for robust futexes: the kernel cleans up held futexes at
2376 * thread exit time.
2378 * Implementation: user-space maintains a per-thread list of locks it
2379 * is holding. Upon do_exit(), the kernel carefully walks this list,
2380 * and marks all locks that are owned by this thread with the
2381 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2382 * always manipulated with the lock held, so the list is private and
2383 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2384 * field, to allow the kernel to clean up if the thread dies after
2385 * acquiring the lock, but just before it could have added itself to
2386 * the list. There can only be one such pending lock.
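 *
 * Editor's sketch of the userspace side (glibc does this internally;
 * 'struct my_mutex' and its embedded 'list' link are hypothetical):
 *
 *	static struct robust_list_head head = {
 *		.list = { .next = &head.list },	// empty circular list
 *		.futex_offset = offsetof(struct my_mutex, futex_word)
 *				- offsetof(struct my_mutex, list),
 *		.list_op_pending = NULL,
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));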
2390 * sys_set_robust_list() - Set the robust-futex list head of a task
2391 * @head: pointer to the list-head
2392 * @len: length of the list-head, as userspace expects
2394 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2395 size_t, len)
2397 if (!futex_cmpxchg_enabled)
2400 * The kernel knows only one size for now:
2402 if (unlikely(len != sizeof(*head)))
2405 current->robust_list = head;
2411 * sys_get_robust_list() - Get the robust-futex list head of a task
2412 * @pid: pid of the process [zero for current task]
2413 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2414 * @len_ptr: pointer to a length field, the kernel fills in the header size
2416 SYSCALL_DEFINE3(get_robust_list, int, pid,
2417 struct robust_list_head __user * __user *, head_ptr,
2418 size_t __user *, len_ptr)
2420 struct robust_list_head __user *head;
2422 const struct cred *cred = current_cred(), *pcred;
2424 if (!futex_cmpxchg_enabled)
2428 head = current->robust_list;
2430 struct task_struct *p;
2434 p = find_task_by_vpid(pid);
2438 pcred = __task_cred(p);
2439 if (cred->euid != pcred->euid &&
2440 cred->euid != pcred->uid &&
2441 !capable(CAP_SYS_PTRACE))
2443 head = p->robust_list;
2447 if (put_user(sizeof(*head), len_ptr))
2449 return put_user(head, head_ptr);
2458 * Process a futex-list entry, check whether it's owned by the
2459 * dying task, and do notification if so:
2461 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2463 u32 uval, nval, mval;
2466 if (get_user(uval, uaddr))
2469 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2471 * Ok, this dying thread is truly holding a futex
2472 * of interest. Set the OWNER_DIED bit atomically
2473 * via cmpxchg, and if the value had FUTEX_WAITERS
2474 * set, wake up a waiter (if any). (We have to do a
2475 * futex_wake() even if OWNER_DIED is already set -
2476 * to handle the rare but possible case of recursive
2477 * thread-death.) The rest of the cleanup is done in
2478 * userspace.
2480 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2481 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
2483 if (nval == -EFAULT)
2490 * Wake robust non-PI futexes here. The wakeup of
2491 * PI futexes happens in exit_pi_state():
2493 if (!pi && (uval & FUTEX_WAITERS))
2494 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2500 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2502 static inline int fetch_robust_entry(struct robust_list __user **entry,
2503 struct robust_list __user * __user *head,
2504 unsigned int *pi)
2506 unsigned long uentry;
2508 if (get_user(uentry, (unsigned long __user *)head))
2511 *entry = (void __user *)(uentry & ~1UL);
2512 *pi = uentry & 1;
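/*
 * Editor's illustration (hypothetical userspace helper): the entry
 * pointer doubles as the PI flag, so links are stored tagged:
 *
 *	entry = (struct robust_list *)
 *		((unsigned long)&mutex->list | (is_pi ? 1UL : 0UL));
 */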
2518 * Walk curr->robust_list (very carefully, it's a userspace list!)
2519 * and mark any locks found there dead, and notify any waiters.
2521 * We silently return on any sign of a list-walking problem.
2523 void exit_robust_list(struct task_struct *curr)
2525 struct robust_list_head __user *head = curr->robust_list;
2526 struct robust_list __user *entry, *next_entry, *pending;
2527 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2528 unsigned int uninitialized_var(next_pi);
2529 unsigned long futex_offset;
2532 if (!futex_cmpxchg_enabled)
2536 * Fetch the list head (which was registered earlier, via
2537 * sys_set_robust_list()):
2539 if (fetch_robust_entry(&entry, &head->list.next, &pi))
2542 * Fetch the relative futex offset:
2544 if (get_user(futex_offset, &head->futex_offset))
2547 * Fetch any possibly pending lock-add first, and handle it
2550 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2553 next_entry = NULL; /* avoid warning with gcc */
2554 while (entry != &head->list) {
2556 * Fetch the next entry in the list before calling
2557 * handle_futex_death:
2559 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2561 * A pending lock might already be on the list, so
2562 * don't process it twice:
2564 if (entry != pending)
2565 if (handle_futex_death((void __user *)entry + futex_offset,
2566 curr, pi))
2567 return;
2573 * Avoid excessively long or circular lists:
2582 handle_futex_death((void __user *)pending + futex_offset,
2583 curr, pip);
2586 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2587 u32 __user *uaddr2, u32 val2, u32 val3)
2589 int ret = -ENOSYS, cmd = op & FUTEX_CMD_MASK;
2590 unsigned int flags = 0;
2592 if (!(op & FUTEX_PRIVATE_FLAG))
2593 flags |= FLAGS_SHARED;
2595 if (op & FUTEX_CLOCK_REALTIME) {
2596 flags |= FLAGS_CLOCKRT;
2597 if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2603 val3 = FUTEX_BITSET_MATCH_ANY;
2604 case FUTEX_WAIT_BITSET:
2605 ret = futex_wait(uaddr, flags, val, timeout, val3);
2608 val3 = FUTEX_BITSET_MATCH_ANY;
2609 case FUTEX_WAKE_BITSET:
2610 ret = futex_wake(uaddr, flags, val, val3);
2613 ret = futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
2615 case FUTEX_CMP_REQUEUE:
2616 ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
2619 ret = futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2622 if (futex_cmpxchg_enabled)
2623 ret = futex_lock_pi(uaddr, flags, val, timeout, 0);
2625 case FUTEX_UNLOCK_PI:
2626 if (futex_cmpxchg_enabled)
2627 ret = futex_unlock_pi(uaddr, flags);
2629 case FUTEX_TRYLOCK_PI:
2630 if (futex_cmpxchg_enabled)
2631 ret = futex_lock_pi(uaddr, flags, 0, timeout, 1);
2633 case FUTEX_WAIT_REQUEUE_PI:
2634 val3 = FUTEX_BITSET_MATCH_ANY;
2635 ret = futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
2638 case FUTEX_CMP_REQUEUE_PI:
2639 ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
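/*
 * Editor's usage sketch for the FUTEX_CMP_REQUEUE case above, in the
 * style of pthread_cond_broadcast(): wake one waiter on 'cond' and move
 * the rest onto 'mutex', bailing out with -EAGAIN if *cond no longer
 * equals cond_val ('cond', 'mutex' and 'cond_val' are hypothetical).
 * nr_requeue travels in the timeout argument slot:
 *
 *	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE, 1,
 *		(void *)(unsigned long)INT_MAX, &mutex, cond_val);
 */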
2648 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2649 struct timespec __user *, utime, u32 __user *, uaddr2,
2650 u32, val3)
2652 struct timespec ts;
2653 ktime_t t, *tp = NULL;
2654 u32 val2 = 0;
2655 int cmd = op & FUTEX_CMD_MASK;
2657 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2658 cmd == FUTEX_WAIT_BITSET ||
2659 cmd == FUTEX_WAIT_REQUEUE_PI)) {
2660 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2662 if (!timespec_valid(&ts))
2665 t = timespec_to_ktime(ts);
2666 if (cmd == FUTEX_WAIT)
2667 t = ktime_add_safe(ktime_get(), t);
2668 tp = &t;
2671 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
2672 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
2674 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2675 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
2676 val2 = (u32) (unsigned long) utime;
2678 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
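/*
 * Editor's usage sketch: a plain FUTEX_WAIT takes a *relative* timespec,
 * which the code above converts to an absolute expiry via
 * ktime_add_safe() ('word' and 'expected' are hypothetical):
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	syscall(SYS_futex, &word, FUTEX_WAIT, expected, &ts, NULL, 0);
 */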
2681 static int __init futex_init(void)
2687 * This will fail and we want it. Some arch implementations do
2688 * runtime detection of the futex_atomic_cmpxchg_inatomic()
2689 * functionality. We want to know that before we call in any
2690 * of the complex code paths. Also we want to prevent
2691 * registration of robust lists in that case. NULL is
2692 * guaranteed to fault and we get -EFAULT on functional
2693 * implementation, the non-functional ones will return
2694 * -ENOSYS.
2696 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2697 if (curval == -EFAULT)
2698 futex_cmpxchg_enabled = 1;
2700 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2701 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2702 spin_lock_init(&futex_queues[i].lock);
2707 __initcall(futex_init);