kernel/futex.c
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED            0x01
#define FLAGS_CLOCKRT           0x02
#define FLAGS_HAS_TIMEOUT       0x04

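/*
 * Editorial note (not in the original sources): these flags are derived
 * from the futex opcode by do_futex() further down in this file, roughly
 * along the lines of
 *
 *      if (!(op & FUTEX_PRIVATE_FLAG))
 *              flags |= FLAGS_SHARED;
 *      if (op & FUTEX_CLOCK_REALTIME)
 *              flags |= FLAGS_CLOCKRT;
 *
 * so that an operation restarted after a signal sees the same sharing and
 * clock semantics it started with.
 */
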
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
        /*
         * list of 'owned' pi_state instances - these have to be
         * cleaned up in do_exit() if the task exits prematurely:
         */
        struct list_head list;

        /*
         * The PI object:
         */
        struct rt_mutex pi_mutex;

        struct task_struct *owner;
        atomic_t refcount;

        union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:               priority-sorted list of tasks waiting on this futex
 * @task:               the task waiting on the futex
 * @lock_ptr:           the hash bucket lock
 * @key:                the key the futex is hashed on
 * @pi_state:           optional priority inheritance state
 * @rt_waiter:          rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:     the requeue_pi target futex key
 * @bitset:             bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
        struct plist_node list;

        struct task_struct *task;
        spinlock_t *lock_ptr;
        union futex_key key;
        struct futex_pi_state *pi_state;
        struct rt_mutex_waiter *rt_waiter;
        union futex_key *requeue_pi_key;
        u32 bitset;
};

static const struct futex_q futex_q_init = {
        /* list gets initialized in queue_me() */
        .key = FUTEX_KEY_INIT,
        .bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
        spinlock_t lock;
        struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
        u32 hash = jhash2((u32 *)&key->both.word,
                          (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
                          key->both.offset);
        return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
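
/*
 * Illustrative usage sketch (editorial addition, not kernel code): all
 * waiters that obtained equal keys from get_futex_key() hash to the same
 * bucket and therefore serialize on the same hb->lock:
 *
 *      union futex_key key = FUTEX_KEY_INIT;
 *
 *      if (!get_futex_key(uaddr, fshared, &key)) {
 *              struct futex_hash_bucket *hb = hash_futex(&key);
 *              spin_lock(&hb->lock);
 *              ...
 *              spin_unlock(&hb->lock);
 *              put_futex_key(&key);
 *      }
 */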

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
        return (key1 && key2
                && key1->both.word == key2->both.word
                && key1->both.ptr == key2->both.ptr
                && key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr)
                return;

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                ihold(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                atomic_inc(&key->private.mm->mm_count);
                break;
        }
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr) {
                /* If we're here then we tried to put a key we failed to get */
                WARN_ON_ONCE(1);
                return;
        }

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                iput(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                mmdrop(key->private.mm);
                break;
        }
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:      virtual address of the futex
 * @fshared:    0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:        address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
        struct page *page, *page_head;
        int err;

        /*
         * The futex address must be "naturally" aligned.
         */
        key->both.offset = address % PAGE_SIZE;
        if (unlikely((address % sizeof(u32)) != 0))
                return -EINVAL;
        address -= key->both.offset;

        /*
         * PROCESS_PRIVATE futexes are fast.
         * As the mm cannot disappear under us and the 'key' only needs the
         * virtual address, we don't even have to find the underlying vma.
         * Note: We do have to check that 'uaddr' is a valid user address,
         *       but access_ok() should be faster than find_vma().
         */
        if (!fshared) {
                if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
                        return -EFAULT;
                key->private.mm = mm;
                key->private.address = address;
                get_futex_key_refs(key);
                return 0;
        }

again:
        err = get_user_pages_fast(address, 1, 1, &page);
        if (err < 0)
                return err;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        page_head = page;
        if (unlikely(PageTail(page))) {
                put_page(page);
                /* serialize against __split_huge_page_splitting() */
                local_irq_disable();
                if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
                        page_head = compound_head(page);
                        /*
                         * page_head is a valid pointer but we must pin
                         * it before taking the PG_lock and/or
                         * PG_compound_lock. The moment we re-enable
                         * irqs __split_huge_page_splitting() can
                         * return and the head page can be freed from
                         * under us. We can't take the PG_lock and/or
                         * PG_compound_lock on a page that could be
                         * freed from under us.
                         */
                        if (page != page_head) {
                                get_page(page_head);
                                put_page(page);
                        }
                        local_irq_enable();
                } else {
                        local_irq_enable();
                        goto again;
                }
        }
#else
        page_head = compound_head(page);
        if (page != page_head) {
                get_page(page_head);
                put_page(page);
        }
#endif

        lock_page(page_head);
        if (!page_head->mapping) {
                unlock_page(page_head);
                put_page(page_head);
                goto again;
        }

        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.
         */
        if (PageAnon(page_head)) {
                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;
        } else {
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
                key->shared.inode = page_head->mapping->host;
                key->shared.pgoff = page_head->index;
        }

        get_futex_key_refs(key);

        unlock_page(page_head);
        put_page(page_head);
        return 0;
}

static inline void put_futex_key(union futex_key *key)
{
        drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:      pointer to faulting user space address
 *
 * Slow path to fix up the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we might as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
        struct mm_struct *mm = current->mm;
        int ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(current, mm, (unsigned long)uaddr,
                             1, 1, 0, NULL, NULL);
        up_read(&mm->mmap_sem);

        return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:         the hash bucket the futex_q's reside in
 * @key:        the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
                                        union futex_key *key)
{
        struct futex_q *this;

        plist_for_each_entry(this, &hb->chain, list) {
                if (match_futex(&this->key, key))
                        return this;
        }
        return NULL;
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
        u32 curval;

        pagefault_disable();
        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
        pagefault_enable();

        return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
        int ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}
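
/*
 * Editorial note on the two helpers above: both run with pagefaults
 * disabled, so an access that would fault fails immediately instead of
 * sleeping while a hash bucket lock is held.  The pattern throughout
 * this file is to drop the locks on failure, fault the page in via
 * get_user() or fault_in_user_writeable(), and then retry.
 */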


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
        struct futex_pi_state *pi_state;

        if (likely(current->pi_state_cache))
                return 0;

        pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

        if (!pi_state)
                return -ENOMEM;

        INIT_LIST_HEAD(&pi_state->list);
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);
        pi_state->key = FUTEX_KEY_INIT;

        current->pi_state_cache = pi_state;

        return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
        struct futex_pi_state *pi_state = current->pi_state_cache;

        WARN_ON(!pi_state);
        current->pi_state_cache = NULL;

        return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
        if (!atomic_dec_and_test(&pi_state->refcount))
                return;

        /*
         * If pi_state->owner is NULL, the owner is most probably dying
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
                raw_spin_lock_irq(&pi_state->owner->pi_lock);
                list_del_init(&pi_state->list);
                raw_spin_unlock_irq(&pi_state->owner->pi_lock);

                rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
        }

        if (current->pi_state_cache)
                kfree(pi_state);
        else {
                /*
                 * pi_state->list is already empty.
                 * clear pi_state->owner.
                 * refcount is at 0 - put it back to 1.
                 */
                pi_state->owner = NULL;
                atomic_set(&pi_state->refcount, 1);
                current->pi_state_cache = pi_state;
        }
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
        struct task_struct *p;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p)
                get_task_struct(p);

        rcu_read_unlock();

        return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
        union futex_key key = FUTEX_KEY_INIT;

        if (!futex_cmpxchg_enabled)
                return;
        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
         * versus waiters unqueueing themselves:
         */
        raw_spin_lock_irq(&curr->pi_lock);
        while (!list_empty(head)) {

                next = head->next;
                pi_state = list_entry(next, struct futex_pi_state, list);
                key = pi_state->key;
                hb = hash_futex(&key);
                raw_spin_unlock_irq(&curr->pi_lock);

                spin_lock(&hb->lock);

                raw_spin_lock_irq(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
                 */
                if (head->next != next) {
                        spin_unlock(&hb->lock);
                        continue;
                }

                WARN_ON(pi_state->owner != curr);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;
                raw_spin_unlock_irq(&curr->pi_lock);

                rt_mutex_unlock(&pi_state->pi_mutex);

                spin_unlock(&hb->lock);

                raw_spin_lock_irq(&curr->pi_lock);
        }
        raw_spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                union futex_key *key, struct futex_pi_state **ps)
{
        struct futex_pi_state *pi_state = NULL;
        struct futex_q *this, *next;
        struct plist_head *head;
        struct task_struct *p;
        pid_t pid = uval & FUTEX_TID_MASK;

        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, key)) {
                        /*
                         * Another waiter already exists - bump up
                         * the refcount and return its pi_state:
                         */
                        pi_state = this->pi_state;
                        /*
                         * Userspace might have messed up non-PI and PI futexes
                         */
                        if (unlikely(!pi_state))
                                return -EINVAL;

                        WARN_ON(!atomic_read(&pi_state->refcount));

                        /*
                         * When pi_state->owner is NULL then the owner died
                         * and another waiter is on the fly. pi_state->owner
                         * is fixed up by the task which acquires
                         * pi_state->rt_mutex.
                         *
                         * We do not check for pid == 0 which can happen when
                         * the owner died and robust_list_exit() cleared the
                         * TID.
                         */
                        if (pid && pi_state->owner) {
                                /*
                                 * Bail out if user space manipulated the
                                 * futex value.
                                 */
                                if (pid != task_pid_vnr(pi_state->owner))
                                        return -EINVAL;
                        }

                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;

                        return 0;
                }
        }

        /*
         * We are the first waiter - try to look up the real owner and attach
         * the new pi_state to it, but bail out when TID = 0
         */
        if (!pid)
                return -ESRCH;
        p = futex_find_get_task(pid);
        if (!p)
                return -ESRCH;

        /*
         * We need to look at the task state flags to figure out
         * whether the task is exiting. To protect against do_exit()
         * changing the task flags, we do this protected by
         * p->pi_lock:
         */
        raw_spin_lock_irq(&p->pi_lock);
        if (unlikely(p->flags & PF_EXITING)) {
                /*
                 * The task is on the way out. When PF_EXITPIDONE is
                 * set, we know that the task has finished the
                 * cleanup:
                 */
                int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

                raw_spin_unlock_irq(&p->pi_lock);
                put_task_struct(p);
                return ret;
        }

        pi_state = alloc_pi_state();

        /*
         * Initialize the pi_mutex in locked state and make 'p'
         * the owner of it:
         */
        rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

        /* Store the key for possible exit cleanups: */
        pi_state->key = *key;

        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
        pi_state->owner = p;
        raw_spin_unlock_irq(&p->pi_lock);

        put_task_struct(p);

        *ps = pi_state;

        return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:              the pi futex user address
 * @hb:                 the pi futex hash bucket
 * @key:                the futex key associated with uaddr and hb
 * @ps:                 the pi_state pointer where we store the result of the
 *                      lookup
 * @task:               the task to perform the atomic lock work for.  This will
 *                      be "current" except in the case of requeue pi.
 * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Returns:
 *  0 - ready to wait
 *  1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
                                union futex_key *key,
                                struct futex_pi_state **ps,
                                struct task_struct *task, int set_waiters)
{
        int lock_taken, ret, ownerdied = 0;
        u32 uval, newval, curval;

retry:
        ret = lock_taken = 0;

        /*
         * To avoid races, we attempt to take the lock here again
         * (by doing a 0 -> TID atomic cmpxchg), while holding all
         * the locks. It will most likely not succeed.
         */
        newval = task_pid_vnr(task);
        if (set_waiters)
                newval |= FUTEX_WAITERS;

        curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

        if (unlikely(curval == -EFAULT))
                return -EFAULT;

        /*
         * Detect deadlocks.
         */
        if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task)))
                return -EDEADLK;

        /*
         * Surprise - we got the lock. Just return to userspace:
         */
        if (unlikely(!curval))
                return 1;

        uval = curval;

        /*
         * Set the FUTEX_WAITERS flag, so the owner will know it has someone
         * to wake at the next unlock.
         */
        newval = curval | FUTEX_WAITERS;

        /*
         * There are two cases where a futex might have no valid owner:
         * we saw OWNER_DIED on a previous iteration (ownerdied is set),
         * or the owner TID is 0 because the dying owner's robust-list
         * cleanup cleared it. In both cases we take over the futex
         * unconditionally, keeping the OWNER_DIED bit.
         *
         * This is safe as we are protected by the hash bucket lock !
         */
        if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
                /* Keep the OWNER_DIED bit */
                newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
                ownerdied = 0;
                lock_taken = 1;
        }

        curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

        if (unlikely(curval == -EFAULT))
                return -EFAULT;
        if (unlikely(curval != uval))
                goto retry;

        /*
         * We took the lock due to owner-died takeover.
         */
        if (unlikely(lock_taken))
                return 1;

        /*
         * We don't have the lock. Look up the PI state (or create it if
         * we are the first waiter):
         */
        ret = lookup_pi_state(uval, hb, key, ps);

        if (unlikely(ret)) {
                switch (ret) {
                case -ESRCH:
                        /*
                         * No owner found for this futex. Check if the
                         * OWNER_DIED bit is set to figure out whether
                         * this is a robust futex or not.
                         */
                        if (get_futex_value_locked(&curval, uaddr))
                                return -EFAULT;

                        /*
                         * We simply start over in case of a robust
                         * futex. The code above will take the futex
                         * and return happy.
                         */
                        if (curval & FUTEX_OWNER_DIED) {
                                ownerdied = 1;
                                goto retry;
                        }
                default:
                        break;
                }
        }

        return ret;
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:  The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
        struct futex_hash_bucket *hb;

        if (WARN_ON(!q->lock_ptr || !spin_is_locked(q->lock_ptr)
                        || plist_node_empty(&q->list)))
                return;

        hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
        plist_del(&q->list, &hb->chain);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
        struct task_struct *p = q->task;

        /*
         * We set q->lock_ptr = NULL _before_ we wake up the task. If
         * a non-futex wake up happens on another CPU then the task
         * might exit and p would dereference a non-existing task
         * struct. Prevent this by holding a reference on p across the
         * wake up.
         */
        get_task_struct(p);

        __unqueue_futex(q);
        /*
         * The waiting task can free the futex_q as soon as
         * q->lock_ptr = NULL is written, without taking any locks. A
         * memory barrier is required here to prevent the following
         * store to lock_ptr from getting ahead of the plist_del.
         */
        smp_wmb();
        q->lock_ptr = NULL;

        wake_up_state(p, TASK_NORMAL);
        put_task_struct(p);
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
        struct task_struct *new_owner;
        struct futex_pi_state *pi_state = this->pi_state;
        u32 curval, newval;

        if (!pi_state)
                return -EINVAL;

        /*
         * If current does not own the pi_state then the futex is
         * inconsistent and user space fiddled with the futex value.
         */
        if (pi_state->owner != current)
                return -EINVAL;

        raw_spin_lock(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

        /*
         * It is possible that the next waiter (the one that brought
         * this owner to the kernel) timed out and is no longer
         * waiting on the lock.
         */
        if (!new_owner)
                new_owner = this->task;

        /*
         * We pass it to the next owner. (The WAITERS bit is always
         * kept enabled while there is PI state around. We must also
         * preserve the owner died bit.)
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
                int ret = 0;

                newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

                if (curval == -EFAULT)
                        ret = -EFAULT;
                else if (curval != uval)
                        ret = -EINVAL;
                if (ret) {
                        raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
                        return ret;
                }
        }

        raw_spin_lock_irq(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        raw_spin_unlock_irq(&pi_state->owner->pi_lock);

        raw_spin_lock_irq(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
        raw_spin_unlock_irq(&new_owner->pi_lock);

        raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
        rt_mutex_unlock(&pi_state->pi_mutex);

        return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
        u32 oldval;

        /*
         * There is no waiter, so we unlock the futex. The OWNER_DIED
         * bit need not be preserved here. We are the owner:
         */
        oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

        if (oldval == -EFAULT)
                return oldval;
        if (oldval != uval)
                return -EAGAIN;

        return 0;
}
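
/*
 * Editorial note: the -EAGAIN above means the futex word changed between
 * the caller's read and the cmpxchg (for instance, a waiter set
 * FUTEX_WAITERS in the meantime), so this fast unlock is stale and the
 * unlock must be retried against the new value.
 */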

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        if (hb1 <= hb2) {
                spin_lock(&hb1->lock);
                if (hb1 < hb2)
                        spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
        } else { /* hb1 > hb2 */
                spin_lock(&hb2->lock);
                spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
        }
}
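
/*
 * Editorial note: taking the two bucket locks in address order (lower
 * address first) gives every caller the same global ordering, which is
 * what prevents ABBA deadlocks between concurrent wake-op/requeue calls;
 * the _nested annotation just tells lockdep that acquiring two locks of
 * the same class here is intentional.
 */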

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        struct plist_head *head;
        union futex_key key = FUTEX_KEY_INIT;
        int ret;

        if (!bitset)
                return -EINVAL;

        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
        if (unlikely(ret != 0))
                goto out;

        hb = hash_futex(&key);
        spin_lock(&hb->lock);
        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, &key)) {
                        if (this->pi_state || this->rt_waiter) {
                                ret = -EINVAL;
                                break;
                        }

                        /* Check if one of the bits is set in both bitsets */
                        if (!(this->bitset & bitset))
                                continue;

                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        spin_unlock(&hb->lock);
        put_futex_key(&key);
out:
        return ret;
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
              int nr_wake, int nr_wake2, int op)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head;
        struct futex_q *this, *next;
        int ret, op_ret;

retry:
        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

retry_private:
        double_lock_hb(hb1, hb2);
        op_ret = futex_atomic_op_inuser(op, uaddr2);
        if (unlikely(op_ret < 0)) {

                double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
                /*
                 * we don't get EFAULT from MMU faults if we don't have an MMU,
                 * but we might get them from range checking
                 */
                ret = op_ret;
                goto out_put_keys;
#endif

                if (unlikely(op_ret != -EFAULT)) {
                        ret = op_ret;
                        goto out_put_keys;
                }

                ret = fault_in_user_writeable(uaddr2);
                if (ret)
                        goto out_put_keys;

                if (!(flags & FLAGS_SHARED))
                        goto retry_private;

                put_futex_key(&key2);
                put_futex_key(&key1);
                goto retry;
        }

        head = &hb1->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, &key1)) {
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        if (op_ret > 0) {
                head = &hb2->chain;

                op_ret = 0;
                plist_for_each_entry_safe(this, next, head, list) {
                        if (match_futex(&this->key, &key2)) {
                                wake_futex(this);
                                if (++op_ret >= nr_wake2)
                                        break;
                        }
                }
                ret += op_ret;
        }

        double_unlock_hb(hb1, hb2);
out_put_keys:
        put_futex_key(&key2);
out_put_key1:
        put_futex_key(&key1);
out:
        return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:          the futex_q to requeue
 * @hb1:        the source hash_bucket
 * @hb2:        the target hash_bucket
 * @key2:       the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
                   struct futex_hash_bucket *hb2, union futex_key *key2)
{

        /*
         * If key1 and key2 hash to the same bucket, no need to
         * requeue.
         */
        if (likely(&hb1->chain != &hb2->chain)) {
                plist_del(&q->list, &hb1->chain);
                plist_add(&q->list, &hb2->chain);
                q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
                q->list.plist.spinlock = &hb2->lock;
#endif
        }
        get_futex_key_refs(key2);
        q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:          the futex_q
 * @key:        the key of the requeue target futex
 * @hb:         the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fix up the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
                           struct futex_hash_bucket *hb)
{
        get_futex_key_refs(key);
        q->key = *key;

        __unqueue_futex(q);

        WARN_ON(!q->rt_waiter);
        q->rt_waiter = NULL;

        q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
        q->list.plist.spinlock = &hb->lock;
#endif

        wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:            the user address of the to futex
 * @hb1:                the from futex hash bucket, must be locked by the caller
 * @hb2:                the to futex hash bucket, must be locked by the caller
 * @key1:               the from futex key
 * @key2:               the to futex key
 * @ps:                 address to store the pi_state pointer
 * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try to get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Returns:
 *  0 - failed to acquire the lock atomically
 *  1 - acquired the lock
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
                                 struct futex_hash_bucket *hb1,
                                 struct futex_hash_bucket *hb2,
                                 union futex_key *key1, union futex_key *key2,
                                 struct futex_pi_state **ps, int set_waiters)
{
        struct futex_q *top_waiter = NULL;
        u32 curval;
        int ret;

        if (get_futex_value_locked(&curval, pifutex))
                return -EFAULT;

        /*
         * Find the top_waiter and determine if there are additional waiters.
         * If the caller intends to requeue more than 1 waiter to pifutex,
         * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
         * as we have means to handle the possible fault.  If not, don't set
         * the bit unnecessarily as it will force the subsequent unlock to
         * enter the kernel.
         */
        top_waiter = futex_top_waiter(hb1, key1);

        /* There are no waiters, nothing for us to do. */
        if (!top_waiter)
                return 0;

        /* Ensure we requeue to the expected futex. */
        if (!match_futex(top_waiter->requeue_pi_key, key2))
                return -EINVAL;

        /*
         * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
         * the contended case or if set_waiters is 1.  The pi_state is returned
         * in ps in contended cases.
         */
        ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
                                   set_waiters);
        if (ret == 1)
                requeue_pi_wake_futex(top_waiter, key2, hb2);

        return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:     source futex user address
 * @flags:      futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:     target futex user address
 * @nr_wake:    number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
 * @cmpval:     @uaddr1 expected value (or %NULL)
 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
 *              pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Returns:
 * >=0 - on success, the number of tasks requeued or woken
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
                         u32 __user *uaddr2, int nr_wake, int nr_requeue,
                         u32 *cmpval, int requeue_pi)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        int drop_count = 0, task_count = 0, ret;
        struct futex_pi_state *pi_state = NULL;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
        u32 curval2;

        if (requeue_pi) {
                /*
                 * requeue_pi requires a pi_state, try to allocate it now
                 * without any locks in case it fails.
                 */
                if (refill_pi_state_cache())
                        return -ENOMEM;
                /*
                 * requeue_pi must wake as many tasks as it can, up to nr_wake
                 * + nr_requeue, since it acquires the rt_mutex prior to
                 * returning to userspace, so as to not leave the rt_mutex with
                 * waiters and no owner.  However, second and third wake-ups
                 * cannot be predicted as they involve race conditions with the
                 * first wake and a fault while looking up the pi_state.  Both
                 * pthread_cond_signal() and pthread_cond_broadcast() should
                 * use nr_wake=1.
                 */
                if (nr_wake != 1)
                        return -EINVAL;
        }

retry:
        if (pi_state != NULL) {
                /*
                 * We will have to look up the pi_state again, so free this one
                 * to keep the accounting correct.
                 */
                free_pi_state(pi_state);
                pi_state = NULL;
        }

        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

retry_private:
        double_lock_hb(hb1, hb2);

        if (likely(cmpval != NULL)) {
                u32 curval;

                ret = get_futex_value_locked(&curval, uaddr1);

                if (unlikely(ret)) {
                        double_unlock_hb(hb1, hb2);

                        ret = get_user(curval, uaddr1);
                        if (ret)
                                goto out_put_keys;

                        if (!(flags & FLAGS_SHARED))
                                goto retry_private;

                        put_futex_key(&key2);
                        put_futex_key(&key1);
                        goto retry;
                }
                if (curval != *cmpval) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
        }

        if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
                /*
                 * Attempt to acquire uaddr2 and wake the top waiter. If we
                 * intend to requeue waiters, force setting the FUTEX_WAITERS
                 * bit.  We force this here where we are able to easily handle
                 * faults rather than in the requeue loop below.
                 */
                ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
                                                 &key2, &pi_state, nr_requeue);

                /*
                 * At this point the top_waiter has either taken uaddr2 or is
                 * waiting on it.  If the former, then the pi_state will not
                 * exist yet, look it up one more time to ensure we have a
                 * reference to it.
                 */
                if (ret == 1) {
                        WARN_ON(pi_state);
                        drop_count++;
                        task_count++;
                        ret = get_futex_value_locked(&curval2, uaddr2);
                        if (!ret)
                                ret = lookup_pi_state(curval2, hb2, &key2,
                                                      &pi_state);
                }

                switch (ret) {
                case 0:
                        break;
                case -EFAULT:
                        double_unlock_hb(hb1, hb2);
                        put_futex_key(&key2);
                        put_futex_key(&key1);
                        ret = fault_in_user_writeable(uaddr2);
                        if (!ret)
                                goto retry;
                        goto out;
                case -EAGAIN:
                        /* The owner was exiting, try again. */
                        double_unlock_hb(hb1, hb2);
                        put_futex_key(&key2);
                        put_futex_key(&key1);
                        cond_resched();
                        goto retry;
                default:
                        goto out_unlock;
                }
        }

        head1 = &hb1->chain;
        plist_for_each_entry_safe(this, next, head1, list) {
                if (task_count - nr_wake >= nr_requeue)
                        break;

                if (!match_futex(&this->key, &key1))
                        continue;

                /*
                 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
                 * be paired with each other and no other futex ops.
                 */
                if ((requeue_pi && !this->rt_waiter) ||
                    (!requeue_pi && this->rt_waiter)) {
                        ret = -EINVAL;
                        break;
                }

                /*
                 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
                 * lock, we already woke the top_waiter.  If not, it will be
                 * woken by futex_unlock_pi().
                 */
                if (++task_count <= nr_wake && !requeue_pi) {
                        wake_futex(this);
                        continue;
                }

                /* Ensure we requeue to the expected futex for requeue_pi. */
                if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
                        ret = -EINVAL;
                        break;
                }

                /*
                 * Requeue nr_requeue waiters and possibly one more in the case
                 * of requeue_pi if we couldn't acquire the lock atomically.
                 */
                if (requeue_pi) {
                        /* Prepare the waiter to take the rt_mutex. */
                        atomic_inc(&pi_state->refcount);
                        this->pi_state = pi_state;
                        ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
                                                        this->rt_waiter,
                                                        this->task, 1);
                        if (ret == 1) {
                                /* We got the lock. */
                                requeue_pi_wake_futex(this, &key2, hb2);
                                drop_count++;
                                continue;
                        } else if (ret) {
                                /* -EDEADLK */
                                this->pi_state = NULL;
                                free_pi_state(pi_state);
                                goto out_unlock;
                        }
                }
                requeue_futex(this, hb1, hb2, &key2);
                drop_count++;
        }

out_unlock:
        double_unlock_hb(hb1, hb2);

        /*
         * drop_futex_key_refs() must be called outside the spinlocks. During
         * the requeue we moved futex_q's from the hash bucket at key1 to the
         * one at key2 and updated their key pointer.  We no longer need to
         * hold the references to key1.
         */
        while (--drop_count >= 0)
                drop_futex_key_refs(&key1);

out_put_keys:
        put_futex_key(&key2);
out_put_key1:
        put_futex_key(&key1);
out:
        if (pi_state != NULL)
                free_pi_state(pi_state);
        return ret ? ret : task_count;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
        __acquires(&hb->lock)
{
        struct futex_hash_bucket *hb;

        hb = hash_futex(&q->key);
        q->lock_ptr = &hb->lock;

        spin_lock(&hb->lock);
        return hb;
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
        __releases(&hb->lock)
{
        spin_unlock(&hb->lock);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:  The futex_q to enqueue
 * @hb: The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of the woken task (see futex_wait_requeue_pi()
 * for an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
        __releases(&hb->lock)
{
        int prio;

        /*
         * The priority used to register this element is
         * - either the real thread-priority for the real-time threads
         * (i.e. threads with a priority lower than MAX_RT_PRIO)
         * - or MAX_RT_PRIO for non-RT threads.
         * Thus, all RT-threads are woken first in priority order, and
         * the others are woken last, in FIFO order.
         */
        prio = min(current->normal_prio, MAX_RT_PRIO);

        plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
        q->list.plist.spinlock = &hb->lock;
#endif
        plist_add(&q->list, &hb->chain);
        q->task = current;
        spin_unlock(&hb->lock);
}
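
/*
 * Editorial example: an RT thread with normal_prio 10 is queued at plist
 * priority 10 and thus sorts ahead of every SCHED_OTHER thread, all of
 * which are clamped to MAX_RT_PRIO and therefore wake in FIFO order
 * among themselves.
 */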

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:  The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Returns:
 *   1 - if the futex_q was still queued (and we unqueued it)
 *   0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
        spinlock_t *lock_ptr;
        int ret = 0;

        /* In the common case we don't take the spinlock, which is nice. */
retry:
        lock_ptr = q->lock_ptr;
        barrier();
        if (lock_ptr != NULL) {
                spin_lock(lock_ptr);
                /*
                 * q->lock_ptr can change between reading it and
                 * spin_lock(), causing us to take the wrong lock.  This
                 * corrects the race condition.
                 *
                 * Reasoning goes like this: if we have the wrong lock,
                 * q->lock_ptr must have changed (maybe several times)
                 * between reading it and the spin_lock().  It can
                 * change again after the spin_lock() but only if it was
                 * already changed before the spin_lock().  It cannot,
                 * however, change back to the original value.  Therefore
                 * we can detect whether we acquired the correct lock.
                 */
                if (unlikely(lock_ptr != q->lock_ptr)) {
                        spin_unlock(lock_ptr);
                        goto retry;
                }
                __unqueue_futex(q);

                BUG_ON(q->pi_state);

                spin_unlock(lock_ptr);
                ret = 1;
        }

        drop_futex_key_refs(&q->key);
        return ret;
}
1535
1536 /*
1537  * PI futexes cannot be requeued and must remove themselves from the
1538  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1539  * and dropped here.
1540  */
1541 static void unqueue_me_pi(struct futex_q *q)
1542         __releases(q->lock_ptr)
1543 {
1544         __unqueue_futex(q);
1545
1546         BUG_ON(!q->pi_state);
1547         free_pi_state(q->pi_state);
1548         q->pi_state = NULL;
1549
1550         spin_unlock(q->lock_ptr);
1551 }
1552
1553 /*
1554  * Fixup the pi_state owner with the new owner.
1555  *
1556  * Must be called with the hash bucket lock held and mm->mmap_sem held
1557  * for non-private futexes.
1558  */
1559 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1560                                 struct task_struct *newowner)
1561 {
1562         u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1563         struct futex_pi_state *pi_state = q->pi_state;
1564         struct task_struct *oldowner = pi_state->owner;
1565         u32 uval, curval, newval;
1566         int ret;
1567
1568         /* Owner died? */
1569         if (!pi_state->owner)
1570                 newtid |= FUTEX_OWNER_DIED;
1571
1572         /*
1573          * We are here either because we stole the rtmutex from the
1574          * pending owner or we are the pending owner which failed to
1575          * get the rtmutex. We have to replace the pending owner TID
1576          * in the user space variable. This must be atomic as we have
1577          * to preserve the owner died bit here.
1578          *
1579          * Note: We write the user space value _before_ changing the pi_state
1580          * because we can fault here. Imagine swapped out pages or a fork
1581          * that marked all the anonymous memory readonly for cow.
1582          *
1583          * Modifying pi_state _before_ the user space value would
1584          * leave the pi_state in an inconsistent state when we fault
1585          * here, because we need to drop the hash bucket lock to
1586          * handle the fault. This might be observed in the PID check
1587          * in lookup_pi_state.
1588          */
1589 retry:
1590         if (get_futex_value_locked(&uval, uaddr))
1591                 goto handle_fault;
1592
1593         while (1) {
1594                 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1595
1596                 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1597
1598                 if (curval == -EFAULT)
1599                         goto handle_fault;
1600                 if (curval == uval)
1601                         break;
1602                 uval = curval;
1603         }
1604
1605         /*
1606          * We fixed up user space. Now we need to fix the pi_state
1607          * itself.
1608          */
1609         if (pi_state->owner != NULL) {
1610                 raw_spin_lock_irq(&pi_state->owner->pi_lock);
1611                 WARN_ON(list_empty(&pi_state->list));
1612                 list_del_init(&pi_state->list);
1613                 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1614         }
1615
1616         pi_state->owner = newowner;
1617
1618         raw_spin_lock_irq(&newowner->pi_lock);
1619         WARN_ON(!list_empty(&pi_state->list));
1620         list_add(&pi_state->list, &newowner->pi_state_list);
1621         raw_spin_unlock_irq(&newowner->pi_lock);
1622         return 0;
1623
1624         /*
1625          * To handle the page fault we need to drop the hash bucket
1626          * lock here. That gives the other task (either the pending
1627          * owner itself or the task which stole the rtmutex) the
1628          * chance to try the fixup of the pi_state. So once we are
1629          * back from handling the fault we need to check the pi_state
1630          * after reacquiring the hash bucket lock and before trying to
1631          * do another fixup. When the fixup has been done already we
1632          * simply return.
1633          */
1634 handle_fault:
1635         spin_unlock(q->lock_ptr);
1636
1637         ret = fault_in_user_writeable(uaddr);
1638
1639         spin_lock(q->lock_ptr);
1640
1641         /*
1642          * Check if someone else fixed it for us:
1643          */
1644         if (pi_state->owner != oldowner)
1645                 return 0;
1646
1647         if (ret)
1648                 return ret;
1649
1650         goto retry;
1651 }
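
/*
 * Illustrative sketch (not part of this file's build): the futex word
 * layout rewritten by fixup_pi_state_owner() above.  The low 30 bits
 * (FUTEX_TID_MASK) carry the owner TID; the top two bits are the
 * FUTEX_WAITERS and FUTEX_OWNER_DIED flags, which is why the cmpxchg
 * loop must preserve FUTEX_OWNER_DIED while replacing the TID.  The
 * helper is hypothetical.
 */
#if 0
struct futex_word_view {
	u32 tid;		/* owner TID, from FUTEX_TID_MASK */
	bool waiters;		/* kernel has waiters queued */
	bool owner_died;	/* a previous owner exited holding it */
};

static struct futex_word_view decode_futex_word(u32 uval)
{
	struct futex_word_view v = {
		.tid		= uval & FUTEX_TID_MASK,
		.waiters	= !!(uval & FUTEX_WAITERS),
		.owner_died	= !!(uval & FUTEX_OWNER_DIED),
	};

	return v;
}
#endif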
1652
1653 static long futex_wait_restart(struct restart_block *restart);
1654
1655 /**
1656  * fixup_owner() - Post lock pi_state and corner case management
1657  * @uaddr:      user address of the futex
1658  * @q:          futex_q (contains pi_state and access to the rt_mutex)
1659  * @locked:     if the attempt to take the rt_mutex succeeded (1) or not (0)
1660  *
1661  * After attempting to lock an rt_mutex, this function is called to cleanup
1662  * the pi_state owner as well as handle race conditions that may allow us to
1663  * acquire the lock. Must be called with the hb lock held.
1664  *
1665  * Returns:
1666  *  1 - success, lock taken
1667  *  0 - success, lock not taken
1668  * <0 - on error (-EFAULT)
1669  */
1670 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
1671 {
1672         struct task_struct *owner;
1673         int ret = 0;
1674
1675         if (locked) {
1676                 /*
1677                  * Got the lock. We might not be the anticipated owner if we
1678                  * did a lock-steal - fix up the PI-state in that case:
1679                  */
1680                 if (q->pi_state->owner != current)
1681                         ret = fixup_pi_state_owner(uaddr, q, current);
1682                 goto out;
1683         }
1684
1685         /*
1686          * Catch the rare case, where the lock was released when we were on the
1687          * way back before we locked the hash bucket.
1688          */
1689         if (q->pi_state->owner == current) {
1690                 /*
1691                  * Try to get the rt_mutex now. This might fail as some other
1692                  * task acquired the rt_mutex after we removed ourself from the
1693                  * rt_mutex waiters list.
1694                  */
1695                 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1696                         locked = 1;
1697                         goto out;
1698                 }
1699
1700                 /*
1701                  * pi_state is incorrect, some other task did a lock steal and
1702                  * we returned due to timeout or signal without taking the
1703                  * rt_mutex. Too late. We can access the rt_mutex_owner without
1704                  * locking, as the other task is now blocked on the hash bucket
1705                  * lock. Fix the state up.
1706                  */
1707                 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1708                 ret = fixup_pi_state_owner(uaddr, q, owner);
1709                 goto out;
1710         }
1711
1712         /*
1713          * Paranoia check. If we did not take the lock, then we should not be
1714          * the owner, nor the pending owner, of the rt_mutex.
1715          */
1716         if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1717                 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1718                                 "pi-state %p\n", ret,
1719                                 q->pi_state->pi_mutex.owner,
1720                                 q->pi_state->owner);
1721
1722 out:
1723         return ret ? ret : locked;
1724 }
1725
1726 /**
1727  * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1728  * @hb:         the futex hash bucket, must be locked by the caller
1729  * @q:          the futex_q to queue up on
1730  * @timeout:    the prepared hrtimer_sleeper, or null for no timeout
1731  */
1732 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1733                                 struct hrtimer_sleeper *timeout)
1734 {
1735         /*
1736          * The task state is guaranteed to be set before another task can
1737          * wake it. set_current_state() is implemented using set_mb() and
1738          * queue_me() calls spin_unlock() upon completion, both serializing
1739          * access to the hash list and forcing another memory barrier.
1740          */
1741         set_current_state(TASK_INTERRUPTIBLE);
1742         queue_me(q, hb);
1743
1744         /* Arm the timer */
1745         if (timeout) {
1746                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1747                 if (!hrtimer_active(&timeout->timer))
1748                         timeout->task = NULL;
1749         }
1750
1751         /*
1752          * If we have been removed from the hash list, then another task
1753          * has tried to wake us, and we can skip the call to schedule().
1754          */
1755         if (likely(!plist_node_empty(&q->list))) {
1756                 /*
1757                  * If the timer has already expired, current will already be
1758                  * flagged for rescheduling. Only call schedule if there
1759                  * is no timeout, or if it has yet to expire.
1760                  */
1761                 if (!timeout || timeout->task)
1762                         schedule();
1763         }
1764         __set_current_state(TASK_RUNNING);
1765 }
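
/*
 * Illustrative sketch (not part of this file's build): the general
 * "arm state, publish, re-check, schedule" pattern that
 * futex_wait_queue_me() instantiates.  Setting the task state before
 * becoming findable, and re-checking afterwards, closes the window in
 * which a wakeup could be lost.  publish_waiter() and
 * waiter_was_woken() are hypothetical hooks.
 */
#if 0
static void wait_for_event(struct demo_waiter *w)
{
	set_current_state(TASK_INTERRUPTIBLE);	/* 1: arm the state first */
	publish_waiter(w);			/* 2: waker can now see us */
	if (!waiter_was_woken(w))		/* 3: re-check, then sleep */
		schedule();
	__set_current_state(TASK_RUNNING);
}
#endif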
1766
1767 /**
1768  * futex_wait_setup() - Prepare to wait on a futex
1769  * @uaddr:      the futex userspace address
1770  * @val:        the expected value
1771  * @flags:      futex flags (FLAGS_SHARED, etc.)
1772  * @q:          the associated futex_q
1773  * @hb:         storage for hash_bucket pointer to be returned to caller
1774  *
1775  * Setup the futex_q and locate the hash_bucket.  Get the futex value and
1776  * compare it with the expected value.  Handle atomic faults internally.
1777  * Return with the hb lock held and a q.key reference on success, and unlocked
1778  * with no q.key reference on failure.
1779  *
1780  * Returns:
1781  *  0 - uaddr contains val and hb has been locked
1782  * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1783  */
1784 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
1785                            struct futex_q *q, struct futex_hash_bucket **hb)
1786 {
1787         u32 uval;
1788         int ret;
1789
1790         /*
1791          * Access the page AFTER the hash-bucket is locked.
1792          * Order is important:
1793          *
1794          *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1795          *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
1796          *
1797          * The basic logical guarantee of a futex is that it blocks ONLY
1798          * if cond(var) is known to be true at the time of blocking, for
1799          * any cond.  If we queued after testing *uaddr, that would open
1800          * a race condition where we could block indefinitely with
1801          * cond(var) false, which would violate the guarantee.
1802          *
1803          * A consequence is that futex_wait() can return zero and absorb
1804          * a wakeup when *uaddr != val on entry to the syscall.  This is
1805          * rare, but normal.
1806          */
1807 retry:
1808         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key);
1809         if (unlikely(ret != 0))
1810                 return ret;
1811
1812 retry_private:
1813         *hb = queue_lock(q);
1814
1815         ret = get_futex_value_locked(&uval, uaddr);
1816
1817         if (ret) {
1818                 queue_unlock(q, *hb);
1819
1820                 ret = get_user(uval, uaddr);
1821                 if (ret)
1822                         goto out;
1823
1824                 if (!(flags & FLAGS_SHARED))
1825                         goto retry_private;
1826
1827                 put_futex_key(&q->key);
1828                 goto retry;
1829         }
1830
1831         if (uval != val) {
1832                 queue_unlock(q, *hb);
1833                 ret = -EWOULDBLOCK;
1834         }
1835
1836 out:
1837         if (ret)
1838                 put_futex_key(&q->key);
1839         return ret;
1840 }
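
/*
 * Illustrative userspace sketch (not kernel code): the waiter/waker
 * protocol from the ordering comment above.  FUTEX_WAIT blocks only if
 * *uaddr still holds the expected value when the kernel looks, so
 * testing the condition before the syscall is race-free.  The shared
 * word name is hypothetical.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static atomic_int cond_word;	/* hypothetical shared condition word */

static void waiter(void)
{
	while (atomic_load(&cond_word) == 0)
		/* Sleeps only while cond_word is still 0 */
		syscall(SYS_futex, &cond_word, FUTEX_WAIT, 0, NULL, NULL, 0);
}

static void waker(void)
{
	atomic_store(&cond_word, 1);
	syscall(SYS_futex, &cond_word, FUTEX_WAKE, 1, NULL, NULL, 0);
}
#endif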
1841
1842 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
1843                       ktime_t *abs_time, u32 bitset)
1844 {
1845         struct hrtimer_sleeper timeout, *to = NULL;
1846         struct restart_block *restart;
1847         struct futex_hash_bucket *hb;
1848         struct futex_q q = futex_q_init;
1849         int ret;
1850
1851         if (!bitset)
1852                 return -EINVAL;
1853         q.bitset = bitset;
1854
1855         if (abs_time) {
1856                 to = &timeout;
1857
1858                 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
1859                                       CLOCK_REALTIME : CLOCK_MONOTONIC,
1860                                       HRTIMER_MODE_ABS);
1861                 hrtimer_init_sleeper(to, current);
1862                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1863                                              current->timer_slack_ns);
1864         }
1865
1866 retry:
1867         /*
1868          * Prepare to wait on uaddr. On success, holds hb lock and increments
1869          * q.key refs.
1870          */
1871         ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
1872         if (ret)
1873                 goto out;
1874
1875         /* queue_me and wait for wakeup, timeout, or a signal. */
1876         futex_wait_queue_me(hb, &q, to);
1877
1878         /* If we were woken (and unqueued), we succeeded, whatever. */
1879         ret = 0;
1880         /* unqueue_me() drops q.key ref */
1881         if (!unqueue_me(&q))
1882                 goto out;
1883         ret = -ETIMEDOUT;
1884         if (to && !to->task)
1885                 goto out;
1886
1887         /*
1888          * We expect signal_pending(current), but we might be the
1889          * victim of a spurious wakeup as well.
1890          */
1891         if (!signal_pending(current))
1892                 goto retry;
1893
1894         ret = -ERESTARTSYS;
1895         if (!abs_time)
1896                 goto out;
1897
1898         restart = &current_thread_info()->restart_block;
1899         restart->fn = futex_wait_restart;
1900         restart->futex.uaddr = uaddr;
1901         restart->futex.val = val;
1902         restart->futex.time = abs_time->tv64;
1903         restart->futex.bitset = bitset;
1904         restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
1905
1906         ret = -ERESTART_RESTARTBLOCK;
1907
1908 out:
1909         if (to) {
1910                 hrtimer_cancel(&to->timer);
1911                 destroy_hrtimer_on_stack(&to->timer);
1912         }
1913         return ret;
1914 }
1915
1916
1917 static long futex_wait_restart(struct restart_block *restart)
1918 {
1919         u32 __user *uaddr = restart->futex.uaddr;
1920         ktime_t t, *tp = NULL;
1921
1922         if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1923                 t.tv64 = restart->futex.time;
1924                 tp = &t;
1925         }
1926         restart->fn = do_no_restart_syscall;
1927
1928         return (long)futex_wait(uaddr, restart->futex.flags,
1929                                 restart->futex.val, tp, restart->futex.bitset);
1930 }
1931
1932
1933 /*
1934  * Userspace tried a 0 -> TID atomic transition of the futex value
1935  * and failed. The kernel side here does the whole locking operation:
1936  * if there are waiters then it will block, it does PI, etc. (Due to
1937  * races the kernel might see a 0 value of the futex too.)
1938  */
1939 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
1940                          ktime_t *time, int trylock)
1941 {
1942         struct hrtimer_sleeper timeout, *to = NULL;
1943         struct futex_hash_bucket *hb;
1944         struct futex_q q = futex_q_init;
1945         int res, ret;
1946
1947         if (refill_pi_state_cache())
1948                 return -ENOMEM;
1949
1950         if (time) {
1951                 to = &timeout;
1952                 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1953                                       HRTIMER_MODE_ABS);
1954                 hrtimer_init_sleeper(to, current);
1955                 hrtimer_set_expires(&to->timer, *time);
1956         }
1957
1958 retry:
1959         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key);
1960         if (unlikely(ret != 0))
1961                 goto out;
1962
1963 retry_private:
1964         hb = queue_lock(&q);
1965
1966         ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
1967         if (unlikely(ret)) {
1968                 switch (ret) {
1969                 case 1:
1970                         /* We got the lock. */
1971                         ret = 0;
1972                         goto out_unlock_put_key;
1973                 case -EFAULT:
1974                         goto uaddr_faulted;
1975                 case -EAGAIN:
1976                         /*
1977                          * Task is exiting and we just wait for the
1978                          * exit to complete.
1979                          */
1980                         queue_unlock(&q, hb);
1981                         put_futex_key(&q.key);
1982                         cond_resched();
1983                         goto retry;
1984                 default:
1985                         goto out_unlock_put_key;
1986                 }
1987         }
1988
1989         /*
1990          * Only actually queue now that the atomic ops are done:
1991          */
1992         queue_me(&q, hb);
1993
1994         WARN_ON(!q.pi_state);
1995         /*
1996          * Block on the PI mutex:
1997          */
1998         if (!trylock)
1999                 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
2000         else {
2001                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2002                 /* Fixup the trylock return value: */
2003                 ret = ret ? 0 : -EWOULDBLOCK;
2004         }
2005
2006         spin_lock(q.lock_ptr);
2007         /*
2008          * Fixup the pi_state owner and possibly acquire the lock if we
2009          * haven't already.
2010          */
2011         res = fixup_owner(uaddr, &q, !ret);
2012         /*
2013          * If fixup_owner() returned an error, propagate that.  If it acquired
2014          * the lock, clear our -ETIMEDOUT or -EINTR.
2015          */
2016         if (res)
2017                 ret = (res < 0) ? res : 0;
2018
2019         /*
2020          * If fixup_owner() faulted and was unable to handle the fault, unlock
2021          * it and return the fault to userspace.
2022          */
2023         if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2024                 rt_mutex_unlock(&q.pi_state->pi_mutex);
2025
2026         /* Unqueue and drop the lock */
2027         unqueue_me_pi(&q);
2028
2029         goto out_put_key;
2030
2031 out_unlock_put_key:
2032         queue_unlock(&q, hb);
2033
2034 out_put_key:
2035         put_futex_key(&q.key);
2036 out:
2037         if (to)
2038                 destroy_hrtimer_on_stack(&to->timer);
2039         return ret != -EINTR ? ret : -ERESTARTNOINTR;
2040
2041 uaddr_faulted:
2042         queue_unlock(&q, hb);
2043
2044         ret = fault_in_user_writeable(uaddr);
2045         if (ret)
2046                 goto out_put_key;
2047
2048         if (!(flags & FLAGS_SHARED))
2049                 goto retry_private;
2050
2051         put_futex_key(&q.key);
2052         goto retry;
2053 }
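
/*
 * Illustrative userspace sketch (not kernel code): the 0 -> TID fast
 * path whose failure lands in futex_lock_pi() above.  An uncontended
 * acquisition never enters the kernel; only on contention does the
 * FUTEX_LOCK_PI slow path run.  The helper name is hypothetical.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>
#include <stdint.h>

static void pi_lock(_Atomic uint32_t *futex_word)
{
	uint32_t expected = 0;
	uint32_t tid = syscall(SYS_gettid);

	if (atomic_compare_exchange_strong(futex_word, &expected, tid))
		return;	/* fast path: we now own the lock */

	/* Contended: the kernel queues us and applies PI boosting */
	syscall(SYS_futex, futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}
#endif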
2054
2055 /*
2056  * Userspace attempted a TID -> 0 atomic transition, and failed.
2057  * This is the in-kernel slowpath: we look up the PI state (if any),
2058  * and do the rt-mutex unlock.
2059  */
2060 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2061 {
2062         struct futex_hash_bucket *hb;
2063         struct futex_q *this, *next;
2064         u32 uval;
2065         struct plist_head *head;
2066         union futex_key key = FUTEX_KEY_INIT;
2067         int ret;
2068
2069 retry:
2070         if (get_user(uval, uaddr))
2071                 return -EFAULT;
2072         /*
2073          * We release only a lock we actually own:
2074          */
2075         if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
2076                 return -EPERM;
2077
2078         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
2079         if (unlikely(ret != 0))
2080                 goto out;
2081
2082         hb = hash_futex(&key);
2083         spin_lock(&hb->lock);
2084
2085         /*
2086          * To avoid races, try to do the TID -> 0 atomic transition
2087          * again. If it succeeds then we can return without waking
2088          * anyone else up:
2089          */
2090         if (!(uval & FUTEX_OWNER_DIED))
2091                 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
2092
2093
2094         if (unlikely(uval == -EFAULT))
2095                 goto pi_faulted;
2096         /*
2097          * Rare case: we managed to release the lock atomically,
2098          * no need to wake anyone else up:
2099          */
2100         if (unlikely(uval == task_pid_vnr(current)))
2101                 goto out_unlock;
2102
2103         /*
2104          * Ok, other tasks may need to be woken up - check waiters
2105          * and do the wakeup if necessary:
2106          */
2107         head = &hb->chain;
2108
2109         plist_for_each_entry_safe(this, next, head, list) {
2110                 if (!match_futex(&this->key, &key))
2111                         continue;
2112                 ret = wake_futex_pi(uaddr, uval, this);
2113                 /*
2114                  * The atomic access to the futex value
2115                  * generated a pagefault, so retry the
2116                  * user-access and the wakeup:
2117                  */
2118                 if (ret == -EFAULT)
2119                         goto pi_faulted;
2120                 goto out_unlock;
2121         }
2122         /*
2123          * No waiters - kernel unlocks the futex:
2124          */
2125         if (!(uval & FUTEX_OWNER_DIED)) {
2126                 ret = unlock_futex_pi(uaddr, uval);
2127                 if (ret == -EFAULT)
2128                         goto pi_faulted;
2129         }
2130
2131 out_unlock:
2132         spin_unlock(&hb->lock);
2133         put_futex_key(&key);
2134
2135 out:
2136         return ret;
2137
2138 pi_faulted:
2139         spin_unlock(&hb->lock);
2140         put_futex_key(&key);
2141
2142         ret = fault_in_user_writeable(uaddr);
2143         if (!ret)
2144                 goto retry;
2145
2146         return ret;
2147 }
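
/*
 * Illustrative userspace sketch (not kernel code): the TID -> 0 fast
 * path whose failure lands in futex_unlock_pi() above.  If the word
 * still holds exactly our TID (i.e. no FUTEX_WAITERS bit), the unlock
 * stays in userspace; otherwise the kernel must wake the top waiter.
 * The helper name is hypothetical.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>
#include <stdint.h>

static void pi_unlock(_Atomic uint32_t *futex_word)
{
	uint32_t tid = syscall(SYS_gettid);

	if (atomic_compare_exchange_strong(futex_word, &tid, 0))
		return;	/* fast path: nobody was waiting */

	syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}
#endif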
2148
2149 /**
2150  * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2151  * @hb:         the hash_bucket the futex_q was originally enqueued on
2152  * @q:          the futex_q woken while waiting to be requeued
2153  * @key2:       the futex_key of the requeue target futex
2154  * @timeout:    the timeout associated with the wait (NULL if none)
2155  *
2156  * Detect if the task was woken on the initial futex as opposed to the requeue
2157  * target futex.  If so, determine if it was a timeout or a signal that caused
2158  * the wakeup and return the appropriate error code to the caller.  Must be
2159  * called with the hb lock held.
2160  *
2161  * Returns:
2162  *  0 - no early wakeup detected
2163  * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2164  */
2165 static inline
2166 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2167                                    struct futex_q *q, union futex_key *key2,
2168                                    struct hrtimer_sleeper *timeout)
2169 {
2170         int ret = 0;
2171
2172         /*
2173          * With the hb lock held, we avoid races while we process the wakeup.
2174          * We only need to hold hb (and not hb2) to ensure atomicity as the
2175          * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2176          * It can't be requeued from uaddr2 to something else since we don't
2177          * support a PI aware source futex for requeue.
2178          */
2179         if (!match_futex(&q->key, key2)) {
2180                 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2181                 /*
2182                  * We were woken prior to requeue by a timeout or a signal.
2183                  * Unqueue the futex_q and determine which it was.
2184                  */
2185                 plist_del(&q->list, &hb->chain);
2186
2187                 /* Handle spurious wakeups gracefully */
2188                 ret = -EWOULDBLOCK;
2189                 if (timeout && !timeout->task)
2190                         ret = -ETIMEDOUT;
2191                 else if (signal_pending(current))
2192                         ret = -ERESTARTNOINTR;
2193         }
2194         return ret;
2195 }
2196
2197 /**
2198  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2199  * @uaddr:      the futex we initially wait on (non-pi)
2200  * @flags:      futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2201  *              the same type, no requeueing from private to shared, etc.
2202  * @val:        the expected value of uaddr
2203  * @abs_time:   absolute timeout
2204  * @bitset:     32 bit wakeup bitset set by userspace, defaults to all
2206  * @uaddr2:     the pi futex we will take prior to returning to user-space
2207  *
2208  * The caller will wait on uaddr and will be requeued by futex_requeue() to
2209  * uaddr2 which must be PI aware.  Normal wakeup will wake on uaddr2 and
2210  * complete the acquisition of the rt_mutex prior to returning to userspace.
2211  * This ensures the rt_mutex maintains an owner when it has waiters; without
2212  * one, the pi logic wouldn't know which task to boost/deboost, if there was a
2213  * need to.
2214  *
2215  * We call schedule in futex_wait_queue_me() when we enqueue and return there
2216  * via the following:
2217  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2218  * 2) wakeup on uaddr2 after a requeue
2219  * 3) signal
2220  * 4) timeout
2221  *
2222  * If 3, cleanup and return -ERESTARTNOINTR.
2223  *
2224  * If 2, we may then block on trying to take the rt_mutex and return via:
2225  * 5) successful lock
2226  * 6) signal
2227  * 7) timeout
2228  * 8) other lock acquisition failure
2229  *
2230  * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2231  *
2232  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2233  *
2234  * Returns:
2235  *  0 - On success
2236  * <0 - On error
2237  */
2238 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2239                                  u32 val, ktime_t *abs_time, u32 bitset,
2240                                  u32 __user *uaddr2)
2241 {
2242         struct hrtimer_sleeper timeout, *to = NULL;
2243         struct rt_mutex_waiter rt_waiter;
2244         struct rt_mutex *pi_mutex = NULL;
2245         struct futex_hash_bucket *hb;
2246         union futex_key key2 = FUTEX_KEY_INIT;
2247         struct futex_q q = futex_q_init;
2248         int res, ret;
2249
2250         if (!bitset)
2251                 return -EINVAL;
2252
2253         if (abs_time) {
2254                 to = &timeout;
2255                 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2256                                       CLOCK_REALTIME : CLOCK_MONOTONIC,
2257                                       HRTIMER_MODE_ABS);
2258                 hrtimer_init_sleeper(to, current);
2259                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2260                                              current->timer_slack_ns);
2261         }
2262
2263         /*
2264          * The waiter is allocated on our stack, manipulated by the requeue
2265          * code while we sleep on uaddr.
2266          */
2267         debug_rt_mutex_init_waiter(&rt_waiter);
2268         rt_waiter.task = NULL;
2269
2270         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
2271         if (unlikely(ret != 0))
2272                 goto out;
2273
2274         q.bitset = bitset;
2275         q.rt_waiter = &rt_waiter;
2276         q.requeue_pi_key = &key2;
2277
2278         /*
2279          * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2280          * count.
2281          */
2282         ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2283         if (ret)
2284                 goto out_key2;
2285
2286         /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2287         futex_wait_queue_me(hb, &q, to);
2288
2289         spin_lock(&hb->lock);
2290         ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2291         spin_unlock(&hb->lock);
2292         if (ret)
2293                 goto out_put_keys;
2294
2295         /*
2296          * In order for us to be here, we know our q.key == key2, and since
2297          * we took the hb->lock above, we also know that futex_requeue() has
2298          * completed and we no longer have to concern ourselves with a wakeup
2299          * race with the atomic proxy lock acquisition by the requeue code. The
2300          * futex_requeue dropped our key1 reference and incremented our key2
2301          * reference count.
2302          */
2303
2304         /* Check if the requeue code acquired the second futex for us. */
2305         if (!q.rt_waiter) {
2306                 /*
2307                  * Got the lock. We might not be the anticipated owner if we
2308                  * did a lock-steal - fix up the PI-state in that case.
2309                  */
2310                 if (q.pi_state && (q.pi_state->owner != current)) {
2311                         spin_lock(q.lock_ptr);
2312                         ret = fixup_pi_state_owner(uaddr2, &q, current);
2313                         spin_unlock(q.lock_ptr);
2314                 }
2315         } else {
2316                 /*
2317                  * We have been woken up by futex_unlock_pi(), a timeout, or a
2318                  * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
2319                  * the pi_state.
2320                  */
2321                 WARN_ON(!q.pi_state);
2322                 pi_mutex = &q.pi_state->pi_mutex;
2323                 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2324                 debug_rt_mutex_free_waiter(&rt_waiter);
2325
2326                 spin_lock(q.lock_ptr);
2327                 /*
2328                  * Fixup the pi_state owner and possibly acquire the lock if we
2329                  * haven't already.
2330                  */
2331                 res = fixup_owner(uaddr2, &q, !ret);
2332                 /*
2333                  * If fixup_owner() returned an error, propagate that.  If it
2334                  * acquired the lock, clear -ETIMEDOUT or -EINTR.
2335                  */
2336                 if (res)
2337                         ret = (res < 0) ? res : 0;
2338
2339                 /* Unqueue and drop the lock. */
2340                 unqueue_me_pi(&q);
2341         }
2342
2343         /*
2344          * If fixup_pi_state_owner() faulted and was unable to handle the
2345          * fault, unlock the rt_mutex and return the fault to userspace.
2346          */
2347         if (ret == -EFAULT) {
2348                 if (rt_mutex_owner(pi_mutex) == current)
2349                         rt_mutex_unlock(pi_mutex);
2350         } else if (ret == -EINTR) {
2351                 /*
2352                  * We've already been requeued, but cannot restart by calling
2353                  * futex_lock_pi() directly. We could restart this syscall, but
2354                  * it would detect that the user space "val" changed and return
2355                  * -EWOULDBLOCK.  Save the overhead of the restart and return
2356                  * -EWOULDBLOCK directly.
2357                  */
2358                 ret = -EWOULDBLOCK;
2359         }
2360
2361 out_put_keys:
2362         put_futex_key(&q.key);
2363 out_key2:
2364         put_futex_key(&key2);
2365
2366 out:
2367         if (to) {
2368                 hrtimer_cancel(&to->timer);
2369                 destroy_hrtimer_on_stack(&to->timer);
2370         }
2371         return ret;
2372 }
2373
2374 /*
2375  * Support for robust futexes: the kernel cleans up held futexes at
2376  * thread exit time.
2377  *
2378  * Implementation: user-space maintains a per-thread list of locks it
2379  * is holding. Upon do_exit(), the kernel carefully walks this list,
2380  * and marks all locks that are owned by this thread with the
2381  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2382  * always manipulated with the lock held, so the list is private and
2383  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2384  * field, to allow the kernel to clean up if the thread dies after
2385  * acquiring the lock, but just before it could have added itself to
2386  * the list. There can only be one such pending lock.
2387  */
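
/*
 * Illustrative userspace sketch (not kernel code): registering a robust
 * list as described above.  glibc does this automatically for every
 * thread; the explicit form is shown for clarity.  futex_offset tells
 * the kernel where the 32-bit futex word lives relative to each list
 * entry.  The lock type is hypothetical.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stddef.h>
#include <stdint.h>

struct my_robust_lock {
	struct robust_list list;	/* linkage the kernel walks */
	uint32_t futex_word;		/* owner TID, 0 when free */
};

static struct robust_list_head robust_head = {
	.list		 = { &robust_head.list },	/* empty list */
	.futex_offset	 = offsetof(struct my_robust_lock, futex_word),
	.list_op_pending = NULL,
};

static void register_robust_list(void)
{
	syscall(SYS_set_robust_list, &robust_head, sizeof(robust_head));
}
#endif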
2388
2389 /**
2390  * sys_set_robust_list() - Set the robust-futex list head of a task
2391  * @head:       pointer to the list-head
2392  * @len:        length of the list-head, as userspace expects
2393  */
2394 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2395                 size_t, len)
2396 {
2397         if (!futex_cmpxchg_enabled)
2398                 return -ENOSYS;
2399         /*
2400          * The kernel knows only one size for now:
2401          */
2402         if (unlikely(len != sizeof(*head)))
2403                 return -EINVAL;
2404
2405         current->robust_list = head;
2406
2407         return 0;
2408 }
2409
2410 /**
2411  * sys_get_robust_list() - Get the robust-futex list head of a task
2412  * @pid:        pid of the process [zero for current task]
2413  * @head_ptr:   pointer to a list-head pointer, the kernel fills it in
2414  * @len_ptr:    pointer to a length field, the kernel fills in the header size
2415  */
2416 SYSCALL_DEFINE3(get_robust_list, int, pid,
2417                 struct robust_list_head __user * __user *, head_ptr,
2418                 size_t __user *, len_ptr)
2419 {
2420         struct robust_list_head __user *head;
2421         unsigned long ret;
2422         const struct cred *cred = current_cred(), *pcred;
2423
2424         if (!futex_cmpxchg_enabled)
2425                 return -ENOSYS;
2426
2427         if (!pid)
2428                 head = current->robust_list;
2429         else {
2430                 struct task_struct *p;
2431
2432                 ret = -ESRCH;
2433                 rcu_read_lock();
2434                 p = find_task_by_vpid(pid);
2435                 if (!p)
2436                         goto err_unlock;
2437                 ret = -EPERM;
2438                 pcred = __task_cred(p);
2439                 if (cred->euid != pcred->euid &&
2440                     cred->euid != pcred->uid &&
2441                     !capable(CAP_SYS_PTRACE))
2442                         goto err_unlock;
2443                 head = p->robust_list;
2444                 rcu_read_unlock();
2445         }
2446
2447         if (put_user(sizeof(*head), len_ptr))
2448                 return -EFAULT;
2449         return put_user(head, head_ptr);
2450
2451 err_unlock:
2452         rcu_read_unlock();
2453
2454         return ret;
2455 }
2456
2457 /*
2458  * Process a futex-list entry, check whether it's owned by the
2459  * dying task, and do notification if so:
2460  */
2461 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2462 {
2463         u32 uval, nval, mval;
2464
2465 retry:
2466         if (get_user(uval, uaddr))
2467                 return -1;
2468
2469         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2470                 /*
2471                  * Ok, this dying thread is truly holding a futex
2472                  * of interest. Set the OWNER_DIED bit atomically
2473                  * via cmpxchg, and if the value had FUTEX_WAITERS
2474                  * set, wake up a waiter (if any). (We have to do a
2475                  * futex_wake() even if OWNER_DIED is already set -
2476                  * to handle the rare but possible case of recursive
2477                  * thread-death.) The rest of the cleanup is done in
2478                  * userspace.
2479                  */
2480                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2481                 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
2482
2483                 if (nval == -EFAULT)
2484                         return -1;
2485
2486                 if (nval != uval)
2487                         goto retry;
2488
2489                 /*
2490                  * Wake robust non-PI futexes here. The wakeup of
2491                  * PI futexes happens in exit_pi_state():
2492                  */
2493                 if (!pi && (uval & FUTEX_WAITERS))
2494                         futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2495         }
2496         return 0;
2497 }
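
/*
 * Illustrative userspace sketch (not kernel code): how the
 * FUTEX_OWNER_DIED bit set above surfaces through POSIX robust mutexes.
 * Assuming m was created with pthread_mutexattr_setrobust(&attr,
 * PTHREAD_MUTEX_ROBUST), the next locker gets EOWNERDEAD and must mark
 * the protected state consistent before unlocking.
 */
#if 0
#include <pthread.h>
#include <errno.h>

static void lock_robust(pthread_mutex_t *m)
{
	int err = pthread_mutex_lock(m);

	if (err == EOWNERDEAD) {
		/* Previous owner died holding the lock: repair state here */
		pthread_mutex_consistent(m);
	}
}
#endif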
2498
2499 /*
2500  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2501  */
2502 static inline int fetch_robust_entry(struct robust_list __user **entry,
2503                                      struct robust_list __user * __user *head,
2504                                      unsigned int *pi)
2505 {
2506         unsigned long uentry;
2507
2508         if (get_user(uentry, (unsigned long __user *)head))
2509                 return -EFAULT;
2510
2511         *entry = (void __user *)(uentry & ~1UL);
2512         *pi = uentry & 1;
2513
2514         return 0;
2515 }
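
/*
 * Illustrative sketch (not part of this file's build): the userspace
 * side of the bit-0 tagging decoded by fetch_robust_entry().  PI locks
 * share the robust list with plain ones by setting the low bit of the
 * entry pointer, which is free because list entries are word aligned.
 */
#if 0
static unsigned long encode_robust_entry(struct robust_list *entry, int is_pi)
{
	return (unsigned long)entry | (is_pi ? 1UL : 0UL);
}
#endif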
2516
2517 /*
2518  * Walk curr->robust_list (very carefully, it's a userspace list!)
2519  * and mark any locks found there dead, and notify any waiters.
2520  *
2521  * We silently return on any sign of a list-walking problem.
2522  */
2523 void exit_robust_list(struct task_struct *curr)
2524 {
2525         struct robust_list_head __user *head = curr->robust_list;
2526         struct robust_list __user *entry, *next_entry, *pending;
2527         unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2528         unsigned int uninitialized_var(next_pi);
2529         unsigned long futex_offset;
2530         int rc;
2531
2532         if (!futex_cmpxchg_enabled)
2533                 return;
2534
2535         /*
2536          * Fetch the list head (which was registered earlier, via
2537          * sys_set_robust_list()):
2538          */
2539         if (fetch_robust_entry(&entry, &head->list.next, &pi))
2540                 return;
2541         /*
2542          * Fetch the relative futex offset:
2543          */
2544         if (get_user(futex_offset, &head->futex_offset))
2545                 return;
2546         /*
2547          * Fetch any possibly pending lock-add first, and handle it
2548          * if it exists:
2549          */
2550         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2551                 return;
2552
2553         next_entry = NULL;      /* avoid warning with gcc */
2554         while (entry != &head->list) {
2555                 /*
2556                  * Fetch the next entry in the list before calling
2557                  * handle_futex_death:
2558                  */
2559                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2560                 /*
2561                  * A pending lock might already be on the list, so
2562                  * don't process it twice:
2563                  */
2564                 if (entry != pending)
2565                         if (handle_futex_death((void __user *)entry + futex_offset,
2566                                                 curr, pi))
2567                                 return;
2568                 if (rc)
2569                         return;
2570                 entry = next_entry;
2571                 pi = next_pi;
2572                 /*
2573                  * Avoid excessively long or circular lists:
2574                  */
2575                 if (!--limit)
2576                         break;
2577
2578                 cond_resched();
2579         }
2580
2581         if (pending)
2582                 handle_futex_death((void __user *)pending + futex_offset,
2583                                    curr, pip);
2584 }
2585
2586 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2587                 u32 __user *uaddr2, u32 val2, u32 val3)
2588 {
2589         int ret = -ENOSYS, cmd = op & FUTEX_CMD_MASK;
2590         unsigned int flags = 0;
2591
2592         if (!(op & FUTEX_PRIVATE_FLAG))
2593                 flags |= FLAGS_SHARED;
2594
2595         if (op & FUTEX_CLOCK_REALTIME) {
2596                 flags |= FLAGS_CLOCKRT;
2597                 if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2598                         return -ENOSYS;
2599         }
2600
2601         switch (cmd) {
2602         case FUTEX_WAIT:
2603                 val3 = FUTEX_BITSET_MATCH_ANY;
2604         case FUTEX_WAIT_BITSET:
2605                 ret = futex_wait(uaddr, flags, val, timeout, val3);
2606                 break;
2607         case FUTEX_WAKE:
2608                 val3 = FUTEX_BITSET_MATCH_ANY;
2609         case FUTEX_WAKE_BITSET:
2610                 ret = futex_wake(uaddr, flags, val, val3);
2611                 break;
2612         case FUTEX_REQUEUE:
2613                 ret = futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
2614                 break;
2615         case FUTEX_CMP_REQUEUE:
2616                 ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
2617                 break;
2618         case FUTEX_WAKE_OP:
2619                 ret = futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2620                 break;
2621         case FUTEX_LOCK_PI:
2622                 if (futex_cmpxchg_enabled)
2623                         ret = futex_lock_pi(uaddr, flags, val, timeout, 0);
2624                 break;
2625         case FUTEX_UNLOCK_PI:
2626                 if (futex_cmpxchg_enabled)
2627                         ret = futex_unlock_pi(uaddr, flags);
2628                 break;
2629         case FUTEX_TRYLOCK_PI:
2630                 if (futex_cmpxchg_enabled)
2631                         ret = futex_lock_pi(uaddr, flags, 0, timeout, 1);
2632                 break;
2633         case FUTEX_WAIT_REQUEUE_PI:
2634                 val3 = FUTEX_BITSET_MATCH_ANY;
2635                 ret = futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
2636                                             uaddr2);
2637                 break;
2638         case FUTEX_CMP_REQUEUE_PI:
2639                 ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
2640                 break;
2641         default:
2642                 ret = -ENOSYS;
2643         }
2644         return ret;
2645 }
2646
2647
2648 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2649                 struct timespec __user *, utime, u32 __user *, uaddr2,
2650                 u32, val3)
2651 {
2652         struct timespec ts;
2653         ktime_t t, *tp = NULL;
2654         u32 val2 = 0;
2655         int cmd = op & FUTEX_CMD_MASK;
2656
2657         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2658                       cmd == FUTEX_WAIT_BITSET ||
2659                       cmd == FUTEX_WAIT_REQUEUE_PI)) {
2660                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2661                         return -EFAULT;
2662                 if (!timespec_valid(&ts))
2663                         return -EINVAL;
2664
2665                 t = timespec_to_ktime(ts);
2666                 if (cmd == FUTEX_WAIT)
2667                         t = ktime_add_safe(ktime_get(), t);
2668                 tp = &t;
2669         }
2670         /*
2671          * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
2672          * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
2673          */
2674         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2675             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
2676                 val2 = (u32) (unsigned long) utime;
2677
2678         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2679 }
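
/*
 * Illustrative userspace sketch (not kernel code): the timeout
 * convention implemented above.  FUTEX_WAIT takes a *relative* timeout,
 * which the syscall converts to an absolute expiry via ktime_add_safe();
 * the *_BITSET, *_REQUEUE_PI and PI operations pass an absolute one
 * straight through.  The helper name is hypothetical.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <time.h>

static long wait_up_to_100ms(int *uaddr, int expected)
{
	struct timespec rel = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	/* Relative timeout: give up 100ms from now */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, &rel,
		       NULL, 0);
}
#endif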
2680
2681 static int __init futex_init(void)
2682 {
2683         u32 curval;
2684         int i;
2685
2686         /*
2687          * This will fail and we want it. Some arch implementations do
2688          * runtime detection of the futex_atomic_cmpxchg_inatomic()
2689          * functionality. We want to know that before we call in any
2690          * of the complex code paths. Also we want to prevent
2691          * registration of robust lists in that case. NULL is
2692          * guaranteed to fault and we get -EFAULT on functional
2693          * implementation, the non-functional ones will return
2694          * -ENOSYS.
2695          */
2696         curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2697         if (curval == -EFAULT)
2698                 futex_cmpxchg_enabled = 1;
2699
2700         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2701                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2702                 spin_lock_init(&futex_queues[i].lock);
2703         }
2704
2705         return 0;
2706 }
2707 __initcall(futex_init);