userfaultfd: avoid missing wakeups during refile in userfaultfd_read
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index a14d63e945f4749dd78c0f8e15f2046dbd914f8d..634e676072cb738467b61064beaa5fa4b98cf672 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -45,6 +45,8 @@ struct userfaultfd_ctx {
        wait_queue_head_t fault_wqh;
        /* waitqueue head for the pseudo fd to wakeup poll/read */
        wait_queue_head_t fd_wqh;
+       /* a refile sequence protected by fault_pending_wqh lock */
+       struct seqcount refile_seq;
        /* pseudo fd refcounting */
        atomic_t refcount;
        /* userfaultfd syscall flags */
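
What the new field buys: a seqcount is a sequence counter that lets lockless readers detect that an update ran, or is still running, and retry their snapshot. It provides no writer exclusion of its own; here the write side is serialized by fault_pending_wqh.lock, which every refiler already holds. Below is a minimal userspace sketch of the idea in C11 atomics rather than the kernel's <linux/seqlock.h> API; all names are ours, and seq_cst ordering stands in for the kernel's finer-grained barriers. wake_userfault() further down uses exactly this read-begin/retry shape.

#include <stdatomic.h>

typedef struct { atomic_uint seq; } seqcount_sketch;

/* Writers must be serialized externally (here: by a spinlock the
 * caller already holds); the counter only publishes that an update
 * is in flight (odd) or has completed (incremented past the start). */
static void sketch_write_begin(seqcount_sketch *s)
{
        atomic_fetch_add(&s->seq, 1);   /* odd: update in progress */
}

static void sketch_write_end(seqcount_sketch *s)
{
        atomic_fetch_add(&s->seq, 1);   /* even again: update finished */
}

static unsigned sketch_read_begin(seqcount_sketch *s)
{
        return atomic_load(&s->seq);
}

static int sketch_read_retry(seqcount_sketch *s, unsigned start)
{
        /* Retry if a write was in flight (odd) or completed since. */
        return (start & 1) || atomic_load(&s->seq) != start;
}
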
@@ -546,6 +548,15 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                spin_lock(&ctx->fault_pending_wqh.lock);
                uwq = find_userfault(ctx);
                if (uwq) {
+                       /*
+                        * Use a seqcount to repeat the lockless check
+                        * in wake_userfault() to avoid missing
+                        * wakeups: during the refile both waitqueues
+                        * could momentarily become empty if this is
+                        * the only pending userfault.
+                        */
+                       write_seqcount_begin(&ctx->refile_seq);
+
                        /*
                         * The fault_pending_wqh.lock prevents the uwq
                         * from disappearing from under us.
@@ -570,6 +581,8 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                        list_del(&uwq->wq.task_list);
                        __add_wait_queue(&ctx->fault_wqh, &uwq->wq);
 
+                       write_seqcount_end(&ctx->refile_seq);
+
                        /* careful to always initialize msg if ret == 0 */
                        *msg = uwq->msg;
                        spin_unlock(&ctx->fault_pending_wqh.lock);
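
The window being fenced off is visible right here: between list_del() and __add_wait_queue() the uwq sits on neither waitqueue, so the lockless waitqueue_active() pair in wake_userfault() could see both queues empty when this is the only userfault, and the wakeup would be skipped. Lock holders can never observe that state; only the lockless check can, which is why a seqcount (rather than more locking) suffices. Stitched together, the refile critical section now reads roughly as follows; this is a paraphrase of the hunks above, with their elided context kept elided:

        spin_lock(&ctx->fault_pending_wqh.lock);
        uwq = find_userfault(ctx);
        if (uwq) {
                write_seqcount_begin(&ctx->refile_seq);
                ...
                list_del(&uwq->wq.task_list);   /* off fault_pending_wqh */
                __add_wait_queue(&ctx->fault_wqh, &uwq->wq); /* onto fault_wqh */
                write_seqcount_end(&ctx->refile_seq);

                /* careful to always initialize msg if ret == 0 */
                *msg = uwq->msg;
                spin_unlock(&ctx->fault_pending_wqh.lock);
                ...
        }
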
@@ -647,6 +660,9 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx,
 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
                                           struct userfaultfd_wake_range *range)
 {
+       unsigned seq;
+       bool need_wakeup;
+
        /*
         * To be sure waitqueue_active() is not reordered by the CPU
         * before the pagetable update, use an explicit SMP memory
@@ -662,8 +678,13 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
         * userfaults yet. So we take the spinlock only when we're
         * sure we have userfaults to wake.
         */
-       if (waitqueue_active(&ctx->fault_pending_wqh) ||
-           waitqueue_active(&ctx->fault_wqh))
+       do {
+               seq = read_seqcount_begin(&ctx->refile_seq);
+               need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
+                       waitqueue_active(&ctx->fault_wqh);
+               cond_resched();
+       } while (read_seqcount_retry(&ctx->refile_seq, seq));
+       if (need_wakeup)
                __wake_userfault(ctx, range);
 }
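
To see the fix work end to end, here is a self-contained userspace toy (C11 atomics plus pthreads; every identifier is ours, none of this is kernel code). One item is perpetually refiled between two flags that stand in for "fault_pending_wqh is non-empty" and "fault_wqh is non-empty", so logically something is always queued. The naive emptiness check, mirroring the pre-patch logic, can sample the transient all-empty window; the seqcount-protected loop, shaped like the one above, cannot, because any sample that overlapped a refile is retried. Build with cc -O2 -pthread; on a multicore machine expect a non-zero naive count and zero for the protected one.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;                 /* stands in for ctx->refile_seq */
static atomic_bool on_pending = 1;      /* "fault_pending_wqh non-empty" */
static atomic_bool on_done;             /* "fault_wqh non-empty" */
static atomic_bool stop_flag;
static unsigned long naive_misses, protected_misses;

/* Forever refile the single item between the two "queues", bumping
 * the sequence count around each move exactly as the patch does. */
static void *refiler(void *arg)
{
        (void)arg;
        while (!atomic_load(&stop_flag)) {
                atomic_fetch_add(&seq, 1);      /* write_seqcount_begin */
                atomic_store(&on_pending, 0);   /* list_del() */
                atomic_store(&on_done, 1);      /* __add_wait_queue() */
                atomic_fetch_add(&seq, 1);      /* write_seqcount_end */

                atomic_fetch_add(&seq, 1);      /* move it back */
                atomic_store(&on_done, 0);
                atomic_store(&on_pending, 1);
                atomic_fetch_add(&seq, 1);
        }
        return NULL;
}

/* The waker side: compare the pre-patch check with the
 * seqcount-protected one from wake_userfault() above. */
static void *waker(void *arg)
{
        (void)arg;
        for (long i = 0; i < 1000000; i++) {
                if (!atomic_load(&on_pending) && !atomic_load(&on_done))
                        naive_misses++;         /* lost wakeup, pre-patch */

                unsigned s;
                int need_wakeup;
                do {
                        s = atomic_load(&seq);
                        need_wakeup = atomic_load(&on_pending) ||
                                      atomic_load(&on_done);
                        sched_yield();          /* the cond_resched() slot */
                } while ((s & 1) || atomic_load(&seq) != s);
                if (!need_wakeup)
                        protected_misses++;     /* should stay 0 */
        }
        atomic_store(&stop_flag, 1);
        return NULL;
}

int main(void)
{
        pthread_t r, w;
        pthread_create(&r, NULL, refiler, NULL);
        pthread_create(&w, NULL, waker, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        printf("naive misses: %lu, seqcount-protected misses: %lu\n",
               naive_misses, protected_misses);
        return 0;
}

The sched_yield() sits where cond_resched() does in the kernel loop above: the read side can spin for as long as refiles keep landing, so it must remain preemptible rather than burn the CPU.
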
 
@@ -1219,6 +1240,7 @@ static void init_once_userfaultfd_ctx(void *mem)
        init_waitqueue_head(&ctx->fault_pending_wqh);
        init_waitqueue_head(&ctx->fault_wqh);
        init_waitqueue_head(&ctx->fd_wqh);
+       seqcount_init(&ctx->refile_seq);
 }
 
 /**