scsi: Fix wrong additional sense length in descriptor format
[firefly-linux-kernel-4.4.55.git] / mm / memory-failure.c
index 17a8e3bc3b0151a1ce8194ca42628b922ed23cd1..c53543d892828e75796239d6ce36afa90203085b 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/mm_inline.h>
 #include <linux/kfifo.h>
 #include "internal.h"
+#include "ras/ras_event.h"
 
 int sysctl_memory_failure_early_kill __read_mostly = 0;
 
@@ -504,68 +505,34 @@ static void collect_procs(struct page *page, struct list_head *tokill,
        kfree(tk);
 }
 
-/*
- * Error handlers for various types of pages.
- */
-
-enum outcome {
-       IGNORED,        /* Error: cannot be handled */
-       FAILED,         /* Error: handling failed */
-       DELAYED,        /* Will be handled later */
-       RECOVERED,      /* Successfully recovered */
-};
-
 static const char *action_name[] = {
-       [IGNORED] = "Ignored",
-       [FAILED] = "Failed",
-       [DELAYED] = "Delayed",
-       [RECOVERED] = "Recovered",
-};
-
-enum action_page_type {
-       MSG_KERNEL,
-       MSG_KERNEL_HIGH_ORDER,
-       MSG_SLAB,
-       MSG_DIFFERENT_COMPOUND,
-       MSG_POISONED_HUGE,
-       MSG_HUGE,
-       MSG_FREE_HUGE,
-       MSG_UNMAP_FAILED,
-       MSG_DIRTY_SWAPCACHE,
-       MSG_CLEAN_SWAPCACHE,
-       MSG_DIRTY_MLOCKED_LRU,
-       MSG_CLEAN_MLOCKED_LRU,
-       MSG_DIRTY_UNEVICTABLE_LRU,
-       MSG_CLEAN_UNEVICTABLE_LRU,
-       MSG_DIRTY_LRU,
-       MSG_CLEAN_LRU,
-       MSG_TRUNCATED_LRU,
-       MSG_BUDDY,
-       MSG_BUDDY_2ND,
-       MSG_UNKNOWN,
+       [MF_IGNORED] = "Ignored",
+       [MF_FAILED] = "Failed",
+       [MF_DELAYED] = "Delayed",
+       [MF_RECOVERED] = "Recovered",
 };
 
 static const char * const action_page_types[] = {
-       [MSG_KERNEL]                    = "reserved kernel page",
-       [MSG_KERNEL_HIGH_ORDER]         = "high-order kernel page",
-       [MSG_SLAB]                      = "kernel slab page",
-       [MSG_DIFFERENT_COMPOUND]        = "different compound page after locking",
-       [MSG_POISONED_HUGE]             = "huge page already hardware poisoned",
-       [MSG_HUGE]                      = "huge page",
-       [MSG_FREE_HUGE]                 = "free huge page",
-       [MSG_UNMAP_FAILED]              = "unmapping failed page",
-       [MSG_DIRTY_SWAPCACHE]           = "dirty swapcache page",
-       [MSG_CLEAN_SWAPCACHE]           = "clean swapcache page",
-       [MSG_DIRTY_MLOCKED_LRU]         = "dirty mlocked LRU page",
-       [MSG_CLEAN_MLOCKED_LRU]         = "clean mlocked LRU page",
-       [MSG_DIRTY_UNEVICTABLE_LRU]     = "dirty unevictable LRU page",
-       [MSG_CLEAN_UNEVICTABLE_LRU]     = "clean unevictable LRU page",
-       [MSG_DIRTY_LRU]                 = "dirty LRU page",
-       [MSG_CLEAN_LRU]                 = "clean LRU page",
-       [MSG_TRUNCATED_LRU]             = "already truncated LRU page",
-       [MSG_BUDDY]                     = "free buddy page",
-       [MSG_BUDDY_2ND]                 = "free buddy page (2nd try)",
-       [MSG_UNKNOWN]                   = "unknown page",
+       [MF_MSG_KERNEL]                 = "reserved kernel page",
+       [MF_MSG_KERNEL_HIGH_ORDER]      = "high-order kernel page",
+       [MF_MSG_SLAB]                   = "kernel slab page",
+       [MF_MSG_DIFFERENT_COMPOUND]     = "different compound page after locking",
+       [MF_MSG_POISONED_HUGE]          = "huge page already hardware poisoned",
+       [MF_MSG_HUGE]                   = "huge page",
+       [MF_MSG_FREE_HUGE]              = "free huge page",
+       [MF_MSG_UNMAP_FAILED]           = "unmapping failed page",
+       [MF_MSG_DIRTY_SWAPCACHE]        = "dirty swapcache page",
+       [MF_MSG_CLEAN_SWAPCACHE]        = "clean swapcache page",
+       [MF_MSG_DIRTY_MLOCKED_LRU]      = "dirty mlocked LRU page",
+       [MF_MSG_CLEAN_MLOCKED_LRU]      = "clean mlocked LRU page",
+       [MF_MSG_DIRTY_UNEVICTABLE_LRU]  = "dirty unevictable LRU page",
+       [MF_MSG_CLEAN_UNEVICTABLE_LRU]  = "clean unevictable LRU page",
+       [MF_MSG_DIRTY_LRU]              = "dirty LRU page",
+       [MF_MSG_CLEAN_LRU]              = "clean LRU page",
+       [MF_MSG_TRUNCATED_LRU]          = "already truncated LRU page",
+       [MF_MSG_BUDDY]                  = "free buddy page",
+       [MF_MSG_BUDDY_2ND]              = "free buddy page (2nd try)",
+       [MF_MSG_UNKNOWN]                = "unknown page",
 };
 
 /*
@@ -599,7 +566,7 @@ static int delete_from_lru_cache(struct page *p)
  */
 static int me_kernel(struct page *p, unsigned long pfn)
 {
-       return IGNORED;
+       return MF_IGNORED;
 }
 
 /*
@@ -608,7 +575,7 @@ static int me_kernel(struct page *p, unsigned long pfn)
 static int me_unknown(struct page *p, unsigned long pfn)
 {
        printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
-       return FAILED;
+       return MF_FAILED;
 }
 
 /*
@@ -617,7 +584,7 @@ static int me_unknown(struct page *p, unsigned long pfn)
 static int me_pagecache_clean(struct page *p, unsigned long pfn)
 {
        int err;
-       int ret = FAILED;
+       int ret = MF_FAILED;
        struct address_space *mapping;
 
        delete_from_lru_cache(p);
@@ -627,7 +594,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
         * should be the one m_f() holds.
         */
        if (PageAnon(p))
-               return RECOVERED;
+               return MF_RECOVERED;
 
        /*
         * Now truncate the page in the page cache. This is really
@@ -641,7 +608,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
                /*
                 * Page has been torn down in the meantime
                 */
-               return FAILED;
+               return MF_FAILED;
        }
 
        /*
@@ -658,7 +625,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
                                !try_to_release_page(p, GFP_NOIO)) {
                        pr_info("MCE %#lx: failed to release buffers\n", pfn);
                } else {
-                       ret = RECOVERED;
+                       ret = MF_RECOVERED;
                }
        } else {
                /*
@@ -666,7 +633,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
                 * This fails on dirty or anything with private pages
                 */
                if (invalidate_inode_page(p))
-                       ret = RECOVERED;
+                       ret = MF_RECOVERED;
                else
                        printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
                                pfn);
@@ -752,9 +719,9 @@ static int me_swapcache_dirty(struct page *p, unsigned long pfn)
        ClearPageUptodate(p);
 
        if (!delete_from_lru_cache(p))
-               return DELAYED;
+               return MF_DELAYED;
        else
-               return FAILED;
+               return MF_FAILED;
 }
 
 static int me_swapcache_clean(struct page *p, unsigned long pfn)
@@ -762,9 +729,9 @@ static int me_swapcache_clean(struct page *p, unsigned long pfn)
        delete_from_swap_cache(p);
 
        if (!delete_from_lru_cache(p))
-               return RECOVERED;
+               return MF_RECOVERED;
        else
-               return FAILED;
+               return MF_FAILED;
 }
 
 /*
@@ -777,6 +744,10 @@ static int me_huge_page(struct page *p, unsigned long pfn)
 {
        int res = 0;
        struct page *hpage = compound_head(p);
+
+       if (!PageHuge(hpage))
+               return MF_DELAYED;
+
        /*
         * We can safely recover from error on free or reserved (i.e.
         * not in-use) hugepage by dequeuing it from freelist.
@@ -790,9 +761,9 @@ static int me_huge_page(struct page *p, unsigned long pfn)
        if (!(page_mapping(hpage) || PageAnon(hpage))) {
                res = dequeue_hwpoisoned_huge_page(hpage);
                if (!res)
-                       return RECOVERED;
+                       return MF_RECOVERED;
        }
-       return DELAYED;
+       return MF_DELAYED;
 }
 
 /*
@@ -824,10 +795,10 @@ static int me_huge_page(struct page *p, unsigned long pfn)
 static struct page_state {
        unsigned long mask;
        unsigned long res;
-       enum action_page_type type;
+       enum mf_action_page_type type;
        int (*action)(struct page *p, unsigned long pfn);
 } error_states[] = {
-       { reserved,     reserved,       MSG_KERNEL,     me_kernel },
+       { reserved,     reserved,       MF_MSG_KERNEL,  me_kernel },
        /*
         * free pages are specially detected outside this table:
         * PG_buddy pages only make a small fraction of all free pages.
@@ -838,31 +809,31 @@ static struct page_state {
         * currently unused objects without touching them. But just
         * treat it as standard kernel for now.
         */
-       { slab,         slab,           MSG_SLAB,       me_kernel },
+       { slab,         slab,           MF_MSG_SLAB,    me_kernel },
 
 #ifdef CONFIG_PAGEFLAGS_EXTENDED
-       { head,         head,           MSG_HUGE,               me_huge_page },
-       { tail,         tail,           MSG_HUGE,               me_huge_page },
+       { head,         head,           MF_MSG_HUGE,            me_huge_page },
+       { tail,         tail,           MF_MSG_HUGE,            me_huge_page },
 #else
-       { compound,     compound,       MSG_HUGE,               me_huge_page },
+       { compound,     compound,       MF_MSG_HUGE,            me_huge_page },
 #endif
 
-       { sc|dirty,     sc|dirty,       MSG_DIRTY_SWAPCACHE,    me_swapcache_dirty },
-       { sc|dirty,     sc,             MSG_CLEAN_SWAPCACHE,    me_swapcache_clean },
+       { sc|dirty,     sc|dirty,       MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
+       { sc|dirty,     sc,             MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
 
-       { mlock|dirty,  mlock|dirty,    MSG_DIRTY_MLOCKED_LRU,  me_pagecache_dirty },
-       { mlock|dirty,  mlock,          MSG_CLEAN_MLOCKED_LRU,  me_pagecache_clean },
+       { mlock|dirty,  mlock|dirty,    MF_MSG_DIRTY_MLOCKED_LRU,       me_pagecache_dirty },
+       { mlock|dirty,  mlock,          MF_MSG_CLEAN_MLOCKED_LRU,       me_pagecache_clean },
 
-       { unevict|dirty, unevict|dirty, MSG_DIRTY_UNEVICTABLE_LRU,      me_pagecache_dirty },
-       { unevict|dirty, unevict,       MSG_CLEAN_UNEVICTABLE_LRU,      me_pagecache_clean },
+       { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU,   me_pagecache_dirty },
+       { unevict|dirty, unevict,       MF_MSG_CLEAN_UNEVICTABLE_LRU,   me_pagecache_clean },
 
-       { lru|dirty,    lru|dirty,      MSG_DIRTY_LRU,  me_pagecache_dirty },
-       { lru|dirty,    lru,            MSG_CLEAN_LRU,  me_pagecache_clean },
+       { lru|dirty,    lru|dirty,      MF_MSG_DIRTY_LRU,       me_pagecache_dirty },
+       { lru|dirty,    lru,            MF_MSG_CLEAN_LRU,       me_pagecache_clean },
 
        /*
         * Catchall entry: must be at end.
         */
-       { 0,            0,              MSG_UNKNOWN,    me_unknown },
+       { 0,            0,              MF_MSG_UNKNOWN, me_unknown },
 };
 
 #undef dirty
@@ -882,8 +853,11 @@ static struct page_state {
  * "Dirty/Clean" indication is not 100% accurate due to the possibility of
  * setting PG_dirty outside page lock. See also comment above set_page_dirty().
  */
-static void action_result(unsigned long pfn, enum action_page_type type, int result)
+static void action_result(unsigned long pfn, enum mf_action_page_type type,
+                         enum mf_result result)
 {
+       trace_memory_failure_event(pfn, type, result);
+
        pr_err("MCE %#lx: recovery action for %s: %s\n",
                pfn, action_page_types[type], action_name[result]);
 }
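
The trace_memory_failure_event() call above is provided by the newly included "ras/ras_event.h"; the tracepoint definition itself is not part of this diff. Judging only from the (pfn, type, result) signature visible at the call site, such a declaration would look roughly like the sketch below (the header-guard name, field names and printk format are illustrative, not the actual header contents):

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM ras

    #if !defined(_TRACE_RAS_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_RAS_EVENT_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(memory_failure_event,
            TP_PROTO(unsigned long pfn, int type, int result),
            TP_ARGS(pfn, type, result),

            /* What gets recorded in the trace buffer for each event. */
            TP_STRUCT__entry(
                    __field(unsigned long, pfn)
                    __field(int, type)
                    __field(int, result)
            ),

            TP_fast_assign(
                    __entry->pfn    = pfn;
                    __entry->type   = type;
                    __entry->result = result;
            ),

            /* Human-readable form shown in the trace output. */
            TP_printk("pfn %#lx: type %d result %d",
                      __entry->pfn, __entry->type, __entry->result)
    );

    #endif /* _TRACE_RAS_EVENT_H */
    #include <trace/define_trace.h>
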
@@ -897,13 +871,13 @@ static int page_action(struct page_state *ps, struct page *p,
        result = ps->action(p, pfn);
 
        count = page_count(p) - 1;
-       if (ps->action == me_swapcache_dirty && result == DELAYED)
+       if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
                count--;
        if (count != 0) {
                printk(KERN_ERR
                       "MCE %#lx: %s still referenced by %d users\n",
                       pfn, action_page_types[ps->type], count);
-               result = FAILED;
+               result = MF_FAILED;
        }
        action_result(pfn, ps->type, result);
 
@@ -912,8 +886,41 @@ static int page_action(struct page_state *ps, struct page *p,
         * Could adjust zone counters here to correct for the missing page.
         */
 
-       return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
+       return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
+}
+
+/**
+ * get_hwpoison_page() - Get refcount for memory error handling
+ * @page:      raw error page (hit by memory error)
+ *
+ * Return: 0 if the refcount could not be grabbed, otherwise a non-zero value.
+ */
+int get_hwpoison_page(struct page *page)
+{
+       struct page *head = compound_head(page);
+
+       if (PageHuge(head))
+               return get_page_unless_zero(head);
+
+       /*
+        * Thp tail page has special refcounting rule (refcount of tail pages
+        * is stored in ->_mapcount), so we can't call get_page_unless_zero()
+        * directly for tail pages.
+        */
+       if (PageTransHuge(head)) {
+               if (get_page_unless_zero(head)) {
+                       if (PageTail(page))
+                               get_page(page);
+                       return 1;
+               } else {
+                       return 0;
+               }
+       }
+
+       return get_page_unless_zero(page);
 }
+EXPORT_SYMBOL_GPL(get_hwpoison_page);
 
 /*
  * Do all that is necessary to remove user space mappings. Unmap
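
The get_hwpoison_page() helper added above centralizes the rules for pinning a possibly poisoned page: hugetlb pages are pinned via their head page, THP tail pages need an extra get_page() because their refcount lives in ->_mapcount, and everything else goes through plain get_page_unless_zero(). A minimal usage sketch (not part of this diff; examine_page_state() and handle_free_page() are hypothetical placeholders) would be:

    struct page *p = pfn_to_page(pfn);

    if (get_hwpoison_page(p)) {
            /* Non-zero return: we hold a reference, the page cannot be freed under us. */
            examine_page_state(p);          /* hypothetical placeholder */
            put_page(p);
    } else {
            /* Zero return: the refcount was already zero, i.e. the page is free or being freed. */
            handle_free_page(p);            /* hypothetical placeholder */
    }

Note that for a THP tail page the helper pins both the tail and its head, which is why the new error paths in memory_failure() further below drop a reference on both p and hpage.
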
@@ -1097,10 +1104,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * In fact it's dangerous to directly bump up page count from 0,
         * as that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
         */
-       if (!(flags & MF_COUNT_INCREASED) &&
-               !get_page_unless_zero(hpage)) {
+       if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
                if (is_free_buddy_page(p)) {
-                       action_result(pfn, MSG_BUDDY, DELAYED);
+                       action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
                        return 0;
                } else if (PageHuge(hpage)) {
                        /*
@@ -1117,12 +1123,12 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                        }
                        set_page_hwpoison_huge_page(hpage);
                        res = dequeue_hwpoisoned_huge_page(hpage);
-                       action_result(pfn, MSG_FREE_HUGE,
-                                     res ? IGNORED : DELAYED);
+                       action_result(pfn, MF_MSG_FREE_HUGE,
+                                     res ? MF_IGNORED : MF_DELAYED);
                        unlock_page(hpage);
                        return res;
                } else {
-                       action_result(pfn, MSG_KERNEL_HIGH_ORDER, IGNORED);
+                       action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
                        return -EBUSY;
                }
        }
@@ -1130,12 +1136,20 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        if (!PageHuge(p) && PageTransHuge(hpage)) {
                if (!PageAnon(hpage)) {
                        pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+                       if (TestClearPageHWPoison(p))
+                               atomic_long_sub(nr_pages, &num_poisoned_pages);
                        put_page(p);
+                       if (p != hpage)
+                               put_page(hpage);
                        return -EBUSY;
                }
                if (unlikely(split_huge_page(hpage))) {
                        pr_err("MCE: %#lx: thp split failed\n", pfn);
+                       if (TestClearPageHWPoison(p))
+                               atomic_long_sub(nr_pages, &num_poisoned_pages);
                        put_page(p);
+                       if (p != hpage)
+                               put_page(hpage);
                        return -EBUSY;
                }
                VM_BUG_ON_PAGE(!page_count(p), p);
@@ -1159,10 +1173,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                         */
                        if (is_free_buddy_page(p)) {
                                if (flags & MF_COUNT_INCREASED)
-                                       action_result(pfn, MSG_BUDDY, DELAYED);
+                                       action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
                                else
-                                       action_result(pfn, MSG_BUDDY_2ND,
-                                                     DELAYED);
+                                       action_result(pfn, MF_MSG_BUDDY_2ND,
+                                                     MF_DELAYED);
                                return 0;
                        }
                }
@@ -1175,7 +1189,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * If this happens just bail out.
         */
        if (PageCompound(p) && compound_head(p) != orig_head) {
-               action_result(pfn, MSG_DIFFERENT_COMPOUND, IGNORED);
+               action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
                res = -EBUSY;
                goto out;
        }
@@ -1215,7 +1229,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * on the head page to show that the hugepage is hwpoisoned
         */
        if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
-               action_result(pfn, MSG_POISONED_HUGE, IGNORED);
+               action_result(pfn, MF_MSG_POISONED_HUGE, MF_IGNORED);
                unlock_page(hpage);
                put_page(hpage);
                return 0;
@@ -1244,7 +1258,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
            != SWAP_SUCCESS) {
-               action_result(pfn, MSG_UNMAP_FAILED, IGNORED);
+               action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
                res = -EBUSY;
                goto out;
        }
@@ -1253,7 +1267,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * Torn down by someone else?
         */
        if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
-               action_result(pfn, MSG_TRUNCATED_LRU, IGNORED);
+               action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
                res = -EBUSY;
                goto out;
        }
@@ -1413,12 +1427,12 @@ int unpoison_memory(unsigned long pfn)
         */
        if (!PageHuge(page) && PageTransHuge(page)) {
                pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
-                       return 0;
+               return 0;
        }
 
        nr_pages = 1 << compound_order(page);
 
-       if (!get_page_unless_zero(page)) {
+       if (!get_hwpoison_page(p)) {
                /*
                 * Since a HWPoisoned hugepage should have a non-zero refcount,
                 * a race between memory failure and unpoison seems to have happened.
@@ -1486,7 +1500,7 @@ static int __get_any_page(struct page *p, unsigned long pfn, int flags)
         * When the target page is a free hugepage, just remove it
         * from free hugepage list.
         */
-       if (!get_page_unless_zero(compound_head(p))) {
+       if (!get_hwpoison_page(p)) {
                if (PageHuge(p)) {
                        pr_info("%s: %#lx free huge page\n", __func__, pfn);
                        ret = 0;
@@ -1657,20 +1671,7 @@ static int __soft_offline_page(struct page *page, int flags)
                        if (ret > 0)
                                ret = -EIO;
                } else {
-                       /*
-                        * After page migration succeeds, the source page can
-                        * be trapped in pagevec and actual freeing is delayed.
-                        * Freeing code works differently based on PG_hwpoison,
-                        * so there's a race. We need to make sure that the
-                        * source page should be freed back to buddy before
-                        * setting PG_hwpoison.
-                        */
-                       if (!is_free_buddy_page(page))
-                               drain_all_pages(page_zone(page));
                        SetPageHWPoison(page);
-                       if (!is_free_buddy_page(page))
-                               pr_info("soft offline: %#lx: page leaked\n",
-                                       pfn);
                        atomic_long_inc(&num_poisoned_pages);
                }
        } else {
@@ -1722,14 +1723,6 @@ int soft_offline_page(struct page *page, int flags)
 
        get_online_mems();
 
-       /*
-        * Isolate the page, so that it doesn't get reallocated if it
-        * was free. This flag should be kept set until the source page
-        * is freed and PG_hwpoison on it is set.
-        */
-       if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
-               set_migratetype_isolate(page, true);
-
        ret = get_any_page(page, pfn, flags);
        put_online_mems();
        if (ret > 0) { /* for in-use pages */
@@ -1748,6 +1741,5 @@ int soft_offline_page(struct page *page, int flags)
                                atomic_long_inc(&num_poisoned_pages);
                }
        }
-       unset_migratetype_isolate(page, MIGRATE_MOVABLE);
        return ret;
 }