mm/hwpoison: fix loss of PG_dirty for errors on mlocked pages
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2c13aa7a0164101a360e4f776090f6b183aab5ab..ec9ad5270d325e146113a558774099029b011382 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1204,6 +1204,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        for (ps = error_states;; ps++)
                if ((p->flags & ps->mask) == ps->res)
                        break;
+
+       page_flags |= (p->flags & (1UL << PG_dirty));
+
        if (!ps->mask)
                for (ps = error_states;; ps++)
                        if ((page_flags & ps->mask) == ps->res)
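
This hunk restores the dirty bit for the second lookup pass: page_flags is a
snapshot taken earlier in memory_failure(), before the page is unmapped, and
the pte dirty bit is typically transferred to PG_dirty during unmapping, after
that snapshot. ORing the live PG_dirty from p->flags into the snapshot lets
page_action() classify a dirty mlocked page as dirty instead of clean. A
minimal user-space sketch of the merge; PG_DIRTY_BIT is a made-up bit
position, not the kernel's PG_dirty:

    #include <stdio.h>

    #define PG_DIRTY_BIT 4UL   /* hypothetical bit position */

    int main(void)
    {
        unsigned long saved_flags = 0x0;                 /* snapshot, taken early */
        unsigned long live_flags  = 1UL << PG_DIRTY_BIT; /* page dirtied later    */

        /* Merge only the dirty bit back into the snapshot, as the hunk does. */
        saved_flags |= live_flags & (1UL << PG_DIRTY_BIT);

        printf("saved_flags = %#lx\n", saved_flags);     /* prints 0x10 */
        return 0;
    }
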
@@ -1265,7 +1268,7 @@ void memory_failure_queue(unsigned long pfn, int trapno, int flags)
        if (kfifo_put(&mf_cpu->fifo, &entry))
                schedule_work_on(smp_processor_id(), &mf_cpu->work);
        else
-               pr_err("Memory failure: buffer overflow when queuing memory failure at 0x%#lx\n",
+               pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
                       pfn);
        spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
        put_cpu_var(memory_failure_cpu);
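
The format-string fix removes a double prefix: the '#' flag in %#lx already
makes printf-style formatting emit a leading "0x", so the old string printed
addresses as "0x0x...". A standalone illustration:

    /* Demonstrates why "0x%#lx" was wrong: the '#' flag already adds "0x". */
    #include <stdio.h>

    int main(void)
    {
        unsigned long pfn = 0x12345UL;

        printf("0x%#lx\n", pfn);   /* old format: prints "0x0x12345" */
        printf("%#lx\n", pfn);     /* fixed format: prints "0x12345" */
        return 0;
    }
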
@@ -1286,7 +1289,10 @@ static void memory_failure_work_func(struct work_struct *work)
                spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
                if (!gotten)
                        break;
-               memory_failure(entry.pfn, entry.trapno, entry.flags);
+               if (entry.flags & MF_SOFT_OFFLINE)
+                       soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
+               else
+                       memory_failure(entry.pfn, entry.trapno, entry.flags);
        }
 }
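
With this hunk, queued work items carrying MF_SOFT_OFFLINE are routed to
soft_offline_page(), which tries to migrate the page away without killing any
process, while all other entries still take the memory_failure() path. The
sketch below mirrors that flag dispatch in plain user-space C; the struct
layout, flag value, and handler names are illustrative stand-ins, not the
kernel's definitions (see enum mf_flags for the real flag):

    #include <stdio.h>

    #define MF_SOFT_OFFLINE (1 << 3)   /* illustrative value */

    struct mf_entry {
        unsigned long pfn;
        int flags;
    };

    static void handle_soft_offline(unsigned long pfn)
    {
        printf("soft offline pfn %#lx (migrate, no signals)\n", pfn);
    }

    static void handle_hard_failure(unsigned long pfn)
    {
        printf("hard failure pfn %#lx (unmap, maybe SIGBUS)\n", pfn);
    }

    static void drain(const struct mf_entry *q, int n)
    {
        for (int i = 0; i < n; i++) {
            if (q[i].flags & MF_SOFT_OFFLINE)   /* same test as the hunk */
                handle_soft_offline(q[i].pfn);
            else
                handle_hard_failure(q[i].pfn);
        }
    }

    int main(void)
    {
        struct mf_entry q[] = {
            { 0x1000, MF_SOFT_OFFLINE },
            { 0x2000, 0 },
        };

        drain(q, 2);
        return 0;
    }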
 
@@ -1467,6 +1473,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
        int ret;
        unsigned long pfn = page_to_pfn(page);
        struct page *hpage = compound_head(page);
+       LIST_HEAD(pagelist);
 
        /*
         * This double-check of PageHWPoison is to avoid the race with
@@ -1482,12 +1489,20 @@ static int soft_offline_huge_page(struct page *page, int flags)
        unlock_page(hpage);
 
        /* Keep page count to indicate a given hugepage is isolated. */
-       ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL,
-                               MIGRATE_SYNC);
-       put_page(hpage);
+       list_move(&hpage->lru, &pagelist);
+       ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+                               MIGRATE_SYNC, MR_MEMORY_FAILURE);
        if (ret) {
                pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
                        pfn, ret, page->flags);
+               /*
+                * We know that soft_offline_huge_page() tries to migrate
+                * only one hugepage pointed to by hpage, so we need not
+                * run through the pagelist here.
+                */
+               putback_active_hugepage(hpage);
+               if (ret > 0)
+                       ret = -EIO;
        } else {
                set_page_hwpoison_huge_page(hpage);
                dequeue_hwpoisoned_huge_page(hpage);
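
These hunks move soft_offline_huge_page() from the dedicated
migrate_huge_page() helper to the generic migrate_pages() interface: the
hugepage is placed on a local pagelist, and if migration fails it is returned
to the active hugepage list with putback_active_hugepage(). Because
migrate_pages() returns the number of pages it could not migrate (a
non-negative count) rather than an errno in that case, the positive count is
normalized to -EIO for callers. A sketch of that normalization, with
fake_migrate_pages() standing in for the real function:

    #include <stdio.h>
    #include <errno.h>

    /* Stand-in mimicking migrate_pages(): returns "pages left unmigrated". */
    static int fake_migrate_pages(int fail_count)
    {
        return fail_count;
    }

    int main(void)
    {
        int ret = fake_migrate_pages(1);

        if (ret > 0)
            ret = -EIO;      /* collapse a partial failure into one errno */

        printf("ret = %d\n", ret);   /* prints "ret = -5" on Linux */
        return 0;
    }
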
@@ -1541,7 +1556,7 @@ int soft_offline_page(struct page *page, int flags)
 
        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
-               return ret;
+               goto unset;
        if (ret) { /* for in-use pages */
                if (PageHuge(page))
                        ret = soft_offline_huge_page(page, flags);
@@ -1558,6 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
                        atomic_long_inc(&num_poisoned_pages);
                }
        }
+unset:
        unset_migratetype_isolate(page, MIGRATE_MOVABLE);
        return ret;
 }
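
The last two hunks close a leak on the error path of soft_offline_page():
get_any_page() isolates the page's pageblock (sets it to MIGRATE_ISOLATE) so
the page cannot be handed out again while it is examined, and the matching
unset_migratetype_isolate() sits at the end of the function. Returning early
on ret < 0 skipped that unset and left the pageblock isolated; jumping to the
new "unset" label runs the cleanup on every path. A user-space sketch of the
single-exit pattern, with illustrative helpers in place of the kernel calls:

    #include <stdio.h>

    static int isolated;

    static void isolate(void)   { isolated = 1; }  /* set_migratetype_isolate   */
    static void unisolate(void) { isolated = 0; }  /* unset_migratetype_isolate */
    static int  probe(int fail) { return fail ? -1 : 0; }

    static int soft_offline_sketch(int fail)
    {
        int ret;

        isolate();              /* acquire: pageblock goes MIGRATE_ISOLATE */
        ret = probe(fail);
        if (ret < 0)
            goto unset;         /* old code returned here, leaking the isolate */
        /* ... in-use / free page handling would go here ... */
    unset:
        unisolate();            /* release on every path */
        return ret;
    }

    int main(void)
    {
        soft_offline_sketch(1);
        printf("isolated after error path: %d\n", isolated);  /* prints 0 */
        return 0;
    }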