x86, 64bit, mm: hibernate use generic mapping_init
author     Yinghai Lu <yinghai@kernel.org>
           Thu, 24 Jan 2013 20:20:14 +0000 (12:20 -0800)
committer  H. Peter Anvin <hpa@linux.intel.com>
           Wed, 30 Jan 2013 03:32:59 +0000 (19:32 -0800)
We should set up mappings only for usable memory ranges under max_pfn;
otherwise we run into the same problem that was fixed for the regular
direct mapping by

    x86, mm: Only direct map addresses that are marked as E820_RAM

Make the hibernation code map only the ranges recorded in the pfn_mapped
array, via the generic kernel_ident_mapping_init() helper.
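The gist of the conversion, pulled out of the diff below for readability
(all names are exactly as in the patch):

        /* old: identity-map everything below max_pfn, holes included */
        start = (unsigned long)pfn_to_kaddr(0);
        end   = (unsigned long)pfn_to_kaddr(max_pfn);
        /* ... res_phys_pud_init() over [start, end) ... */

        /* new: walk only the ranges the boot code actually mapped */
        for (i = 0; i < nr_pfn_mapped; i++) {
                mstart = pfn_mapped[i].start << PAGE_SHIFT;
                mend   = pfn_mapped[i].end   << PAGE_SHIFT;

                result = kernel_ident_mapping_init(&info, temp_level4_pgt,
                                                   mstart, mend);
                if (result)
                        return result;
        }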

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-34-git-send-email-yinghai@kernel.org
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: linux-pm@vger.kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/power/hibernate_64.c

index 460f314d13e54fff4be0c3cd3d9a126a2973d0c7..a0fde91c16cf779203bcb0195c6f063be9f303b3 100644
@@ -11,6 +11,8 @@
 #include <linux/gfp.h>
 #include <linux/smp.h>
 #include <linux/suspend.h>
+
+#include <asm/init.h>
 #include <asm/proto.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -39,41 +41,21 @@ pgd_t *temp_level4_pgt;
 
 void *relocated_restore_code;
 
-static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static void *alloc_pgt_page(void *context)
 {
-       long i, j;
-
-       i = pud_index(address);
-       pud = pud + i;
-       for (; i < PTRS_PER_PUD; pud++, i++) {
-               unsigned long paddr;
-               pmd_t *pmd;
-
-               paddr = address + i*PUD_SIZE;
-               if (paddr >= end)
-                       break;
-
-               pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
-               if (!pmd)
-                       return -ENOMEM;
-               set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
-               for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
-                       unsigned long pe;
-
-                       if (paddr >= end)
-                               break;
-                       pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
-                       pe &= __supported_pte_mask;
-                       set_pmd(pmd, __pmd(pe));
-               }
-       }
-       return 0;
+       return (void *)get_safe_page(GFP_ATOMIC);
 }
 
 static int set_up_temporary_mappings(void)
 {
-       unsigned long start, end, next;
-       int error;
+       struct x86_mapping_info info = {
+               .alloc_pgt_page = alloc_pgt_page,
+               .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
+               .kernel_mapping = true,
+       };
+       unsigned long mstart, mend;
+       int result;
+       int i;
 
        temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!temp_level4_pgt)
@@ -84,21 +66,17 @@ static int set_up_temporary_mappings(void)
                init_level4_pgt[pgd_index(__START_KERNEL_map)]);
 
        /* Set up the direct mapping from scratch */
-       start = (unsigned long)pfn_to_kaddr(0);
-       end = (unsigned long)pfn_to_kaddr(max_pfn);
-
-       for (; start < end; start = next) {
-               pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
-               if (!pud)
-                       return -ENOMEM;
-               next = start + PGDIR_SIZE;
-               if (next > end)
-                       next = end;
-               if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
-                       return error;
-               set_pgd(temp_level4_pgt + pgd_index(start),
-                       mk_kernel_pgd(__pa(pud)));
+       for (i = 0; i < nr_pfn_mapped; i++) {
+               mstart = pfn_mapped[i].start << PAGE_SHIFT;
+               mend   = pfn_mapped[i].end << PAGE_SHIFT;
+
+               result = kernel_ident_mapping_init(&info, temp_level4_pgt,
+                                                  mstart, mend);
+
+               if (result)
+                       return result;
        }
+
        return 0;
 }
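
For reference, kernel_ident_mapping_init() (the generic helper this patch
switches to) consumes the x86_mapping_info fields roughly as sketched
below; this is a simplified reconstruction for illustration, not the
verbatim helper:

        int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
                                      unsigned long addr, unsigned long end)
        {
                /* kernel_mapping == true biases the PGD index so the ranges
                 * land in the __PAGE_OFFSET (direct-mapping) slots */
                int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
                unsigned long next;

                for (; addr < end; addr = next) {
                        pgd_t *pgd = pgd_page + pgd_index(addr) + off;
                        pud_t *pud;

                        next = (addr & PGDIR_MASK) + PGDIR_SIZE;
                        if (next > end)
                                next = end;

                        /* page-table pages come from the caller's allocator;
                         * for hibernation that is the get_safe_page() wrapper
                         * above */
                        pud = (pud_t *)info->alloc_pgt_page(info->context);
                        if (!pud)
                                return -ENOMEM;

                        /* lower levels (not shown) fill PMDs with 2M entries
                         * built from info->pmd_flag, i.e.
                         * __PAGE_KERNEL_LARGE_EXEC here */

                        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
                }

                return 0;
        }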