[S390] memory hotplug: prevent memory zone interleave
author Gerald Schaefer <gerald.schaefer@de.ibm.com>
Fri, 24 Feb 2012 17:01:29 +0000 (18:01 +0100)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Fri, 24 Feb 2012 17:01:36 +0000 (18:01 +0100)
This fixes a kernel oops with CONFIG_DEBUG_VM triggered by a
VM_BUG_ON(bad_range()): kernel BUG at mm/page_alloc.c:748.

With memory hotplug on System z, it is possible that the memory
online/offline state is preserved over a system restart, e.g. there
may be offline memory blocks in ZONE_DMA or ZONE_NORMAL. So far,
such an offline memory range was always added to ZONE_MOVABLE during
system start, so that ZONE_MOVABLE could end up interleaved with
ZONE_DMA or ZONE_NORMAL. This patch fixes that by checking for zone
overlap before adding memory: a range that lies within an existing
zone is added to that zone, and only the remainder goes to
ZONE_MOVABLE.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 5d633019d8f3f86fb4da43a1c04ad060a6f2e83f..50236610de83cc0cb8d76ca34bbd2cd0f5c0e0aa 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -223,16 +223,38 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
-       struct pglist_data *pgdat;
+       unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
+       unsigned long start_pfn = PFN_DOWN(start);
+       unsigned long size_pages = PFN_DOWN(size);
        struct zone *zone;
        int rc;
 
-       pgdat = NODE_DATA(nid);
-       zone = pgdat->node_zones + ZONE_MOVABLE;
        rc = vmem_add_mapping(start, size);
        if (rc)
                return rc;
-       rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
+       for_each_zone(zone) {
+               if (zone_idx(zone) != ZONE_MOVABLE) {
+                       /* Add range within existing zone limits */
+                       zone_start_pfn = zone->zone_start_pfn;
+                       zone_end_pfn = zone->zone_start_pfn +
+                                      zone->spanned_pages;
+               } else {
+                       /* Add remaining range to ZONE_MOVABLE */
+                       zone_start_pfn = start_pfn;
+                       zone_end_pfn = start_pfn + size_pages;
+               }
+               if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+                       continue;
+               nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+                          zone_end_pfn - start_pfn : size_pages;
+               rc = __add_pages(nid, zone, start_pfn, nr_pages);
+               if (rc)
+                       break;
+               start_pfn += nr_pages;
+               size_pages -= nr_pages;
+               if (!size_pages)
+                       break;
+       }
        if (rc)
                vmem_remove_mapping(start, size);
        return rc;
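
For illustration, the splitting behaviour can be reproduced with a
minimal user-space sketch. The fake_zone structure, the zone table and
all pfn values below are invented for this example and are not part of
the patch; they merely stand in for for_each_zone() and the real
struct zone fields. A range that starts inside "Normal" but extends
past its end is split: the first chunk stays within the existing zone
limits, the remainder goes to "Movable".

	/* User-space sketch of the zone-splitting loop, not kernel code. */
	#include <stdio.h>

	struct fake_zone {
		const char *name;
		unsigned long start_pfn;	/* like zone->zone_start_pfn */
		unsigned long spanned;		/* like zone->spanned_pages */
		int movable;			/* zone_idx(zone) == ZONE_MOVABLE */
	};

	static struct fake_zone zones[] = {
		{ "DMA",     0x00000, 0x20000, 0 },
		{ "Normal",  0x20000, 0x60000, 0 },
		{ "Movable", 0x00000, 0x00000, 1 },
	};

	int main(void)
	{
		unsigned long start_pfn = 0x70000;	/* range to add... */
		unsigned long size_pages = 0x20000;	/* ...ends past "Normal" */
		unsigned long zone_start, zone_end, nr_pages, i;

		for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
			if (!zones[i].movable) {
				/* Clamp to existing zone limits */
				zone_start = zones[i].start_pfn;
				zone_end = zones[i].start_pfn + zones[i].spanned;
			} else {
				/* Whatever is left goes to "Movable" */
				zone_start = start_pfn;
				zone_end = start_pfn + size_pages;
			}
			if (start_pfn < zone_start || start_pfn >= zone_end)
				continue;
			nr_pages = (start_pfn + size_pages > zone_end) ?
				   zone_end - start_pfn : size_pages;
			printf("add %#lx pages at pfn %#lx to zone %s\n",
			       nr_pages, start_pfn, zones[i].name);
			start_pfn += nr_pages;
			size_pages -= nr_pages;
			if (!size_pages)
				break;
		}
		return 0;
	}

As in the patch, the loop advances start_pfn and shrinks size_pages
chunk by chunk until the whole range has been placed; with the values
above it prints one chunk added to "Normal" and one to "Movable".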