mempolicy: rename mpol_free to mpol_put
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 74c1b6b0b37b82dce75e06533c989ab73001afeb..53afa8c76ada95081fe0f4b27c375fe47ba32eea 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -95,13 +95,16 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
        int nid;
        struct page *page = NULL;
        struct mempolicy *mpol;
+       nodemask_t *nodemask;
        struct zonelist *zonelist = huge_zonelist(vma, address,
-                                       htlb_alloc_mask, &mpol);
-       struct zone **z;
-
-       for (z = zonelist->zones; *z; z++) {
-               nid = zone_to_nid(*z);
-               if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
+                                       htlb_alloc_mask, &mpol, &nodemask);
+       struct zone *zone;
+       struct zoneref *z;
+
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                                               MAX_NR_ZONES - 1, nodemask) {
+               nid = zone_to_nid(zone);
+               if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
                    !list_empty(&hugepage_freelists[nid])) {
                        page = list_entry(hugepage_freelists[nid].next,
                                          struct page, lru);
@@ -113,7 +116,7 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
                        break;
                }
        }
-       mpol_free(mpol);        /* unref if mpol !NULL */
+       mpol_put(mpol); /* unref if mpol !NULL */
        return page;
 }
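
The hunk above makes two independent changes. The open-coded walk over zonelist->zones is replaced by for_each_zone_zonelist_nodemask(), and huge_zonelist() now also hands back the mempolicy's nodemask, so only zones on nodes the policy allows are considered when dequeueing a huge page. Separately, the mempolicy reference that huge_zonelist() may return is dropped through the renamed helper mpol_put() rather than mpol_free(). The following is a minimal user-space sketch of the filtering idea only; every name in it (fake_zone, allowed, dequeue_node, and the sample data) is made up for illustration and is not a kernel interface.

/*
 * User-space sketch of the nodemask-filtered search that
 * for_each_zone_zonelist_nodemask() performs above: visit the zonelist
 * in preference order, skip nodes the policy does not allow, and stop
 * at the first allowed node that still has a free huge page.
 * All names and data here are illustrative, not kernel interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_NR_NODES 4

struct fake_zone { int nid; };

/* Zonelist in allocation-preference order, terminated by nid == -1. */
static struct fake_zone zonelist[] = { {2}, {0}, {3}, {1}, {-1} };

/* Free huge pages per node, and the mempolicy's allowed-node mask. */
static int  free_pages[FAKE_NR_NODES] = { 0, 1, 0, 5 };
static bool allowed[FAKE_NR_NODES]    = { true, false, true, true };

static int dequeue_node(void)
{
	for (struct fake_zone *z = zonelist; z->nid >= 0; z++) {
		if (!allowed[z->nid])		/* node outside the policy */
			continue;
		if (free_pages[z->nid] > 0) {	/* pool has a page here */
			free_pages[z->nid]--;
			return z->nid;
		}
	}
	return -1;	/* no allowed node has a free huge page */
}

int main(void)
{
	printf("dequeued a huge page from node %d\n", dequeue_node());
	return 0;
}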
 
@@ -239,6 +242,11 @@ static int alloc_fresh_huge_page(void)
                hugetlb_next_nid = next_nid;
        } while (!page && hugetlb_next_nid != start_nid);
 
+       if (ret)
+               count_vm_event(HTLB_BUDDY_PGALLOC);
+       else
+               count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
        return ret;
 }
 
@@ -299,9 +307,11 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
                 */
                nr_huge_pages_node[nid]++;
                surplus_huge_pages_node[nid]++;
+               __count_vm_event(HTLB_BUDDY_PGALLOC);
        } else {
                nr_huge_pages--;
                surplus_huge_pages--;
+               __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
        }
        spin_unlock(&hugetlb_lock);
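
The two hunks above add success/failure accounting for huge pages allocated from the buddy allocator. In alloc_fresh_huge_page() the counters are bumped outside hugetlb_lock, so the preempt-safe count_vm_event() is used; alloc_buddy_huge_page() updates them while hugetlb_lock is still held, where the lighter __count_vm_event() variant is sufficient. The new events are ordinary vm events and should appear in /proc/vmstat, presumably under the usual lowercase names (htlb_buddy_pgalloc and htlb_buddy_pgalloc_fail). A trivial user-space sketch of the bookkeeping, with made-up names:

/*
 * Illustrative only: an enum-indexed event array standing in for the
 * kernel's per-CPU vm_event_states; count_hp_event() plays the role
 * of count_vm_event(). None of these names are kernel interfaces.
 */
#include <stdio.h>

enum hp_event { HP_BUDDY_PGALLOC, HP_BUDDY_PGALLOC_FAIL, NR_HP_EVENTS };

static unsigned long hp_events[NR_HP_EVENTS];

static void count_hp_event(enum hp_event e)
{
	hp_events[e]++;
}

/* Pretend allocation: succeed when the "buddy allocator" has memory. */
static int alloc_fresh(int buddy_has_memory)
{
	count_hp_event(buddy_has_memory ? HP_BUDDY_PGALLOC
					: HP_BUDDY_PGALLOC_FAIL);
	return buddy_has_memory;
}

int main(void)
{
	alloc_fresh(1);
	alloc_fresh(0);
	alloc_fresh(1);
	printf("pgalloc=%lu pgalloc_fail=%lu\n",
	       hp_events[HP_BUDDY_PGALLOC], hp_events[HP_BUDDY_PGALLOC_FAIL]);
	return 0;
}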
 
@@ -369,11 +379,19 @@ retry:
        resv_huge_pages += delta;
        ret = 0;
 free:
+       /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+               if ((--needed) < 0)
+                       break;
                list_del(&page->lru);
-               if ((--needed) >= 0)
-                       enqueue_huge_page(page);
-               else {
+               enqueue_huge_page(page);
+       }
+
+       /* Free unnecessary surplus pages to the buddy allocator */
+       if (!list_empty(&surplus_list)) {
+               spin_unlock(&hugetlb_lock);
+               list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+                       list_del(&page->lru);
                        /*
                         * The page has a reference count of zero already, so
                         * call free_huge_page directly instead of using
@@ -381,10 +399,9 @@ free:
                         * unlocked which is safe because free_huge_page takes
                         * hugetlb_lock before deciding how to free the page.
                         */
-                       spin_unlock(&hugetlb_lock);
                        free_huge_page(page);
-                       spin_lock(&hugetlb_lock);
                }
+               spin_lock(&hugetlb_lock);
        }
 
        return ret;
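
The restructuring above changes how gather_surplus_pages() disposes of its temporary surplus_list: pages that are still needed are moved into the hugetlb pool under the lock, and only afterwards, if anything is left on the list, hugetlb_lock is dropped a single time while all of the unneeded surplus pages are returned to the buddy allocator, instead of unlocking and relocking once per freed page as before. Below is a user-space sketch of that lock-once pattern; pool_lock, keep_in_pool() and release_to_buddy() are illustrative stand-ins, not the kernel's interfaces.

/*
 * Illustrative user-space sketch of the lock-once pattern above;
 * pool_lock, keep_in_pool() and release_to_buddy() are stand-ins,
 * not kernel code.
 */
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct hpage { struct hpage *next; };

static void keep_in_pool(struct hpage *p)     { (void)p; /* enqueue to pool */ }
static void release_to_buddy(struct hpage *p) { (void)p; /* give page back  */ }

/* Called with pool_lock held; returns with pool_lock held again. */
static void settle_surplus(struct hpage *list, int needed)
{
	struct hpage *p, *rest = list;

	/* Keep the first 'needed' pages in the pool, still under the lock. */
	while (needed-- > 0 && rest) {
		p = rest;
		rest = rest->next;
		keep_in_pool(p);
	}

	/* Free whatever is left, dropping the lock exactly once. */
	if (rest) {
		pthread_mutex_unlock(&pool_lock);
		while (rest) {
			p = rest;
			rest = rest->next;
			release_to_buddy(p);
		}
		pthread_mutex_lock(&pool_lock);
	}
}

int main(void)
{
	struct hpage pages[3] = { { &pages[1] }, { &pages[2] }, { NULL } };

	pthread_mutex_lock(&pool_lock);
	settle_surplus(&pages[0], 1);	/* keep one page, free two */
	pthread_mutex_unlock(&pool_lock);
	return 0;
}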
@@ -401,12 +418,20 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
        struct page *page;
        unsigned long nr_pages;
 
+       /*
+        * We want to release as many surplus pages as possible, spread
+        * evenly across all nodes. Iterate across all nodes until we
+        * can no longer free unreserved surplus pages. This occurs when
+        * the nodes with surplus pages have no free pages.
+        */
+       unsigned long remaining_iterations = num_online_nodes();
+
        /* Uncommit the reservation */
        resv_huge_pages -= unused_resv_pages;
 
        nr_pages = min(unused_resv_pages, surplus_huge_pages);
 
-       while (nr_pages) {
+       while (remaining_iterations-- && nr_pages) {
                nid = next_node(nid, node_online_map);
                if (nid == MAX_NUMNODES)
                        nid = first_node(node_online_map);
@@ -424,6 +449,7 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
                        surplus_huge_pages--;
                        surplus_huge_pages_node[nid]--;
                        nr_pages--;
+                       remaining_iterations = num_online_nodes();
                }
        }
 }
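
The new remaining_iterations counter implements what the added comment describes: surplus pages are returned round-robin across the online nodes, the budget is reset to num_online_nodes() whenever a page is actually freed, and the loop now gives up after one full pass in which no node could release anything (the case where the nodes still holding surplus pages have no free pages), where the old while (nr_pages) loop could spin without making progress. A self-contained user-space sketch of that bounded round-robin, with made-up data and helpers:

/*
 * User-space sketch of the bounded round-robin above; NR_NODES,
 * surplus[] and try_free_one_on() are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 4	/* stand-in for num_online_nodes() */

/* Surplus pages that are actually freeable on each node. */
static int surplus[NR_NODES] = { 3, 0, 1, 0 };

static bool try_free_one_on(int nid)
{
	if (surplus[nid] == 0)
		return false;
	surplus[nid]--;
	return true;
}

static void return_surplus(unsigned long nr_pages)
{
	unsigned long remaining_iterations = NR_NODES;
	int nid = 0;

	while (remaining_iterations-- && nr_pages) {
		nid = (nid + 1) % NR_NODES;	/* next node, wrapping */
		if (try_free_one_on(nid)) {
			nr_pages--;
			/* Progress was made: allow another full pass. */
			remaining_iterations = NR_NODES;
		}
	}
	/* Exits here even though nr_pages may still be nonzero. */
}

int main(void)
{
	return_surplus(10);	/* ask for more than the 4 freeable pages */
	printf("surplus left per node: %d %d %d %d\n",
	       surplus[0], surplus[1], surplus[2], surplus[3]);
	return 0;
}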
@@ -671,9 +697,11 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 {
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
-               "Node %d HugePages_Free:  %5u\n",
+               "Node %d HugePages_Free:  %5u\n"
+               "Node %d HugePages_Surp:  %5u\n",
                nid, nr_huge_pages_node[nid],
-               nid, free_huge_pages_node[nid]);
+               nid, free_huge_pages_node[nid],
+               nid, surplus_huge_pages_node[nid]);
 }
 
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
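
With the extra format line above, the per-node hugepage report (the text appended to each node's meminfo file in sysfs) gains a surplus count alongside the existing totals. Given the format strings, a hypothetical node 0 with 16 huge pages, 4 of them free and 2 of them surplus, would read:

Node 0 HugePages_Total:    16
Node 0 HugePages_Free:      4
Node 0 HugePages_Surp:      2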