mm, hugetlb: remove redundant list_empty check in gather_surplus_pages()
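
list_for_each_entry_safe() does nothing when handed an empty list: it
starts from head->next and fails its very first condition check. The
list_empty() test wrapped around the surplus-page free loop in
gather_surplus_pages() is therefore redundant; drop it.

The combined diff below also carries the adjacent clean-ups: the
open-coded round-robin node walks in alloc_fresh_huge_page(),
free_pool_huge_page(), alloc_bootmem_huge_page() and
adjust_pool_surplus() are replaced by the new
for_each_node_mask_to_alloc()/for_each_node_mask_to_free() iterators,
and alloc_huge_page() is restructured so the hugetlb_cgroup charge
commit and the unlock happen once on a common path.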
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bfca1b00b09b2ab4eaf4c14be2e3c5eada727793..a13be48b818becf82f5f27df2add172f37c23301 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -772,33 +772,6 @@ static int hstate_next_node_to_alloc(struct hstate *h,
        return nid;
 }
 
-static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
-{
-       struct page *page;
-       int start_nid;
-       int next_nid;
-       int ret = 0;
-
-       start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
-       next_nid = start_nid;
-
-       do {
-               page = alloc_fresh_huge_page_node(h, next_nid);
-               if (page) {
-                       ret = 1;
-                       break;
-               }
-               next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
-       } while (next_nid != start_nid);
-
-       if (ret)
-               count_vm_event(HTLB_BUDDY_PGALLOC);
-       else
-               count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
-
-       return ret;
-}
-
 /*
  * helper for free_pool_huge_page() - return the previously saved
  * node ["this node"] from which to free a huge page.  Advance the
@@ -817,6 +790,40 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
        return nid;
 }
 
+#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)          \
+       for (nr_nodes = nodes_weight(*mask);                            \
+               nr_nodes > 0 &&                                         \
+               ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
+               nr_nodes--)
+
+#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)           \
+       for (nr_nodes = nodes_weight(*mask);                            \
+               nr_nodes > 0 &&                                         \
+               ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
+               nr_nodes--)
+
+static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+{
+       struct page *page;
+       int nr_nodes, node;
+       int ret = 0;
+
+       for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+               page = alloc_fresh_huge_page_node(h, node);
+               if (page) {
+                       ret = 1;
+                       break;
+               }
+       }
+
+       if (ret)
+               count_vm_event(HTLB_BUDDY_PGALLOC);
+       else
+               count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
+       return ret;
+}
+
 /*
  * Free huge page from pool from next node to free.
  * Attempt to keep persistent huge pages more or less
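
The "|| 1" in these macros deserves a note: nr_nodes starts at
nodes_weight(*mask), so each node in the mask is tried at most once, and
the "(node = ...) || 1" term folds the cursor assignment into the loop
condition while forcing that term to be true, so the walk still works
when the round-robin helper hands back node 0. A minimal userspace
sketch of the same idiom; next_node() and NR_NODES are assumed stand-ins
for hstate_next_node_to_alloc() and nodes_weight(*mask), not kernel API:

    #include <stdio.h>

    #define NR_NODES 4

    static int cursor;          /* round-robin state, like h->next_nid_to_alloc */

    static int next_node(void)
    {
        int nid = cursor;

        cursor = (cursor + 1) % NR_NODES;
        return nid;
    }

    /*
     * "(node = next_node()) || 1" assigns inside the condition yet is
     * always true, so only "nr_nodes > 0" can terminate the loop --
     * even when next_node() legitimately returns node 0.
     */
    #define for_each_node_sketch(nr_nodes, node)    \
        for (nr_nodes = NR_NODES;                   \
             nr_nodes > 0 &&                        \
             ((node = next_node()) || 1);           \
             nr_nodes--)

    int main(void)
    {
        int nr_nodes, node;

        for_each_node_sketch(nr_nodes, node)
            printf("trying node %d\n", node);   /* each node tried once */
        return 0;
    }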
@@ -826,36 +833,31 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
                                                         bool acct_surplus)
 {
-       int start_nid;
-       int next_nid;
+       int nr_nodes, node;
        int ret = 0;
 
-       start_nid = hstate_next_node_to_free(h, nodes_allowed);
-       next_nid = start_nid;
-
-       do {
+       for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
                /*
                 * If we're returning unused surplus pages, only examine
                 * nodes with surplus pages.
                 */
-               if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
-                   !list_empty(&h->hugepage_freelists[next_nid])) {
+               if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
+                   !list_empty(&h->hugepage_freelists[node])) {
                        struct page *page =
-                               list_entry(h->hugepage_freelists[next_nid].next,
+                               list_entry(h->hugepage_freelists[node].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        h->free_huge_pages--;
-                       h->free_huge_pages_node[next_nid]--;
+                       h->free_huge_pages_node[node]--;
                        if (acct_surplus) {
                                h->surplus_huge_pages--;
-                               h->surplus_huge_pages_node[next_nid]--;
+                               h->surplus_huge_pages_node[node]--;
                        }
                        update_and_free_page(h, page);
                        ret = 1;
                        break;
                }
-               next_nid = hstate_next_node_to_free(h, nodes_allowed);
-       } while (next_nid != start_nid);
+       }
 
        return ret;
 }
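
Two details of the converted loop are easy to miss:
list_entry(h->hugepage_freelists[node].next, struct page, lru) is the
open-coded form of the list_first_entry() helper from <linux/list.h>,
and the manual "next_nid = hstate_next_node_to_free(...)" advance at the
bottom of the old do/while is gone because the macro performs it in the
loop condition.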
@@ -1035,11 +1037,8 @@ free:
        spin_unlock(&hugetlb_lock);
 
        /* Free unnecessary surplus pages to the buddy allocator */
-       if (!list_empty(&surplus_list)) {
-               list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
-                       put_page(page);
-               }
-       }
+       list_for_each_entry_safe(page, tmp, &surplus_list, lru)
+               put_page(page);
        spin_lock(&hugetlb_lock);
 
        return ret;
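
This hunk is the one the subject line names. list_for_each_entry_safe()
initializes its cursor from head->next and compares it against the head
before ever running the body, so an empty list yields zero iterations
and the outer list_empty() guard changed nothing. A self-contained
sketch with the kernel's list macros reduced to their essentials (local
re-definitions for illustration, not the <linux/list.h> originals):

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    /* list_entry() is container_of(): step back from a member to its struct */
    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    #define list_for_each_entry_safe(pos, n, head, member)              \
        for (pos = list_entry((head)->next, typeof(*pos), member),      \
             n = list_entry(pos->member.next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

    struct page { struct list_head lru; };

    int main(void)
    {
        struct list_head surplus_list = LIST_HEAD_INIT(surplus_list);
        struct page *page, *tmp;

        /*
         * Empty list: head->next points back at the head, so the loop
         * condition fails on the first check and the body never runs --
         * with or without a preceding list_empty() test.
         */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru)
            puts("never reached");
        puts("loop body skipped on the empty list");
        return 0;
    }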
@@ -1166,12 +1165,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        }
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-       if (page) {
-               /* update page cgroup details */
-               hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
-                                            h_cg, page);
-               spin_unlock(&hugetlb_lock);
-       } else {
+       if (!page) {
                spin_unlock(&hugetlb_lock);
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
@@ -1182,11 +1176,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
                        return ERR_PTR(-ENOSPC);
                }
                spin_lock(&hugetlb_lock);
-               hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
-                                            h_cg, page);
                list_move(&page->lru, &h->hugepage_activelist);
-               spin_unlock(&hugetlb_lock);
+               /* Fall through */
        }
+       hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+       spin_unlock(&hugetlb_lock);
 
        set_page_private(page, (unsigned long)spool);
 
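The alloc_huge_page() hunks are pure control-flow consolidation. The
dequeue-hit path used to commit the cgroup charge and drop hugetlb_lock
itself, while the miss path did the same after the buddy allocation; now
the miss path re-takes the lock, moves the page onto the active list and
falls through, so hugetlb_cgroup_commit_charge() and the final
spin_unlock() appear exactly once on the shared tail.
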
@@ -1197,14 +1191,12 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
        struct huge_bootmem_page *m;
-       int nr_nodes = nodes_weight(node_states[N_MEMORY]);
+       int nr_nodes, node;
 
-       while (nr_nodes) {
+       for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
                void *addr;
 
-               addr = __alloc_bootmem_node_nopanic(
-                               NODE_DATA(hstate_next_node_to_alloc(h,
-                                               &node_states[N_MEMORY])),
+               addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                huge_page_size(h), huge_page_size(h), 0);
 
                if (addr) {
@@ -1216,7 +1208,6 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
                        m = addr;
                        goto found;
                }
-               nr_nodes--;
        }
        return 0;
 
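The bootmem path gets the same treatment: the hand-rolled
"while (nr_nodes) { ... nr_nodes--; }" countdown over
node_states[N_MEMORY] becomes a for_each_node_mask_to_alloc() walk,
which advances the node cursor and decrements the counter itself, so
both the inline hstate_next_node_to_alloc() call and the trailing
nr_nodes-- disappear.
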
@@ -1355,48 +1346,28 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count,
 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
                                int delta)
 {
-       int start_nid, next_nid;
-       int ret = 0;
+       int nr_nodes, node;
 
        VM_BUG_ON(delta != -1 && delta != 1);
 
-       if (delta < 0)
-               start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
-       else
-               start_nid = hstate_next_node_to_free(h, nodes_allowed);
-       next_nid = start_nid;
-
-       do {
-               int nid = next_nid;
-               if (delta < 0)  {
-                       /*
-                        * To shrink on this node, there must be a surplus page
-                        */
-                       if (!h->surplus_huge_pages_node[nid]) {
-                               next_nid = hstate_next_node_to_alloc(h,
-                                                               nodes_allowed);
-                               continue;
-                       }
+       if (delta < 0) {
+               for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+                       if (h->surplus_huge_pages_node[node])
+                               goto found;
                }
-               if (delta > 0) {
-                       /*
-                        * Surplus cannot exceed the total number of pages
-                        */
-                       if (h->surplus_huge_pages_node[nid] >=
-                                               h->nr_huge_pages_node[nid]) {
-                               next_nid = hstate_next_node_to_free(h,
-                                                               nodes_allowed);
-                               continue;
-                       }
+       } else {
+               for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+                       if (h->surplus_huge_pages_node[node] <
+                                       h->nr_huge_pages_node[node])
+                               goto found;
                }
+       }
+       return 0;
 
-               h->surplus_huge_pages += delta;
-               h->surplus_huge_pages_node[nid] += delta;
-               ret = 1;
-               break;
-       } while (next_nid != start_nid);
-
-       return ret;
+found:
+       h->surplus_huge_pages += delta;
+       h->surplus_huge_pages_node[node] += delta;
+       return 1;
 }
 
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
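
The rewritten adjust_pool_surplus() reads as "find the first node where
the adjustment is legal, then do the bookkeeping once": shrinking
(delta == -1) needs a node that still holds a surplus page, growing
(delta == 1) needs one whose surplus has not yet reached its total page
count. A compact userspace sketch of that search-then-goto shape, using
a plain index walk and hypothetical per-node counters in place of the
hstate fields and the round-robin macros:

    #include <stdio.h>

    #define NR_NODES 4

    /* hypothetical per-node counters standing in for the hstate fields */
    static int surplus_pages_node[NR_NODES] = { 0, 0, 2, 1 };
    static int nr_pages_node[NR_NODES]      = { 8, 8, 8, 8 };
    static int surplus_pages = 3;

    static int adjust_surplus(int delta)    /* delta is -1 or 1, as above */
    {
        int node;

        if (delta < 0) {
            for (node = 0; node < NR_NODES; node++)
                if (surplus_pages_node[node])   /* need surplus to shrink */
                    goto found;
        } else {
            for (node = 0; node < NR_NODES; node++)
                if (surplus_pages_node[node] < nr_pages_node[node])
                    goto found;     /* surplus may not exceed the total */
        }
        return 0;   /* no qualifying node */

    found:
        surplus_pages += delta;
        surplus_pages_node[node] += delta;
        return 1;
    }

    int main(void)
    {
        printf("shrink: %d, node 2 surplus now %d\n",
               adjust_surplus(-1), surplus_pages_node[2]);
        return 0;
    }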