diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b95ff4120f5ae49cf01a29b2b921c6501f4c002..3630d577e9879e9d6dc6a80912e2eb88d5f1c959 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -827,7 +827,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
-       if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) {
+       if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_TRANSHUGE))) {
                put_page(page);
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
@@ -1132,7 +1132,7 @@ alloc:
                goto out;
        }
 
-       if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) {
+       if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_TRANSHUGE))) {
                put_page(new_page);
                if (page) {
                        split_huge_page(page);
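
The same GFP_KERNEL -> GFP_TRANSHUGE substitution appears a third time in collapse_huge_page() further down. The huge page is already allocated at these points; the GFP mask only steers how hard the memcg charge is allowed to reclaim, so charging with the allocation's own lighter-weight GFP_TRANSHUGE lets a memcg under pressure fail the charge quickly and the fault fall back to regular pages. A minimal sketch of the shared pattern at the two fault-path call sites, using a hypothetical helper name that is not part of the patch:

	/*
	 * Hypothetical condensation of the two fault-path call sites above:
	 * charge the new huge page with the same GFP mask that allocated it
	 * and fall back to regular pages if the memcg cannot take it cheaply.
	 */
	static int thp_charge_or_fallback(struct page *page, struct mm_struct *mm)
	{
		if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_TRANSHUGE))) {
			put_page(page);				/* drop the unused huge page */
			count_vm_event(THP_FAULT_FALLBACK);	/* account the fallback */
			return VM_FAULT_FALLBACK;		/* retry the fault with small pages */
		}
		return 0;
	}
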
@@ -2233,6 +2233,30 @@ static void khugepaged_alloc_sleep(void)
 
 static int khugepaged_node_load[MAX_NUMNODES];
 
+static bool khugepaged_scan_abort(int nid)
+{
+       int i;
+
+       /*
+        * If zone_reclaim_mode is disabled, then no extra effort is made to
+        * allocate memory locally.
+        */
+       if (!zone_reclaim_mode)
+               return false;
+
+       /* If there is a count for this node already, it must be acceptable */
+       if (khugepaged_node_load[nid])
+               return false;
+
+       for (i = 0; i < MAX_NUMNODES; i++) {
+               if (!khugepaged_node_load[i])
+                       continue;
+               if (node_distance(nid, i) > RECLAIM_DISTANCE)
+                       return true;
+       }
+       return false;
+}
+
 #ifdef CONFIG_NUMA
 static int khugepaged_find_target_node(void)
 {
@@ -2399,7 +2423,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        if (!new_page)
                return;
 
-       if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)))
+       if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_TRANSHUGE)))
                return;
 
        /*
@@ -2545,6 +2569,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 * hit record.
                 */
                node = page_to_nid(page);
+               if (khugepaged_scan_abort(node))
+                       goto out_unmap;
                khugepaged_node_load[node]++;
                VM_BUG_ON_PAGE(PageCompound(page), page);
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))