sched/numa: Set preferred NUMA node based on number of private faults
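
The fault paths in mm/memory.c stop recording only the node that last
touched a page (last_nid) and instead record a combined node+pid value
(last_nidpid). Tracking the pid of the last faulting task alongside the
node is what lets the balancer classify a fault as private (the same
task faulting again) or shared (a different task), so the preferred
node can be chosen from the private-fault counts.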
diff --git a/mm/memory.c b/mm/memory.c
index 3e3b4b8b6c41cac8f8eadf4c6660b44a5cc1e92e..cc7f20691c829662614d3b3a5a6729a85384a50c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -69,8 +69,8 @@
 
 #include "internal.h"
 
-#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
-#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid.
+#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
+#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nidpid.
 #endif
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -3536,7 +3536,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page = NULL;
        spinlock_t *ptl;
        int page_nid = -1;
-       int last_nid;
+       int last_nidpid;
        int target_nid;
        bool migrated = false;
 
@@ -3567,7 +3567,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
        BUG_ON(is_zero_pfn(page_to_pfn(page)));
 
-       last_nid = page_nid_last(page);
+       last_nidpid = page_nidpid_last(page);
        page_nid = page_to_nid(page);
        target_nid = numa_migrate_prep(page, vma, addr, page_nid);
        pte_unmap_unlock(ptep, ptl);
@@ -3583,7 +3583,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 out:
        if (page_nid != -1)
-               task_numa_fault(last_nid, page_nid, 1, migrated);
+               task_numa_fault(last_nidpid, page_nid, 1, migrated);
        return 0;
 }
 
@@ -3598,7 +3598,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long offset;
        spinlock_t *ptl;
        bool numa = false;
-       int last_nid;
+       int last_nidpid;
 
        spin_lock(&mm->page_table_lock);
        pmd = *pmdp;
@@ -3643,7 +3643,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                if (unlikely(!page))
                        continue;
 
-               last_nid = page_nid_last(page);
+               last_nidpid = page_nidpid_last(page);
                page_nid = page_to_nid(page);
                target_nid = numa_migrate_prep(page, vma, addr, page_nid);
                pte_unmap_unlock(pte, ptl);
@@ -3656,7 +3656,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                }
 
                if (page_nid != -1)
-                       task_numa_fault(last_nid, page_nid, 1, migrated);
+                       task_numa_fault(last_nidpid, page_nid, 1, migrated);
 
                pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
        }
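
For reference, the nidpid value read via page_nidpid_last() and passed
to task_numa_fault() packs the pid into the low bits and the node above
it. Below is a minimal sketch of that encoding; the helper names and
the 8-bit pid field mirror what this series adds elsewhere (in
include/linux/mm.h), but treat the exact shift/mask values here as
illustrative assumptions rather than this tree's definitions.

#define LAST__PID_SHIFT	8
#define LAST__PID_MASK	((1 << LAST__PID_SHIFT) - 1)

/* Pack a node id and the low bits of a pid into one value. */
static inline int nid_pid_to_nidpid(int nid, int pid)
{
	return (nid << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

/* Recover the node that last faulted on the page. */
static inline int nidpid_to_nid(int nidpid)
{
	return nidpid >> LAST__PID_SHIFT;
}

/* Recover (the low bits of) the pid that last faulted. */
static inline int nidpid_to_pid(int nidpid)
{
	return nidpid & LAST__PID_MASK;
}

With an encoding along these lines, task_numa_fault() can compare
nidpid_to_pid(last_nidpid) against the current task's pid (masked the
same way) to count each fault as private or shared before updating the
task's preferred node.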