[firefly-linux-kernel-4.4.55.git] fs/proc/task_mmu.c
index b029d426c55892544afcd3bf2b8a5965f6e0e5ee..f6478301db00bcd276f9c2ffb58908bc817928df 100644
@@ -70,6 +70,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
                ptes >> 10,
                pmds >> 10,
                swap << (PAGE_SHIFT-10));
+       hugetlb_report_usage(m, mm);
 }
 
 unsigned long task_vsize(struct mm_struct *mm)
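
task_mem() produces the Vm* lines of /proc/<pid>/status, so this hunk makes a task's hugetlb usage visible there as well. A minimal sketch of the helper, assuming the mm->hugetlb_usage counter added by the accompanying accounting change (the real definition lives in include/linux/hugetlb.h and may differ in detail):

        static inline void hugetlb_report_usage(struct seq_file *m,
                                                struct mm_struct *mm)
        {
                /* hugetlb_usage is updated at map/unmap time; report it in kB */
                seq_printf(m, "HugetlbPages:\t%8lu kB\n",
                           atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
        }
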
@@ -446,6 +447,8 @@ struct mem_size_stats {
        unsigned long anonymous;
        unsigned long anonymous_thp;
        unsigned long swap;
+       unsigned long shared_hugetlb;
+       unsigned long private_hugetlb;
        u64 pss;
        u64 swap_pss;
 };
@@ -625,12 +628,44 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
        seq_putc(m, '\n');
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
+                                unsigned long addr, unsigned long end,
+                                struct mm_walk *walk)
+{
+       struct mem_size_stats *mss = walk->private;
+       struct vm_area_struct *vma = walk->vma;
+       struct page *page = NULL;
+
+       if (pte_present(*pte)) {
+               page = vm_normal_page(vma, addr, *pte);
+       } else if (is_swap_pte(*pte)) {
+               swp_entry_t swpent = pte_to_swp_entry(*pte);
+
+               if (is_migration_entry(swpent))
+                       page = migration_entry_to_page(swpent);
+       }
+       if (page) {
+               int mapcount = page_mapcount(page);
+
+               if (mapcount >= 2)
+                       mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
+               else
+                       mss->private_hugetlb += huge_page_size(hstate_vma(vma));
+       }
+       return 0;
+}
+#endif /* HUGETLB_PAGE */
+
 static int show_smap(struct seq_file *m, void *v, int is_pid)
 {
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;
        struct mm_walk smaps_walk = {
                .pmd_entry = smaps_pte_range,
+#ifdef CONFIG_HUGETLB_PAGE
+               .hugetlb_entry = smaps_hugetlb_range,
+#endif
                .mm = vma->vm_mm,
                .private = &mss,
        };
@@ -652,6 +687,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                   "Referenced:     %8lu kB\n"
                   "Anonymous:      %8lu kB\n"
                   "AnonHugePages:  %8lu kB\n"
+                  "Shared_Hugetlb: %8lu kB\n"
+                  "Private_Hugetlb: %7lu kB\n"
                   "Swap:           %8lu kB\n"
                   "SwapPss:        %8lu kB\n"
                   "KernelPageSize: %8lu kB\n"
@@ -667,6 +704,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                   mss.referenced >> 10,
                   mss.anonymous >> 10,
                   mss.anonymous_thp >> 10,
+                  mss.shared_hugetlb >> 10,
+                  mss.private_hugetlb >> 10,
                   mss.swap >> 10,
                   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
                   vma_kernel_pagesize(vma) >> 10,
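
With the two extra format strings and arguments above, the smaps entry of a hugetlbfs mapping gains two lines; for instance, with two private 2 MB pages faulted in and nothing shared, it would show something like:

        Shared_Hugetlb:        0 kB
        Private_Hugetlb:    4096 kB
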
@@ -753,19 +792,27 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
        pte_t ptent = *pte;
 
        if (pte_present(ptent)) {
+               ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
                ptent = pte_wrprotect(ptent);
                ptent = pte_clear_soft_dirty(ptent);
+               ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
        } else if (is_swap_pte(ptent)) {
                ptent = pte_swp_clear_soft_dirty(ptent);
+               set_pte_at(vma->vm_mm, addr, pte, ptent);
        }
-
-       set_pte_at(vma->vm_mm, addr, pte, ptent);
 }
+#else
+static inline void clear_soft_dirty(struct vm_area_struct *vma,
+               unsigned long addr, pte_t *pte)
+{
+}
+#endif
 
+#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
 {
-       pmd_t pmd = *pmdp;
+       pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
 
        pmd = pmd_wrprotect(pmd);
        pmd = pmd_clear_soft_dirty(pmd);
@@ -775,14 +822,7 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 
        set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
 }
-
 #else
-
-static inline void clear_soft_dirty(struct vm_area_struct *vma,
-               unsigned long addr, pte_t *pte)
-{
-}
-
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
 {
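
Clearing the write and soft-dirty bits on a present PTE now happens inside a ptep_modify_prot_start()/ptep_modify_prot_commit() pair instead of a plain read, modify, set_pte_at() sequence; the start operation atomically gets and clears the PTE, so dirty/accessed bits set by hardware while the bits are being recomputed cannot be lost, and architectures can hook in their native protection-update primitives. clear_soft_dirty_pmd() gets the same treatment via pmdp_huge_get_and_clear(), and the #else stubs are regrouped so the PMD variant additionally depends on CONFIG_TRANSPARENT_HUGEPAGE. As a rough sketch, the generic fallbacks (asm-generic; details vary by architecture and kernel version) amount to:

        static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
                                                   unsigned long addr, pte_t *ptep)
        {
                /* atomically read and clear the PTE so concurrent hardware
                 * updates of the dirty/accessed bits cannot be lost */
                return ptep_get_and_clear(mm, addr, ptep);
        }

        static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                                   unsigned long addr,
                                                   pte_t *ptep, pte_t pte)
        {
                /* install the modified PTE */
                set_pte_at(mm, addr, ptep, pte);
        }
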
@@ -1395,6 +1435,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
        return page;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
+                                             struct vm_area_struct *vma,
+                                             unsigned long addr)
+{
+       struct page *page;
+       int nid;
+
+       if (!pmd_present(pmd))
+               return NULL;
+
+       page = vm_normal_page_pmd(vma, addr, pmd);
+       if (!page)
+               return NULL;
+
+       if (PageReserved(page))
+               return NULL;
+
+       nid = page_to_nid(page);
+       if (!node_isset(nid, node_states[N_MEMORY]))
+               return NULL;
+
+       return page;
+}
+#endif
+
 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
                unsigned long end, struct mm_walk *walk)
 {
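
The THP branch of gather_pte_stats() used to reinterpret the huge pmd as a pte_t and feed it to can_gather_numa_stats(). That cast is not portable: pmd and pte layouts differ on some architectures (s390 being the usual example), so bits such as dirty can be read from the wrong position. can_gather_numa_stats_pmd() is the pmd-level counterpart, staying in pmd space by using pmd_present(), vm_normal_page_pmd() and, in the caller below, pmd_dirty().
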
@@ -1404,13 +1470,13 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
        pte_t *orig_pte;
        pte_t *pte;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
-               pte_t huge_pte = *(pte_t *)pmd;
                struct page *page;
 
-               page = can_gather_numa_stats(huge_pte, vma, addr);
+               page = can_gather_numa_stats_pmd(*pmd, vma, addr);
                if (page)
-                       gather_stats(page, md, pte_dirty(huge_pte),
+                       gather_stats(page, md, pmd_dirty(*pmd),
                                     HPAGE_PMD_SIZE/PAGE_SIZE);
                spin_unlock(ptl);
                return 0;
@@ -1418,6 +1484,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 
        if (pmd_trans_unstable(pmd))
                return 0;
+#endif
        orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        do {
                struct page *page = can_gather_numa_stats(*pte, vma, addr);
@@ -1433,18 +1500,19 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end, struct mm_walk *walk)
 {
+       pte_t huge_pte = huge_ptep_get(pte);
        struct numa_maps *md;
        struct page *page;
 
-       if (!pte_present(*pte))
+       if (!pte_present(huge_pte))
                return 0;
 
-       page = pte_page(*pte);
+       page = pte_page(huge_pte);
        if (!page)
                return 0;
 
        md = walk->private;
-       gather_stats(page, md, pte_dirty(*pte), 1);
+       gather_stats(page, md, pte_dirty(huge_pte), 1);
        return 0;
 }
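
gather_hugetlb_stats() now reads the huge PTE through huge_ptep_get() rather than dereferencing the pointer directly, and uses that value consistently for the pte_present(), pte_page() and pte_dirty() checks. On most architectures the generic fallback is just a plain load, but on s390 huge PTEs are stored in pmd format and must be converted first, so the direct dereference returned a wrong value there. A sketch of the generic fallback (asm-generic/hugetlb.h):

        static inline pte_t huge_ptep_get(pte_t *ptep)
        {
                /* most architectures store huge PTEs in normal pte format */
                return *ptep;
        }
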