diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e54b695336f94f91f3b847dad9e728f58c5fa58b..bc727122dd44de6c4ae9307c618e58ddf4da3c87 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
 
 #include <linux/io.h>
 #include <linux/hugetlb.h>
+#include <linux/hugetlb_cgroup.h>
 #include <linux/node.h>
 #include "internal.h"
 
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
 
-static int hugetlb_max_hstate;
+int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
 
@@ -46,13 +48,10 @@ static struct hstate * __initdata parsed_hstate;
 static unsigned long __initdata default_hstate_max_huge_pages;
 static unsigned long __initdata default_hstate_size;
 
-#define for_each_hstate(h) \
-       for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
-
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  */
-static DEFINE_SPINLOCK(hugetlb_lock);
+DEFINE_SPINLOCK(hugetlb_lock);
 
 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
 {
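
Dropping the static qualifiers on hugetlb_max_hstate and hugetlb_lock, and removing the local for_each_hstate() definition, only makes sense if the new hugetlb cgroup code references these symbols from another translation unit. The companion declarations are presumably added elsewhere in this series (most likely a shared header); roughly, as a sketch rather than the verbatim hunk:

/* Presumed companion declarations for the de-staticized symbols above.
 * Sketch only: the exact header and placement are not shown in this diff. */
extern int hugetlb_max_hstate;
extern spinlock_t hugetlb_lock;

/* hstates[] is already non-static, so the iterator can live next to it. */
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
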
@@ -510,7 +509,7 @@ void copy_huge_page(struct page *dst, struct page *src)
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
        int nid = page_to_nid(page);
-       list_add(&page->lru, &h->hugepage_freelists[nid]);
+       list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
 }
@@ -522,7 +521,7 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
        if (list_empty(&h->hugepage_freelists[nid]))
                return NULL;
        page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
-       list_del(&page->lru);
+       list_move(&page->lru, &h->hugepage_activelist);
        set_page_refcounted(page);
        h->free_huge_pages--;
        h->free_huge_pages_node[nid]--;
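
The conversions from list_add()/list_del() to list_move() in enqueue_huge_page() and dequeue_huge_page_node() establish a new invariant: once a hugetlb page has been prepped, its page->lru is always linked on exactly one list, either a per-node freelist or the new h->hugepage_activelist, so the cgroup code can find in-use pages. A minimal sketch of that two-list discipline, with hypothetical names (not code from this patch):

#include <linux/list.h>

struct demo_pool {
	struct list_head freelist;	/* stands in for h->hugepage_freelists[nid] */
	struct list_head activelist;	/* stands in for h->hugepage_activelist */
};

/* After INIT_LIST_HEAD() plus one initial list_add(), an entry is always on
 * exactly one of the two lists, so moving it never needs a separate
 * list_del(): list_move() unlinks and relinks in a single step. */
static void demo_activate(struct demo_pool *p, struct list_head *entry)
{
	list_move(entry, &p->activelist);
}

static void demo_retire(struct demo_pool *p, struct list_head *entry)
{
	list_move(entry, &p->freelist);
}
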
@@ -594,6 +593,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
                                1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
+       VM_BUG_ON(hugetlb_cgroup_from_page(page));
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        arch_release_hugepage(page);
@@ -626,10 +626,13 @@ static void free_huge_page(struct page *page)
        page->mapping = NULL;
        BUG_ON(page_count(page));
        BUG_ON(page_mapcount(page));
-       INIT_LIST_HEAD(&page->lru);
 
        spin_lock(&hugetlb_lock);
+       hugetlb_cgroup_uncharge_page(hstate_index(h),
+                                    pages_per_huge_page(h), page);
        if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
+               /* remove the page from active list */
+               list_del(&page->lru);
                update_and_free_page(h, page);
                h->surplus_huge_pages--;
                h->surplus_huge_pages_node[nid]--;
@@ -642,8 +645,10 @@ static void free_huge_page(struct page *page)
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
+       INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, free_huge_page);
        spin_lock(&hugetlb_lock);
+       set_hugetlb_cgroup(page, NULL);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
        spin_unlock(&hugetlb_lock);
@@ -890,8 +895,10 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 
        spin_lock(&hugetlb_lock);
        if (page) {
+               INIT_LIST_HEAD(&page->lru);
                r_nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
+               set_hugetlb_cgroup(page, NULL);
                /*
                 * We incremented the global counters already
                 */
@@ -994,7 +1001,6 @@ retry:
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
                        break;
-               list_del(&page->lru);
                /*
                 * This page is now managed by the hugetlb allocator and has
                 * no users -- drop the buddy allocator's reference.
@@ -1009,7 +1015,6 @@ free:
        /* Free unnecessary surplus pages to the buddy allocator */
        if (!list_empty(&surplus_list)) {
                list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
-                       list_del(&page->lru);
                        put_page(page);
                }
        }
@@ -1113,7 +1118,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        struct hstate *h = hstate_vma(vma);
        struct page *page;
        long chg;
+       int ret, idx;
+       struct hugetlb_cgroup *h_cg;
 
+       idx = hstate_index(h);
        /*
         * Processes that did not create the mapping will have no
         * reserves and will not have accounted against subpool
@@ -1129,22 +1137,38 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
                if (hugepage_subpool_get_pages(spool, chg))
                        return ERR_PTR(-ENOSPC);
 
+       ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
+       if (ret) {
+               hugepage_subpool_put_pages(spool, chg);
+               return ERR_PTR(-ENOSPC);
+       }
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-       spin_unlock(&hugetlb_lock);
-
-       if (!page) {
+       if (page) {
+               /* update page cgroup details */
+               hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+                                            h_cg, page);
+               spin_unlock(&hugetlb_lock);
+       } else {
+               spin_unlock(&hugetlb_lock);
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
+                       hugetlb_cgroup_uncharge_cgroup(idx,
+                                                      pages_per_huge_page(h),
+                                                      h_cg);
                        hugepage_subpool_put_pages(spool, chg);
                        return ERR_PTR(-ENOSPC);
                }
+               spin_lock(&hugetlb_lock);
+               hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+                                            h_cg, page);
+               list_move(&page->lru, &h->hugepage_activelist);
+               spin_unlock(&hugetlb_lock);
        }
 
        set_page_private(page, (unsigned long)spool);
 
        vma_commit_reservation(h, vma, addr);
-
        return page;
 }
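
Taken together, the alloc_huge_page() changes above implement a charge, allocate, commit protocol with explicit rollback on every failure path: the cgroup is charged before any page is taken, the charge is bound to the page while hugetlb_lock is still held, and both the charge and the subpool reservation are undone if no page can be found. A condensed sketch of that control flow follows; try_reserved_alloc() and buddy_alloc() are hypothetical stand-ins for dequeue_huge_page_vma() and alloc_buddy_huge_page(), and the vma/reservation details are omitted.

/* Sketch of the charge/commit/rollback ordering used by alloc_huge_page()
 * above; not the real function (stand-in allocators, no vma handling). */
static struct page *try_reserved_alloc(struct hstate *h);	/* hypothetical */
static struct page *buddy_alloc(struct hstate *h);		/* hypothetical */

static struct page *charge_commit_sketch(struct hstate *h, int idx,
					 struct hugepage_subpool *spool,
					 long chg)
{
	struct hugetlb_cgroup *h_cg;
	struct page *page;

	/* 1. Charge the cgroup before touching any page. */
	if (hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg)) {
		hugepage_subpool_put_pages(spool, chg);	/* undo subpool accounting */
		return ERR_PTR(-ENOSPC);
	}

	/* 2. Fast path: a pre-existing free page; commit the charge to the
	 *    page while hugetlb_lock is held so it becomes visible atomically. */
	spin_lock(&hugetlb_lock);
	page = try_reserved_alloc(h);
	if (page)
		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
					     h_cg, page);
	spin_unlock(&hugetlb_lock);
	if (page)
		return page;

	/* 3. Slow path: a fresh page from the buddy allocator. */
	page = buddy_alloc(h);
	if (!page) {
		/* Roll back both the cgroup charge and the subpool count. */
		hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
		hugepage_subpool_put_pages(spool, chg);
		return ERR_PTR(-ENOSPC);
	}

	/* 4. Commit the charge and move the new page onto the active list. */
	spin_lock(&hugetlb_lock);
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	list_move(&page->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return page;
}
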
 
@@ -1909,10 +1933,18 @@ void __init hugetlb_add_hstate(unsigned order)
        h->free_huge_pages = 0;
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
+       INIT_LIST_HEAD(&h->hugepage_activelist);
        h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
        h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
        snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
                                        huge_page_size(h)/1024);
+       /*
+        * Add cgroup control files only if the huge page consists
+        * of more than two normal pages. This is because we use
+        * page[2].lru.next for storing cgroup details.
+        */
+       if (order >= HUGETLB_CGROUP_MIN_ORDER)
+               hugetlb_cgroup_file_init(hugetlb_max_hstate - 1);
 
        parsed_hstate = h;
 }
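
The comment added above is the reason for the HUGETLB_CGROUP_MIN_ORDER check: the hugetlb_cgroup pointer is stashed in the otherwise unused lru.next field of the third page of the compound page, which only exists when the huge page spans more than two base pages. A sketch of accessors consistent with that scheme (illustrative only; the real helpers live in <linux/hugetlb_cgroup.h> and are not reproduced verbatim here):

/* Illustrative accessors consistent with the comment above: the cgroup
 * pointer lives in page[2].lru.next, so huge pages smaller than
 * HUGETLB_CGROUP_MIN_ORDER cannot carry one.  Sketch, not the real header. */
static inline struct hugetlb_cgroup *sketch_hugetlb_cgroup_from_page(struct page *page)
{
	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return NULL;			/* no spare tail-page slot */
	return (struct hugetlb_cgroup *)page[2].lru.next;
}

static inline int sketch_set_hugetlb_cgroup(struct page *page,
					    struct hugetlb_cgroup *h_cg)
{
	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return -1;
	page[2].lru.next = (void *)h_cg;	/* reuse an unused tail-page list pointer */
	return 0;
}
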
@@ -2397,6 +2429,25 @@ again:
        tlb_end_vma(tlb, vma);
 }
 
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+                         struct vm_area_struct *vma, unsigned long start,
+                         unsigned long end, struct page *ref_page)
+{
+       __unmap_hugepage_range(tlb, vma, start, end, ref_page);
+
+       /*
+        * Clear this flag so that x86's huge_pmd_share page_table_shareable
+        * test will fail on a vma being torn down, and not grab a page table
+        * on its way out.  We're lucky that the flag has such an appropriate
+        * name, and can in fact be safely cleared here. We could clear it
+        * before the __unmap_hugepage_range above, but all that's necessary
+        * is to clear it before releasing the i_mmap_mutex. This works
+        * because in the context this is called, the VMA is about to be
+        * destroyed and the i_mmap_mutex is held.
+        */
+       vma->vm_flags &= ~VM_MAYSHARE;
+}
+
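
__unmap_hugepage_range_final() relies on the caller holding i_mmap_mutex across the final teardown of the VMA, which is what makes clearing VM_MAYSHARE here safe. As an assumption about the intended call site (the VMA unmap path in mm/memory.c; the exact code is not part of this diff), the usage would look roughly like:

/* Assumed caller pattern (approximate, not taken from this diff):
 * i_mmap_mutex is held across the final unmap, so huge_pmd_share()
 * cannot pick up this dying VMA once VM_MAYSHARE has been cleared. */
if (unlikely(is_vm_hugetlb_page(vma))) {
	if (vma->vm_file) {
		mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
		__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
		mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
	}
}
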
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
@@ -2980,9 +3031,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
-       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
-
+       /*
+        * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+        * may have cleared our pud entry and done put_page on the page table:
+        * once we release i_mmap_mutex, another task can do the final put_page
+        * and that page table be reused and filled with junk.
+        */
        flush_tlb_range(vma, start, end);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 int hugetlb_reserve_pages(struct inode *inode,