mm/zbud: change zbud_alloc size type to size_t
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 07908ea954b6c5ea1210a037c6cf3fd6bdc5e923..90dc501eaf3fbcbc7a60efeb1a4b3072220c04dc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2612,7 +2612,7 @@ retry:
 
        nr_reclaimed = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
 
-       if (mem_cgroup_margin(mem_over_limit) >= batch)
+       if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
                goto retry;
 
        if (gfp_mask & __GFP_NORETRY)
@@ -2626,7 +2626,7 @@ retry:
         * unlikely to succeed so close to the limit, and we fall back
         * to regular pages anyway in case of failure.
         */
-       if (nr_reclaimed && batch <= (1 << PAGE_ALLOC_COSTLY_ORDER))
+       if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
                goto retry;
        /*
         * At task move, charge accounts can be doubly counted. So, it's
@@ -2644,7 +2644,7 @@ retry:
        if (fatal_signal_pending(current))
                goto bypass;
 
-       mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(batch));
+       mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(nr_pages));
 nomem:
        if (!(gfp_mask & __GFP_NOFAIL))
                return -ENOMEM;
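
The three hunks above are one fix applied at each decision point: "batch"
is the request rounded up to refill the per-CPU stock (in this era,
batch = max(CHARGE_BATCH, nr_pages)), so it overstates what the caller
actually needs. Once reclaim has run, it is enough that the margin covers
nr_pages; the stock refill is opportunistic, and the same reasoning fixes
the costly-order cutoff and the OOM order. A minimal userspace model of
the retry check, with CHARGE_BATCH and the helper names made up for
illustration:

#include <stdio.h>

#define CHARGE_BATCH	32	/* illustrative stock refill size */

static long limit = 1000, usage = 990;

static long margin(void)
{
	return limit > usage ? limit - usage : 0;
}

int main(void)
{
	long nr_pages = 4;
	long batch = nr_pages > CHARGE_BATCH ? nr_pages : CHARGE_BATCH;

	/* Judging by batch forces needless retries or OOM... */
	printf("batch %ld fits margin %ld? %s\n",
	       batch, margin(), margin() >= batch ? "yes" : "no");
	/* ...while the caller only needs nr_pages to make progress. */
	printf("request %ld fits margin %ld? %s\n",
	       nr_pages, margin(), margin() >= nr_pages ? "yes" : "no");
	return 0;
}

With a 10-page margin, the 32-page batch fails the check and escalates
toward reclaim and OOM even though the 4-page request would succeed.
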
@@ -2795,14 +2795,6 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
        }
 
        pc->mem_cgroup = memcg;
-       /*
-        * We access a page_cgroup asynchronously without lock_page_cgroup().
-        * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
-        * is accessed after testing USED bit. To make pc->mem_cgroup visible
-        * before USED bit, we need memory barrier here.
-        * See mem_cgroup_add_lru_list(), etc.
-        */
-       smp_wmb();
        SetPageCgroupUsed(pc);
 
        if (lrucare) {
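
The smp_wmb() removed above ordered the pc->mem_cgroup store ahead of
SetPageCgroupUsed() for lockless readers that tested the USED bit first.
Dropping it is only safe if no such lockless reader is left; judging by
the hunks below, the remaining readers either hold lock_page_cgroup() or
own the page outright. A sketch of the publication ordering the barrier
used to provide, modeled with C11 release/acquire instead of kernel
barriers (the struct and names only loosely mirror page_cgroup):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define PCG_USED 1

struct pc_model {
	void *mem_cgroup;	/* payload published... */
	atomic_int flags;	/* ...by setting the USED bit */
};

static struct pc_model pc;
static int memcg_stub;

static void *writer(void *arg)
{
	(void)arg;
	pc.mem_cgroup = &memcg_stub;
	/* release: the payload store cannot pass this (smp_wmb's old job) */
	atomic_store_explicit(&pc.flags, PCG_USED, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* acquire pairs with release: seeing USED implies seeing memcg */
	if (atomic_load_explicit(&pc.flags, memory_order_acquire) & PCG_USED)
		printf("memcg = %p\n", pc.mem_cgroup);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

A reader that observes PCG_USED is guaranteed to observe mem_cgroup as
well, which is exactly the invariant the deleted comment describes.
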
@@ -3415,12 +3407,13 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
                memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
                return;
        }
-
+       /*
+        * The page is freshly allocated and not visible to any
+        * outside callers yet.  Set up pc non-atomically.
+        */
        pc = lookup_page_cgroup(page);
-       lock_page_cgroup(pc);
        pc->mem_cgroup = memcg;
-       SetPageCgroupUsed(pc);
-       unlock_page_cgroup(pc);
+       pc->flags = PCG_USED;
 }
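
The commit path above gets away with plain stores because, as the new
comment says, the page is freshly allocated and unpublished: no other CPU
can reach its page_cgroup yet, so neither lock_page_cgroup() nor an
atomic SetPageCgroupUsed() buys anything. The uncharge hunk below leans
on the mirror-image argument: a kmem page being uncharged is headed for
the free path, so the USED test and clear need no lock either. A hedged
sketch of the pattern, with made-up types standing in for page_cgroup:

#include <stdio.h>
#include <stdlib.h>

#define PCG_USED 1UL

struct pc_model {
	void *mem_cgroup;
	unsigned long flags;
};

static struct pc_model *commit_charge(void *memcg)
{
	struct pc_model *pc = malloc(sizeof(*pc));

	if (!pc)
		return NULL;
	/*
	 * Still private to this thread: plain, non-atomic stores are
	 * fine.  Ordering and locking only matter once the object is
	 * linked into a structure other threads can see.
	 */
	pc->mem_cgroup = memcg;
	pc->flags = PCG_USED;
	return pc;	/* publication would happen after this point */
}

int main(void)
{
	int memcg;
	struct pc_model *pc = commit_charge(&memcg);

	if (pc)
		printf("flags = %#lx\n", pc->flags);
	free(pc);
	return 0;
}
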
 
 void __memcg_kmem_uncharge_pages(struct page *page, int order)
@@ -3430,19 +3423,11 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
 
 
        pc = lookup_page_cgroup(page);
-       /*
-        * Fast unlocked return. Theoretically might have changed, have to
-        * check again after locking.
-        */
        if (!PageCgroupUsed(pc))
                return;
 
-       lock_page_cgroup(pc);
-       if (PageCgroupUsed(pc)) {
-               memcg = pc->mem_cgroup;
-               ClearPageCgroupUsed(pc);
-       }
-       unlock_page_cgroup(pc);
+       memcg = pc->mem_cgroup;
+       pc->flags = 0;
 
        /*
         * We trust that only if there is a memcg associated with the page, it
@@ -3483,7 +3468,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
        for (i = 1; i < HPAGE_PMD_NR; i++) {
                pc = head_pc + i;
                pc->mem_cgroup = memcg;
-               smp_wmb();/* see __commit_charge() */
                pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
        }
        __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
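
The barrier removed here goes away for the same reason as the one in
__mem_cgroup_commit_charge(): with the lockless USED-bit readers gone,
plain stores suffice while the compound page is being split, and the
copy simply masks out the per-pc bits that must not propagate to tails
(PCGF_NOCOPY_AT_SPLIT covers state such as the pc lock bit). A toy
version of the mask-and-copy, with an invented bit layout:

#include <stdio.h>

#define PCG_USED	(1UL << 0)	/* invented bit positions */
#define PCG_LOCK	(1UL << 1)
#define NOCOPY_AT_SPLIT	PCG_LOCK	/* stands in for PCGF_NOCOPY_AT_SPLIT */
#define NR_TAILS	4

int main(void)
{
	unsigned long head_flags = PCG_USED | PCG_LOCK;
	unsigned long tail_flags[NR_TAILS];
	int i;

	/* tails inherit the head's flags minus the no-copy bits */
	for (i = 0; i < NR_TAILS; i++)
		tail_flags[i] = head_flags & ~NOCOPY_AT_SPLIT;

	printf("head %#lx -> tail %#lx\n", head_flags, tail_flags[0]);
	return 0;
}
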