Merge tag 'powerpc-3.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
[firefly-linux-kernel-4.4.55.git] mm/memcontrol.c
index 3dfb56a9311730f789b59afeb0dfea8f0b25a32b..85df503ec02347f14b9703c7318d8e67accdd8fb 100644 (file)
@@ -51,7 +51,7 @@
 #include <linux/seq_file.h>
 #include <linux/vmpressure.h>
 #include <linux/mm_inline.h>
-#include <linux/page_cgroup.h>
+#include <linux/swap_cgroup.h>
 #include <linux/cpu.h>
 #include <linux/oom.h>
 #include <linux/lockdep.h>
@@ -1274,7 +1274,6 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 {
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup *memcg;
-       struct page_cgroup *pc;
        struct lruvec *lruvec;
 
        if (mem_cgroup_disabled()) {
@@ -1282,16 +1281,13 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
                goto out;
        }
 
-       pc = lookup_page_cgroup(page);
-       memcg = pc->mem_cgroup;
-
+       memcg = page->mem_cgroup;
        /*
         * Swapcache readahead pages are added to the LRU - and
-        * possibly migrated - before they are charged.  Ensure
-        * pc->mem_cgroup is sane.
+        * possibly migrated - before they are charged.
         */
-       if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
-               pc->mem_cgroup = memcg = root_mem_cgroup;
+       if (!memcg)
+               memcg = root_mem_cgroup;
 
        mz = mem_cgroup_page_zoneinfo(memcg, page);
        lruvec = &mz->lruvec;
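
A minimal sketch of a typical caller after this change, loosely modeled on the LRU-add path in mm/swap.c (the locking and variable names here are illustrative, not verbatim kernel code):

        struct zone *zone = page_zone(page);
        struct lruvec *lruvec;

        spin_lock_irq(&zone->lru_lock);
        /*
         * Resolves page->mem_cgroup, falling back to root_mem_cgroup
         * for uncharged swapcache readahead pages as above.
         */
        lruvec = mem_cgroup_page_lruvec(page, zone);
        SetPageLRU(page);
        add_page_to_lru_list(page, lruvec, page_lru(page));
        spin_unlock_irq(&zone->lru_lock);
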
@@ -1330,41 +1326,24 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
        VM_BUG_ON((long)(*lru_size) < 0);
 }
 
-/*
- * Checks whether given mem is same or in the root_mem_cgroup's
- * hierarchy subtree
- */
-bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
-                                 struct mem_cgroup *memcg)
+bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root)
 {
-       if (root_memcg == memcg)
+       if (root == memcg)
                return true;
-       if (!root_memcg->use_hierarchy || !memcg)
+       if (!root->use_hierarchy)
                return false;
-       return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
+       return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
 }
 
-static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
-                                      struct mem_cgroup *memcg)
+bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
 {
-       bool ret;
-
-       rcu_read_lock();
-       ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
-       rcu_read_unlock();
-       return ret;
-}
-
-bool task_in_mem_cgroup(struct task_struct *task,
-                       const struct mem_cgroup *memcg)
-{
-       struct mem_cgroup *curr = NULL;
+       struct mem_cgroup *task_memcg;
        struct task_struct *p;
        bool ret;
 
        p = find_lock_task_mm(task);
        if (p) {
-               curr = get_mem_cgroup_from_mm(p->mm);
+               task_memcg = get_mem_cgroup_from_mm(p->mm);
                task_unlock(p);
        } else {
                /*
@@ -1373,19 +1352,12 @@ bool task_in_mem_cgroup(struct task_struct *task,
                 * killed to prevent needlessly killing additional tasks.
                 */
                rcu_read_lock();
-               curr = mem_cgroup_from_task(task);
-               if (curr)
-                       css_get(&curr->css);
+               task_memcg = mem_cgroup_from_task(task);
+               css_get(&task_memcg->css);
                rcu_read_unlock();
        }
-       /*
-        * We should check use_hierarchy of "memcg" not "curr". Because checking
-        * use_hierarchy of "curr" here make this function true if hierarchy is
-        * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
-        * hierarchy(even if use_hierarchy is disabled in "memcg").
-        */
-       ret = mem_cgroup_same_or_subtree(memcg, curr);
-       css_put(&curr->css);
+       ret = mem_cgroup_is_descendant(task_memcg, memcg);
+       css_put(&task_memcg->css);
        return ret;
 }
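
Note that mem_cgroup_is_descendant() takes its arguments as (candidate, root), the reverse of the old mem_cgroup_same_or_subtree(root, candidate), and the RCU-wrapping variant is gone: callers must pin both memcgs themselves, as the css_get()/css_put() above does. A hedged sketch of the either-direction check that the OOM waker further down now uses:

        /*
         * Illustrative only; task_memcg and memcg must both be pinned
         * (e.g. by a css reference) for the duration of the call.
         */
        bool related = mem_cgroup_is_descendant(task_memcg, memcg) ||
                       mem_cgroup_is_descendant(memcg, task_memcg);
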
 
@@ -1448,37 +1420,6 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
        return memcg->swappiness;
 }
 
-/*
- * memcg->moving_account is used for checking possibility that some thread is
- * calling move_account(). When a thread on CPU-A starts moving pages under
- * a memcg, other threads should check memcg->moving_account under
- * rcu_read_lock(), like this:
- *
- *         CPU-A                                    CPU-B
- *                                              rcu_read_lock()
- *         memcg->moving_account+1              if (memcg->mocing_account)
- *                                                   take heavy locks.
- *         synchronize_rcu()                    update something.
- *                                              rcu_read_unlock()
- *         start move here.
- */
-
-static void mem_cgroup_start_move(struct mem_cgroup *memcg)
-{
-       atomic_inc(&memcg->moving_account);
-       synchronize_rcu();
-}
-
-static void mem_cgroup_end_move(struct mem_cgroup *memcg)
-{
-       /*
-        * Now, mem_cgroup_clear_mc() may call this function with NULL.
-        * We check NULL in callee rather than caller.
-        */
-       if (memcg)
-               atomic_dec(&memcg->moving_account);
-}
-
 /*
  * A routine for checking "mem" is under move_account() or not.
  *
@@ -1501,8 +1442,8 @@ static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
        if (!from)
                goto unlock;
 
-       ret = mem_cgroup_same_or_subtree(memcg, from)
-               || mem_cgroup_same_or_subtree(memcg, to);
+       ret = mem_cgroup_is_descendant(from, memcg) ||
+               mem_cgroup_is_descendant(to, memcg);
 unlock:
        spin_unlock(&mc.lock);
        return ret;
@@ -1524,23 +1465,6 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
        return false;
 }
 
-/*
- * Take this lock when
- * - a code tries to modify page's memcg while it's USED.
- * - a code tries to modify page state accounting in a memcg.
- */
-static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
-                                 unsigned long *flags)
-{
-       spin_lock_irqsave(&memcg->move_lock, *flags);
-}
-
-static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
-                               unsigned long *flags)
-{
-       spin_unlock_irqrestore(&memcg->move_lock, *flags);
-}
-
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
@@ -1793,52 +1717,11 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
        memcg->last_scanned_node = node;
        return node;
 }
-
-/*
- * Check all nodes whether it contains reclaimable pages or not.
- * For quick scan, we make use of scan_nodes. This will allow us to skip
- * unused nodes. But scan_nodes is lazily updated and may not cotain
- * enough new information. We need to do double check.
- */
-static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
-{
-       int nid;
-
-       /*
-        * quick check...making use of scan_node.
-        * We can skip unused nodes.
-        */
-       if (!nodes_empty(memcg->scan_nodes)) {
-               for (nid = first_node(memcg->scan_nodes);
-                    nid < MAX_NUMNODES;
-                    nid = next_node(nid, memcg->scan_nodes)) {
-
-                       if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
-                               return true;
-               }
-       }
-       /*
-        * Check rest of nodes.
-        */
-       for_each_node_state(nid, N_MEMORY) {
-               if (node_isset(nid, memcg->scan_nodes))
-                       continue;
-               if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
-                       return true;
-       }
-       return false;
-}
-
 #else
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 {
        return 0;
 }
-
-static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
-{
-       return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
-}
 #endif
 
 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
@@ -1882,8 +1765,6 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
                        }
                        continue;
                }
-               if (!mem_cgroup_reclaimable(victim, false))
-                       continue;
                total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
                                                     zone, &nr_scanned);
                *total_scanned += nr_scanned;
@@ -1994,12 +1875,8 @@ static int memcg_oom_wake_function(wait_queue_t *wait,
        oom_wait_info = container_of(wait, struct oom_wait_info, wait);
        oom_wait_memcg = oom_wait_info->memcg;
 
-       /*
-        * Both of oom_wait_info->memcg and wake_memcg are stable under us.
-        * Then we can use css_is_ancestor without taking care of RCU.
-        */
-       if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
-               && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
+       if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
+           !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
                return 0;
        return autoremove_wake_function(wait, mode, sync, arg);
 }
@@ -2141,26 +2018,23 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
                                              unsigned long *flags)
 {
        struct mem_cgroup *memcg;
-       struct page_cgroup *pc;
 
        rcu_read_lock();
 
        if (mem_cgroup_disabled())
                return NULL;
-
-       pc = lookup_page_cgroup(page);
 again:
-       memcg = pc->mem_cgroup;
-       if (unlikely(!memcg || !PageCgroupUsed(pc)))
+       memcg = page->mem_cgroup;
+       if (unlikely(!memcg))
                return NULL;
 
        *locked = false;
        if (atomic_read(&memcg->moving_account) <= 0)
                return memcg;
 
-       move_lock_mem_cgroup(memcg, flags);
-       if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
-               move_unlock_mem_cgroup(memcg, flags);
+       spin_lock_irqsave(&memcg->move_lock, *flags);
+       if (memcg != page->mem_cgroup) {
+               spin_unlock_irqrestore(&memcg->move_lock, *flags);
                goto again;
        }
        *locked = true;
@@ -2174,11 +2048,11 @@ again:
  * @locked: value received from mem_cgroup_begin_page_stat()
  * @flags: value received from mem_cgroup_begin_page_stat()
  */
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
-                             unsigned long flags)
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
+                             unsigned long *flags)
 {
-       if (memcg && locked)
-               move_unlock_mem_cgroup(memcg, &flags);
+       if (memcg && *locked)
+               spin_unlock_irqrestore(&memcg->move_lock, *flags);
 
        rcu_read_unlock();
 }
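
Since locked and flags are now passed by reference on both sides, a caller can hand them straight through the pair. A sketch of the expected usage, loosely following what page_add_file_rmap() in mm/rmap.c looks like after this series (the exact caller code may differ):

        struct mem_cgroup *memcg;
        unsigned long flags;
        bool locked;

        memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
        if (atomic_inc_and_test(&page->_mapcount)) {
                __inc_zone_page_state(page, NR_FILE_MAPPED);
                mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
        }
        mem_cgroup_end_page_stat(memcg, &locked, &flags);
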
@@ -2319,7 +2193,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
                memcg = stock->cached;
                if (!memcg || !stock->nr_pages)
                        continue;
-               if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
+               if (!mem_cgroup_is_descendant(memcg, root_memcg))
                        continue;
                if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
                        if (cpu == curcpu)
@@ -2525,17 +2399,15 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
  */
 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 {
-       struct mem_cgroup *memcg = NULL;
-       struct page_cgroup *pc;
+       struct mem_cgroup *memcg;
        unsigned short id;
        swp_entry_t ent;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
 
-       pc = lookup_page_cgroup(page);
-       if (PageCgroupUsed(pc)) {
-               memcg = pc->mem_cgroup;
-               if (memcg && !css_tryget_online(&memcg->css))
+       memcg = page->mem_cgroup;
+       if (memcg) {
+               if (!css_tryget_online(&memcg->css))
                        memcg = NULL;
        } else if (PageSwapCache(page)) {
                ent.val = page_private(page);
@@ -2583,14 +2455,9 @@ static void unlock_page_lru(struct page *page, int isolated)
 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
                          bool lrucare)
 {
-       struct page_cgroup *pc = lookup_page_cgroup(page);
        int isolated;
 
-       VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
-       /*
-        * we don't need page_cgroup_lock about tail pages, becase they are not
-        * accessed by any other context at this point.
-        */
+       VM_BUG_ON_PAGE(page->mem_cgroup, page);
 
        /*
         * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
@@ -2601,7 +2468,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 
        /*
         * Nobody should be changing or seriously looking at
-        * pc->mem_cgroup and pc->flags at this point:
+        * page->mem_cgroup at this point:
         *
         * - the page is uncharged
         *
@@ -2613,8 +2480,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
         * - a page cache insertion, a swapin fault, or a migration
         *   have the page locked
         */
-       pc->mem_cgroup = memcg;
-       pc->flags = PCG_USED;
+       page->mem_cgroup = memcg;
 
        if (lrucare)
                unlock_page_lru(page, isolated);
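
commit_charge() is now the only place that publishes page->mem_cgroup for a newly charged page. The surrounding protocol, sketched from the two-step charge API this code relies on (error handling trimmed; treat it as an outline rather than a literal caller):

        struct mem_cgroup *memcg;

        if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
                return -ENOMEM;
        /* ... insert the page into the page cache or page tables ... */
        mem_cgroup_commit_charge(page, memcg, false);   /* sets page->mem_cgroup */
        /* if the insertion step fails: mem_cgroup_cancel_charge(page, memcg); */
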
@@ -2640,26 +2506,6 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
        return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
 }
 
-#ifdef CONFIG_SLABINFO
-static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
-{
-       struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-       struct memcg_cache_params *params;
-
-       if (!memcg_kmem_is_active(memcg))
-               return -EIO;
-
-       print_slabinfo_header(m);
-
-       mutex_lock(&memcg_slab_mutex);
-       list_for_each_entry(params, &memcg->memcg_slab_caches, list)
-               cache_show(memcg_params_to_cache(params), m);
-       mutex_unlock(&memcg_slab_mutex);
-
-       return 0;
-}
-#endif
-
 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
                             unsigned long nr_pages)
 {
@@ -3117,8 +2963,6 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
                              int order)
 {
-       struct page_cgroup *pc;
-
        VM_BUG_ON(mem_cgroup_is_root(memcg));
 
        /* The page allocation failed. Revert */
@@ -3126,37 +2970,20 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
                memcg_uncharge_kmem(memcg, 1 << order);
                return;
        }
-       /*
-        * The page is freshly allocated and not visible to any
-        * outside callers yet.  Set up pc non-atomically.
-        */
-       pc = lookup_page_cgroup(page);
-       pc->mem_cgroup = memcg;
-       pc->flags = PCG_USED;
+       page->mem_cgroup = memcg;
 }
 
 void __memcg_kmem_uncharge_pages(struct page *page, int order)
 {
-       struct mem_cgroup *memcg = NULL;
-       struct page_cgroup *pc;
-
-
-       pc = lookup_page_cgroup(page);
-       if (!PageCgroupUsed(pc))
-               return;
-
-       memcg = pc->mem_cgroup;
-       pc->flags = 0;
+       struct mem_cgroup *memcg = page->mem_cgroup;
 
-       /*
-        * We trust that only if there is a memcg associated with the page, it
-        * is a valid allocation
-        */
        if (!memcg)
                return;
 
        VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
+
        memcg_uncharge_kmem(memcg, 1 << order);
+       page->mem_cgroup = NULL;
 }
 #else
 static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
@@ -3174,23 +3001,15 @@ static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
  */
 void mem_cgroup_split_huge_fixup(struct page *head)
 {
-       struct page_cgroup *head_pc;
-       struct page_cgroup *pc;
-       struct mem_cgroup *memcg;
        int i;
 
        if (mem_cgroup_disabled())
                return;
 
-       head_pc = lookup_page_cgroup(head);
+       for (i = 1; i < HPAGE_PMD_NR; i++)
+               head[i].mem_cgroup = head->mem_cgroup;
 
-       memcg = head_pc->mem_cgroup;
-       for (i = 1; i < HPAGE_PMD_NR; i++) {
-               pc = head_pc + i;
-               pc->mem_cgroup = memcg;
-               pc->flags = head_pc->flags;
-       }
-       __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+       __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
                       HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -3199,7 +3018,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
  * mem_cgroup_move_account - move account of the page
  * @page: the page
  * @nr_pages: number of regular pages (>1 for huge pages)
- * @pc:        page_cgroup of the page.
  * @from: mem_cgroup which the page is moved from.
  * @to:        mem_cgroup which the page is moved to. @from != @to.
  *
@@ -3212,7 +3030,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
  */
 static int mem_cgroup_move_account(struct page *page,
                                   unsigned int nr_pages,
-                                  struct page_cgroup *pc,
                                   struct mem_cgroup *from,
                                   struct mem_cgroup *to)
 {
@@ -3232,7 +3049,7 @@ static int mem_cgroup_move_account(struct page *page,
                goto out;
 
        /*
-        * Prevent mem_cgroup_migrate() from looking at pc->mem_cgroup
+        * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
         * of its source page while we change it: page migration takes
         * both pages off the LRU, but page cache replacement doesn't.
         */
@@ -3240,10 +3057,10 @@ static int mem_cgroup_move_account(struct page *page,
                goto out;
 
        ret = -EINVAL;
-       if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
+       if (page->mem_cgroup != from)
                goto out_unlock;
 
-       move_lock_mem_cgroup(from, &flags);
+       spin_lock_irqsave(&from->move_lock, flags);
 
        if (!PageAnon(page) && page_mapped(page)) {
                __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
@@ -3260,14 +3077,15 @@ static int mem_cgroup_move_account(struct page *page,
        }
 
        /*
-        * It is safe to change pc->mem_cgroup here because the page
+        * It is safe to change page->mem_cgroup here because the page
         * is referenced, charged, and isolated - we can't race with
         * uncharging, charging, migration, or LRU putback.
         */
 
        /* caller should have done css_get */
-       pc->mem_cgroup = to;
-       move_unlock_mem_cgroup(from, &flags);
+       page->mem_cgroup = to;
+       spin_unlock_irqrestore(&from->move_lock, flags);
+
        ret = 0;
 
        local_irq_disable();
@@ -3339,42 +3157,6 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
 }
 #endif
 
-#ifdef CONFIG_DEBUG_VM
-static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
-{
-       struct page_cgroup *pc;
-
-       pc = lookup_page_cgroup(page);
-       /*
-        * Can be NULL while feeding pages into the page allocator for
-        * the first time, i.e. during boot or memory hotplug;
-        * or when mem_cgroup_disabled().
-        */
-       if (likely(pc) && PageCgroupUsed(pc))
-               return pc;
-       return NULL;
-}
-
-bool mem_cgroup_bad_page_check(struct page *page)
-{
-       if (mem_cgroup_disabled())
-               return false;
-
-       return lookup_page_cgroup_used(page) != NULL;
-}
-
-void mem_cgroup_print_bad_page(struct page *page)
-{
-       struct page_cgroup *pc;
-
-       pc = lookup_page_cgroup_used(page);
-       if (pc) {
-               pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
-                        pc, pc->flags, pc->mem_cgroup);
-       }
-}
-#endif
-
 static DEFINE_MUTEX(memcg_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
@@ -4660,7 +4442,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
         *
         * DO NOT ADD NEW FILES.
         */
-       name = cfile.file->f_dentry->d_name.name;
+       name = cfile.file->f_path.dentry->d_name.name;
 
        if (!strcmp(name, "memory.usage_in_bytes")) {
                event->register_event = mem_cgroup_usage_register_event;
@@ -4684,7 +4466,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
         * automatically removed on cgroup destruction but the removal is
         * asynchronous, so take an extra ref on @css.
         */
-       cfile_css = css_tryget_online_from_dir(cfile.file->f_dentry->d_parent,
+       cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
                                               &memory_cgrp_subsys);
        ret = -EINVAL;
        if (IS_ERR(cfile_css))
@@ -4824,7 +4606,10 @@ static struct cftype mem_cgroup_files[] = {
 #ifdef CONFIG_SLABINFO
        {
                .name = "kmem.slabinfo",
-               .seq_show = mem_cgroup_slabinfo_read,
+               .seq_start = slab_start,
+               .seq_next = slab_next,
+               .seq_stop = slab_stop,
+               .seq_show = memcg_slab_show,
        },
 #endif
 #endif
@@ -5288,7 +5073,6 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
                unsigned long addr, pte_t ptent, union mc_target *target)
 {
        struct page *page = NULL;
-       struct page_cgroup *pc;
        enum mc_target_type ret = MC_TARGET_NONE;
        swp_entry_t ent = { .val = 0 };
 
@@ -5302,13 +5086,12 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
        if (!page && !ent.val)
                return ret;
        if (page) {
-               pc = lookup_page_cgroup(page);
                /*
                 * Do only loose check w/o serialization.
-                * mem_cgroup_move_account() checks the pc is valid or
+                * mem_cgroup_move_account() checks the page is valid or
                 * not under LRU exclusion.
                 */
-               if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
+               if (page->mem_cgroup == mc.from) {
                        ret = MC_TARGET_PAGE;
                        if (target)
                                target->page = page;
@@ -5336,15 +5119,13 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
                unsigned long addr, pmd_t pmd, union mc_target *target)
 {
        struct page *page = NULL;
-       struct page_cgroup *pc;
        enum mc_target_type ret = MC_TARGET_NONE;
 
        page = pmd_page(pmd);
        VM_BUG_ON_PAGE(!page || !PageHead(page), page);
        if (!move_anon())
                return ret;
-       pc = lookup_page_cgroup(page);
-       if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
+       if (page->mem_cgroup == mc.from) {
                ret = MC_TARGET_PAGE;
                if (target) {
                        get_page(page);
@@ -5466,8 +5247,6 @@ static void __mem_cgroup_clear_mc(void)
 
 static void mem_cgroup_clear_mc(void)
 {
-       struct mem_cgroup *from = mc.from;
-
        /*
         * we must clear moving_task before waking up waiters at the end of
         * task migration.
@@ -5478,7 +5257,6 @@ static void mem_cgroup_clear_mc(void)
        mc.from = NULL;
        mc.to = NULL;
        spin_unlock(&mc.lock);
-       mem_cgroup_end_move(from);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
@@ -5511,7 +5289,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
                        VM_BUG_ON(mc.precharge);
                        VM_BUG_ON(mc.moved_charge);
                        VM_BUG_ON(mc.moved_swap);
-                       mem_cgroup_start_move(from);
+
                        spin_lock(&mc.lock);
                        mc.from = from;
                        mc.to = memcg;
@@ -5531,7 +5309,8 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
                                     struct cgroup_taskset *tset)
 {
-       mem_cgroup_clear_mc();
+       if (mc.to)
+               mem_cgroup_clear_mc();
 }
 
 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
@@ -5545,7 +5324,6 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
        enum mc_target_type target_type;
        union mc_target target;
        struct page *page;
-       struct page_cgroup *pc;
 
        /*
         * We don't take compound_lock() here but no race with splitting thp
@@ -5566,9 +5344,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
                if (target_type == MC_TARGET_PAGE) {
                        page = target.page;
                        if (!isolate_lru_page(page)) {
-                               pc = lookup_page_cgroup(page);
                                if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
-                                                       pc, mc.from, mc.to)) {
+                                                            mc.from, mc.to)) {
                                        mc.precharge -= HPAGE_PMD_NR;
                                        mc.moved_charge += HPAGE_PMD_NR;
                                }
@@ -5596,9 +5373,7 @@ retry:
                        page = target.page;
                        if (isolate_lru_page(page))
                                goto put;
-                       pc = lookup_page_cgroup(page);
-                       if (!mem_cgroup_move_account(page, 1, pc,
-                                                    mc.from, mc.to)) {
+                       if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
                                mc.precharge--;
                                /* we uncharge from mc.from later. */
                                mc.moved_charge++;
@@ -5642,6 +5417,13 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
        struct vm_area_struct *vma;
 
        lru_add_drain_all();
+       /*
+        * Signal mem_cgroup_begin_page_stat() to take the memcg's
+        * move_lock while we're moving its pages to another memcg.
+        * Then wait for already started RCU-only updates to finish.
+        */
+       atomic_inc(&mc.from->moving_account);
+       synchronize_rcu();
 retry:
        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
                /*
@@ -5674,6 +5456,7 @@ retry:
                        break;
        }
        up_read(&mm->mmap_sem);
+       atomic_dec(&mc.from->moving_account);
 }
 
 static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
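
The open-coded atomic_inc()/synchronize_rcu() before the walk and atomic_dec() after it replace the removed mem_cgroup_start_move()/mem_cgroup_end_move() helpers. The pairing with the reader side in mem_cgroup_begin_page_stat() is roughly the following (an illustrative timeline, not literal code):

        /*
         *   mem_cgroup_move_charge()            mem_cgroup_begin_page_stat()
         *
         *   atomic_inc(&from->moving_account)   rcu_read_lock()
         *   synchronize_rcu()                   memcg = page->mem_cgroup
         *     (waits for readers already        if (atomic_read(&memcg->moving_account) > 0)
         *      inside their RCU section)                spin_lock_irqsave(&memcg->move_lock, ...)
         *   move pages under from->move_lock    ... update page state counters ...
         *   atomic_dec(&from->moving_account)   mem_cgroup_end_page_stat() unlocks
         */
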
@@ -5778,7 +5561,6 @@ static void __init enable_swap_cgroup(void)
 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 {
        struct mem_cgroup *memcg;
-       struct page_cgroup *pc;
        unsigned short oldid;
 
        VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5787,19 +5569,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        if (!do_swap_account)
                return;
 
-       pc = lookup_page_cgroup(page);
+       memcg = page->mem_cgroup;
 
        /* Readahead page, never charged */
-       if (!PageCgroupUsed(pc))
+       if (!memcg)
                return;
 
-       memcg = pc->mem_cgroup;
-
        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
        VM_BUG_ON_PAGE(oldid, page);
        mem_cgroup_swap_statistics(memcg, true);
 
-       pc->flags = 0;
+       page->mem_cgroup = NULL;
 
        if (!mem_cgroup_is_root(memcg))
                page_counter_uncharge(&memcg->memory, 1);
@@ -5866,7 +5646,6 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                goto out;
 
        if (PageSwapCache(page)) {
-               struct page_cgroup *pc = lookup_page_cgroup(page);
                /*
                 * Every swap fault against a single page tries to charge the
                 * page, bail as early as possible.  shmem_unuse() encounters
@@ -5874,7 +5653,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                 * the page lock, which serializes swap cache removal, which
                 * in turn serializes uncharging.
                 */
-               if (PageCgroupUsed(pc))
+               if (page->mem_cgroup)
                        goto out;
        }
 
@@ -6027,7 +5806,6 @@ static void uncharge_list(struct list_head *page_list)
        next = page_list->next;
        do {
                unsigned int nr_pages = 1;
-               struct page_cgroup *pc;
 
                page = list_entry(next, struct page, lru);
                next = page->lru.next;
@@ -6035,23 +5813,22 @@ static void uncharge_list(struct list_head *page_list)
                VM_BUG_ON_PAGE(PageLRU(page), page);
                VM_BUG_ON_PAGE(page_count(page), page);
 
-               pc = lookup_page_cgroup(page);
-               if (!PageCgroupUsed(pc))
+               if (!page->mem_cgroup)
                        continue;
 
                /*
                 * Nobody should be changing or seriously looking at
-                * pc->mem_cgroup and pc->flags at this point, we have
-                * fully exclusive access to the page.
+                * page->mem_cgroup at this point, we have fully
+                * exclusive access to the page.
                 */
 
-               if (memcg != pc->mem_cgroup) {
+               if (memcg != page->mem_cgroup) {
                        if (memcg) {
                                uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
                                               nr_huge, page);
                                pgpgout = nr_anon = nr_file = nr_huge = 0;
                        }
-                       memcg = pc->mem_cgroup;
+                       memcg = page->mem_cgroup;
                }
 
                if (PageTransHuge(page)) {
@@ -6065,7 +5842,7 @@ static void uncharge_list(struct list_head *page_list)
                else
                        nr_file += nr_pages;
 
-               pc->flags = 0;
+               page->mem_cgroup = NULL;
 
                pgpgout++;
        } while (next != page_list);
@@ -6084,14 +5861,11 @@ static void uncharge_list(struct list_head *page_list)
  */
 void mem_cgroup_uncharge(struct page *page)
 {
-       struct page_cgroup *pc;
-
        if (mem_cgroup_disabled())
                return;
 
        /* Don't touch page->lru of any random page, pre-check: */
-       pc = lookup_page_cgroup(page);
-       if (!PageCgroupUsed(pc))
+       if (!page->mem_cgroup)
                return;
 
        INIT_LIST_HEAD(&page->lru);
@@ -6127,7 +5901,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
                        bool lrucare)
 {
-       struct page_cgroup *pc;
+       struct mem_cgroup *memcg;
        int isolated;
 
        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
@@ -6142,8 +5916,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
                return;
 
        /* Page cache replacement: new page already charged? */
-       pc = lookup_page_cgroup(newpage);
-       if (PageCgroupUsed(pc))
+       if (newpage->mem_cgroup)
                return;
 
        /*
@@ -6152,19 +5925,19 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
         * uncharged page when the PFN walker finds a page that
         * reclaim just put back on the LRU but has not released yet.
         */
-       pc = lookup_page_cgroup(oldpage);
-       if (!PageCgroupUsed(pc))
+       memcg = oldpage->mem_cgroup;
+       if (!memcg)
                return;
 
        if (lrucare)
                lock_page_lru(oldpage, &isolated);
 
-       pc->flags = 0;
+       oldpage->mem_cgroup = NULL;
 
        if (lrucare)
                unlock_page_lru(oldpage, isolated);
 
-       commit_charge(newpage, pc->mem_cgroup, lrucare);
+       commit_charge(newpage, memcg, lrucare);
 }
 
 /*