diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 26a38b7c7739d36c5ed734ec17f0dfcaecc2bd2e..b8dc8e4cbf6aa8eef713dab17d0b987bc65bec56 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -49,6 +49,7 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
+#include <linux/vmpressure.h>
 #include <linux/mm_inline.h>
 #include <linux/page_cgroup.h>
 #include <linux/cpu.h>
@@ -152,12 +153,15 @@ struct mem_cgroup_stat_cpu {
 };
 
 struct mem_cgroup_reclaim_iter {
-       /* last scanned hierarchy member with elevated css ref count */
+       /*
+        * last scanned hierarchy member. Valid only if last_dead_count
+        * matches memcg->dead_count of the hierarchy root group.
+        */
        struct mem_cgroup *last_visited;
+       unsigned long last_dead_count;
+
        /* scan generation, increased every round-trip */
        unsigned int generation;
-       /* lock to protect the position and generation */
-       spinlock_t iter_lock;
 };
 
 /*
@@ -258,6 +262,9 @@ struct mem_cgroup {
         */
        struct res_counter res;
 
+       /* vmpressure notifications */
+       struct vmpressure vmpressure;
+
        union {
                /*
                 * the counter to account for mem+swap usage.
@@ -337,6 +344,7 @@ struct mem_cgroup {
        struct mem_cgroup_stat_cpu nocpu_base;
        spinlock_t pcp_counter_lock;
 
+       atomic_t        dead_count;
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
        struct tcp_memcontrol tcp_mem;
 #endif
@@ -355,6 +363,7 @@ struct mem_cgroup {
        atomic_t        numainfo_events;
        atomic_t        numainfo_updating;
 #endif
+
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
@@ -506,6 +515,24 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
        return container_of(s, struct mem_cgroup, css);
 }
 
+/* Some nice accessors for the vmpressure. */
+struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
+{
+       if (!memcg)
+               memcg = root_mem_cgroup;
+       return &memcg->vmpressure;
+}
+
+struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
+{
+       return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
+}
+
+struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css)
+{
+       return &mem_cgroup_from_css(css)->vmpressure;
+}
+
 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 {
        return (memcg == root_mem_cgroup);
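The three accessors above are what lets the vmpressure code translate between
its own struct vmpressure and the owning memcg/css without reaching into memcg
internals: a NULL memcg maps to root_mem_cgroup, and the css round trip goes
through container_of(). As a rough, hypothetical sketch of how a vmpressure
consumer might use them (the function name is illustrative, not part of this
patch):

        static void example_signal_pressure(struct mem_cgroup *memcg)
        {
                /* NULL means "global reclaim": fall back to the root group. */
                struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
                struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);

                /*
                 * From css we can reach css->cgroup and notify whatever
                 * eventfds user space registered on memory.pressure_level.
                 */
                (void)css;
        }
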
@@ -1069,6 +1096,51 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
        return memcg;
 }
 
+/*
+ * Returns the next (in a pre-order walk) live memcg (with an elevated css
+ * reference count), or NULL once the whole subtree under root has been
+ * visited.
+ *
+ * Helper function to be used by mem_cgroup_iter().
+ */
+static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
+               struct mem_cgroup *last_visited)
+{
+       struct cgroup *prev_cgroup, *next_cgroup;
+
+       /*
+        * Root is not visited by cgroup iterators so it needs an
+        * explicit visit.
+        */
+       if (!last_visited)
+               return root;
+
+       prev_cgroup = (last_visited == root) ? NULL
+               : last_visited->css.cgroup;
+skip_node:
+       next_cgroup = cgroup_next_descendant_pre(
+                       prev_cgroup, root->css.cgroup);
+
+       /*
+        * Even if we found a group we have to make sure it is
+        * alive. A failing css_tryget() means the group is being
+        * destroyed and should be skipped, continuing the tree walk.
+        * last_visited css is safe to use because it is
+        * protected by css_get and the tree walk is rcu safe.
+        */
+       if (next_cgroup) {
+               struct mem_cgroup *mem = mem_cgroup_from_cont(
+                               next_cgroup);
+               if (css_tryget(&mem->css))
+                       return mem;
+               else {
+                       prev_cgroup = next_cgroup;
+                       goto skip_node;
+               }
+       }
+
+       return NULL;
+}
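
For reference, callers drive mem_cgroup_iter() in the usual pre-order fashion;
each group it returns carries a css reference that is dropped when the group
is passed back as @prev on the next call (or via mem_cgroup_iter_break() when
the walk is abandoned early). A minimal sketch of that calling convention,
essentially what the existing for_each_mem_cgroup_tree() macro expands to:

        struct mem_cgroup *iter;

        for (iter = mem_cgroup_iter(root, NULL, NULL);
             iter;
             iter = mem_cgroup_iter(root, iter, NULL)) {
                /* work on iter; early exits must call mem_cgroup_iter_break(root, iter) */
        }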
+
 /**
  * mem_cgroup_iter - iterate over memory cgroup hierarchy
  * @root: hierarchy root
@@ -1092,6 +1164,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 {
        struct mem_cgroup *memcg = NULL;
        struct mem_cgroup *last_visited = NULL;
+       unsigned long uninitialized_var(dead_count);
 
        if (mem_cgroup_disabled())
                return NULL;
@@ -1111,7 +1184,6 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
        rcu_read_lock();
        while (!memcg) {
                struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
-               struct cgroup_subsys_state *css = NULL;
 
                if (reclaim) {
                        int nid = zone_to_nid(reclaim->zone);
@@ -1120,70 +1192,52 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 
                        mz = mem_cgroup_zoneinfo(root, nid, zid);
                        iter = &mz->reclaim_iter[reclaim->priority];
-                       spin_lock(&iter->iter_lock);
                        last_visited = iter->last_visited;
                        if (prev && reclaim->generation != iter->generation) {
-                               if (last_visited) {
-                                       css_put(&last_visited->css);
-                                       iter->last_visited = NULL;
-                               }
-                               spin_unlock(&iter->iter_lock);
+                               iter->last_visited = NULL;
                                goto out_unlock;
                        }
-               }
 
-               /*
-                * Root is not visited by cgroup iterators so it needs an
-                * explicit visit.
-                */
-               if (!last_visited) {
-                       css = &root->css;
-               } else {
-                       struct cgroup *prev_cgroup, *next_cgroup;
-
-                       prev_cgroup = (last_visited == root) ? NULL
-                               : last_visited->css.cgroup;
-                       next_cgroup = cgroup_next_descendant_pre(prev_cgroup,
-                                       root->css.cgroup);
-                       if (next_cgroup)
-                               css = cgroup_subsys_state(next_cgroup,
-                                               mem_cgroup_subsys_id);
+                       /*
+                        * If the dead_count mismatches, a destruction
+                        * has happened or is happening concurrently.
+                        * If the dead_count matches, a destruction
+                        * might still happen concurrently, but since
+                        * we checked under RCU, that destruction
+                        * won't free the object until we release the
+                        * RCU reader lock.  Thus, the dead_count
+                        * check verifies the pointer is still valid,
+                        * css_tryget() verifies the cgroup pointed to
+                        * is alive.
+                        */
+                       dead_count = atomic_read(&root->dead_count);
+                       smp_rmb();
+                       last_visited = iter->last_visited;
+                       if (last_visited) {
+                               if ((dead_count != iter->last_dead_count) ||
+                                       !css_tryget(&last_visited->css)) {
+                                       last_visited = NULL;
+                               }
+                       }
                }
 
-               /*
-                * Even if we found a group we have to make sure it is alive.
-                * css && !memcg means that the groups should be skipped and
-                * we should continue the tree walk.
-                * last_visited css is safe to use because it is protected by
-                * css_get and the tree walk is rcu safe.
-                */
-               if (css == &root->css || (css && css_tryget(css)))
-                       memcg = mem_cgroup_from_css(css);
+               memcg = __mem_cgroup_iter_next(root, last_visited);
 
                if (reclaim) {
-                       struct mem_cgroup *curr = memcg;
-
                        if (last_visited)
                                css_put(&last_visited->css);
 
-                       if (css && !memcg)
-                               curr = mem_cgroup_from_css(css);
-
-                       /* make sure that the cached memcg is not removed */
-                       if (curr)
-                               css_get(&curr->css);
-                       iter->last_visited = curr;
+                       iter->last_visited = memcg;
+                       smp_wmb();
+                       iter->last_dead_count = dead_count;
 
-                       if (!css)
+                       if (!memcg)
                                iter->generation++;
                        else if (!prev && memcg)
                                reclaim->generation = iter->generation;
-                       spin_unlock(&iter->iter_lock);
-               } else if (css && !memcg) {
-                       last_visited = mem_cgroup_from_css(css);
                }
 
-               if (prev && !css)
+               if (prev && !memcg)
                        goto out_unlock;
        }
 out_unlock:
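The lockless caching above replaces the old iter_lock with a publish/validate
pairing: the update path stores the pointer before the dead_count token
(smp_wmb() between the two stores), and the lookup path reads the token before
the pointer (smp_rmb() between the two loads), with css_tryget() as the final
liveness check. Condensed from the hunk above, the two sides pair up roughly
like this:

        /* update side, at the end of an iteration step */
        iter->last_visited = memcg;
        smp_wmb();                              /* pointer before token */
        iter->last_dead_count = dead_count;

        /* lookup side, at the start of the next step */
        dead_count = atomic_read(&root->dead_count);
        smp_rmb();                              /* token before pointer */
        last_visited = iter->last_visited;
        if (last_visited &&
            (dead_count != iter->last_dead_count ||
             !css_tryget(&last_visited->css)))
                last_visited = NULL;            /* stale: restart from root */
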
@@ -1733,11 +1787,11 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
        struct task_struct *chosen = NULL;
 
        /*
-        * If current has a pending SIGKILL, then automatically select it.  The
-        * goal is to allow it to allocate so that it may quickly exit and free
-        * its memory.
+        * If current has a pending SIGKILL or is exiting, then automatically
+        * select it.  The goal is to allow it to allocate so that it may
+        * quickly exit and free its memory.
         */
-       if (fatal_signal_pending(current)) {
+       if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
                set_thread_flag(TIF_MEMDIE);
                return;
        }
@@ -3161,12 +3215,12 @@ void memcg_release_cache(struct kmem_cache *s)
 
        root = s->memcg_params->root_cache;
        root->memcg_params->memcg_caches[id] = NULL;
-       mem_cgroup_put(memcg);
 
        mutex_lock(&memcg->slab_caches_mutex);
        list_del(&s->memcg_params->list);
        mutex_unlock(&memcg->slab_caches_mutex);
 
+       mem_cgroup_put(memcg);
 out:
        kfree(s->memcg_params);
 }
@@ -3429,7 +3483,6 @@ static void memcg_create_cache_work_func(struct work_struct *w)
 
 /*
  * Enqueue the creation of a per-memcg kmem_cache.
- * Called with rcu_read_lock.
  */
 static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
                                         struct kmem_cache *cachep)
@@ -3437,12 +3490,8 @@ static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
        struct create_work *cw;
 
        cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
-       if (cw == NULL)
-               return;
-
-       /* The corresponding put will be done in the workqueue. */
-       if (!css_tryget(&memcg->css)) {
-               kfree(cw);
+       if (cw == NULL) {
+               css_put(&memcg->css);
                return;
        }
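
The reason the allocation-failure path now has to css_put() is a change in the
reference convention: the css_tryget() that used to live here moves out to the
caller (__memcg_kmem_get_cache(), below), and from the moment this function is
entered it owns that reference on every path, handing it to the queued work
item on success or dropping it on failure. Schematically (names simplified,
not a verbatim copy of the patch):

        /* caller */
        if (!css_tryget(&memcg->css))           /* pin the memcg ...           */
                goto out;
        memcg_create_cache_enqueue(memcg, cachep);

        /* enqueue path */
        cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
        if (!cw) {
                css_put(&memcg->css);           /* ... or drop it on failure   */
                return;
        }
        cw->memcg = memcg;
        schedule_work(&cw->work);               /* worker does the final css_put() */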
 
@@ -3498,10 +3547,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 
        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
-       rcu_read_unlock();
 
        if (!memcg_can_account_kmem(memcg))
-               return cachep;
+               goto out;
 
        idx = memcg_cache_id(memcg);
 
@@ -3510,29 +3558,38 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
         * code updating memcg_caches will issue a write barrier to match this.
         */
        read_barrier_depends();
-       if (unlikely(cachep->memcg_params->memcg_caches[idx] == NULL)) {
-               /*
-                * If we are in a safe context (can wait, and not in interrupt
-                * context), we could be be predictable and return right away.
-                * This would guarantee that the allocation being performed
-                * already belongs in the new cache.
-                *
-                * However, there are some clashes that can arrive from locking.
-                * For instance, because we acquire the slab_mutex while doing
-                * kmem_cache_dup, this means no further allocation could happen
-                * with the slab_mutex held.
-                *
-                * Also, because cache creation issue get_online_cpus(), this
-                * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
-                * that ends up reversed during cpu hotplug. (cpuset allocates
-                * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
-                * better to defer everything.
-                */
-               memcg_create_cache_enqueue(memcg, cachep);
-               return cachep;
+       if (likely(cachep->memcg_params->memcg_caches[idx])) {
+               cachep = cachep->memcg_params->memcg_caches[idx];
+               goto out;
        }
 
-       return cachep->memcg_params->memcg_caches[idx];
+       /* The corresponding put will be done in the workqueue. */
+       if (!css_tryget(&memcg->css))
+               goto out;
+       rcu_read_unlock();
+
+       /*
+        * If we are in a safe context (can wait, and not in interrupt
+        * context), we could be predictable and return right away.
+        * This would guarantee that the allocation being performed
+        * already belongs in the new cache.
+        *
+        * However, there are some clashes that can arrive from locking.
+        * For instance, because we acquire the slab_mutex while doing
+        * kmem_cache_dup, this means no further allocation could happen
+        * with the slab_mutex held.
+        *
+        * Also, because cache creation issues get_online_cpus(), this
+        * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
+        * that ends up reversed during cpu hotplug. (cpuset allocates
+        * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
+        * better to defer everything.
+        */
+       memcg_create_cache_enqueue(memcg, cachep);
+       return cachep;
+out:
+       rcu_read_unlock();
+       return cachep;
 }
 EXPORT_SYMBOL(__memcg_kmem_get_cache);
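
Ignoring the long comment that merely moved, the net effect of this hunk is
that the RCU read section now also covers memcg_can_account_kmem() and the
per-memcg cache lookup, and a css reference is only taken once it is clear
that asynchronous cache creation has to be scheduled; every early return
funnels through the new "out" label so rcu_read_unlock() cannot be missed.
The resulting tail of __memcg_kmem_get_cache() looks roughly like:

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));

        if (!memcg_can_account_kmem(memcg))
                goto out;                       /* fall back to the root cache */

        idx = memcg_cache_id(memcg);
        read_barrier_depends();
        if (likely(cachep->memcg_params->memcg_caches[idx])) {
                cachep = cachep->memcg_params->memcg_caches[idx];
                goto out;                       /* per-memcg copy already exists */
        }

        if (!css_tryget(&memcg->css))           /* memcg is going away */
                goto out;
        rcu_read_unlock();

        memcg_create_cache_enqueue(memcg, cachep);      /* consumes the reference */
        return cachep;
out:
        rcu_read_unlock();
        return cachep;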
 
@@ -4994,9 +5051,6 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
        type = MEMFILE_TYPE(cft->private);
        name = MEMFILE_ATTR(cft->private);
 
-       if (!do_swap_account && type == _MEMSWAP)
-               return -EOPNOTSUPP;
-
        switch (type) {
        case _MEM:
                if (name == RES_USAGE)
@@ -5131,9 +5185,6 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
        type = MEMFILE_TYPE(cft->private);
        name = MEMFILE_ATTR(cft->private);
 
-       if (!do_swap_account && type == _MEMSWAP)
-               return -EOPNOTSUPP;
-
        switch (name) {
        case RES_LIMIT:
                if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
@@ -5210,9 +5261,6 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
        type = MEMFILE_TYPE(event);
        name = MEMFILE_ATTR(event);
 
-       if (!do_swap_account && type == _MEMSWAP)
-               return -EOPNOTSUPP;
-
        switch (name) {
        case RES_MAX_USAGE:
                if (type == _MEM)
@@ -5791,7 +5839,7 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
                return ret;
 
        return mem_cgroup_sockets_init(memcg, ss);
-};
+}
 
 static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
 {
@@ -5885,6 +5933,11 @@ static struct cftype mem_cgroup_files[] = {
                .unregister_event = mem_cgroup_oom_unregister_event,
                .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
        },
+       {
+               .name = "pressure_level",
+               .register_event = vmpressure_register_event,
+               .unregister_event = vmpressure_unregister_event,
+       },
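
The new memory.pressure_level file has no read or write handlers of its own;
it is only useful through the cgroup event mechanism, where user space arms an
eventfd via cgroup.event_control and names one of the vmpressure levels
("low", "medium" or "critical"). A minimal user-space sketch of that
registration, assuming the memory controller is mounted at
/sys/fs/cgroup/memory/foo (paths and error handling are illustrative only):

        #include <fcntl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/eventfd.h>
        #include <unistd.h>

        int main(void)
        {
                int efd = eventfd(0, 0);
                int lvl = open("/sys/fs/cgroup/memory/foo/memory.pressure_level", O_RDONLY);
                int ctl = open("/sys/fs/cgroup/memory/foo/cgroup.event_control", O_WRONLY);
                char buf[64];
                uint64_t cnt;

                /* "<eventfd> <pressure_level fd> <level>" arms the notification */
                snprintf(buf, sizeof(buf), "%d %d low", efd, lvl);
                write(ctl, buf, strlen(buf));

                /* blocks until the kernel signals "low" pressure in this group */
                read(efd, &cnt, sizeof(cnt));
                printf("memory pressure event (count %llu)\n", (unsigned long long)cnt);
                return 0;
        }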
 #ifdef CONFIG_NUMA
        {
                .name = "numa_stat",
@@ -5975,12 +6028,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
                return 1;
 
        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-               int prio;
-
                mz = &pn->zoneinfo[zone];
                lruvec_init(&mz->lruvec);
-               for (prio = 0; prio < DEF_PRIORITY + 1; prio++)
-                       spin_lock_init(&mz->reclaim_iter[prio].iter_lock);
                mz->usage_in_excess = 0;
                mz->on_tree = false;
                mz->memcg = memcg;
@@ -6170,6 +6219,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
        memcg->move_charge_at_immigrate = 0;
        mutex_init(&memcg->thresholds_lock);
        spin_lock_init(&memcg->move_lock);
+       vmpressure_init(&memcg->vmpressure);
 
        return &memcg->css;
 
@@ -6235,10 +6285,29 @@ mem_cgroup_css_online(struct cgroup *cont)
        return error;
 }
 
+/*
+ * Announce to all ancestors that a group from their hierarchy is gone.
+ */
+static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
+{
+       struct mem_cgroup *parent = memcg;
+
+       while ((parent = parent_mem_cgroup(parent)))
+               atomic_inc(&parent->dead_count);
+
+       /*
+        * If the root memcg is not hierarchical we have to check it
+        * explicitly.
+        */
+       if (!root_mem_cgroup->use_hierarchy)
+               atomic_inc(&root_mem_cgroup->dead_count);
+}
+
 static void mem_cgroup_css_offline(struct cgroup *cont)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
+       mem_cgroup_invalidate_reclaim_iterators(memcg);
        mem_cgroup_reparent_charges(memcg);
        mem_cgroup_destroy_all_caches(memcg);
 }