Merge branches 'pm-devfreq' and 'pm-cpufreq'
[firefly-linux-kernel-4.4.55.git] / kernel / cgroup.c
index b89f3168411bc1e9f4f96a42fe5732f3e36c1d41..2c9eae6ad9704d3278557f6c692d2ef0027b587b 100644 (file)
@@ -46,7 +46,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/rwsem.h>
-#include <linux/percpu-rwsem.h>
 #include <linux/string.h>
 #include <linux/sort.h>
 #include <linux/kmod.h>
@@ -104,8 +103,6 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
  */
 static DEFINE_SPINLOCK(release_agent_path_lock);
 
-struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
 #define cgroup_assert_mutex_or_rcu_locked()                            \
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
                           !lockdep_is_held(&cgroup_mutex),             \
@@ -145,6 +142,7 @@ static const char *cgroup_subsys_name[] = {
  * part of that cgroup.
  */
 struct cgroup_root cgrp_dfl_root;
+EXPORT_SYMBOL_GPL(cgrp_dfl_root);
 
 /*
  * The default hierarchy always exists but is hidden until mounted for the
@@ -186,6 +184,9 @@ static u64 css_serial_nr_next = 1;
 static unsigned long have_fork_callback __read_mostly;
 static unsigned long have_exit_callback __read_mostly;
 
+/* Ditto for the can_fork callback. */
+static unsigned long have_canfork_callback __read_mostly;
+
 static struct cftype cgroup_dfl_base_files[];
 static struct cftype cgroup_legacy_base_files[];
 
@@ -207,7 +208,7 @@ static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
 
        idr_preload(gfp_mask);
        spin_lock_bh(&cgroup_idr_lock);
-       ret = idr_alloc(idr, ptr, start, end, gfp_mask);
+       ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_WAIT);
        spin_unlock_bh(&cgroup_idr_lock);
        idr_preload_end();
        return ret;
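Masking out __GFP_WAIT here is what allows the later hunks to switch callers
from GFP_NOWAIT to GFP_KERNEL: idr_preload() performs any blocking allocation
before the BH-safe spinlock is taken, so the idr_alloc() under the lock never
sleeps. A minimal sketch of the same preload pattern, with hypothetical names:

    static DEFINE_SPINLOCK(example_lock);
    static DEFINE_IDR(example_idr);

    /* hypothetical helper: never sleeps while example_lock is held */
    static int example_alloc_id(void *ptr, gfp_t gfp_mask)
    {
            int ret;

            idr_preload(gfp_mask);          /* may sleep if gfp_mask allows */
            spin_lock_bh(&example_lock);
            ret = idr_alloc(&example_idr, ptr, 1, 0, gfp_mask & ~__GFP_WAIT);
            spin_unlock_bh(&example_lock);
            idr_preload_end();
            return ret;
    }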
@@ -870,6 +871,48 @@ static struct css_set *find_css_set(struct css_set *old_cset,
        return cset;
 }
 
+void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+       down_read(&tsk->signal->group_rwsem);
+}
+
+void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+       up_read(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_lock - lock threadgroup
+ * @tsk: member task of the threadgroup to lock
+ *
+ * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
+ * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
+ * change ->group_leader/pid.  This is useful for cases where the threadgroup
+ * needs to stay stable across blockable operations.
+ *
+ * fork and exit explicitly call threadgroup_change_{begin|end}() for
+ * synchronization.  While held, no new task will be added to threadgroup
+ * and no existing live task will have its PF_EXITING set.
+ *
+ * de_thread() does threadgroup_change_{begin|end}() when a non-leader
+ * sub-thread becomes a new leader.
+ */
+static void threadgroup_lock(struct task_struct *tsk)
+{
+       down_write(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_unlock - unlock threadgroup
+ * @tsk: member task of the threadgroup to unlock
+ *
+ * Reverse threadgroup_lock().
+ */
+static inline void threadgroup_unlock(struct task_struct *tsk)
+{
+       up_write(&tsk->signal->group_rwsem);
+}
+
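These helpers restore the per-signal_struct rwsem for threadgroup stability:
fork and exit take the read side through cgroup_threadgroup_change_begin/end(),
while the migration paths below take the write side through threadgroup_lock().
A hedged sketch of the assumed fork-side bracketing (the real call sites live
in kernel/fork.c, outside this diff):

    /* assumed caller shape, not part of this patch */
    cgroup_threadgroup_change_begin(current);
    /* ... link the new thread into current's threadgroup ... */
    cgroup_threadgroup_change_end(current);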
 static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
 {
        struct cgroup *root_cgrp = kf_root->kn->priv;
@@ -1027,10 +1070,13 @@ static const struct file_operations proc_cgroupstats_operations;
 static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
                              char *buf)
 {
+       struct cgroup_subsys *ss = cft->ss;
+
        if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
            !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
                snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
-                        cft->ss->name, cft->name);
+                        cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
+                        cft->name);
        else
                strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
        return buf;
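On the default hierarchy the file prefix is ss->name; legacy hierarchies keep
ss->legacy_name so existing userspace paths stay valid. As a purely
hypothetical illustration (no subsystem in this tree diverges yet), a
controller declared as below would expose "io.weight" on the default hierarchy
but "blkio.weight" on legacy mounts:

    /* hypothetical controller whose v2 name differs from its legacy name */
    struct cgroup_subsys example_cgrp_subsys = {
            .name           = "io",         /* default hierarchy */
            .legacy_name    = "blkio",      /* legacy hierarchies */
    };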
@@ -1332,9 +1378,10 @@ static int cgroup_show_options(struct seq_file *seq,
        struct cgroup_subsys *ss;
        int ssid;
 
-       for_each_subsys(ss, ssid)
-               if (root->subsys_mask & (1 << ssid))
-                       seq_printf(seq, ",%s", ss->name);
+       if (root != &cgrp_dfl_root)
+               for_each_subsys(ss, ssid)
+                       if (root->subsys_mask & (1 << ssid))
+                               seq_show_option(seq, ss->legacy_name, NULL);
        if (root->flags & CGRP_ROOT_NOPREFIX)
                seq_puts(seq, ",noprefix");
        if (root->flags & CGRP_ROOT_XATTR)
@@ -1342,13 +1389,14 @@ static int cgroup_show_options(struct seq_file *seq,
 
        spin_lock(&release_agent_path_lock);
        if (strlen(root->release_agent_path))
-               seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+               seq_show_option(seq, "release_agent",
+                               root->release_agent_path);
        spin_unlock(&release_agent_path_lock);
 
        if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
                seq_puts(seq, ",clone_children");
        if (strlen(root->name))
-               seq_printf(seq, ",name=%s", root->name);
+               seq_show_option(seq, "name", root->name);
        return 0;
 }
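seq_show_option() matters here because release_agent_path and root->name are
user-controlled: unlike the seq_printf() calls it replaces, it escapes
separator characters so a crafted value cannot inject fake fields into
/proc/mounts. Roughly, hedging on the exact escape set:

    /* effect of the calls above (illustrative) */
    seq_show_option(seq, "release_agent", "/sbin/agent");
    /* emits: ,release_agent=/sbin/agent */
    seq_show_option(seq, "name", "grp\nfake");
    /* emits: ,name=grp\012fake -- the newline is escaped, not literal */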
 
@@ -1447,7 +1495,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                }
 
                for_each_subsys(ss, i) {
-                       if (strcmp(token, ss->name))
+                       if (strcmp(token, ss->legacy_name))
                                continue;
                        if (ss->disabled)
                                continue;
@@ -1666,7 +1714,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
 
        lockdep_assert_held(&cgroup_mutex);
 
-       ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
+       ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
        if (ret < 0)
                goto out;
        root_cgrp->id = ret;
@@ -2065,9 +2113,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
        lockdep_assert_held(&css_set_rwsem);
 
        /*
-        * We are synchronized through cgroup_threadgroup_rwsem against
-        * PF_EXITING setting such that we can't race against cgroup_exit()
-        * changing the css_set to init_css_set and dropping the old one.
+        * We are synchronized through threadgroup_lock() against PF_EXITING
+        * setting such that we can't race against cgroup_exit() changing the
+        * css_set to init_css_set and dropping the old one.
         */
        WARN_ON_ONCE(tsk->flags & PF_EXITING);
        old_cset = task_css_set(tsk);
@@ -2124,11 +2172,10 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
  * @src_cset and add it to @preloaded_csets, which should later be cleaned
  * up by cgroup_migrate_finish().
  *
- * This function may be called without holding cgroup_threadgroup_rwsem
- * even if the target is a process.  Threads may be created and destroyed
- * but as long as cgroup_mutex is not dropped, no new css_set can be put
- * into play and the preloaded css_sets are guaranteed to cover all
- * migrations.
+ * This function may be called without holding threadgroup_lock even if the
+ * target is a process.  Threads may be created and destroyed but as long
+ * as cgroup_mutex is not dropped, no new css_set can be put into play and
+ * the preloaded css_sets are guaranteed to cover all migrations.
  */
 static void cgroup_migrate_add_src(struct css_set *src_cset,
                                   struct cgroup *dst_cgrp,
@@ -2231,7 +2278,7 @@ err:
  * @threadgroup: whether @leader points to the whole process or a single task
  *
  * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
- * process, the caller must be holding cgroup_threadgroup_rwsem.  The
+ * process, the caller must be holding threadgroup_lock of @leader.  The
  * caller is also responsible for invoking cgroup_migrate_add_src() and
  * cgroup_migrate_prepare_dst() on the targets before invoking this
  * function and following up with cgroup_migrate_finish().
@@ -2359,7 +2406,7 @@ out_release_tset:
  * @leader: the task or the leader of the threadgroup to be attached
  * @threadgroup: attach the whole threadgroup?
  *
- * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
+ * Call holding cgroup_mutex and threadgroup_lock of @leader.
  */
 static int cgroup_attach_task(struct cgroup *dst_cgrp,
                              struct task_struct *leader, bool threadgroup)
@@ -2451,13 +2498,14 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
        if (!cgrp)
                return -ENODEV;
 
-       percpu_down_write(&cgroup_threadgroup_rwsem);
+retry_find_task:
        rcu_read_lock();
        if (pid) {
                tsk = find_task_by_vpid(pid);
                if (!tsk) {
+                       rcu_read_unlock();
                        ret = -ESRCH;
-                       goto out_unlock_rcu;
+                       goto out_unlock_cgroup;
                }
        } else {
                tsk = current;
@@ -2473,23 +2521,37 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
         */
        if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
                ret = -EINVAL;
-               goto out_unlock_rcu;
+               rcu_read_unlock();
+               goto out_unlock_cgroup;
        }
 
        get_task_struct(tsk);
        rcu_read_unlock();
 
+       threadgroup_lock(tsk);
+       if (threadgroup) {
+               if (!thread_group_leader(tsk)) {
+                       /*
+                        * A race with de_thread() from another thread's
+                        * exec() may strip us of our leadership.  If this
+                        * happens, there is no choice but to throw this task
+                        * away and try again; this is
+                        * "double-double-toil-and-trouble-check locking".
+                        */
+                       threadgroup_unlock(tsk);
+                       put_task_struct(tsk);
+                       goto retry_find_task;
+               }
+       }
+
        ret = cgroup_procs_write_permission(tsk, cgrp, of);
        if (!ret)
                ret = cgroup_attach_task(cgrp, tsk, threadgroup);
 
-       put_task_struct(tsk);
-       goto out_unlock_threadgroup;
+       threadgroup_unlock(tsk);
 
-out_unlock_rcu:
-       rcu_read_unlock();
-out_unlock_threadgroup:
-       percpu_up_write(&cgroup_threadgroup_rwsem);
+       put_task_struct(tsk);
+out_unlock_cgroup:
        cgroup_kn_unlock(of->kn);
        return ret ?: nbytes;
 }
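The control flow above restores the classic lookup/lock/re-check idiom that
the global percpu rwsem had made unnecessary. Condensed to its shape (a
sketch, not a drop-in):

    retry:
            rcu_read_lock();
            tsk = find_task_by_vpid(pid);   /* may vanish once RCU is dropped */
            get_task_struct(tsk);
            rcu_read_unlock();

            threadgroup_lock(tsk);
            if (threadgroup && !thread_group_leader(tsk)) {
                    /* de_thread() won the race; drop everything and retry */
                    threadgroup_unlock(tsk);
                    put_task_struct(tsk);
                    goto retry;
            }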
@@ -2634,8 +2696,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 
        lockdep_assert_held(&cgroup_mutex);
 
-       percpu_down_write(&cgroup_threadgroup_rwsem);
-
        /* look up all csses currently attached to @cgrp's subtree */
        down_read(&css_set_rwsem);
        css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
@@ -2691,8 +2751,17 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
                                goto out_finish;
                        last_task = task;
 
+                       threadgroup_lock(task);
+                       /* raced against de_thread() from another thread? */
+                       if (!thread_group_leader(task)) {
+                               threadgroup_unlock(task);
+                               put_task_struct(task);
+                               continue;
+                       }
+
                        ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
 
+                       threadgroup_unlock(task);
                        put_task_struct(task);
 
                        if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
@@ -2702,7 +2771,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 
 out_finish:
        cgroup_migrate_finish(&preloaded_csets);
-       percpu_up_write(&cgroup_threadgroup_rwsem);
        return ret;
 }
 
@@ -4579,7 +4647,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
        if (err)
                goto err_free_css;
 
-       err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
+       err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
        if (err < 0)
                goto err_free_percpu_ref;
        css->id = err;
@@ -4656,7 +4724,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
         * Temporarily set the pointer to NULL, so idr_find() won't return
         * a half-baked cgroup.
         */
-       cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
+       cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
        if (cgrp->id < 0) {
                ret = -ENOMEM;
                goto out_cancel_ref;
@@ -4955,6 +5023,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 
        have_fork_callback |= (bool)ss->fork << ss->id;
        have_exit_callback |= (bool)ss->exit << ss->id;
+       have_canfork_callback |= (bool)ss->can_fork << ss->id;
 
        /* At system boot, before all subsystems have been
         * registered, no tasks have been forked, so we don't
@@ -4993,6 +5062,8 @@ int __init cgroup_init_early(void)
 
                ss->id = i;
                ss->name = cgroup_subsys_name[i];
+               if (!ss->legacy_name)
+                       ss->legacy_name = cgroup_subsys_name[i];
 
                if (ss->early_init)
                        cgroup_init_subsys(ss, true);
@@ -5012,7 +5083,6 @@ int __init cgroup_init(void)
        unsigned long key;
        int ssid, err;
 
-       BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
 
@@ -5136,9 +5206,11 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                        continue;
 
                seq_printf(m, "%d:", root->hierarchy_id);
-               for_each_subsys(ss, ssid)
-                       if (root->subsys_mask & (1 << ssid))
-                               seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
+               if (root != &cgrp_dfl_root)
+                       for_each_subsys(ss, ssid)
+                               if (root->subsys_mask & (1 << ssid))
+                                       seq_printf(m, "%s%s", count++ ? "," : "",
+                                                  ss->legacy_name);
                if (strlen(root->name))
                        seq_printf(m, "%sname=%s", count ? "," : "",
                                   root->name);
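With this hunk, /proc/PID/cgroup prints legacy controller names on legacy
hierarchies and lists no controllers at all for the default hierarchy
(root == cgrp_dfl_root). Illustrative output, hedged since hierarchy ids,
controllers, and paths vary per system:

    4:cpu,cpuacct:/user.slice
    1:name=systemd:/user.slice/user-1000.slice
    0::/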
@@ -5178,7 +5250,7 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v)
 
        for_each_subsys(ss, i)
                seq_printf(m, "%s\t%d\t%d\t%d\n",
-                          ss->name, ss->root->hierarchy_id,
+                          ss->legacy_name, ss->root->hierarchy_id,
                           atomic_read(&ss->root->nr_cgrps), !ss->disabled);
 
        mutex_unlock(&cgroup_mutex);
@@ -5197,6 +5269,19 @@ static const struct file_operations proc_cgroupstats_operations = {
        .release = single_release,
 };
 
+static void **subsys_canfork_priv_p(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
+{
+       if (CGROUP_CANFORK_START <= i && i < CGROUP_CANFORK_END)
+               return &ss_priv[i - CGROUP_CANFORK_START];
+       return NULL;
+}
+
+static void *subsys_canfork_priv(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
+{
+       void **private = subsys_canfork_priv_p(ss_priv, i);
+       return private ? *private : NULL;
+}
+
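These two helpers confine the private-pointer plumbing: only subsystem ids in
[CGROUP_CANFORK_START, CGROUP_CANFORK_END) own a slot, so the on-stack array
in the fork path stays as small as the number of can_fork users. For instance,
assuming (hypothetically) CGROUP_CANFORK_START == 3 and CGROUP_CANFORK_COUNT
== 2:

    void *priv[CGROUP_CANFORK_COUNT] = { };

    subsys_canfork_priv_p(priv, 3);         /* -> &priv[0] */
    subsys_canfork_priv_p(priv, 4);         /* -> &priv[1] */
    subsys_canfork_priv_p(priv, 7);         /* -> NULL: outside the window */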
 /**
  * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the child process being forked.
@@ -5211,6 +5296,57 @@ void cgroup_fork(struct task_struct *child)
        INIT_LIST_HEAD(&child->cg_list);
 }
 
+/**
+ * cgroup_can_fork - called on a new task before the process is exposed
+ * @child: the task in question.
+ *
+ * This calls the subsystem can_fork() callbacks. If the can_fork() callback
+ * returns an error, the fork aborts with that error code. This allows a
+ * cgroup subsystem to conditionally allow or deny new forks.
+ */
+int cgroup_can_fork(struct task_struct *child,
+                   void *ss_priv[CGROUP_CANFORK_COUNT])
+{
+       struct cgroup_subsys *ss;
+       int i, j, ret;
+
+       for_each_subsys_which(ss, i, &have_canfork_callback) {
+               ret = ss->can_fork(child, subsys_canfork_priv_p(ss_priv, i));
+               if (ret)
+                       goto out_revert;
+       }
+
+       return 0;
+
+out_revert:
+       for_each_subsys(ss, j) {
+               if (j >= i)
+                       break;
+               if (ss->cancel_fork)
+                       ss->cancel_fork(child, subsys_canfork_priv(ss_priv, j));
+       }
+
+       return ret;
+}
+
+/**
+ * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
+ * @child: the task in question
+ *
+ * This calls the cancel_fork() callbacks if a fork failed *after*
+ * cgroup_can_fork() succeeded.
+ */
+void cgroup_cancel_fork(struct task_struct *child,
+                       void *ss_priv[CGROUP_CANFORK_COUNT])
+{
+       struct cgroup_subsys *ss;
+       int i;
+
+       for_each_subsys(ss, i)
+               if (ss->cancel_fork)
+                       ss->cancel_fork(child, subsys_canfork_priv(ss_priv, i));
+}
+
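Taken together, can_fork/post_fork/cancel_fork give copy_process() a
prepare/commit/abort protocol around the point where the child becomes
visible. A hedged sketch of the caller side (the real call sites are in
kernel/fork.c, not in this diff):

    /* assumed caller shape in copy_process() */
    void *ss_priv[CGROUP_CANFORK_COUNT] = {};
    int ret;

    ret = cgroup_can_fork(child, ss_priv); /* prepare: a subsystem may veto */
    if (ret)
            return ret;                    /* fork fails with that error */

    /* ... the child is linked into the task list ... */
    cgroup_post_fork(child, ss_priv);      /* commit: fork() callbacks run */

    /*
     * If fork fails after cgroup_can_fork() succeeded but before
     * cgroup_post_fork(), the error path must call:
     *
     *      cgroup_cancel_fork(child, ss_priv);
     */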
 /**
  * cgroup_post_fork - called on a new task after adding it to the task list
  * @child: the task in question
@@ -5221,7 +5357,8 @@ void cgroup_fork(struct task_struct *child)
  * cgroup_task_iter_start() - to guarantee that the new task ends up on its
  * list.
  */
-void cgroup_post_fork(struct task_struct *child)
+void cgroup_post_fork(struct task_struct *child,
+                     void *old_ss_priv[CGROUP_CANFORK_COUNT])
 {
        struct cgroup_subsys *ss;
        int i;
@@ -5266,7 +5403,7 @@ void cgroup_post_fork(struct task_struct *child)
         * and addition to css_set.
         */
        for_each_subsys_which(ss, i, &have_fork_callback)
-               ss->fork(child);
+               ss->fork(child, subsys_canfork_priv(old_ss_priv, i));
 }
 
 /**
@@ -5400,12 +5537,14 @@ static int __init cgroup_disable(char *str)
                        continue;
 
                for_each_subsys(ss, i) {
-                       if (!strcmp(token, ss->name)) {
-                               ss->disabled = 1;
-                               printk(KERN_INFO "Disabling %s control group"
-                                       " subsystem\n", ss->name);
-                               break;
-                       }
+                       if (strcmp(token, ss->name) &&
+                           strcmp(token, ss->legacy_name))
+                               continue;
+
+                       ss->disabled = 1;
+                       printk(KERN_INFO "Disabling %s control group subsystem\n",
+                              ss->name);
+                       break;
                }
        }
        return 1;
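Matching either name keeps old boot parameters working if a subsystem's
default-hierarchy name ever diverges from its legacy one. Reusing the
hypothetical io/blkio controller from earlier, both of these command lines
would disable the same subsystem:

    cgroup_disable=blkio
    cgroup_disable=io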