cgroup: enable task_cg_lists on the first cgroup mount
[firefly-linux-kernel-4.4.55.git] kernel/cgroup.c
index bc1dcabe92176636baf79c7ef52e597422aeaf75..506f6da67ad1c0e1361db63d11c0ecd91e340802 100644
 #include <linux/proc_fs.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
-#include <linux/backing-dev.h>
-#include <linux/seq_file.h>
 #include <linux/slab.h>
-#include <linux/magic.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/sort.h>
 #include <linux/kmod.h>
-#include <linux/module.h>
 #include <linux/delayacct.h>
 #include <linux/cgroupstats.h>
 #include <linux/hashtable.h>
-#include <linux/namei.h>
 #include <linux/pid_namespace.h>
 #include <linux/idr.h>
 #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
-#include <linux/eventfd.h>
-#include <linux/poll.h>
 #include <linux/flex_array.h> /* used in cgroup_attach_task */
 #include <linux/kthread.h>
-#include <linux/file.h>
+#include <linux/delay.h>
 
 #include <linux/atomic.h>
 
+/*
+ * pidlists linger the following amount before being destroyed.  The goal
+ * is to avoid frequent destruction in the middle of consecutive read
+ * calls.  Expiring in the middle is a performance problem, not a
+ * correctness one.  1 sec should be enough.
+ */
+#define CGROUP_PIDLIST_DESTROY_DELAY   HZ
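A minimal sketch of how such a linger can be realized with a delayed work item, assuming the pidlist embeds a delayed_work (the helper and parameter names here are illustrative, not part of this patch):

	/* illustrative only: re-arm the destruction timer so the list is
	 * torn down only after it has been idle for
	 * CGROUP_PIDLIST_DESTROY_DELAY jiffies */
	static void example_pidlist_touch(struct delayed_work *destroy_dwork)
	{
		mod_delayed_work(cgroup_pidlist_destroy_wq, destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	}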
+
+#define CGROUP_FILE_NAME_MAX           (MAX_CGROUP_TYPE_NAMELEN +      \
+                                        MAX_CFTYPE_NAME + 2)
+
+/*
+ * cgroup_tree_mutex nests above cgroup_mutex and protects cftypes, file
+ * creation/removal and hierarchy changing operations including cgroup
+ * creation, removal, css association and controller rebinding.  This outer
+ * lock is needed mainly to resolve the circular dependency between kernfs
+ * active ref and cgroup_mutex.  cgroup_tree_mutex nests above both.
+ */
+static DEFINE_MUTEX(cgroup_tree_mutex);
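The nesting this comment describes is visible throughout the patch (e.g. cgroup_remount() and cgroup_destroy_root() below); in sketch form:

	mutex_lock(&cgroup_tree_mutex);	/* outer: files and cftypes */
	mutex_lock(&cgroup_mutex);	/* inner: hierarchy state */
	/* ... create/remove cgroups, rebind controllers ... */
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);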
+
 /*
  * cgroup_mutex is the master lock.  Any modification to cgroup or its
  * hierarchy must be performed while holding it.
- *
- * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify
- * cgroupfs_root of any cgroup hierarchy - subsys list, flags,
- * release_agent_path and so on.  Modifying requires both cgroup_mutex and
- * cgroup_root_mutex.  Readers can acquire either of the two.  This is to
- * break the following locking order cycle.
- *
- *  A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem
- *  B. namespace_sem -> cgroup_mutex
- *
- * B happens only through cgroup_show_options() and using cgroup_root_mutex
- * breaks it.
  */
 #ifdef CONFIG_PROVE_RCU
 DEFINE_MUTEX(cgroup_mutex);
@@ -87,7 +88,17 @@ EXPORT_SYMBOL_GPL(cgroup_mutex);     /* only for lockdep */
 static DEFINE_MUTEX(cgroup_mutex);
 #endif
 
-static DEFINE_MUTEX(cgroup_root_mutex);
+/*
+ * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
+ * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
+ */
+static DEFINE_SPINLOCK(release_agent_path_lock);
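In sketch form, writers store under both cgroup_mutex and the spinlock, while readers may take the spinlock alone (compare cgroup_release_agent_write() and cgroup_show_options() later in this patch):

	/* writer: cgroup_mutex already held */
	spin_lock(&release_agent_path_lock);
	strlcpy(root->release_agent_path, buffer,
		sizeof(root->release_agent_path));
	spin_unlock(&release_agent_path_lock);

	/* reader: the spinlock alone suffices */
	spin_lock(&release_agent_path_lock);
	seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	spin_unlock(&release_agent_path_lock);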
+
+#define cgroup_assert_mutexes_or_rcu_locked()                          \
+       rcu_lockdep_assert(rcu_read_lock_held() ||                      \
+                          lockdep_is_held(&cgroup_tree_mutex) ||       \
+                          lockdep_is_held(&cgroup_mutex),              \
+                          "cgroup_[tree_]mutex or RCU read lock required");
 
 /*
  * cgroup destruction makes heavy use of work items and there can be a lot
@@ -98,16 +109,24 @@ static DEFINE_MUTEX(cgroup_root_mutex);
 static struct workqueue_struct *cgroup_destroy_wq;
 
 /*
- * Generate an array of cgroup subsystem pointers. At boot time, this is
- * populated with the built in subsystems, and modular subsystems are
- * registered after that. The mutable section of this array is protected by
- * cgroup_mutex.
+ * pidlist destructions need to be flushed on cgroup destruction.  Use a
+ * separate workqueue as flush domain.
  */
-#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys,
-#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
-static struct cgroup_subsys *cgroup_subsys[CGROUP_SUBSYS_COUNT] = {
+static struct workqueue_struct *cgroup_pidlist_destroy_wq;
+
+/* generate an array of cgroup subsystem pointers */
+#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+static struct cgroup_subsys *cgroup_subsys[] = {
+#include <linux/cgroup_subsys.h>
+};
+#undef SUBSYS
+
+/* array of cgroup subsystem names */
+#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
+static const char *cgroup_subsys_name[] = {
 #include <linux/cgroup_subsys.h>
 };
+#undef SUBSYS
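For example, if <linux/cgroup_subsys.h> contains SUBSYS(cpuset), the two expansions above are equivalent to:

	static struct cgroup_subsys *cgroup_subsys[] = {
		[cpuset_cgrp_id] = &cpuset_cgrp_subsys,
	};

	static const char *cgroup_subsys_name[] = {
		[cpuset_cgrp_id] = "cpuset",
	};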
 
 /*
  * The dummy hierarchy, reserved for the subsystems that are otherwise
@@ -119,63 +138,14 @@ static struct cgroupfs_root cgroup_dummy_root;
 /* dummy_top is a shorthand for the dummy hierarchy's top cgroup */
 static struct cgroup * const cgroup_dummy_top = &cgroup_dummy_root.top_cgroup;
 
-/*
- * cgroupfs file entry, pointed to from leaf dentry->d_fsdata.
- */
-struct cfent {
-       struct list_head                node;
-       struct dentry                   *dentry;
-       struct cftype                   *type;
-       struct cgroup_subsys_state      *css;
-
-       /* file xattrs */
-       struct simple_xattrs            xattrs;
-};
-
-/*
- * cgroup_event represents events which userspace want to receive.
- */
-struct cgroup_event {
-       /*
-        * css which the event belongs to.
-        */
-       struct cgroup_subsys_state *css;
-       /*
-        * Control file which the event associated.
-        */
-       struct cftype *cft;
-       /*
-        * eventfd to signal userspace about the event.
-        */
-       struct eventfd_ctx *eventfd;
-       /*
-        * Each of these stored in a list by the cgroup.
-        */
-       struct list_head list;
-       /*
-        * All fields below needed to unregister event when
-        * userspace closes eventfd.
-        */
-       poll_table pt;
-       wait_queue_head_t *wqh;
-       wait_queue_t wait;
-       struct work_struct remove;
-};
-
 /* The list of hierarchy roots */
 
 static LIST_HEAD(cgroup_roots);
 static int cgroup_root_count;
 
-/*
- * Hierarchy ID allocation and mapping.  It follows the same exclusion
- * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for
- * writes, either for reads.
- */
+/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
 static DEFINE_IDR(cgroup_hierarchy_idr);
 
-static struct cgroup_name root_cgroup_name = { .name = "/" };
-
 /*
  * Assign a monotonically increasing serial number to cgroups.  It
  * guarantees cgroups with bigger numbers are newer than those with smaller
@@ -195,11 +165,15 @@ static int need_forkexit_callback __read_mostly;
 
 static struct cftype cgroup_base_files[];
 
+static void cgroup_put(struct cgroup *cgrp);
+static int rebind_subsystems(struct cgroupfs_root *root,
+                            unsigned long added_mask, unsigned removed_mask);
 static void cgroup_destroy_css_killed(struct cgroup *cgrp);
 static int cgroup_destroy_locked(struct cgroup *cgrp);
 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
                              bool is_add);
-static int cgroup_file_release(struct inode *inode, struct file *file);
+static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);
+static void cgroup_enable_task_cg_lists(void);
 
 /**
  * cgroup_css - obtain a cgroup's css for the specified subsystem
@@ -216,8 +190,9 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
                                              struct cgroup_subsys *ss)
 {
        if (ss)
-               return rcu_dereference_check(cgrp->subsys[ss->subsys_id],
-                                            lockdep_is_held(&cgroup_mutex));
+               return rcu_dereference_check(cgrp->subsys[ss->id],
+                                       lockdep_is_held(&cgroup_tree_mutex) ||
+                                       lockdep_is_held(&cgroup_mutex));
        else
                return &cgrp->dummy_css;
 }
@@ -228,6 +203,27 @@ static inline bool cgroup_is_dead(const struct cgroup *cgrp)
        return test_bit(CGRP_DEAD, &cgrp->flags);
 }
 
+struct cgroup_subsys_state *seq_css(struct seq_file *seq)
+{
+       struct kernfs_open_file *of = seq->private;
+       struct cgroup *cgrp = of->kn->parent->priv;
+       struct cftype *cft = seq_cft(seq);
+
+       /*
+        * This is an open and unprotected implementation of cgroup_css().
+        * seq_css() is only called from a kernfs file operation which has
+        * an active reference on the file.  Because all the subsystem
+        * files are drained before a css is disassociated from a cgroup,
+        * the matching css from the cgroup's subsys table is guaranteed to
+        * be and stay valid until the enclosing operation is complete.
+        */
+       if (cft->ss)
+               return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
+       else
+               return &cgrp->dummy_css;
+}
+EXPORT_SYMBOL_GPL(seq_css);
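cgroup_release_agent_show() near the end of this patch is a real caller; the general pattern, with a hypothetical handler name, looks like:

	static int example_seq_show(struct seq_file *seq, void *v)
	{
		struct cgroup_subsys_state *css = seq_css(seq);

		/* the kernfs active ref pins @css for the whole call */
		seq_printf(seq, "%d\n", cgroup_sane_behavior(css->cgroup));
		return 0;
	}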
+
 /**
  * cgroup_is_descendant - test ancestry
  * @cgrp: the cgroup to be tested
@@ -262,53 +258,34 @@ static int notify_on_release(const struct cgroup *cgrp)
 }
 
 /**
- * for_each_subsys - iterate all loaded cgroup subsystems
- * @ss: the iteration cursor
- * @i: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
+ * for_each_css - iterate all css's of a cgroup
+ * @css: the iteration cursor
+ * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
+ * @cgrp: the target cgroup to iterate css's of
  *
  * Should be called under cgroup_mutex.
  */
-#define for_each_subsys(ss, i)                                         \
-       for ((i) = 0; (i) < CGROUP_SUBSYS_COUNT; (i)++)                 \
-               if (({ lockdep_assert_held(&cgroup_mutex);              \
-                      !((ss) = cgroup_subsys[i]); })) { }              \
+#define for_each_css(css, ssid, cgrp)                                  \
+       for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)        \
+               if (!((css) = rcu_dereference_check(                    \
+                               (cgrp)->subsys[(ssid)],                 \
+                               lockdep_is_held(&cgroup_tree_mutex) ||  \
+                               lockdep_is_held(&cgroup_mutex)))) { }   \
                else
 
 /**
- * for_each_builtin_subsys - iterate all built-in cgroup subsystems
+ * for_each_subsys - iterate all enabled cgroup subsystems
  * @ss: the iteration cursor
- * @i: the index of @ss, CGROUP_BUILTIN_SUBSYS_COUNT after reaching the end
- *
- * Bulit-in subsystems are always present and iteration itself doesn't
- * require any synchronization.
+ * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
  */
-#define for_each_builtin_subsys(ss, i)                                 \
-       for ((i) = 0; (i) < CGROUP_BUILTIN_SUBSYS_COUNT &&              \
-            (((ss) = cgroup_subsys[i]) || true); (i)++)
-
-/* iterate each subsystem attached to a hierarchy */
-#define for_each_root_subsys(root, ss)                                 \
-       list_for_each_entry((ss), &(root)->subsys_list, sibling)
+#define for_each_subsys(ss, ssid)                                      \
+       for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&                \
+            (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
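Both iterators are exercised later in this patch; for instance, cgroup_attach_task() walks a cgroup's css's roughly like this:

	struct cgroup_subsys_state *css;
	int ssid;

	for_each_css(css, ssid, cgrp)
		if (css->ss->can_attach)
			retval = css->ss->can_attach(css, &tset);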
 
 /* iterate across the active hierarchies */
 #define for_each_active_root(root)                                     \
        list_for_each_entry((root), &cgroup_roots, root_list)
 
-static inline struct cgroup *__d_cgrp(struct dentry *dentry)
-{
-       return dentry->d_fsdata;
-}
-
-static inline struct cfent *__d_cfe(struct dentry *dentry)
-{
-       return dentry->d_fsdata;
-}
-
-static inline struct cftype *__d_cft(struct dentry *dentry)
-{
-       return __d_cfe(dentry)->type;
-}
-
 /**
  * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
  * @cgrp: the cgroup to be checked for liveness
@@ -399,7 +376,7 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
  * fork()/exit() overhead for people who have cgroups compiled into their
  * kernel but not actually in use.
  */
-static int use_task_css_set_links __read_mostly;
+static bool use_task_css_set_links __read_mostly;
 
 static void __put_css_set(struct css_set *cset, int taskexit)
 {
@@ -710,6 +687,90 @@ static struct css_set *find_css_set(struct css_set *old_cset,
        return cset;
 }
 
+static struct cgroupfs_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
+{
+       struct cgroup *top_cgrp = kf_root->kn->priv;
+
+       return top_cgrp->root;
+}
+
+static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end)
+{
+       int id;
+
+       lockdep_assert_held(&cgroup_mutex);
+
+       id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end,
+                             GFP_KERNEL);
+       if (id < 0)
+               return id;
+
+       root->hierarchy_id = id;
+       return 0;
+}
+
+static void cgroup_exit_root_id(struct cgroupfs_root *root)
+{
+       lockdep_assert_held(&cgroup_mutex);
+
+       if (root->hierarchy_id) {
+               idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
+               root->hierarchy_id = 0;
+       }
+}
+
+static void cgroup_free_root(struct cgroupfs_root *root)
+{
+       if (root) {
+               /* hierarchy ID should already have been released */
+               WARN_ON_ONCE(root->hierarchy_id);
+
+               idr_destroy(&root->cgroup_idr);
+               kfree(root);
+       }
+}
+
+static void cgroup_destroy_root(struct cgroupfs_root *root)
+{
+       struct cgroup *cgrp = &root->top_cgroup;
+       struct cgrp_cset_link *link, *tmp_link;
+
+       mutex_lock(&cgroup_tree_mutex);
+       mutex_lock(&cgroup_mutex);
+
+       BUG_ON(atomic_read(&root->nr_cgrps));
+       BUG_ON(!list_empty(&cgrp->children));
+
+       /* Rebind all subsystems back to the default hierarchy */
+       WARN_ON(rebind_subsystems(root, 0, root->subsys_mask));
+
+       /*
+        * Release all the links from cset_links to this hierarchy's
+        * root cgroup.
+        */
+       write_lock(&css_set_lock);
+
+       list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
+               list_del(&link->cset_link);
+               list_del(&link->cgrp_link);
+               kfree(link);
+       }
+       write_unlock(&css_set_lock);
+
+       if (!list_empty(&root->root_list)) {
+               list_del(&root->root_list);
+               cgroup_root_count--;
+       }
+
+       cgroup_exit_root_id(root);
+
+       mutex_unlock(&cgroup_mutex);
+       mutex_unlock(&cgroup_tree_mutex);
+
+       kernfs_destroy_root(root->kf_root);
+       cgroup_free_root(root);
+}
+
 /*
  * Return the cgroup for "task" from the given hierarchy. Must be
  * called with cgroup_mutex held.
@@ -797,82 +858,71 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
  * update of a tasks cgroup pointer by cgroup_attach_task()
  */
 
-/*
- * A couple of forward declarations required, due to cyclic reference loop:
- * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
- * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
- * -> cgroup_mkdir.
- */
-
-static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
-static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
-static const struct inode_operations cgroup_dir_inode_operations;
+static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
 static const struct file_operations proc_cgroupstats_operations;
 
-static struct backing_dev_info cgroup_backing_dev_info = {
-       .name           = "cgroup",
-       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
-static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
+static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
+                             char *buf)
 {
-       struct inode *inode = new_inode(sb);
-
-       if (inode) {
-               inode->i_ino = get_next_ino();
-               inode->i_mode = mode;
-               inode->i_uid = current_fsuid();
-               inode->i_gid = current_fsgid();
-               inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-               inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
-       }
-       return inode;
+       if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
+           !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
+               snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
+                        cft->ss->name, cft->name);
+       else
+               strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
+       return buf;
 }
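For example, assuming a memory-controller cftype named "limit_in_bytes", the result is "memory.limit_in_bytes" on an ordinary hierarchy and plain "limit_in_bytes" when CFTYPE_NO_PREFIX or CGRP_ROOT_NOPREFIX is set:

	char buf[CGROUP_FILE_NAME_MAX];

	/* "memory" + "limit_in_bytes" -> "memory.limit_in_bytes" */
	cgroup_file_name(cgrp, cft, buf);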
 
-static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry)
+/**
+ * cgroup_file_mode - deduce file mode of a control file
+ * @cft: the control file in question
+ *
+ * returns cft->mode if ->mode is not 0
+ * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
+ * returns S_IRUGO if it has only a read handler
+ * returns S_IWUSR if it has only a write handler
+ */
+static umode_t cgroup_file_mode(const struct cftype *cft)
 {
-       struct cgroup_name *name;
+       umode_t mode = 0;
 
-       name = kmalloc(sizeof(*name) + dentry->d_name.len + 1, GFP_KERNEL);
-       if (!name)
-               return NULL;
-       strcpy(name->name, dentry->d_name.name);
-       return name;
+       if (cft->mode)
+               return cft->mode;
+
+       if (cft->read_u64 || cft->read_s64 || cft->seq_show)
+               mode |= S_IRUGO;
+
+       if (cft->write_u64 || cft->write_s64 || cft->write_string ||
+           cft->trigger)
+               mode |= S_IWUSR;
+
+       return mode;
 }
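A hedged illustration with hypothetical names: a cftype with only a read handler comes out S_IRUGO (0444), while one with both handlers comes out S_IRUGO|S_IWUSR (0644):

	static struct cftype example_files[] = {
		{
			.name = "usage",
			.read_u64 = example_usage_read,	  /* -> S_IRUGO */
		},
		{
			.name = "limit",
			.read_u64 = example_limit_read,
			.write_u64 = example_limit_write, /* -> S_IRUGO|S_IWUSR */
		},
		{ }	/* terminate */
	};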
 
 static void cgroup_free_fn(struct work_struct *work)
 {
        struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);
 
-       mutex_lock(&cgroup_mutex);
-       cgrp->root->number_of_cgroups--;
-       mutex_unlock(&cgroup_mutex);
-
-       /*
-        * We get a ref to the parent's dentry, and put the ref when
-        * this cgroup is being freed, so it's guaranteed that the
-        * parent won't be destroyed before its children.
-        */
-       dput(cgrp->parent->dentry);
-
-       /*
-        * Drop the active superblock reference that we took when we
-        * created the cgroup. This will free cgrp->root, if we are
-        * holding the last reference to @sb.
-        */
-       deactivate_super(cgrp->root->sb);
-
-       /*
-        * if we're getting rid of the cgroup, refcount should ensure
-        * that there are no pidlists left.
-        */
-       BUG_ON(!list_empty(&cgrp->pidlists));
-
-       simple_xattrs_free(&cgrp->xattrs);
+       atomic_dec(&cgrp->root->nr_cgrps);
+       cgroup_pidlist_destroy_all(cgrp);
 
-       kfree(rcu_dereference_raw(cgrp->name));
-       kfree(cgrp);
+       if (cgrp->parent) {
+               /*
+                * We get a ref to the parent, and put the ref when this
+                * cgroup is being freed, so it's guaranteed that the
+                * parent won't be destroyed before its children.
+                */
+               cgroup_put(cgrp->parent);
+               kernfs_put(cgrp->kn);
+               kfree(cgrp);
+       } else {
+               /*
+                * This is the top cgroup's refcnt reaching zero, which
+                * indicates that the root should be released.
+                */
+               cgroup_destroy_root(cgrp->root);
+       }
 }
 
 static void cgroup_free_rcu(struct rcu_head *head)
@@ -883,71 +933,40 @@ static void cgroup_free_rcu(struct rcu_head *head)
        queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
 }
 
-static void cgroup_diput(struct dentry *dentry, struct inode *inode)
-{
-       /* is dentry a directory ? if so, kfree() associated cgroup */
-       if (S_ISDIR(inode->i_mode)) {
-               struct cgroup *cgrp = dentry->d_fsdata;
-
-               BUG_ON(!(cgroup_is_dead(cgrp)));
-
-               /*
-                * XXX: cgrp->id is only used to look up css's.  As cgroup
-                * and css's lifetimes will be decoupled, it should be made
-                * per-subsystem and moved to css->id so that lookups are
-                * successful until the target css is released.
-                */
-               idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
-               cgrp->id = -1;
-
-               call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
-       } else {
-               struct cfent *cfe = __d_cfe(dentry);
-               struct cgroup *cgrp = dentry->d_parent->d_fsdata;
-
-               WARN_ONCE(!list_empty(&cfe->node) &&
-                         cgrp != &cgrp->root->top_cgroup,
-                         "cfe still linked for %s\n", cfe->type->name);
-               simple_xattrs_free(&cfe->xattrs);
-               kfree(cfe);
-       }
-       iput(inode);
-}
-
-static void remove_dir(struct dentry *d)
+static void cgroup_get(struct cgroup *cgrp)
 {
-       struct dentry *parent = dget(d->d_parent);
-
-       d_delete(d);
-       simple_rmdir(parent->d_inode, d);
-       dput(parent);
+       WARN_ON_ONCE(cgroup_is_dead(cgrp));
+       WARN_ON_ONCE(atomic_read(&cgrp->refcnt) <= 0);
+       atomic_inc(&cgrp->refcnt);
 }
 
-static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
+static void cgroup_put(struct cgroup *cgrp)
 {
-       struct cfent *cfe;
-
-       lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
-       lockdep_assert_held(&cgroup_mutex);
+       if (!atomic_dec_and_test(&cgrp->refcnt))
+               return;
+       if (WARN_ON_ONCE(cgrp->parent && !cgroup_is_dead(cgrp)))
+               return;
 
        /*
-        * If we're doing cleanup due to failure of cgroup_create(),
-        * the corresponding @cfe may not exist.
+        * XXX: cgrp->id is only used to look up css's.  As cgroup and
+        * css's lifetimes will be decoupled, it should be made
+        * per-subsystem and moved to css->id so that lookups are
+        * successful until the target css is released.
         */
-       list_for_each_entry(cfe, &cgrp->files, node) {
-               struct dentry *d = cfe->dentry;
+       mutex_lock(&cgroup_mutex);
+       idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+       mutex_unlock(&cgroup_mutex);
+       cgrp->id = -1;
 
-               if (cft && cfe->type != cft)
-                       continue;
+       call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
+}
 
-               dget(d);
-               d_delete(d);
-               simple_unlink(cgrp->dentry->d_inode, d);
-               list_del_init(&cfe->node);
-               dput(d);
+static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
+{
+       char name[CGROUP_FILE_NAME_MAX];
 
-               break;
-       }
+       lockdep_assert_held(&cgroup_tree_mutex);
+       kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
 }
 
 /**
@@ -961,81 +980,41 @@ static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
        int i;
 
        for_each_subsys(ss, i) {
-               struct cftype_set *set;
+               struct cftype *cfts;
 
                if (!test_bit(i, &subsys_mask))
                        continue;
-               list_for_each_entry(set, &ss->cftsets, node)
-                       cgroup_addrm_files(cgrp, set->cfts, false);
+               list_for_each_entry(cfts, &ss->cfts, node)
+                       cgroup_addrm_files(cgrp, cfts, false);
        }
 }
 
-/*
- * NOTE : the dentry must have been dget()'ed
- */
-static void cgroup_d_remove_dir(struct dentry *dentry)
-{
-       struct dentry *parent;
-
-       parent = dentry->d_parent;
-       spin_lock(&parent->d_lock);
-       spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-       list_del_init(&dentry->d_u.d_child);
-       spin_unlock(&dentry->d_lock);
-       spin_unlock(&parent->d_lock);
-       remove_dir(dentry);
-}
-
-/*
- * Call with cgroup_mutex held. Drops reference counts on modules, including
- * any duplicate ones that parse_cgroupfs_options took. If this function
- * returns an error, no reference counts are touched.
- */
 static int rebind_subsystems(struct cgroupfs_root *root,
                             unsigned long added_mask, unsigned removed_mask)
 {
        struct cgroup *cgrp = &root->top_cgroup;
        struct cgroup_subsys *ss;
-       unsigned long pinned = 0;
        int i, ret;
 
-       BUG_ON(!mutex_is_locked(&cgroup_mutex));
-       BUG_ON(!mutex_is_locked(&cgroup_root_mutex));
+       lockdep_assert_held(&cgroup_tree_mutex);
+       lockdep_assert_held(&cgroup_mutex);
 
        /* Check that any added subsystems are currently free */
-       for_each_subsys(ss, i) {
-               if (!(added_mask & (1 << i)))
-                       continue;
-
-               /* is the subsystem mounted elsewhere? */
-               if (ss->root != &cgroup_dummy_root) {
-                       ret = -EBUSY;
-                       goto out_put;
-               }
-
-               /* pin the module */
-               if (!try_module_get(ss->module)) {
-                       ret = -ENOENT;
-                       goto out_put;
-               }
-               pinned |= 1 << i;
-       }
-
-       /* subsys could be missing if unloaded between parsing and here */
-       if (added_mask != pinned) {
-               ret = -ENOENT;
-               goto out_put;
-       }
+       for_each_subsys(ss, i)
+               if ((added_mask & (1 << i)) && ss->root != &cgroup_dummy_root)
+                       return -EBUSY;
 
        ret = cgroup_populate_dir(cgrp, added_mask);
        if (ret)
-               goto out_put;
+               return ret;
 
        /*
         * Nothing can fail from this point on.  Remove files for the
         * removed subsystems and rebind each subsystem.
         */
+       mutex_unlock(&cgroup_mutex);
        cgroup_clear_dir(cgrp, removed_mask);
+       mutex_lock(&cgroup_mutex);
 
        for_each_subsys(ss, i) {
                unsigned long bit = 1UL << i;
@@ -1050,7 +1029,6 @@ static int rebind_subsystems(struct cgroupfs_root *root,
                                           cgroup_css(cgroup_dummy_top, ss));
                        cgroup_css(cgrp, ss)->cgroup = cgrp;
 
-                       list_move(&ss->sibling, &root->subsys_list);
                        ss->root = root;
                        if (ss->bind)
                                ss->bind(cgroup_css(cgrp, ss));
@@ -1069,50 +1047,40 @@ static int rebind_subsystems(struct cgroupfs_root *root,
                        RCU_INIT_POINTER(cgrp->subsys[i], NULL);
 
                        cgroup_subsys[i]->root = &cgroup_dummy_root;
-                       list_move(&ss->sibling, &cgroup_dummy_root.subsys_list);
-
-                       /* subsystem is now free - drop reference on module */
-                       module_put(ss->module);
                        root->subsys_mask &= ~bit;
                }
        }
 
-       /*
-        * Mark @root has finished binding subsystems.  @root->subsys_mask
-        * now matches the bound subsystems.
-        */
-       root->flags |= CGRP_ROOT_SUBSYS_BOUND;
-
+       kernfs_activate(cgrp->kn);
        return 0;
-
-out_put:
-       for_each_subsys(ss, i)
-               if (pinned & (1 << i))
-                       module_put(ss->module);
-       return ret;
 }
 
-static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
+static int cgroup_show_options(struct seq_file *seq,
+                              struct kernfs_root *kf_root)
 {
-       struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
+       struct cgroupfs_root *root = cgroup_root_from_kf(kf_root);
        struct cgroup_subsys *ss;
+       int ssid;
 
-       mutex_lock(&cgroup_root_mutex);
-       for_each_root_subsys(root, ss)
-               seq_printf(seq, ",%s", ss->name);
+       for_each_subsys(ss, ssid)
+               if (root->subsys_mask & (1 << ssid))
+                       seq_printf(seq, ",%s", ss->name);
        if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
                seq_puts(seq, ",sane_behavior");
        if (root->flags & CGRP_ROOT_NOPREFIX)
                seq_puts(seq, ",noprefix");
        if (root->flags & CGRP_ROOT_XATTR)
                seq_puts(seq, ",xattr");
+
+       spin_lock(&release_agent_path_lock);
        if (strlen(root->release_agent_path))
                seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+       spin_unlock(&release_agent_path_lock);
+
        if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags))
                seq_puts(seq, ",clone_children");
        if (strlen(root->name))
                seq_printf(seq, ",name=%s", root->name);
-       mutex_unlock(&cgroup_root_mutex);
        return 0;
 }
 
@@ -1124,9 +1092,6 @@ struct cgroup_sb_opts {
        char *name;
        /* User explicitly requested empty subsystem */
        bool none;
-
-       struct cgroupfs_root *new_root;
-
 };
 
 /*
@@ -1146,7 +1111,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        BUG_ON(!mutex_is_locked(&cgroup_mutex));
 
 #ifdef CONFIG_CPUSETS
-       mask = ~(1UL << cpuset_subsys_id);
+       mask = ~(1UL << cpuset_cgrp_id);
 #endif
 
        memset(opts, 0, sizeof(*opts));
@@ -1251,13 +1216,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
                pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
 
-               if (opts->flags & CGRP_ROOT_NOPREFIX) {
-                       pr_err("cgroup: sane_behavior: noprefix is not allowed\n");
-                       return -EINVAL;
-               }
-
-               if (opts->cpuset_clone_children) {
-                       pr_err("cgroup: sane_behavior: clone_children is not allowed\n");
+               if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) ||
+                   opts->cpuset_clone_children || opts->release_agent ||
+                   opts->name) {
+                       pr_err("cgroup: sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n");
                        return -EINVAL;
                }
        }
@@ -1285,11 +1247,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        return 0;
 }
 
-static int cgroup_remount(struct super_block *sb, int *flags, char *data)
+static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
 {
        int ret = 0;
-       struct cgroupfs_root *root = sb->s_fs_info;
-       struct cgroup *cgrp = &root->top_cgroup;
+       struct cgroupfs_root *root = cgroup_root_from_kf(kf_root);
        struct cgroup_sb_opts opts;
        unsigned long added_mask, removed_mask;
 
@@ -1298,9 +1259,8 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
                return -EINVAL;
        }
 
-       mutex_lock(&cgrp->dentry->d_inode->i_mutex);
+       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
-       mutex_lock(&cgroup_root_mutex);
 
        /* See what subsystems are wanted */
        ret = parse_cgroupfs_options(data, &opts);
@@ -1325,7 +1285,7 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
        }
 
        /* remounting is not allowed for populated hierarchies */
-       if (root->number_of_cgroups > 1) {
+       if (!list_empty(&root->top_cgroup.children)) {
                ret = -EBUSY;
                goto out_unlock;
        }
@@ -1334,105 +1294,48 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
        if (ret)
                goto out_unlock;
 
-       if (opts.release_agent)
+       if (opts.release_agent) {
+               spin_lock(&release_agent_path_lock);
                strcpy(root->release_agent_path, opts.release_agent);
+               spin_unlock(&release_agent_path_lock);
+       }
  out_unlock:
        kfree(opts.release_agent);
        kfree(opts.name);
-       mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
+       mutex_unlock(&cgroup_tree_mutex);
        return ret;
 }
 
-static const struct super_operations cgroup_ops = {
-       .statfs = simple_statfs,
-       .drop_inode = generic_delete_inode,
-       .show_options = cgroup_show_options,
-       .remount_fs = cgroup_remount,
-};
-
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
 {
+       atomic_set(&cgrp->refcnt, 1);
        INIT_LIST_HEAD(&cgrp->sibling);
        INIT_LIST_HEAD(&cgrp->children);
-       INIT_LIST_HEAD(&cgrp->files);
        INIT_LIST_HEAD(&cgrp->cset_links);
        INIT_LIST_HEAD(&cgrp->release_list);
        INIT_LIST_HEAD(&cgrp->pidlists);
        mutex_init(&cgrp->pidlist_mutex);
        cgrp->dummy_css.cgroup = cgrp;
-       INIT_LIST_HEAD(&cgrp->event_list);
-       spin_lock_init(&cgrp->event_list_lock);
-       simple_xattrs_init(&cgrp->xattrs);
 }
 
 static void init_cgroup_root(struct cgroupfs_root *root)
 {
        struct cgroup *cgrp = &root->top_cgroup;
 
-       INIT_LIST_HEAD(&root->subsys_list);
        INIT_LIST_HEAD(&root->root_list);
-       root->number_of_cgroups = 1;
+       atomic_set(&root->nr_cgrps, 1);
        cgrp->root = root;
-       RCU_INIT_POINTER(cgrp->name, &root_cgroup_name);
        init_cgroup_housekeeping(cgrp);
        idr_init(&root->cgroup_idr);
 }
 
-static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end)
+static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
 {
-       int id;
-
-       lockdep_assert_held(&cgroup_mutex);
-       lockdep_assert_held(&cgroup_root_mutex);
-
-       id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end,
-                             GFP_KERNEL);
-       if (id < 0)
-               return id;
-
-       root->hierarchy_id = id;
-       return 0;
-}
-
-static void cgroup_exit_root_id(struct cgroupfs_root *root)
-{
-       lockdep_assert_held(&cgroup_mutex);
-       lockdep_assert_held(&cgroup_root_mutex);
-
-       if (root->hierarchy_id) {
-               idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
-               root->hierarchy_id = 0;
-       }
-}
-
-static int cgroup_test_super(struct super_block *sb, void *data)
-{
-       struct cgroup_sb_opts *opts = data;
-       struct cgroupfs_root *root = sb->s_fs_info;
-
-       /* If we asked for a name then it must match */
-       if (opts->name && strcmp(opts->name, root->name))
-               return 0;
-
-       /*
-        * If we asked for subsystems (or explicitly for no
-        * subsystems) then they must match
-        */
-       if ((opts->subsys_mask || opts->none)
-           && (opts->subsys_mask != root->subsys_mask))
-               return 0;
-
-       return 1;
-}
-
-static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
-{
-       struct cgroupfs_root *root;
+       struct cgroupfs_root *root;
 
        if (!opts->subsys_mask && !opts->none)
-               return NULL;
+               return ERR_PTR(-EINVAL);
 
        root = kzalloc(sizeof(*root), GFP_KERNEL);
        if (!root)
@@ -1440,15 +1343,6 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
 
        init_cgroup_root(root);
 
-       /*
-        * We need to set @root->subsys_mask now so that @root can be
-        * matched by cgroup_test_super() before it finishes
-        * initialization; otherwise, competing mounts with the same
-        * options may try to bind the same subsystems instead of waiting
-        * for the first one leading to unexpected mount errors.
-        * SUBSYS_BOUND will be set once actual binding is complete.
-        */
-       root->subsys_mask = opts->subsys_mask;
        root->flags = opts->flags;
        if (opts->release_agent)
                strcpy(root->release_agent_path, opts->release_agent);
@@ -1459,290 +1353,200 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
        return root;
 }
 
-static void cgroup_free_root(struct cgroupfs_root *root)
+static int cgroup_setup_root(struct cgroupfs_root *root, unsigned long ss_mask)
 {
-       if (root) {
-               /* hierarhcy ID shoulid already have been released */
-               WARN_ON_ONCE(root->hierarchy_id);
-
-               idr_destroy(&root->cgroup_idr);
-               kfree(root);
-       }
-}
+       LIST_HEAD(tmp_links);
+       struct cgroup *root_cgrp = &root->top_cgroup;
+       struct css_set *cset;
+       int i, ret;
 
-static int cgroup_set_super(struct super_block *sb, void *data)
-{
-       int ret;
-       struct cgroup_sb_opts *opts = data;
+       lockdep_assert_held(&cgroup_tree_mutex);
+       lockdep_assert_held(&cgroup_mutex);
 
-       /* If we don't have a new root, we can't set up a new sb */
-       if (!opts->new_root)
-               return -EINVAL;
+       ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
+       if (ret < 0)
+               goto out;
+       root_cgrp->id = ret;
 
-       BUG_ON(!opts->subsys_mask && !opts->none);
+       /*
+        * We're accessing css_set_count without locking css_set_lock here,
+        * but that's OK - it can only be increased by someone holding
+        * cgroup_lock, and that's us. The worst that can happen is that we
+        * have some link structures left over.
+        */
+       ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
+       if (ret)
+               goto out;
 
-       ret = set_anon_super(sb, NULL);
+       /* ID 0 is reserved for dummy root, 1 for unified hierarchy */
+       ret = cgroup_init_root_id(root, 2, 0);
        if (ret)
-               return ret;
+               goto out;
+
+       root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
+                                          KERNFS_ROOT_CREATE_DEACTIVATED,
+                                          root_cgrp);
+       if (IS_ERR(root->kf_root)) {
+               ret = PTR_ERR(root->kf_root);
+               goto exit_root_id;
+       }
+       root_cgrp->kn = root->kf_root->kn;
 
-       sb->s_fs_info = opts->new_root;
-       opts->new_root->sb = sb;
+       ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
+       if (ret)
+               goto destroy_root;
 
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
-       sb->s_magic = CGROUP_SUPER_MAGIC;
-       sb->s_op = &cgroup_ops;
+       ret = rebind_subsystems(root, ss_mask, 0);
+       if (ret)
+               goto destroy_root;
 
-       return 0;
-}
+       /*
+        * There must be no failure case after here, since rebinding takes
+        * care of subsystems' refcounts, which are explicitly dropped in
+        * the failure exit path.
+        */
+       list_add(&root->root_list, &cgroup_roots);
+       cgroup_root_count++;
 
-static int cgroup_get_rootdir(struct super_block *sb)
-{
-       static const struct dentry_operations cgroup_dops = {
-               .d_iput = cgroup_diput,
-               .d_delete = always_delete_dentry,
-       };
+       /*
+        * Link the top cgroup in this hierarchy into all the css_set
+        * objects.
+        */
+       write_lock(&css_set_lock);
+       hash_for_each(css_set_table, i, cset, hlist)
+               link_css_set(&tmp_links, cset, root_cgrp);
+       write_unlock(&css_set_lock);
 
-       struct inode *inode =
-               cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
+       BUG_ON(!list_empty(&root_cgrp->children));
+       BUG_ON(atomic_read(&root->nr_cgrps) != 1);
 
-       if (!inode)
-               return -ENOMEM;
+       kernfs_activate(root_cgrp->kn);
+       ret = 0;
+       goto out;
 
-       inode->i_fop = &simple_dir_operations;
-       inode->i_op = &cgroup_dir_inode_operations;
-       /* directories start off with i_nlink == 2 (for "." entry) */
-       inc_nlink(inode);
-       sb->s_root = d_make_root(inode);
-       if (!sb->s_root)
-               return -ENOMEM;
-       /* for everything else we want ->d_op set */
-       sb->s_d_op = &cgroup_dops;
-       return 0;
+destroy_root:
+       kernfs_destroy_root(root->kf_root);
+       root->kf_root = NULL;
+exit_root_id:
+       cgroup_exit_root_id(root);
+out:
+       free_cgrp_cset_links(&tmp_links);
+       return ret;
 }
 
 static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                         int flags, const char *unused_dev_name,
                         void *data)
 {
-       struct cgroup_sb_opts opts;
        struct cgroupfs_root *root;
-       int ret = 0;
-       struct super_block *sb;
-       struct cgroupfs_root *new_root;
-       struct list_head tmp_links;
-       struct inode *inode;
-       const struct cred *cred;
-
-       /* First find the desired set of subsystems */
-       mutex_lock(&cgroup_mutex);
-       ret = parse_cgroupfs_options(data, &opts);
-       mutex_unlock(&cgroup_mutex);
-       if (ret)
-               goto out_err;
+       struct cgroup_sb_opts opts;
+       struct dentry *dentry;
+       int ret;
 
        /*
-        * Allocate a new cgroup root. We may not need it if we're
-        * reusing an existing hierarchy.
+        * The first time anyone tries to mount a cgroup, enable the list
+        * linking each css_set to its tasks and fix up all existing tasks.
         */
-       new_root = cgroup_root_from_opts(&opts);
-       if (IS_ERR(new_root)) {
-               ret = PTR_ERR(new_root);
-               goto out_err;
-       }
-       opts.new_root = new_root;
-
-       /* Locate an existing or new sb for this hierarchy */
-       sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts);
-       if (IS_ERR(sb)) {
-               ret = PTR_ERR(sb);
-               cgroup_free_root(opts.new_root);
-               goto out_err;
-       }
-
-       root = sb->s_fs_info;
-       BUG_ON(!root);
-       if (root == opts.new_root) {
-               /* We used the new root structure, so this is a new hierarchy */
-               struct cgroup *root_cgrp = &root->top_cgroup;
-               struct cgroupfs_root *existing_root;
-               int i;
-               struct css_set *cset;
-
-               BUG_ON(sb->s_root != NULL);
-
-               ret = cgroup_get_rootdir(sb);
-               if (ret)
-                       goto drop_new_super;
-               inode = sb->s_root->d_inode;
-
-               mutex_lock(&inode->i_mutex);
-               mutex_lock(&cgroup_mutex);
-               mutex_lock(&cgroup_root_mutex);
-
-               root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
-                                          0, 1, GFP_KERNEL);
-               if (root_cgrp->id < 0)
-                       goto unlock_drop;
-
-               /* Check for name clashes with existing mounts */
-               ret = -EBUSY;
-               if (strlen(root->name))
-                       for_each_active_root(existing_root)
-                               if (!strcmp(existing_root->name, root->name))
-                                       goto unlock_drop;
-
-               /*
-                * We're accessing css_set_count without locking
-                * css_set_lock here, but that's OK - it can only be
-                * increased by someone holding cgroup_lock, and
-                * that's us. The worst that can happen is that we
-                * have some link structures left over
-                */
-               ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
-               if (ret)
-                       goto unlock_drop;
-
-               /* ID 0 is reserved for dummy root, 1 for unified hierarchy */
-               ret = cgroup_init_root_id(root, 2, 0);
-               if (ret)
-                       goto unlock_drop;
-
-               sb->s_root->d_fsdata = root_cgrp;
-               root_cgrp->dentry = sb->s_root;
-
-               /*
-                * We're inside get_sb() and will call lookup_one_len() to
-                * create the root files, which doesn't work if SELinux is
-                * in use.  The following cred dancing somehow works around
-                * it.  See 2ce9738ba ("cgroupfs: use init_cred when
-                * populating new cgroupfs mount") for more details.
-                */
-               cred = override_creds(&init_cred);
-
-               ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
-               if (ret)
-                       goto rm_base_files;
+       if (!use_task_css_set_links)
+               cgroup_enable_task_cg_lists();
+retry:
+       mutex_lock(&cgroup_tree_mutex);
+       mutex_lock(&cgroup_mutex);
 
-               ret = rebind_subsystems(root, root->subsys_mask, 0);
-               if (ret)
-                       goto rm_base_files;
+       /* First find the desired set of subsystems */
+       ret = parse_cgroupfs_options(data, &opts);
+       if (ret)
+               goto out_unlock;
 
-               revert_creds(cred);
+       /* look for a matching existing root */
+       for_each_active_root(root) {
+               bool name_match = false;
 
                /*
-                * There must be no failure case after here, since rebinding
-                * takes care of subsystems' refcounts, which are explicitly
-                * dropped in the failure exit path.
+                * If we asked for a name then it must match.  Also, if
+                * name matches but subsys_mask doesn't, we should fail.
+                * Remember whether name matched.
                 */
+               if (opts.name) {
+                       if (strcmp(opts.name, root->name))
+                               continue;
+                       name_match = true;
+               }
 
-               list_add(&root->root_list, &cgroup_roots);
-               cgroup_root_count++;
-
-               /* Link the top cgroup in this hierarchy into all
-                * the css_set objects */
-               write_lock(&css_set_lock);
-               hash_for_each(css_set_table, i, cset, hlist)
-                       link_css_set(&tmp_links, cset, root_cgrp);
-               write_unlock(&css_set_lock);
-
-               free_cgrp_cset_links(&tmp_links);
-
-               BUG_ON(!list_empty(&root_cgrp->children));
-               BUG_ON(root->number_of_cgroups != 1);
-
-               mutex_unlock(&cgroup_root_mutex);
-               mutex_unlock(&cgroup_mutex);
-               mutex_unlock(&inode->i_mutex);
-       } else {
                /*
-                * We re-used an existing hierarchy - the new root (if
-                * any) is not needed
+                * If we asked for subsystems (or explicitly for no
+                * subsystems) then they must match.
                 */
-               cgroup_free_root(opts.new_root);
+               if ((opts.subsys_mask || opts.none) &&
+                   (opts.subsys_mask != root->subsys_mask)) {
+                       if (!name_match)
+                               continue;
+                       ret = -EBUSY;
+                       goto out_unlock;
+               }
 
                if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) {
                        if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
                                pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
                                ret = -EINVAL;
-                               goto drop_new_super;
+                               goto out_unlock;
                        } else {
                                pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
                        }
                }
-       }
 
-       kfree(opts.release_agent);
-       kfree(opts.name);
-       return dget(sb->s_root);
-
- rm_base_files:
-       free_cgrp_cset_links(&tmp_links);
-       cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false);
-       revert_creds(cred);
- unlock_drop:
-       cgroup_exit_root_id(root);
-       mutex_unlock(&cgroup_root_mutex);
-       mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&inode->i_mutex);
- drop_new_super:
-       deactivate_locked_super(sb);
- out_err:
-       kfree(opts.release_agent);
-       kfree(opts.name);
-       return ERR_PTR(ret);
-}
-
-static void cgroup_kill_sb(struct super_block *sb) {
-       struct cgroupfs_root *root = sb->s_fs_info;
-       struct cgroup *cgrp = &root->top_cgroup;
-       struct cgrp_cset_link *link, *tmp_link;
-       int ret;
-
-       BUG_ON(!root);
-
-       BUG_ON(root->number_of_cgroups != 1);
-       BUG_ON(!list_empty(&cgrp->children));
+               /*
+                * A root's lifetime is governed by its top cgroup.  Zero
+                * ref indicates that the root is being destroyed.  Wait for
+                * destruction to complete so that the subsystems are free.
+                * We could use a waitqueue for the wait, but this path is
+                * super cold.  Let's just sleep for a bit and retry.
+                */
+               if (!atomic_inc_not_zero(&root->top_cgroup.refcnt)) {
+                       mutex_unlock(&cgroup_mutex);
+                       mutex_unlock(&cgroup_tree_mutex);
+                       msleep(10);
+                       goto retry;
+               }
 
-       mutex_lock(&cgrp->dentry->d_inode->i_mutex);
-       mutex_lock(&cgroup_mutex);
-       mutex_lock(&cgroup_root_mutex);
+               ret = 0;
+               goto out_unlock;
+       }
 
-       /* Rebind all subsystems back to the default hierarchy */
-       if (root->flags & CGRP_ROOT_SUBSYS_BOUND) {
-               ret = rebind_subsystems(root, 0, root->subsys_mask);
-               /* Shouldn't be able to fail ... */
-               BUG_ON(ret);
+       /* no such thing, create a new one */
+       root = cgroup_root_from_opts(&opts);
+       if (IS_ERR(root)) {
+               ret = PTR_ERR(root);
+               goto out_unlock;
        }
 
-       /*
-        * Release all the links from cset_links to this hierarchy's
-        * root cgroup
-        */
-       write_lock(&css_set_lock);
+       ret = cgroup_setup_root(root, opts.subsys_mask);
+       if (ret)
+               cgroup_free_root(root);
 
-       list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
-               list_del(&link->cset_link);
-               list_del(&link->cgrp_link);
-               kfree(link);
-       }
-       write_unlock(&css_set_lock);
+out_unlock:
+       mutex_unlock(&cgroup_mutex);
+       mutex_unlock(&cgroup_tree_mutex);
 
-       if (!list_empty(&root->root_list)) {
-               list_del(&root->root_list);
-               cgroup_root_count--;
-       }
+       kfree(opts.release_agent);
+       kfree(opts.name);
 
-       cgroup_exit_root_id(root);
+       if (ret)
+               return ERR_PTR(ret);
 
-       mutex_unlock(&cgroup_root_mutex);
-       mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
+       dentry = kernfs_mount(fs_type, flags, root->kf_root);
+       if (IS_ERR(dentry))
+               cgroup_put(&root->top_cgroup);
+       return dentry;
+}
 
-       simple_xattrs_free(&cgrp->xattrs);
+static void cgroup_kill_sb(struct super_block *sb)
+{
+       struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
+       struct cgroupfs_root *root = cgroup_root_from_kf(kf_root);
 
-       kill_litter_super(sb);
-       cgroup_free_root(root);
+       cgroup_put(&root->top_cgroup);
+       kernfs_kill_sb(sb);
 }
 
 static struct file_system_type cgroup_fs_type = {
@@ -1753,57 +1557,6 @@ static struct file_system_type cgroup_fs_type = {
 
 static struct kobject *cgroup_kobj;
 
-/**
- * cgroup_path - generate the path of a cgroup
- * @cgrp: the cgroup in question
- * @buf: the buffer to write the path into
- * @buflen: the length of the buffer
- *
- * Writes path of cgroup into buf.  Returns 0 on success, -errno on error.
- *
- * We can't generate cgroup path using dentry->d_name, as accessing
- * dentry->name must be protected by irq-unsafe dentry->d_lock or parent
- * inode's i_mutex, while on the other hand cgroup_path() can be called
- * with some irq-safe spinlocks held.
- */
-int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
-{
-       int ret = -ENAMETOOLONG;
-       char *start;
-
-       if (!cgrp->parent) {
-               if (strlcpy(buf, "/", buflen) >= buflen)
-                       return -ENAMETOOLONG;
-               return 0;
-       }
-
-       start = buf + buflen - 1;
-       *start = '\0';
-
-       rcu_read_lock();
-       do {
-               const char *name = cgroup_name(cgrp);
-               int len;
-
-               len = strlen(name);
-               if ((start -= len) < buf)
-                       goto out;
-               memcpy(start, name, len);
-
-               if (--start < buf)
-                       goto out;
-               *start = '/';
-
-               cgrp = cgrp->parent;
-       } while (cgrp->parent);
-       ret = 0;
-       memmove(buf, start, buf + buflen - start);
-out:
-       rcu_read_unlock();
-       return ret;
-}
-EXPORT_SYMBOL_GPL(cgroup_path);
-
 /**
  * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
  * @task: target task
@@ -1815,16 +1568,14 @@ EXPORT_SYMBOL_GPL(cgroup_path);
  * function grabs cgroup_mutex and shouldn't be used inside locks used by
  * cgroup controller callbacks.
  *
- * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short.
+ * Return value is the same as kernfs_path().
  */
-int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 {
        struct cgroupfs_root *root;
        struct cgroup *cgrp;
-       int hierarchy_id = 1, ret = 0;
-
-       if (buflen < 2)
-               return -ENAMETOOLONG;
+       int hierarchy_id = 1;
+       char *path = NULL;
 
        mutex_lock(&cgroup_mutex);
 
@@ -1832,14 +1583,15 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 
        if (root) {
                cgrp = task_cgroup_from_root(task, root);
-               ret = cgroup_path(cgrp, buf, buflen);
+               path = cgroup_path(cgrp, buf, buflen);
        } else {
                /* if no hierarchy exists, everyone is in "/" */
-               memcpy(buf, "/", 2);
+               if (strlcpy(buf, "/", buflen) < buflen)
+                       path = buf;
        }
 
        mutex_unlock(&cgroup_mutex);
-       return ret;
+       return path;
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
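With the new calling convention, callers check the returned pointer rather than an error code; a minimal sketch:

	char buf[PATH_MAX], *path;

	path = task_cgroup_path(task, buf, sizeof(buf));
	if (path)
		pr_info("task is in cgroup %s\n", path);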
 
@@ -1948,10 +1700,8 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
        rcu_assign_pointer(tsk->cgroups, new_cset);
        task_unlock(tsk);
 
-       /* Update the css_set linked lists if we're using them */
        write_lock(&css_set_lock);
-       if (!list_empty(&tsk->cg_list))
-               list_move(&tsk->cg_list, &new_cset->tasks);
+       list_move(&tsk->cg_list, &new_cset->tasks);
        write_unlock(&css_set_lock);
 
        /*
@@ -1976,8 +1726,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                              bool threadgroup)
 {
        int retval, i, group_size;
-       struct cgroup_subsys *ss, *failed_ss = NULL;
        struct cgroupfs_root *root = cgrp->root;
+       struct cgroup_subsys_state *css, *failed_css = NULL;
        /* threadgroup list cursor and array */
        struct task_struct *leader = tsk;
        struct task_and_cgroup *tc;
@@ -2050,13 +1800,11 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
        /*
         * step 1: check that we can legitimately attach to the cgroup.
         */
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
-
-               if (ss->can_attach) {
-                       retval = ss->can_attach(css, &tset);
+       for_each_css(css, i, cgrp) {
+               if (css->ss->can_attach) {
+                       retval = css->ss->can_attach(css, &tset);
                        if (retval) {
-                               failed_ss = ss;
+                               failed_css = css;
                                goto out_cancel_attach;
                        }
                }
@@ -2092,12 +1840,9 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
        /*
         * step 4: do subsystem attach callbacks.
         */
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
-
-               if (ss->attach)
-                       ss->attach(css, &tset);
-       }
+       for_each_css(css, i, cgrp)
+               if (css->ss->attach)
+                       css->ss->attach(css, &tset);
 
        /*
         * step 5: success! and cleanup
@@ -2114,13 +1859,11 @@ out_put_css_set_refs:
        }
 out_cancel_attach:
        if (retval) {
-               for_each_root_subsys(root, ss) {
-                       struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
-
-                       if (ss == failed_ss)
+               for_each_css(css, i, cgrp) {
+                       if (css == failed_css)
                                break;
-                       if (ss->cancel_attach)
-                               ss->cancel_attach(css, &tset);
+                       if (css->ss->cancel_attach)
+                               css->ss->cancel_attach(css, &tset);
                }
        }
 out_free_group_list:
@@ -2148,7 +1891,7 @@ retry_find_task:
                tsk = find_task_by_vpid(pid);
                if (!tsk) {
                        rcu_read_unlock();
-                       ret= -ESRCH;
+                       ret = -ESRCH;
                        goto out_unlock_cgroup;
                }
                /*
@@ -2248,22 +1991,22 @@ static int cgroup_procs_write(struct cgroup_subsys_state *css,
 static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
                                      struct cftype *cft, const char *buffer)
 {
-       BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX);
-       if (strlen(buffer) >= PATH_MAX)
-               return -EINVAL;
+       struct cgroupfs_root *root = css->cgroup->root;
+
+       BUILD_BUG_ON(sizeof(root->release_agent_path) < PATH_MAX);
        if (!cgroup_lock_live_group(css->cgroup))
                return -ENODEV;
-       mutex_lock(&cgroup_root_mutex);
-       strcpy(css->cgroup->root->release_agent_path, buffer);
-       mutex_unlock(&cgroup_root_mutex);
+       spin_lock(&release_agent_path_lock);
+       strlcpy(root->release_agent_path, buffer,
+               sizeof(root->release_agent_path));
+       spin_unlock(&release_agent_path_lock);
        mutex_unlock(&cgroup_mutex);
        return 0;
 }
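The writer above holds both cgroup_mutex and release_agent_path_lock, so a
reader may rely on either one alone. A sketch of the reader side under the
new spinlock (a hypothetical helper mirroring how a notification path might
copy the string):

static void example_copy_release_agent(struct cgroupfs_root *root,
                                       char *agentbuf, size_t len)
{
        /* release_agent_path is stable while we hold the spinlock */
        spin_lock(&release_agent_path_lock);
        strlcpy(agentbuf, root->release_agent_path, len);
        spin_unlock(&release_agent_path_lock);
}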
 
-static int cgroup_release_agent_show(struct cgroup_subsys_state *css,
-                                    struct cftype *cft, struct seq_file *seq)
+static int cgroup_release_agent_show(struct seq_file *seq, void *v)
 {
-       struct cgroup *cgrp = css->cgroup;
+       struct cgroup *cgrp = seq_css(seq)->cgroup;
 
        if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
@@ -2273,264 +2016,114 @@ static int cgroup_release_agent_show(struct cgroup_subsys_state *css,
        return 0;
 }
 
-static int cgroup_sane_behavior_show(struct cgroup_subsys_state *css,
-                                    struct cftype *cft, struct seq_file *seq)
+static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
 {
-       seq_printf(seq, "%d\n", cgroup_sane_behavior(css->cgroup));
+       struct cgroup *cgrp = seq_css(seq)->cgroup;
+
+       seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
        return 0;
 }
 
-/* A buffer size big enough for numbers or short strings */
-#define CGROUP_LOCAL_BUFFER_SIZE 64
-
-static ssize_t cgroup_write_X64(struct cgroup_subsys_state *css,
-                               struct cftype *cft, struct file *file,
-                               const char __user *userbuf, size_t nbytes,
-                               loff_t *unused_ppos)
+static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
+                                size_t nbytes, loff_t off)
 {
-       char buffer[CGROUP_LOCAL_BUFFER_SIZE];
-       int retval = 0;
-       char *end;
+       struct cgroup *cgrp = of->kn->parent->priv;
+       struct cftype *cft = of->kn->priv;
+       struct cgroup_subsys_state *css;
+       int ret;
 
-       if (!nbytes)
-               return -EINVAL;
-       if (nbytes >= sizeof(buffer))
-               return -E2BIG;
-       if (copy_from_user(buffer, userbuf, nbytes))
-               return -EFAULT;
-
-       buffer[nbytes] = 0;     /* nul-terminate */
-       if (cft->write_u64) {
-               u64 val = simple_strtoull(strstrip(buffer), &end, 0);
-               if (*end)
-                       return -EINVAL;
-               retval = cft->write_u64(css, cft, val);
+       /*
+        * kernfs guarantees that a file isn't deleted with operations in
+        * flight, which means that the matching css is and stays alive and
+        * doesn't need to be pinned.  The RCU locking is not necessary
+        * either.  It's just for the convenience of using cgroup_css().
+        */
+       rcu_read_lock();
+       css = cgroup_css(cgrp, cft->ss);
+       rcu_read_unlock();
+
+       if (cft->write_string) {
+               ret = cft->write_string(css, cft, strstrip(buf));
+       } else if (cft->write_u64) {
+               unsigned long long v;
+               ret = kstrtoull(buf, 0, &v);
+               if (!ret)
+                       ret = cft->write_u64(css, cft, v);
+       } else if (cft->write_s64) {
+               long long v;
+               ret = kstrtoll(buf, 0, &v);
+               if (!ret)
+                       ret = cft->write_s64(css, cft, v);
+       } else if (cft->trigger) {
+               ret = cft->trigger(css, (unsigned int)cft->private);
        } else {
-               s64 val = simple_strtoll(strstrip(buffer), &end, 0);
-               if (*end)
-                       return -EINVAL;
-               retval = cft->write_s64(css, cft, val);
+               ret = -EINVAL;
        }
-       if (!retval)
-               retval = nbytes;
-       return retval;
+
+       return ret ?: nbytes;
 }
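The dispatcher above parses the buffer and hands the value to whichever
handler the cftype provides. A sketch of a hypothetical ->write_u64 handler
on the receiving end (the name and range check are illustrative):

static int example_weight_write_u64(struct cgroup_subsys_state *css,
                                    struct cftype *cft, u64 val)
{
        if (val < 1 || val > 1000)
                return -EINVAL;
        /* store @val into the controller's per-css state here */
        return 0;       /* 0 makes cgroup_file_write() return nbytes */
}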
 
-static ssize_t cgroup_write_string(struct cgroup_subsys_state *css,
-                                  struct cftype *cft, struct file *file,
-                                  const char __user *userbuf, size_t nbytes,
-                                  loff_t *unused_ppos)
+static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
 {
-       char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
-       int retval = 0;
-       size_t max_bytes = cft->max_write_len;
-       char *buffer = local_buffer;
-
-       if (!max_bytes)
-               max_bytes = sizeof(local_buffer) - 1;
-       if (nbytes >= max_bytes)
-               return -E2BIG;
-       /* Allocate a dynamic buffer if we need one */
-       if (nbytes >= sizeof(local_buffer)) {
-               buffer = kmalloc(nbytes + 1, GFP_KERNEL);
-               if (buffer == NULL)
-                       return -ENOMEM;
-       }
-       if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
-               retval = -EFAULT;
-               goto out;
-       }
-
-       buffer[nbytes] = 0;     /* nul-terminate */
-       retval = cft->write_string(css, cft, strstrip(buffer));
-       if (!retval)
-               retval = nbytes;
-out:
-       if (buffer != local_buffer)
-               kfree(buffer);
-       return retval;
+       return seq_cft(seq)->seq_start(seq, ppos);
 }
 
-static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
-                                size_t nbytes, loff_t *ppos)
+static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
-       struct cfent *cfe = __d_cfe(file->f_dentry);
-       struct cftype *cft = __d_cft(file->f_dentry);
-       struct cgroup_subsys_state *css = cfe->css;
-
-       if (cft->write)
-               return cft->write(css, cft, file, buf, nbytes, ppos);
-       if (cft->write_u64 || cft->write_s64)
-               return cgroup_write_X64(css, cft, file, buf, nbytes, ppos);
-       if (cft->write_string)
-               return cgroup_write_string(css, cft, file, buf, nbytes, ppos);
-       if (cft->trigger) {
-               int ret = cft->trigger(css, (unsigned int)cft->private);
-               return ret ? ret : nbytes;
-       }
-       return -EINVAL;
+       return seq_cft(seq)->seq_next(seq, v, ppos);
 }
 
-static ssize_t cgroup_read_u64(struct cgroup_subsys_state *css,
-                              struct cftype *cft, struct file *file,
-                              char __user *buf, size_t nbytes, loff_t *ppos)
+static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
 {
-       char tmp[CGROUP_LOCAL_BUFFER_SIZE];
-       u64 val = cft->read_u64(css, cft);
-       int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
-
-       return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
+       seq_cft(seq)->seq_stop(seq, v);
 }
 
-static ssize_t cgroup_read_s64(struct cgroup_subsys_state *css,
-                              struct cftype *cft, struct file *file,
-                              char __user *buf, size_t nbytes, loff_t *ppos)
+static int cgroup_seqfile_show(struct seq_file *m, void *arg)
 {
-       char tmp[CGROUP_LOCAL_BUFFER_SIZE];
-       s64 val = cft->read_s64(css, cft);
-       int len = sprintf(tmp, "%lld\n", (long long) val);
+       struct cftype *cft = seq_cft(m);
+       struct cgroup_subsys_state *css = seq_css(m);
 
-       return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
-}
-
-static ssize_t cgroup_file_read(struct file *file, char __user *buf,
-                               size_t nbytes, loff_t *ppos)
-{
-       struct cfent *cfe = __d_cfe(file->f_dentry);
-       struct cftype *cft = __d_cft(file->f_dentry);
-       struct cgroup_subsys_state *css = cfe->css;
+       if (cft->seq_show)
+               return cft->seq_show(m, arg);
 
-       if (cft->read)
-               return cft->read(css, cft, file, buf, nbytes, ppos);
        if (cft->read_u64)
-               return cgroup_read_u64(css, cft, file, buf, nbytes, ppos);
-       if (cft->read_s64)
-               return cgroup_read_s64(css, cft, file, buf, nbytes, ppos);
-       return -EINVAL;
-}
-
-/*
- * seqfile ops/methods for returning structured data. Currently just
- * supports string->u64 maps, but can be extended in future.
- */
-
-static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
-{
-       struct seq_file *sf = cb->state;
-       return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
-}
-
-static int cgroup_seqfile_show(struct seq_file *m, void *arg)
-{
-       struct cfent *cfe = m->private;
-       struct cftype *cft = cfe->type;
-       struct cgroup_subsys_state *css = cfe->css;
-
-       if (cft->read_map) {
-               struct cgroup_map_cb cb = {
-                       .fill = cgroup_map_add,
-                       .state = m,
-               };
-               return cft->read_map(css, cft, &cb);
-       }
-       return cft->read_seq_string(css, cft, m);
+               seq_printf(m, "%llu\n", cft->read_u64(css, cft));
+       else if (cft->read_s64)
+               seq_printf(m, "%lld\n", cft->read_s64(css, cft));
+       else
+               return -EINVAL;
+       return 0;
 }
 
-static const struct file_operations cgroup_seqfile_operations = {
-       .read = seq_read,
-       .write = cgroup_file_write,
-       .llseek = seq_lseek,
-       .release = cgroup_file_release,
+static struct kernfs_ops cgroup_kf_single_ops = {
+       .atomic_write_len       = PAGE_SIZE,
+       .write                  = cgroup_file_write,
+       .seq_show               = cgroup_seqfile_show,
 };
 
-static int cgroup_file_open(struct inode *inode, struct file *file)
-{
-       struct cfent *cfe = __d_cfe(file->f_dentry);
-       struct cftype *cft = __d_cft(file->f_dentry);
-       struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent);
-       struct cgroup_subsys_state *css;
-       int err;
-
-       err = generic_file_open(inode, file);
-       if (err)
-               return err;
-
-       /*
-        * If the file belongs to a subsystem, pin the css.  Will be
-        * unpinned either on open failure or release.  This ensures that
-        * @css stays alive for all file operations.
-        */
-       rcu_read_lock();
-       css = cgroup_css(cgrp, cft->ss);
-       if (cft->ss && !css_tryget(css))
-               css = NULL;
-       rcu_read_unlock();
-
-       if (!css)
-               return -ENODEV;
-
-       /*
-        * @cfe->css is used by read/write/close to determine the
-        * associated css.  @file->private_data would be a better place but
-        * that's already used by seqfile.  Multiple accessors may use it
-        * simultaneously which is okay as the association never changes.
-        */
-       WARN_ON_ONCE(cfe->css && cfe->css != css);
-       cfe->css = css;
-
-       if (cft->read_map || cft->read_seq_string) {
-               file->f_op = &cgroup_seqfile_operations;
-               err = single_open(file, cgroup_seqfile_show, cfe);
-       } else if (cft->open) {
-               err = cft->open(inode, file);
-       }
-
-       if (css->ss && err)
-               css_put(css);
-       return err;
-}
-
-static int cgroup_file_release(struct inode *inode, struct file *file)
-{
-       struct cfent *cfe = __d_cfe(file->f_dentry);
-       struct cftype *cft = __d_cft(file->f_dentry);
-       struct cgroup_subsys_state *css = cfe->css;
-       int ret = 0;
-
-       if (cft->release)
-               ret = cft->release(inode, file);
-       if (css->ss)
-               css_put(css);
-       if (file->f_op == &cgroup_seqfile_operations)
-               single_release(inode, file);
-       return ret;
-}
+static struct kernfs_ops cgroup_kf_ops = {
+       .atomic_write_len       = PAGE_SIZE,
+       .write                  = cgroup_file_write,
+       .seq_start              = cgroup_seqfile_start,
+       .seq_next               = cgroup_seqfile_next,
+       .seq_stop               = cgroup_seqfile_stop,
+       .seq_show               = cgroup_seqfile_show,
+};
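Which of the two ops tables a file gets is decided later in
cgroup_init_cftypes(): a cftype supplying its own seq_start/next/stop is
routed to cgroup_kf_ops, everything else to the single-show table above. A
hedged sketch of a cftype taking the full-iteration path (all handlers are
assumed, not real kernel files):

static void *example_seq_start(struct seq_file *s, loff_t *pos);
static void *example_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void example_seq_stop(struct seq_file *s, void *v);
static int example_seq_show(struct seq_file *s, void *v);

static struct cftype example_iter_cfts[] = {
        {
                .name           = "example.items",
                .seq_start      = example_seq_start,
                .seq_next       = example_seq_next,
                .seq_stop       = example_seq_stop,
                .seq_show       = example_seq_show,
        },
        { }     /* terminate */
};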
 
 /*
  * cgroup_rename - Only allow simple rename of directories in place.
  */
-static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
-                           struct inode *new_dir, struct dentry *new_dentry)
+static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
+                        const char *new_name_str)
 {
+       struct cgroup *cgrp = kn->priv;
        int ret;
-       struct cgroup_name *name, *old_name;
-       struct cgroup *cgrp;
-
-       /*
-        * It's convinient to use parent dir's i_mutex to protected
-        * cgrp->name.
-        */
-       lockdep_assert_held(&old_dir->i_mutex);
 
-       if (!S_ISDIR(old_dentry->d_inode->i_mode))
+       if (kernfs_type(kn) != KERNFS_DIR)
                return -ENOTDIR;
-       if (new_dentry->d_inode)
-               return -EEXIST;
-       if (old_dir != new_dir)
+       if (kn->parent != new_parent)
                return -EIO;
 
-       cgrp = __d_cgrp(old_dentry);
-
        /*
         * This isn't a proper migration and its usefulness is very
         * limited.  Disallow if sane_behavior.
@@ -2538,229 +2131,31 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (cgroup_sane_behavior(cgrp))
                return -EPERM;
 
-       name = cgroup_alloc_name(new_dentry);
-       if (!name)
-               return -ENOMEM;
-
-       ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry);
-       if (ret) {
-               kfree(name);
-               return ret;
-       }
-
-       old_name = rcu_dereference_protected(cgrp->name, true);
-       rcu_assign_pointer(cgrp->name, name);
-
-       kfree_rcu(old_name, rcu_head);
-       return 0;
-}
-
-static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
-{
-       if (S_ISDIR(dentry->d_inode->i_mode))
-               return &__d_cgrp(dentry)->xattrs;
-       else
-               return &__d_cfe(dentry)->xattrs;
-}
-
-static inline int xattr_enabled(struct dentry *dentry)
-{
-       struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
-       return root->flags & CGRP_ROOT_XATTR;
-}
-
-static bool is_valid_xattr(const char *name)
-{
-       if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
-           !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
-               return true;
-       return false;
-}
-
-static int cgroup_setxattr(struct dentry *dentry, const char *name,
-                          const void *val, size_t size, int flags)
-{
-       if (!xattr_enabled(dentry))
-               return -EOPNOTSUPP;
-       if (!is_valid_xattr(name))
-               return -EINVAL;
-       return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags);
-}
-
-static int cgroup_removexattr(struct dentry *dentry, const char *name)
-{
-       if (!xattr_enabled(dentry))
-               return -EOPNOTSUPP;
-       if (!is_valid_xattr(name))
-               return -EINVAL;
-       return simple_xattr_remove(__d_xattrs(dentry), name);
-}
-
-static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name,
-                              void *buf, size_t size)
-{
-       if (!xattr_enabled(dentry))
-               return -EOPNOTSUPP;
-       if (!is_valid_xattr(name))
-               return -EINVAL;
-       return simple_xattr_get(__d_xattrs(dentry), name, buf, size);
-}
-
-static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size)
-{
-       if (!xattr_enabled(dentry))
-               return -EOPNOTSUPP;
-       return simple_xattr_list(__d_xattrs(dentry), buf, size);
-}
-
-static const struct file_operations cgroup_file_operations = {
-       .read = cgroup_file_read,
-       .write = cgroup_file_write,
-       .llseek = generic_file_llseek,
-       .open = cgroup_file_open,
-       .release = cgroup_file_release,
-};
-
-static const struct inode_operations cgroup_file_inode_operations = {
-       .setxattr = cgroup_setxattr,
-       .getxattr = cgroup_getxattr,
-       .listxattr = cgroup_listxattr,
-       .removexattr = cgroup_removexattr,
-};
-
-static const struct inode_operations cgroup_dir_inode_operations = {
-       .lookup = simple_lookup,
-       .mkdir = cgroup_mkdir,
-       .rmdir = cgroup_rmdir,
-       .rename = cgroup_rename,
-       .setxattr = cgroup_setxattr,
-       .getxattr = cgroup_getxattr,
-       .listxattr = cgroup_listxattr,
-       .removexattr = cgroup_removexattr,
-};
-
-/*
- * Check if a file is a control file
- */
-static inline struct cftype *__file_cft(struct file *file)
-{
-       if (file_inode(file)->i_fop != &cgroup_file_operations)
-               return ERR_PTR(-EINVAL);
-       return __d_cft(file->f_dentry);
-}
-
-static int cgroup_create_file(struct dentry *dentry, umode_t mode,
-                               struct super_block *sb)
-{
-       struct inode *inode;
-
-       if (!dentry)
-               return -ENOENT;
-       if (dentry->d_inode)
-               return -EEXIST;
-
-       inode = cgroup_new_inode(mode, sb);
-       if (!inode)
-               return -ENOMEM;
-
-       if (S_ISDIR(mode)) {
-               inode->i_op = &cgroup_dir_inode_operations;
-               inode->i_fop = &simple_dir_operations;
-
-               /* start off with i_nlink == 2 (for "." entry) */
-               inc_nlink(inode);
-               inc_nlink(dentry->d_parent->d_inode);
-
-               /*
-                * Control reaches here with cgroup_mutex held.
-                * @inode->i_mutex should nest outside cgroup_mutex but we
-                * want to populate it immediately without releasing
-                * cgroup_mutex.  As @inode isn't visible to anyone else
-                * yet, trylock will always succeed without affecting
-                * lockdep checks.
-                */
-               WARN_ON_ONCE(!mutex_trylock(&inode->i_mutex));
-       } else if (S_ISREG(mode)) {
-               inode->i_size = 0;
-               inode->i_fop = &cgroup_file_operations;
-               inode->i_op = &cgroup_file_inode_operations;
-       }
-       d_instantiate(dentry, inode);
-       dget(dentry);   /* Extra count - pin the dentry in core */
-       return 0;
-}
-
-/**
- * cgroup_file_mode - deduce file mode of a control file
- * @cft: the control file in question
- *
- * returns cft->mode if ->mode is not 0
- * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
- * returns S_IRUGO if it has only a read handler
- * returns S_IWUSR if it has only a write hander
- */
-static umode_t cgroup_file_mode(const struct cftype *cft)
-{
-       umode_t mode = 0;
-
-       if (cft->mode)
-               return cft->mode;
-
-       if (cft->read || cft->read_u64 || cft->read_s64 ||
-           cft->read_map || cft->read_seq_string)
-               mode |= S_IRUGO;
+       mutex_lock(&cgroup_tree_mutex);
+       mutex_lock(&cgroup_mutex);
 
-       if (cft->write || cft->write_u64 || cft->write_s64 ||
-           cft->write_string || cft->trigger)
-               mode |= S_IWUSR;
+       ret = kernfs_rename(kn, new_parent, new_name_str);
 
-       return mode;
+       mutex_unlock(&cgroup_mutex);
+       mutex_unlock(&cgroup_tree_mutex);
+       return ret;
 }
 
 static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
 {
-       struct dentry *dir = cgrp->dentry;
-       struct cgroup *parent = __d_cgrp(dir);
-       struct dentry *dentry;
-       struct cfent *cfe;
-       int error;
-       umode_t mode;
-       char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
-
-       if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
-           !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
-               strcpy(name, cft->ss->name);
-               strcat(name, ".");
-       }
-       strcat(name, cft->name);
-
-       BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
-
-       cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
-       if (!cfe)
-               return -ENOMEM;
+       char name[CGROUP_FILE_NAME_MAX];
+       struct kernfs_node *kn;
+       struct lock_class_key *key = NULL;
 
-       dentry = lookup_one_len(name, dir, strlen(name));
-       if (IS_ERR(dentry)) {
-               error = PTR_ERR(dentry);
-               goto out;
-       }
-
-       cfe->type = (void *)cft;
-       cfe->dentry = dentry;
-       dentry->d_fsdata = cfe;
-       simple_xattrs_init(&cfe->xattrs);
-
-       mode = cgroup_file_mode(cft);
-       error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
-       if (!error) {
-               list_add_tail(&cfe->node, &parent->files);
-               cfe = NULL;
-       }
-       dput(dentry);
-out:
-       kfree(cfe);
-       return error;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       key = &cft->lockdep_key;
+#endif
+       kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
+                                 cgroup_file_mode(cft), 0, cft->kf_ops, cft,
+                                 NULL, false, key);
+       if (IS_ERR(kn))
+               return PTR_ERR(kn);
+       return 0;
 }
 
 /**
@@ -2780,8 +2175,7 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
        struct cftype *cft;
        int ret;
 
-       lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
-       lockdep_assert_held(&cgroup_mutex);
+       lockdep_assert_held(&cgroup_tree_mutex);
 
        for (cft = cfts; cft->name[0] != '\0'; cft++) {
                /* does cft->flags tell us to skip this file on @cgrp? */
@@ -2806,115 +2200,96 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
        return 0;
 }
 
-static void cgroup_cfts_prepare(void)
-       __acquires(&cgroup_mutex)
-{
-       /*
-        * Thanks to the entanglement with vfs inode locking, we can't walk
-        * the existing cgroups under cgroup_mutex and create files.
-        * Instead, we use css_for_each_descendant_pre() and drop RCU read
-        * lock before calling cgroup_addrm_files().
-        */
-       mutex_lock(&cgroup_mutex);
-}
-
-static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
-       __releases(&cgroup_mutex)
+static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
 {
        LIST_HEAD(pending);
        struct cgroup_subsys *ss = cfts[0].ss;
        struct cgroup *root = &ss->root->top_cgroup;
-       struct super_block *sb = ss->root->sb;
-       struct dentry *prev = NULL;
-       struct inode *inode;
        struct cgroup_subsys_state *css;
-       u64 update_before;
        int ret = 0;
 
-       /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
-       if (!cfts || ss->root == &cgroup_dummy_root ||
-           !atomic_inc_not_zero(&sb->s_active)) {
-               mutex_unlock(&cgroup_mutex);
-               return 0;
-       }
-
-       /*
-        * All cgroups which are created after we drop cgroup_mutex will
-        * have the updated set of files, so we only need to update the
-        * cgroups created before the current @cgroup_serial_nr_next.
-        */
-       update_before = cgroup_serial_nr_next;
+       lockdep_assert_held(&cgroup_tree_mutex);
 
-       mutex_unlock(&cgroup_mutex);
+       /* don't bother if @ss isn't attached */
+       if (ss->root == &cgroup_dummy_root)
+               return 0;
 
        /* add/rm files for all cgroups created before */
-       rcu_read_lock();
        css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
                struct cgroup *cgrp = css->cgroup;
 
                if (cgroup_is_dead(cgrp))
                        continue;
 
-               inode = cgrp->dentry->d_inode;
-               dget(cgrp->dentry);
-               rcu_read_unlock();
-
-               dput(prev);
-               prev = cgrp->dentry;
-
-               mutex_lock(&inode->i_mutex);
-               mutex_lock(&cgroup_mutex);
-               if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
-                       ret = cgroup_addrm_files(cgrp, cfts, is_add);
-               mutex_unlock(&cgroup_mutex);
-               mutex_unlock(&inode->i_mutex);
-
-               rcu_read_lock();
+               ret = cgroup_addrm_files(cgrp, cfts, is_add);
                if (ret)
                        break;
        }
-       rcu_read_unlock();
-       dput(prev);
-       deactivate_super(sb);
+
+       if (is_add && !ret)
+               kernfs_activate(root->kn);
        return ret;
 }
 
-/**
- * cgroup_add_cftypes - add an array of cftypes to a subsystem
- * @ss: target cgroup subsystem
- * @cfts: zero-length name terminated array of cftypes
- *
- * Register @cfts to @ss.  Files described by @cfts are created for all
- * existing cgroups to which @ss is attached and all future cgroups will
- * have them too.  This function can be called anytime whether @ss is
- * attached or not.
- *
- * Returns 0 on successful registration, -errno on failure.  Note that this
- * function currently returns 0 as long as @cfts registration is successful
- * even if some file creation attempts on existing cgroups fail.
- */
-int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+static void cgroup_exit_cftypes(struct cftype *cfts)
 {
-       struct cftype_set *set;
        struct cftype *cft;
-       int ret;
 
-       set = kzalloc(sizeof(*set), GFP_KERNEL);
-       if (!set)
-               return -ENOMEM;
+       for (cft = cfts; cft->name[0] != '\0'; cft++) {
+               /* free copy for custom atomic_write_len, see init_cftypes() */
+               if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
+                       kfree(cft->kf_ops);
+               cft->kf_ops = NULL;
+               cft->ss = NULL;
+       }
+}
+
+static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+{
+       struct cftype *cft;
+
+       for (cft = cfts; cft->name[0] != '\0'; cft++) {
+               struct kernfs_ops *kf_ops;
+
+               WARN_ON(cft->ss || cft->kf_ops);
+
+               if (cft->seq_start)
+                       kf_ops = &cgroup_kf_ops;
+               else
+                       kf_ops = &cgroup_kf_single_ops;
+
+               /*
+                * Ugh... if @cft wants a custom max_write_len, we need to
+                * make a copy of kf_ops to set its atomic_write_len.
+                */
+               if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
+                       kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
+                       if (!kf_ops) {
+                               cgroup_exit_cftypes(cfts);
+                               return -ENOMEM;
+                       }
+                       kf_ops->atomic_write_len = cft->max_write_len;
+               }
 
-       for (cft = cfts; cft->name[0] != '\0'; cft++)
+               cft->kf_ops = kf_ops;
                cft->ss = ss;
+       }
 
-       cgroup_cfts_prepare();
-       set->cfts = cfts;
-       list_add_tail(&set->node, &ss->cftsets);
-       ret = cgroup_cfts_commit(cfts, true);
-       if (ret)
-               cgroup_rm_cftypes(cfts);
-       return ret;
+       return 0;
+}
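To make the kmemdup() branch above concrete: a hypothetical cftype asking
for a write buffer larger than PAGE_SIZE forces a private kf_ops copy,
because atomic_write_len lives inside the (otherwise shared) ops table:

static int example_write_string(struct cgroup_subsys_state *css,
                                struct cftype *cft, const char *buf);

static struct cftype example_big_cfts[] = {
        {
                .name           = "example.big_config",
                .max_write_len  = 2 * PAGE_SIZE, /* != PAGE_SIZE: kf_ops copied */
                .write_string   = example_write_string,
        },
        { }     /* terminate */
};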
+
+static int cgroup_rm_cftypes_locked(struct cftype *cfts)
+{
+       lockdep_assert_held(&cgroup_tree_mutex);
+
+       if (!cfts || !cfts[0].ss)
+               return -ENOENT;
+
+       list_del(&cfts->node);
+       cgroup_apply_cftypes(cfts, false);
+       cgroup_exit_cftypes(cfts);
+       return 0;
 }
-EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
 
 /**
  * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
@@ -2929,25 +2304,47 @@ EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
  */
 int cgroup_rm_cftypes(struct cftype *cfts)
 {
-       struct cftype_set *set;
+       int ret;
+
+       mutex_lock(&cgroup_tree_mutex);
+       ret = cgroup_rm_cftypes_locked(cfts);
+       mutex_unlock(&cgroup_tree_mutex);
+       return ret;
+}
+
+/**
+ * cgroup_add_cftypes - add an array of cftypes to a subsystem
+ * @ss: target cgroup subsystem
+ * @cfts: zero-length name terminated array of cftypes
+ *
+ * Register @cfts to @ss.  Files described by @cfts are created for all
+ * existing cgroups to which @ss is attached and all future cgroups will
+ * have them too.  This function can be called anytime whether @ss is
+ * attached or not.
+ *
+ * Returns 0 on successful registration, -errno on failure.  Note that this
+ * function currently returns 0 as long as @cfts registration is successful
+ * even if some file creation attempts on existing cgroups fail.
+ */
+int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+{
+       int ret;
 
-       if (!cfts || !cfts[0].ss)
-               return -ENOENT;
+       ret = cgroup_init_cftypes(ss, cfts);
+       if (ret)
+               return ret;
 
-       cgroup_cfts_prepare();
+       mutex_lock(&cgroup_tree_mutex);
 
-       list_for_each_entry(set, &cfts[0].ss->cftsets, node) {
-               if (set->cfts == cfts) {
-                       list_del(&set->node);
-                       kfree(set);
-                       cgroup_cfts_commit(cfts, false);
-                       return 0;
-               }
-       }
+       list_add_tail(&cfts->node, &ss->cfts);
+       ret = cgroup_apply_cftypes(cfts, true);
+       if (ret)
+               cgroup_rm_cftypes_locked(cfts);
 
-       cgroup_cfts_commit(NULL, false);
-       return -ENOENT;
+       mutex_unlock(&cgroup_tree_mutex);
+       return ret;
 }
+EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
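Registration then becomes a one-liner from a subsystem's init path; a
sketch assuming the hypothetical cftype arrays above and a hypothetical
example_subsys:

static int __init example_register_files(void)
{
        int ret;

        /* files appear in all current and future cgroups of the hierarchy */
        ret = cgroup_add_cftypes(&example_subsys, example_iter_cfts);
        if (ret)
                return ret;
        return cgroup_add_cftypes(&example_subsys, example_big_cfts);
}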
 
 /**
  * cgroup_task_count - count the number of tasks in a cgroup.
@@ -2971,13 +2368,19 @@ int cgroup_task_count(const struct cgroup *cgrp)
  * To reduce the fork() overhead for systems that are not actually using
  * their cgroups capability, we don't maintain the lists running through
  * each css_set to its tasks until we see the list actually used - in other
- * words after the first call to css_task_iter_start().
+ * words after the first mount.
  */
 static void cgroup_enable_task_cg_lists(void)
 {
        struct task_struct *p, *g;
+
        write_lock(&css_set_lock);
-       use_task_css_set_links = 1;
+
+       if (use_task_css_set_links)
+               goto out_unlock;
+
+       use_task_css_set_links = true;
+
        /*
         * We need tasklist_lock because RCU is not safe against
         * while_each_thread(). Besides, a forking task that has passed
@@ -2988,16 +2391,22 @@ static void cgroup_enable_task_cg_lists(void)
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                task_lock(p);
+
+               WARN_ON_ONCE(!list_empty(&p->cg_list) ||
+                            task_css_set(p) != &init_css_set);
+
                /*
                 * We should check if the process is exiting, otherwise
                 * it will race with cgroup_exit() in that the list
                 * entry won't be deleted though the process has exited.
                 */
-               if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
+               if (!(p->flags & PF_EXITING))
                        list_add(&p->cg_list, &task_css_set(p)->tasks);
+
                task_unlock(p);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
+out_unlock:
        write_unlock(&css_set_lock);
 }
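The do_each_thread()/while_each_thread() pair used above is the standard
idiom of this era for visiting every task in the system. The same walk in
isolation, as a hypothetical counter (assuming the caller holds no other
locks):

static int example_count_all_tasks(void)
{
        struct task_struct *g, *p;
        int n = 0;

        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                n++;
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);

        return n;
}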
 
@@ -3007,9 +2416,9 @@ static void cgroup_enable_task_cg_lists(void)
  * @parent_css: css whose children to walk
  *
  * This function returns the next child of @parent_css and should be called
- * under RCU read lock.  The only requirement is that @parent_css and
- * @pos_css are accessible.  The next sibling is guaranteed to be returned
- * regardless of their states.
+ * under either cgroup_mutex or RCU read lock.  The only requirement is
+ * that @parent_css and @pos_css are accessible.  The next sibling is
+ * guaranteed to be returned regardless of their states.
  */
 struct cgroup_subsys_state *
 css_next_child(struct cgroup_subsys_state *pos_css,
@@ -3019,7 +2428,7 @@ css_next_child(struct cgroup_subsys_state *pos_css,
        struct cgroup *cgrp = parent_css->cgroup;
        struct cgroup *next;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       cgroup_assert_mutexes_or_rcu_locked();
 
        /*
         * @pos could already have been removed.  Once a cgroup is removed,
@@ -3066,10 +2475,10 @@ EXPORT_SYMBOL_GPL(css_next_child);
  * to visit for pre-order traversal of @root's descendants.  @root is
  * included in the iteration and the first node to be visited.
  *
- * While this function requires RCU read locking, it doesn't require the
- * whole traversal to be contained in a single RCU critical section.  This
- * function will return the correct next descendant as long as both @pos
- * and @root are accessible and @pos is a descendant of @root.
+ * While this function requires cgroup_mutex or RCU read locking, it
+ * doesn't require the whole traversal to be contained in a single critical
+ * section.  This function will return the correct next descendant as long
+ * as both @pos and @root are accessible and @pos is a descendant of @root.
  */
 struct cgroup_subsys_state *
 css_next_descendant_pre(struct cgroup_subsys_state *pos,
@@ -3077,7 +2486,7 @@ css_next_descendant_pre(struct cgroup_subsys_state *pos,
 {
        struct cgroup_subsys_state *next;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       cgroup_assert_mutexes_or_rcu_locked();
 
        /* if first iteration, visit @root */
        if (!pos)
@@ -3108,17 +2517,17 @@ EXPORT_SYMBOL_GPL(css_next_descendant_pre);
  * is returned.  This can be used during pre-order traversal to skip
  * subtree of @pos.
  *
- * While this function requires RCU read locking, it doesn't require the
- * whole traversal to be contained in a single RCU critical section.  This
- * function will return the correct rightmost descendant as long as @pos is
- * accessible.
+ * While this function requires cgroup_mutex or RCU read locking, it
+ * doesn't require the whole traversal to be contained in a single critical
+ * section.  This function will return the correct rightmost descendant as
+ * long as @pos is accessible.
  */
 struct cgroup_subsys_state *
 css_rightmost_descendant(struct cgroup_subsys_state *pos)
 {
        struct cgroup_subsys_state *last, *tmp;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       cgroup_assert_mutexes_or_rcu_locked();
 
        do {
                last = pos;
@@ -3154,10 +2563,11 @@ css_leftmost_descendant(struct cgroup_subsys_state *pos)
  * to visit for post-order traversal of @root's descendants.  @root is
  * included in the iteration and the last node to be visited.
  *
- * While this function requires RCU read locking, it doesn't require the
- * whole traversal to be contained in a single RCU critical section.  This
- * function will return the correct next descendant as long as both @pos
- * and @cgroup are accessible and @pos is a descendant of @cgroup.
+ * While this function requires cgroup_mutex or RCU read locking, it
+ * doesn't require the whole traversal to be contained in a single critical
+ * section.  This function will return the correct next descendant as long
+ * as both @pos and @cgroup are accessible and @pos is a descendant of
+ * @cgroup.
  */
 struct cgroup_subsys_state *
 css_next_descendant_post(struct cgroup_subsys_state *pos,
@@ -3165,7 +2575,7 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
 {
        struct cgroup_subsys_state *next;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       cgroup_assert_mutexes_or_rcu_locked();
 
        /* if first iteration, visit leftmost descendant which may be @root */
        if (!pos)
@@ -3229,13 +2639,8 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
                         struct css_task_iter *it)
        __acquires(css_set_lock)
 {
-       /*
-        * The first time anyone tries to iterate across a css, we need to
-        * enable the list linking each css_set to its tasks, and fix up
-        * all existing tasks.
-        */
-       if (!use_task_css_set_links)
-               cgroup_enable_task_cg_lists();
+       /* no one should try to iterate before mounting cgroups */
+       WARN_ON_ONCE(!use_task_css_set_links);
 
        read_lock(&css_set_lock);
 
@@ -3504,14 +2909,12 @@ struct cgroup_pidlist {
        pid_t *list;
        /* how many elements the above list has */
        int length;
-       /* how many files are using the current array */
-       int use_count;
        /* each of these stored in a list by its cgroup */
        struct list_head links;
        /* pointer to the cgroup we belong to, for list removal purposes */
        struct cgroup *owner;
-       /* protects the other fields */
-       struct rw_semaphore rwsem;
+       /* for delayed destruction */
+       struct delayed_work destroy_dwork;
 };
 
 /*
@@ -3527,6 +2930,7 @@ static void *pidlist_allocate(int count)
        else
                return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
 }
+
 static void pidlist_free(void *p)
 {
        if (is_vmalloc_addr(p))
@@ -3535,6 +2939,47 @@ static void pidlist_free(void *p)
                kfree(p);
 }
 
+/*
+ * Used to destroy all pidlists still lingering on the destroy timer.  None
+ * should be left afterwards.
+ */
+static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
+{
+       struct cgroup_pidlist *l, *tmp_l;
+
+       mutex_lock(&cgrp->pidlist_mutex);
+       list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
+               mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
+       mutex_unlock(&cgrp->pidlist_mutex);
+
+       flush_workqueue(cgroup_pidlist_destroy_wq);
+       BUG_ON(!list_empty(&cgrp->pidlists));
+}
+
+static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
+                                               destroy_dwork);
+       struct cgroup_pidlist *tofree = NULL;
+
+       mutex_lock(&l->owner->pidlist_mutex);
+
+       /*
+        * Destroy iff we didn't get queued again.  The state won't change
+        * as destroy_dwork can only be queued while locked.
+        */
+       if (!delayed_work_pending(dwork)) {
+               list_del(&l->links);
+               pidlist_free(l->list);
+               put_pid_ns(l->key.ns);
+               tofree = l;
+       }
+
+       mutex_unlock(&l->owner->pidlist_mutex);
+       kfree(tofree);
+}
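The linger-then-destroy idiom above generalizes: users re-arm the delayed
work on every release, and the callback frees the object only if nobody
re-armed it while the callback was waiting for the lock. A stripped-down
sketch on a hypothetical cache entry (only the workqueue calls are real
kernel API):

struct example_entry {
        struct list_head node;
        struct delayed_work dwork;
};

static DEFINE_MUTEX(example_lock);

static void example_put(struct example_entry *e)
{
        /* every release pushes destruction out by another second */
        mod_delayed_work(system_wq, &e->dwork, HZ);
}

static void example_destroy_fn(struct work_struct *work)
{
        struct example_entry *e = container_of(to_delayed_work(work),
                                               struct example_entry, dwork);
        bool free_it;

        mutex_lock(&example_lock);
        /* destroy iff we weren't re-armed while waiting for the lock */
        free_it = !delayed_work_pending(&e->dwork);
        if (free_it)
                list_del(&e->node);
        mutex_unlock(&example_lock);

        if (free_it)
                kfree(e);
}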
+
 /*
  * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
  * Returns the number of unique elements.
@@ -3565,52 +3010,92 @@ after:
        return dest;
 }
 
+/*
+ * The two pid files - tasks and cgroup.procs - guaranteed that the result
+ * is sorted, which forced this whole pidlist fiasco.  As pid order is
+ * different per namespace, each namespace needs a differently sorted list,
+ * making it impossible to use, for example, a single rbtree of member tasks
+ * sorted by task pointer.  As pidlists can be fairly large, allocating one
+ * per open file is dangerous, so cgroup had to implement a shared pool of
+ * pidlists keyed by cgroup and namespace.
+ *
+ * All this extra complexity was caused by the original implementation
+ * committing to an entirely unnecessary property.  In the long term, we
+ * want to do away with it.  Explicitly scramble the sort order if
+ * sane_behavior so that no such expectation exists in the new interface.
+ *
+ * Scrambling is done by swapping every two consecutive bits, which is a
+ * non-identity one-to-one mapping that disturbs the sort order sufficiently.
+ */
+static pid_t pid_fry(pid_t pid)
+{
+       unsigned a = pid & 0x55555555;
+       unsigned b = pid & 0xAAAAAAAA;
+
+       return (a << 1) | (b >> 1);
+}
+
+static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
+{
+       if (cgroup_sane_behavior(cgrp))
+               return pid_fry(pid);
+       else
+               return pid;
+}
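pid_fry() is an involution: swapping adjacent bit pairs twice restores the
original pid, so the mapping is one-to-one as the comment claims. A quick
user-space check (a standalone demo program, not kernel code):

#include <assert.h>
#include <stdio.h>

static unsigned fry(unsigned pid)
{
        unsigned a = pid & 0x55555555;
        unsigned b = pid & 0xAAAAAAAA;

        return (a << 1) | (b >> 1);
}

int main(void)
{
        unsigned pid;

        for (pid = 1; pid < 1000000; pid++)
                assert(fry(fry(pid)) == pid);   /* involutive, hence bijective */

        /* sort order is visibly disturbed: 1 2 3 4 -> 2 1 3 8 */
        for (pid = 1; pid <= 4; pid++)
                printf("%u -> %u\n", pid, fry(pid));

        return 0;
}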
+
 static int cmppid(const void *a, const void *b)
 {
        return *(pid_t *)a - *(pid_t *)b;
 }
 
+static int fried_cmppid(const void *a, const void *b)
+{
+       return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
+}
+
+static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
+                                                 enum cgroup_filetype type)
+{
+       struct cgroup_pidlist *l;
+       /* don't need task_nsproxy() if we're looking at ourselves */
+       struct pid_namespace *ns = task_active_pid_ns(current);
+
+       lockdep_assert_held(&cgrp->pidlist_mutex);
+
+       list_for_each_entry(l, &cgrp->pidlists, links)
+               if (l->key.type == type && l->key.ns == ns)
+                       return l;
+       return NULL;
+}
+
 /*
  * Find the appropriate pidlist for our purpose (given procs vs tasks),
  * creating it if necessary.  Returns the pidlist, or NULL if we're out
  * of memory.  The caller must hold cgrp->pidlist_mutex throughout.
  */
-static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
-                                                 enum cgroup_filetype type)
+static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
+                                               enum cgroup_filetype type)
 {
        struct cgroup_pidlist *l;
-       /* don't need task_nsproxy() if we're looking at ourself */
-       struct pid_namespace *ns = task_active_pid_ns(current);
 
-       /*
-        * We can't drop the pidlist_mutex before taking the l->rwsem in case
-        * the last ref-holder is trying to remove l from the list at the same
-        * time. Holding the pidlist_mutex precludes somebody taking whichever
-        * list we find out from under us - compare release_pid_array().
-        */
-       mutex_lock(&cgrp->pidlist_mutex);
-       list_for_each_entry(l, &cgrp->pidlists, links) {
-               if (l->key.type == type && l->key.ns == ns) {
-                       /* make sure l doesn't vanish out from under us */
-                       down_write(&l->rwsem);
-                       mutex_unlock(&cgrp->pidlist_mutex);
-                       return l;
-               }
-       }
+       lockdep_assert_held(&cgrp->pidlist_mutex);
+
+       l = cgroup_pidlist_find(cgrp, type);
+       if (l)
+               return l;
+
        /* entry not found; create a new one */
        l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
-       if (!l) {
-               mutex_unlock(&cgrp->pidlist_mutex);
+       if (!l)
                return l;
-       }
-       init_rwsem(&l->rwsem);
-       down_write(&l->rwsem);
+
+       INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
        l->key.type = type;
-       l->key.ns = get_pid_ns(ns);
+       /* don't need task_nsproxy() if we're looking at ourselves */
+       l->key.ns = get_pid_ns(task_active_pid_ns(current));
        l->owner = cgrp;
        list_add(&l->links, &cgrp->pidlists);
-       mutex_unlock(&cgrp->pidlist_mutex);
        return l;
 }
 
@@ -3627,6 +3112,8 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
        struct task_struct *tsk;
        struct cgroup_pidlist *l;
 
+       lockdep_assert_held(&cgrp->pidlist_mutex);
+
        /*
         * If cgroup gets more users after we read count, we won't have
         * enough space - tough.  This race is indistinguishable to the
@@ -3653,20 +3140,24 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
        css_task_iter_end(&it);
        length = n;
        /* now sort & (if procs) strip out duplicates */
-       sort(array, length, sizeof(pid_t), cmppid, NULL);
+       if (cgroup_sane_behavior(cgrp))
+               sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
+       else
+               sort(array, length, sizeof(pid_t), cmppid, NULL);
        if (type == CGROUP_FILE_PROCS)
                length = pidlist_uniq(array, length);
-       l = cgroup_pidlist_find(cgrp, type);
+
+       l = cgroup_pidlist_find_create(cgrp, type);
        if (!l) {
+               mutex_unlock(&cgrp->pidlist_mutex);
                pidlist_free(array);
                return -ENOMEM;
        }
-       /* store array, freeing old if necessary - lock already held */
+
+       /* store array, freeing old if necessary */
        pidlist_free(l->list);
        l->list = array;
        l->length = length;
-       l->use_count++;
-       up_write(&l->rwsem);
        *lp = l;
        return 0;
 }
@@ -3682,21 +3173,27 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
  */
 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
 {
-       int ret = -EINVAL;
+       struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
        struct cgroup *cgrp;
        struct css_task_iter it;
        struct task_struct *tsk;
 
+       /* it should be a kernfs_node belonging to cgroupfs and a directory */
+       if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
+           kernfs_type(kn) != KERNFS_DIR)
+               return -EINVAL;
+
        /*
-        * Validate dentry by checking the superblock operations,
-        * and make sure it's a directory.
+        * We aren't being called from kernfs and there's no guarantee on
+        * @kn->priv's validity.  For this and css_tryget_from_dir(),
+        * @kn->priv is RCU safe.  Let's do the RCU dancing.
         */
-       if (dentry->d_sb->s_op != &cgroup_ops ||
-           !S_ISDIR(dentry->d_inode->i_mode))
-                goto err;
-
-       ret = 0;
-       cgrp = dentry->d_fsdata;
+       rcu_read_lock();
+       cgrp = rcu_dereference(kn->priv);
+       if (!cgrp) {
+               rcu_read_unlock();
+               return -ENOENT;
+       }
 
        css_task_iter_start(&cgrp->dummy_css, &it);
        while ((tsk = css_task_iter_next(&it))) {
@@ -3721,8 +3218,8 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
        }
        css_task_iter_end(&it);
 
-err:
-       return ret;
+       rcu_read_unlock();
+       return 0;
 }
 
 
@@ -3740,20 +3237,45 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
         * after a seek to the start). Use a binary-search to find the
         * next pid to display, if any
         */
-       struct cgroup_pidlist *l = s->private;
+       struct kernfs_open_file *of = s->private;
+       struct cgroup *cgrp = seq_css(s)->cgroup;
+       struct cgroup_pidlist *l;
+       enum cgroup_filetype type = seq_cft(s)->private;
        int index = 0, pid = *pos;
-       int *iter;
+       int *iter, ret;
+
+       mutex_lock(&cgrp->pidlist_mutex);
+
+       /*
+        * !NULL @of->priv indicates that this isn't the first start()
+        * after open.  If the matching pidlist is around, we can use that.
+        * Look for it.  Note that @of->priv can't be used directly.  It
+        * could already have been destroyed.
+        */
+       if (of->priv)
+               of->priv = cgroup_pidlist_find(cgrp, type);
+
+       /*
+        * Either this is the first start() after open or the matching
+        * pidlist has been destroyed in between.  Create a new one.
+        */
+       if (!of->priv) {
+               ret = pidlist_array_load(cgrp, type,
+                                        (struct cgroup_pidlist **)&of->priv);
+               if (ret)
+                       return ERR_PTR(ret);
+       }
+       l = of->priv;
 
-       down_read(&l->rwsem);
        if (pid) {
                int end = l->length;
 
                while (index < end) {
                        int mid = (index + end) / 2;
-                       if (l->list[mid] == pid) {
+                       if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
                                index = mid;
                                break;
-                       } else if (l->list[mid] <= pid)
+                       } else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
                                index = mid + 1;
                        else
                                end = mid;
@@ -3764,19 +3286,25 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
                return NULL;
        /* Update the abstract position to be the actual pid that we found */
        iter = l->list + index;
-       *pos = *iter;
+       *pos = cgroup_pid_fry(cgrp, *iter);
        return iter;
 }
 
 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
 {
-       struct cgroup_pidlist *l = s->private;
-       up_read(&l->rwsem);
+       struct kernfs_open_file *of = s->private;
+       struct cgroup_pidlist *l = of->priv;
+
+       if (l)
+               mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
+                                CGROUP_PIDLIST_DESTROY_DELAY);
+       mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
 }
 
 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct cgroup_pidlist *l = s->private;
+       struct kernfs_open_file *of = s->private;
+       struct cgroup_pidlist *l = of->priv;
        pid_t *p = v;
        pid_t *end = l->list + l->length;
        /*
@@ -3787,7 +3315,7 @@ static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
        if (p >= end) {
                return NULL;
        } else {
-               *pos = *p;
+               *pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
                return p;
        }
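The seq_file position used by start() and next() above is the (possibly
fried) pid value itself, not an array index, so a reread that restarts
midway re-finds its place by binary search even if pids disappeared in
between. A user-space sketch of that resume-by-value convention
(illustrative, not kernel code):

#include <stdio.h>

/* first index whose pid is >= pos, as a restart point */
static int resume_index(const int *list, int len, int pos)
{
        int lo = 0, hi = len;

        while (lo < hi) {
                int mid = (lo + hi) / 2;

                if (list[mid] == pos)
                        return mid;
                else if (list[mid] < pos)
                        lo = mid + 1;
                else
                        hi = mid;
        }
        return lo;
}

int main(void)
{
        int pids[] = { 3, 21, 40 };

        /* pid 8 was read last but has since exited; resume lands on 21 */
        printf("%d\n", pids[resume_index(pids, 3, 8)]);
        return 0;
}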
 }
@@ -3804,324 +3332,25 @@ static int cgroup_pidlist_show(struct seq_file *s, void *v)
 static const struct seq_operations cgroup_pidlist_seq_operations = {
        .start = cgroup_pidlist_start,
        .stop = cgroup_pidlist_stop,
-       .next = cgroup_pidlist_next,
-       .show = cgroup_pidlist_show,
-};
-
-static void cgroup_release_pid_array(struct cgroup_pidlist *l)
-{
-       /*
-        * the case where we're the last user of this particular pidlist will
-        * have us remove it from the cgroup's list, which entails taking the
-        * mutex. since in pidlist_find the pidlist->lock depends on cgroup->
-        * pidlist_mutex, we have to take pidlist_mutex first.
-        */
-       mutex_lock(&l->owner->pidlist_mutex);
-       down_write(&l->rwsem);
-       BUG_ON(!l->use_count);
-       if (!--l->use_count) {
-               /* we're the last user if refcount is 0; remove and free */
-               list_del(&l->links);
-               mutex_unlock(&l->owner->pidlist_mutex);
-               pidlist_free(l->list);
-               put_pid_ns(l->key.ns);
-               up_write(&l->rwsem);
-               kfree(l);
-               return;
-       }
-       mutex_unlock(&l->owner->pidlist_mutex);
-       up_write(&l->rwsem);
-}
-
-static int cgroup_pidlist_release(struct inode *inode, struct file *file)
-{
-       struct cgroup_pidlist *l;
-       if (!(file->f_mode & FMODE_READ))
-               return 0;
-       /*
-        * the seq_file will only be initialized if the file was opened for
-        * reading; hence we check if it's not null only in that case.
-        */
-       l = ((struct seq_file *)file->private_data)->private;
-       cgroup_release_pid_array(l);
-       return seq_release(inode, file);
-}
-
-static const struct file_operations cgroup_pidlist_operations = {
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .write = cgroup_file_write,
-       .release = cgroup_pidlist_release,
-};
-
-/*
- * The following functions handle opens on a file that displays a pidlist
- * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
- * in the cgroup.
- */
-/* helper function for the two below it */
-static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
-{
-       struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
-       struct cgroup_pidlist *l;
-       int retval;
-
-       /* Nothing to do for write-only files */
-       if (!(file->f_mode & FMODE_READ))
-               return 0;
-
-       /* have the array populated */
-       retval = pidlist_array_load(cgrp, type, &l);
-       if (retval)
-               return retval;
-       /* configure file information */
-       file->f_op = &cgroup_pidlist_operations;
-
-       retval = seq_open(file, &cgroup_pidlist_seq_operations);
-       if (retval) {
-               cgroup_release_pid_array(l);
-               return retval;
-       }
-       ((struct seq_file *)file->private_data)->private = l;
-       return 0;
-}
-static int cgroup_tasks_open(struct inode *unused, struct file *file)
-{
-       return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
-}
-static int cgroup_procs_open(struct inode *unused, struct file *file)
-{
-       return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
-}
-
-static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
-                                        struct cftype *cft)
-{
-       return notify_on_release(css->cgroup);
-}
-
-static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
-                                         struct cftype *cft, u64 val)
-{
-       clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
-       if (val)
-               set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
-       else
-               clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
-       return 0;
-}
-
-/*
- * When dput() is called asynchronously, if umount has been done and
- * then deactivate_super() in cgroup_free_fn() kills the superblock,
- * there's a small window that vfs will see the root dentry with non-zero
- * refcnt and trigger BUG().
- *
- * That's why we hold a reference before dput() and drop it right after.
- */
-static void cgroup_dput(struct cgroup *cgrp)
-{
-       struct super_block *sb = cgrp->root->sb;
-
-       atomic_inc(&sb->s_active);
-       dput(cgrp->dentry);
-       deactivate_super(sb);
-}
-
-/*
- * Unregister event and free resources.
- *
- * Gets called from workqueue.
- */
-static void cgroup_event_remove(struct work_struct *work)
-{
-       struct cgroup_event *event = container_of(work, struct cgroup_event,
-                       remove);
-       struct cgroup_subsys_state *css = event->css;
-
-       remove_wait_queue(event->wqh, &event->wait);
-
-       event->cft->unregister_event(css, event->cft, event->eventfd);
-
-       /* Notify userspace the event is going away. */
-       eventfd_signal(event->eventfd, 1);
-
-       eventfd_ctx_put(event->eventfd);
-       kfree(event);
-       css_put(css);
-}
-
-/*
- * Gets called on POLLHUP on eventfd when user closes it.
- *
- * Called with wqh->lock held and interrupts disabled.
- */
-static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
-               int sync, void *key)
-{
-       struct cgroup_event *event = container_of(wait,
-                       struct cgroup_event, wait);
-       struct cgroup *cgrp = event->css->cgroup;
-       unsigned long flags = (unsigned long)key;
-
-       if (flags & POLLHUP) {
-               /*
-                * If the event has been detached at cgroup removal, we
-                * can simply return knowing the other side will cleanup
-                * for us.
-                *
-                * We can't race against event freeing since the other
-                * side will require wqh->lock via remove_wait_queue(),
-                * which we hold.
-                */
-               spin_lock(&cgrp->event_list_lock);
-               if (!list_empty(&event->list)) {
-                       list_del_init(&event->list);
-                       /*
-                        * We are in atomic context, but cgroup_event_remove()
-                        * may sleep, so we have to call it in workqueue.
-                        */
-                       schedule_work(&event->remove);
-               }
-               spin_unlock(&cgrp->event_list_lock);
-       }
-
-       return 0;
-}
-
-static void cgroup_event_ptable_queue_proc(struct file *file,
-               wait_queue_head_t *wqh, poll_table *pt)
-{
-       struct cgroup_event *event = container_of(pt,
-                       struct cgroup_event, pt);
-
-       event->wqh = wqh;
-       add_wait_queue(wqh, &event->wait);
-}
-
-/*
- * Parse input and register new cgroup event handler.
- *
- * Input must be in format '<event_fd> <control_fd> <args>'.
- * Interpretation of args is defined by control file implementation.
- */
-static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
-                                     struct cftype *cft, const char *buffer)
-{
-       struct cgroup *cgrp = dummy_css->cgroup;
-       struct cgroup_event *event;
-       struct cgroup_subsys_state *cfile_css;
-       unsigned int efd, cfd;
-       struct fd efile;
-       struct fd cfile;
-       char *endp;
-       int ret;
-
-       efd = simple_strtoul(buffer, &endp, 10);
-       if (*endp != ' ')
-               return -EINVAL;
-       buffer = endp + 1;
-
-       cfd = simple_strtoul(buffer, &endp, 10);
-       if ((*endp != ' ') && (*endp != '\0'))
-               return -EINVAL;
-       buffer = endp + 1;
-
-       event = kzalloc(sizeof(*event), GFP_KERNEL);
-       if (!event)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&event->list);
-       init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
-       init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
-       INIT_WORK(&event->remove, cgroup_event_remove);
-
-       efile = fdget(efd);
-       if (!efile.file) {
-               ret = -EBADF;
-               goto out_kfree;
-       }
-
-       event->eventfd = eventfd_ctx_fileget(efile.file);
-       if (IS_ERR(event->eventfd)) {
-               ret = PTR_ERR(event->eventfd);
-               goto out_put_efile;
-       }
-
-       cfile = fdget(cfd);
-       if (!cfile.file) {
-               ret = -EBADF;
-               goto out_put_eventfd;
-       }
-
-       /* the process needs read permission on the control file */
-       /* AV: shouldn't we check that it's been opened for read instead? */
-       ret = inode_permission(file_inode(cfile.file), MAY_READ);
-       if (ret < 0)
-               goto out_put_cfile;
-
-       event->cft = __file_cft(cfile.file);
-       if (IS_ERR(event->cft)) {
-               ret = PTR_ERR(event->cft);
-               goto out_put_cfile;
-       }
-
-       if (!event->cft->ss) {
-               ret = -EBADF;
-               goto out_put_cfile;
-       }
-
-       /*
-        * Determine the css of @cfile, verify it belongs to the same
-        * cgroup as cgroup.event_control, and associate @event with it.
-        * Remaining events are automatically removed on cgroup destruction
-        * but the removal is asynchronous, so take an extra ref.
-        */
-       rcu_read_lock();
-
-       ret = -EINVAL;
-       event->css = cgroup_css(cgrp, event->cft->ss);
-       cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, event->cft->ss);
-       if (event->css && event->css == cfile_css && css_tryget(event->css))
-               ret = 0;
-
-       rcu_read_unlock();
-       if (ret)
-               goto out_put_cfile;
-
-       if (!event->cft->register_event || !event->cft->unregister_event) {
-               ret = -EINVAL;
-               goto out_put_css;
-       }
-
-       ret = event->cft->register_event(event->css, event->cft,
-                       event->eventfd, buffer);
-       if (ret)
-               goto out_put_css;
-
-       efile.file->f_op->poll(efile.file, &event->pt);
-
-       spin_lock(&cgrp->event_list_lock);
-       list_add(&event->list, &cgrp->event_list);
-       spin_unlock(&cgrp->event_list_lock);
+       .next = cgroup_pidlist_next,
+       .show = cgroup_pidlist_show,
+};
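
The pidlist files are moving off a private open/release pair onto the stock seq_file iterator quartet. For reference, a minimal sketch of that contract; the demo_* names are illustrative and not part of this patch:

#include <linux/kernel.h>
#include <linux/seq_file.h>

static int demo_vals[] = { 1, 2, 3 };

static void *demo_start(struct seq_file *s, loff_t *pos)
{
	/* take any lock protecting the array here */
	return *pos < ARRAY_SIZE(demo_vals) ? &demo_vals[*pos] : NULL;
}

static void *demo_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return *pos < ARRAY_SIZE(demo_vals) ? &demo_vals[*pos] : NULL;
}

static void demo_stop(struct seq_file *s, void *v)
{
	/* drop whatever ->start() acquired */
}

static int demo_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

->start() returns the first element for *pos, ->next() advances, ->stop() undoes whatever ->start() acquired, and ->show() formats exactly one element.
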
 
-       fdput(cfile);
-       fdput(efile);
+static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
+                                        struct cftype *cft)
+{
+       return notify_on_release(css->cgroup);
+}
 
+static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
+                                         struct cftype *cft, u64 val)
+{
+       clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
+       if (val)
+               set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
+       else
+               clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        return 0;
-
-out_put_css:
-       css_put(event->css);
-out_put_cfile:
-       fdput(cfile);
-out_put_eventfd:
-       eventfd_ctx_put(event->eventfd);
-out_put_efile:
-       fdput(efile);
-out_kfree:
-       kfree(event);
-
-       return ret;
 }
 
 static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
@@ -4143,16 +3372,14 @@ static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
 static struct cftype cgroup_base_files[] = {
        {
                .name = "cgroup.procs",
-               .open = cgroup_procs_open,
+               .seq_start = cgroup_pidlist_start,
+               .seq_next = cgroup_pidlist_next,
+               .seq_stop = cgroup_pidlist_stop,
+               .seq_show = cgroup_pidlist_show,
+               .private = CGROUP_FILE_PROCS,
                .write_u64 = cgroup_procs_write,
-               .release = cgroup_pidlist_release,
                .mode = S_IRUGO | S_IWUSR,
        },
-       {
-               .name = "cgroup.event_control",
-               .write_string = cgroup_write_event_control,
-               .mode = S_IWUGO,
-       },
        {
                .name = "cgroup.clone_children",
                .flags = CFTYPE_INSANE,
@@ -4162,7 +3389,7 @@ static struct cftype cgroup_base_files[] = {
        {
                .name = "cgroup.sane_behavior",
                .flags = CFTYPE_ONLY_ON_ROOT,
-               .read_seq_string = cgroup_sane_behavior_show,
+               .seq_show = cgroup_sane_behavior_show,
        },
 
        /*
@@ -4173,9 +3400,12 @@ static struct cftype cgroup_base_files[] = {
        {
                .name = "tasks",
                .flags = CFTYPE_INSANE,         /* use "procs" instead */
-               .open = cgroup_tasks_open,
+               .seq_start = cgroup_pidlist_start,
+               .seq_next = cgroup_pidlist_next,
+               .seq_stop = cgroup_pidlist_stop,
+               .seq_show = cgroup_pidlist_show,
+               .private = CGROUP_FILE_TASKS,
                .write_u64 = cgroup_tasks_write,
-               .release = cgroup_pidlist_release,
                .mode = S_IRUGO | S_IWUSR,
        },
        {
@@ -4187,9 +3417,9 @@ static struct cftype cgroup_base_files[] = {
        {
                .name = "release_agent",
                .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
-               .read_seq_string = cgroup_release_agent_show,
+               .seq_show = cgroup_release_agent_show,
                .write_string = cgroup_release_agent_write,
-               .max_write_len = PATH_MAX,
+               .max_write_len = PATH_MAX - 1,
        },
        { }     /* terminate */
 };
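
With the conversion, a read-only control file is declared entirely through cftype callbacks instead of .open/.release. A hedged sketch of a one-file table against the post-conversion interface; the demo_* names and the "demo.value" filename are made up for illustration:

static int demo_seq_show(struct seq_file *seq, void *v)
{
	/* seq_css(seq) would recover the owning css if needed */
	seq_puts(seq, "hello\n");
	return 0;
}

static struct cftype demo_files[] = {
	{
		.name = "demo.value",
		.seq_show = demo_seq_show,
		.mode = S_IRUGO,
	},
	{ }	/* terminate */
};

Single-u64 files keep using .read_u64/.write_u64, as several entries above do.
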
@@ -4208,13 +3438,13 @@ static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
 
        /* process cftsets of each subsystem */
        for_each_subsys(ss, i) {
-               struct cftype_set *set;
+               struct cftype *cfts;
 
                if (!test_bit(i, &subsys_mask))
                        continue;
 
-               list_for_each_entry(set, &ss->cftsets, node) {
-                       ret = cgroup_addrm_files(cgrp, set->cfts, true);
+               list_for_each_entry(cfts, &ss->cfts, node) {
+                       ret = cgroup_addrm_files(cgrp, cfts, true);
                        if (ret < 0)
                                goto err;
                }
@@ -4257,7 +3487,7 @@ static void css_free_work_fn(struct work_struct *work)
                css_put(css->parent);
 
        css->ss->css_free(css);
-       cgroup_dput(cgrp);
+       cgroup_put(cgrp);
 }
 
 static void css_free_rcu_fn(struct rcu_head *rcu_head)
@@ -4265,10 +3495,6 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
        struct cgroup_subsys_state *css =
                container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
 
-       /*
-        * css holds an extra ref to @cgrp->dentry which is put on the last
-        * css_put().  dput() requires process context which we don't have.
-        */
        INIT_WORK(&css->destroy_work, css_free_work_fn);
        queue_work(cgroup_destroy_wq, &css->destroy_work);
 }
@@ -4278,7 +3504,7 @@ static void css_release(struct percpu_ref *ref)
        struct cgroup_subsys_state *css =
                container_of(ref, struct cgroup_subsys_state, refcnt);
 
-       rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL);
+       rcu_assign_pointer(css->cgroup->subsys[css->ss->id], NULL);
        call_rcu(&css->rcu_head, css_free_rcu_fn);
 }
 
@@ -4303,6 +3529,7 @@ static int online_css(struct cgroup_subsys_state *css)
        struct cgroup_subsys *ss = css->ss;
        int ret = 0;
 
+       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        if (ss->css_online)
@@ -4310,7 +3537,7 @@ static int online_css(struct cgroup_subsys_state *css)
        if (!ret) {
                css->flags |= CSS_ONLINE;
                css->cgroup->nr_css++;
-               rcu_assign_pointer(css->cgroup->subsys[ss->subsys_id], css);
+               rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
        }
        return ret;
 }
@@ -4320,6 +3547,7 @@ static void offline_css(struct cgroup_subsys_state *css)
 {
        struct cgroup_subsys *ss = css->ss;
 
+       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        if (!(css->flags & CSS_ONLINE))
@@ -4330,45 +3558,85 @@ static void offline_css(struct cgroup_subsys_state *css)
 
        css->flags &= ~CSS_ONLINE;
        css->cgroup->nr_css--;
-       RCU_INIT_POINTER(css->cgroup->subsys[ss->subsys_id], css);
+       RCU_INIT_POINTER(css->cgroup->subsys[ss->id], css);
 }
 
-/*
+/**
+ * create_css - create a cgroup_subsys_state
+ * @cgrp: the cgroup new css will be associated with
+ * @ss: the subsys of new css
+ *
+ * Create a new css associated with @cgrp - @ss pair.  On success, the new
+ * css is online and installed in @cgrp with all interface files created.
+ * Returns 0 on success, -errno on failure.
+ */
+static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct cgroup *parent = cgrp->parent;
+       struct cgroup_subsys_state *css;
+       int err;
+
+       lockdep_assert_held(&cgroup_mutex);
+
+       css = ss->css_alloc(cgroup_css(parent, ss));
+       if (IS_ERR(css))
+               return PTR_ERR(css);
+
+       err = percpu_ref_init(&css->refcnt, css_release);
+       if (err)
+               goto err_free;
+
+       init_css(css, ss, cgrp);
+
+       err = cgroup_populate_dir(cgrp, 1 << ss->id);
+       if (err)
+               goto err_free;
+
+       err = online_css(css);
+       if (err)
+               goto err_free;
+
+       cgroup_get(cgrp);
+       css_get(css->parent);
+
+       if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
+           parent->parent) {
+               pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
+                          current->comm, current->pid, ss->name);
+               if (!strcmp(ss->name, "memory"))
+                       pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
+               ss->warned_broken_hierarchy = true;
+       }
+
+       return 0;
+
+err_free:
+       percpu_ref_cancel_init(&css->refcnt);
+       ss->css_free(css);
+       return err;
+}
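
Each css is now pinned by a percpu_ref whose release callback runs once percpu_ref_kill() has been called and the last reference is dropped. A sketch of that lifecycle, assuming the two-argument percpu_ref_init() this tree uses; demo_* is illustrative:

#include <linux/percpu-refcount.h>

static void demo_release(struct percpu_ref *ref)
{
	/* last put after kill; free the owning object (often via RCU) */
}

static int demo_ref_cycle(struct percpu_ref *ref)
{
	int err = percpu_ref_init(ref, demo_release);	/* count starts at 1 */

	if (err)
		return err;
	percpu_ref_get(ref);	/* per-cpu fast path */
	percpu_ref_put(ref);
	percpu_ref_kill(ref);	/* disable percpu mode and put the base ref;
				 * demo_release() fires on the final put */
	return 0;
}
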
+
+/**
  * cgroup_create - create a cgroup
  * @parent: cgroup that will be parent of the new cgroup
- * @dentry: dentry of the new cgroup
- * @mode: mode to set on new inode
- *
- * Must be called with the mutex on the parent inode held
+ * @name: name of the new cgroup
+ * @mode: mode to set on new cgroup
  */
-static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
-                            umode_t mode)
+static long cgroup_create(struct cgroup *parent, const char *name,
+                         umode_t mode)
 {
-       struct cgroup_subsys_state *css_ar[CGROUP_SUBSYS_COUNT] = { };
        struct cgroup *cgrp;
-       struct cgroup_name *name;
        struct cgroupfs_root *root = parent->root;
-       int err = 0;
+       int ssid, err;
        struct cgroup_subsys *ss;
-       struct super_block *sb = root->sb;
+       struct kernfs_node *kn;
 
        /* allocate the cgroup and its ID, 0 is reserved for the root */
        cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
        if (!cgrp)
                return -ENOMEM;
 
-       name = cgroup_alloc_name(dentry);
-       if (!name)
-               goto err_free_cgrp;
-       rcu_assign_pointer(cgrp->name, name);
-
-       /*
-        * Temporarily set the pointer to NULL, so idr_find() won't return
-        * a half-baked cgroup.
-        */
-       cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
-       if (cgrp->id < 0)
-               goto err_free_name;
+       mutex_lock(&cgroup_tree_mutex);
 
        /*
         * Only live parents can have children.  Note that the liveliness
@@ -4379,21 +3647,21 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
         */
        if (!cgroup_lock_live_group(parent)) {
                err = -ENODEV;
-               goto err_free_id;
+               goto err_unlock_tree;
        }
 
-       /* Grab a reference on the superblock so the hierarchy doesn't
-        * get deleted on unmount if there are child cgroups.  This
-        * can be done outside cgroup_mutex, since the sb can't
-        * disappear while someone has an open control file on the
-        * fs */
-       atomic_inc(&sb->s_active);
+       /*
+        * Temporarily set the pointer to NULL, so idr_find() won't return
+        * a half-baked cgroup.
+        */
+       cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+       if (cgrp->id < 0) {
+               err = -ENOMEM;
+               goto err_unlock;
+       }
 
        init_cgroup_housekeeping(cgrp);
 
-       dentry->d_fsdata = cgrp;
-       cgrp->dentry = dentry;
-
        cgrp->parent = parent;
        cgrp->dummy_css.parent = &parent->dummy_css;
        cgrp->root = parent->root;
@@ -4404,123 +3672,75 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
 
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css;
-
-               css = ss->css_alloc(cgroup_css(parent, ss));
-               if (IS_ERR(css)) {
-                       err = PTR_ERR(css);
-                       goto err_free_all;
-               }
-               css_ar[ss->subsys_id] = css;
-
-               err = percpu_ref_init(&css->refcnt, css_release);
-               if (err)
-                       goto err_free_all;
-
-               init_css(css, ss, cgrp);
+       /* create the directory */
+       kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
+       if (IS_ERR(kn)) {
+               err = PTR_ERR(kn);
+               goto err_free_id;
        }
+       cgrp->kn = kn;
 
        /*
-        * Create directory.  cgroup_create_file() returns with the new
-        * directory locked on success so that it can be populated without
-        * dropping cgroup_mutex.
+        * This extra ref will be put in cgroup_free_fn() and guarantees
+        * that @cgrp->kn is always accessible.
         */
-       err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
-       if (err < 0)
-               goto err_free_all;
-       lockdep_assert_held(&dentry->d_inode->i_mutex);
+       kernfs_get(kn);
 
        cgrp->serial_nr = cgroup_serial_nr_next++;
 
        /* allocation complete, commit to creation */
        list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
-       root->number_of_cgroups++;
-
-       /* hold a ref to the parent's dentry */
-       dget(parent->dentry);
-
-       /* creation succeeded, notify subsystems */
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
-               err = online_css(css);
-               if (err)
-                       goto err_destroy;
-
-               /* each css holds a ref to the cgroup's dentry and parent css */
-               dget(dentry);
-               css_get(css->parent);
-
-               /* mark it consumed for error path */
-               css_ar[ss->subsys_id] = NULL;
-
-               if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
-                   parent->parent) {
-                       pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
-                                  current->comm, current->pid, ss->name);
-                       if (!strcmp(ss->name, "memory"))
-                               pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
-                       ss->warned_broken_hierarchy = true;
-               }
-       }
+       atomic_inc(&root->nr_cgrps);
+       cgroup_get(parent);
 
+       /*
+        * @cgrp is now fully operational.  If something fails after this
+        * point, it'll be released via the normal destruction path.
+        */
        idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
 
        err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
        if (err)
                goto err_destroy;
 
-       err = cgroup_populate_dir(cgrp, root->subsys_mask);
-       if (err)
-               goto err_destroy;
+       /* let's create and online css's */
+       for_each_subsys(ss, ssid) {
+               if (root->subsys_mask & (1 << ssid)) {
+                       err = create_css(cgrp, ss);
+                       if (err)
+                               goto err_destroy;
+               }
+       }
+
+       kernfs_activate(kn);
 
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
+       mutex_unlock(&cgroup_tree_mutex);
 
        return 0;
 
-err_free_all:
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
-               if (css) {
-                       percpu_ref_cancel_init(&css->refcnt);
-                       ss->css_free(css);
-               }
-       }
-       mutex_unlock(&cgroup_mutex);
-       /* Release the reference count that we took on the superblock */
-       deactivate_super(sb);
 err_free_id:
        idr_remove(&root->cgroup_idr, cgrp->id);
-err_free_name:
-       kfree(rcu_dereference_raw(cgrp->name));
-err_free_cgrp:
+err_unlock:
+       mutex_unlock(&cgroup_mutex);
+err_unlock_tree:
+       mutex_unlock(&cgroup_tree_mutex);
        kfree(cgrp);
        return err;
 
 err_destroy:
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
-               if (css) {
-                       percpu_ref_cancel_init(&css->refcnt);
-                       ss->css_free(css);
-               }
-       }
        cgroup_destroy_locked(cgrp);
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&dentry->d_inode->i_mutex);
+       mutex_unlock(&cgroup_tree_mutex);
        return err;
 }
 
-static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
+                       umode_t mode)
 {
-       struct cgroup *c_parent = dentry->d_parent->d_fsdata;
+       struct cgroup *parent = parent_kn->priv;
 
-       /* the vfs holds inode->i_mutex already */
-       return cgroup_create(c_parent, dentry, mode | S_IFDIR);
+       return cgroup_create(parent, name, mode);
 }
 
 /*
@@ -4533,6 +3753,7 @@ static void css_killed_work_fn(struct work_struct *work)
                container_of(work, struct cgroup_subsys_state, destroy_work);
        struct cgroup *cgrp = css->cgroup;
 
+       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
        /*
@@ -4550,6 +3771,7 @@ static void css_killed_work_fn(struct work_struct *work)
                cgroup_destroy_css_killed(cgrp);
 
        mutex_unlock(&cgroup_mutex);
+       mutex_unlock(&cgroup_tree_mutex);
 
        /*
         * Put the css refs from kill_css().  Each css holds an extra
@@ -4582,7 +3804,11 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
  */
 static void kill_css(struct cgroup_subsys_state *css)
 {
-       cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+       /*
+        * This must happen before css is disassociated with its cgroup.
+        * See seq_css() for details.
+        */
+       cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
 
        /*
         * Killing would put the base ref, but we need to keep it alive
@@ -4630,13 +3856,12 @@ static void kill_css(struct cgroup_subsys_state *css)
 static int cgroup_destroy_locked(struct cgroup *cgrp)
        __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
 {
-       struct dentry *d = cgrp->dentry;
-       struct cgroup_event *event, *tmp;
-       struct cgroup_subsys *ss;
        struct cgroup *child;
+       struct cgroup_subsys_state *css;
        bool empty;
+       int ssid;
 
-       lockdep_assert_held(&d->d_inode->i_mutex);
+       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        /*
@@ -4668,14 +3893,13 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        /*
         * Initiate massacre of all css's.  cgroup_destroy_css_killed()
         * will be invoked to perform the rest of destruction once the
-        * percpu refs of all css's are confirmed to be killed.
+        * percpu refs of all css's are confirmed to be killed.  This
+        * involves removing the subsystem's files, so cgroup_mutex is
+        * dropped across the kill_css() loop below.
         */
-       for_each_root_subsys(cgrp->root, ss) {
-               struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
-
-               if (css)
-                       kill_css(css);
-       }
+       mutex_unlock(&cgroup_mutex);
+       for_each_css(css, ssid, cgrp)
+               kill_css(css);
+       mutex_lock(&cgroup_mutex);
 
        /*
         * Mark @cgrp dead.  This prevents further task migration and child
@@ -4701,26 +3925,20 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        if (!cgrp->nr_css)
                cgroup_destroy_css_killed(cgrp);
 
-       /*
-        * Clear the base files and remove @cgrp directory.  The removal
-        * puts the base ref but we aren't quite done with @cgrp yet, so
-        * hold onto it.
-        */
-       cgroup_addrm_files(cgrp, cgroup_base_files, false);
-       dget(d);
-       cgroup_d_remove_dir(d);
+       /* remove @cgrp directory along with the base files */
+       mutex_unlock(&cgroup_mutex);
 
        /*
-        * Unregister events and notify userspace.
-        * Notify userspace about cgroup removing only after rmdir of cgroup
-        * directory to avoid race between userspace and kernelspace.
+        * There are two control paths which try to determine a cgroup from
+        * a dentry without going through kernfs - cgroupstats_build() and
+        * css_tryget_from_dir().  Those are supported by RCU-protected
+        * clearing of the cgrp->kn->priv backpointer, which should happen
+        * after all files under it have been removed.
         */
-       spin_lock(&cgrp->event_list_lock);
-       list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
-               list_del_init(&event->list);
-               schedule_work(&event->remove);
-       }
-       spin_unlock(&cgrp->event_list_lock);
+       kernfs_remove(cgrp->kn);        /* @cgrp has an extra ref on its kn */
+       RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
+
+       mutex_lock(&cgroup_mutex);
 
        return 0;
 }
@@ -4737,62 +3955,71 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 static void cgroup_destroy_css_killed(struct cgroup *cgrp)
 {
        struct cgroup *parent = cgrp->parent;
-       struct dentry *d = cgrp->dentry;
 
+       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        /* delete this cgroup from parent->children */
        list_del_rcu(&cgrp->sibling);
 
-       dput(d);
+       cgroup_put(cgrp);
 
        set_bit(CGRP_RELEASABLE, &parent->flags);
        check_for_release(parent);
 }
 
-static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
+static int cgroup_rmdir(struct kernfs_node *kn)
 {
-       int ret;
-
-       mutex_lock(&cgroup_mutex);
-       ret = cgroup_destroy_locked(dentry->d_fsdata);
-       mutex_unlock(&cgroup_mutex);
+       struct cgroup *cgrp = kn->priv;
+       int ret = 0;
 
-       return ret;
-}
+       /*
+        * This is self-destruction but @kn can't be removed while this
+        * callback is in progress.  Let's break active protection.  Once
+        * the protection is broken, @cgrp can be destroyed at any point.
+        * Pin it so that it stays accessible.
+        */
+       cgroup_get(cgrp);
+       kernfs_break_active_protection(kn);
 
-static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss)
-{
-       INIT_LIST_HEAD(&ss->cftsets);
+       mutex_lock(&cgroup_tree_mutex);
+       mutex_lock(&cgroup_mutex);
 
        /*
-        * base_cftset is embedded in subsys itself, no need to worry about
-        * deregistration.
+        * @cgrp might already have been destroyed while we're trying to
+        * grab the mutexes.
         */
-       if (ss->base_cftypes) {
-               struct cftype *cft;
+       if (!cgroup_is_dead(cgrp))
+               ret = cgroup_destroy_locked(cgrp);
 
-               for (cft = ss->base_cftypes; cft->name[0] != '\0'; cft++)
-                       cft->ss = ss;
+       mutex_unlock(&cgroup_mutex);
+       mutex_unlock(&cgroup_tree_mutex);
 
-               ss->base_cftset.cfts = ss->base_cftypes;
-               list_add_tail(&ss->base_cftset.node, &ss->cftsets);
-       }
+       kernfs_unbreak_active_protection(kn);
+       cgroup_put(cgrp);
+       return ret;
 }
 
+static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
+       .remount_fs             = cgroup_remount,
+       .show_options           = cgroup_show_options,
+       .mkdir                  = cgroup_mkdir,
+       .rmdir                  = cgroup_rmdir,
+       .rename                 = cgroup_rename,
+};
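
Directory syscalls now reach cgroup through this kernfs dispatch table instead of inode_operations. A minimal sketch of the same wiring for a hypothetical kernfs user, built only from calls this patch itself uses (kernfs_create_dir(), kernfs_activate()); demo_* is illustrative:

#include <linux/err.h>
#include <linux/kernfs.h>

static int demo_mkdir(struct kernfs_node *parent_kn, const char *name,
		      umode_t mode)
{
	struct kernfs_node *kn;

	kn = kernfs_create_dir(parent_kn, name, mode, NULL /* priv */);
	if (IS_ERR(kn))
		return PTR_ERR(kn);
	kernfs_activate(kn);	/* make the new node visible */
	return 0;
}

static struct kernfs_syscall_ops demo_kf_syscall_ops = {
	.mkdir	= demo_mkdir,
};
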
+
 static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
 {
        struct cgroup_subsys_state *css;
 
        printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
 
+       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
-       /* init base cftset */
-       cgroup_init_cftsets(ss);
+       INIT_LIST_HEAD(&ss->cfts);
 
        /* Create the top cgroup state for this subsystem */
-       list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
        ss->root = &cgroup_dummy_root;
        css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss));
        /* We don't handle early failures gracefully */
@@ -4803,7 +4030,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
         * pointer to this state - since the subsystem is
         * newly registered, all tasks and hence the
         * init_css_set is in the subsystem's top cgroup. */
-       init_css_set.subsys[ss->subsys_id] = css;
+       init_css_set.subsys[ss->id] = css;
 
        need_forkexit_callback |= ss->fork || ss->exit;
 
@@ -4815,176 +4042,8 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
        BUG_ON(online_css(css));
 
        mutex_unlock(&cgroup_mutex);
-
-       /* this function shouldn't be used with modular subsystems, since they
-        * need to register a subsys_id, among other things */
-       BUG_ON(ss->module);
-}
-
-/**
- * cgroup_load_subsys: load and register a modular subsystem at runtime
- * @ss: the subsystem to load
- *
- * This function should be called in a modular subsystem's initcall. If the
- * subsystem is built as a module, it will be assigned a new subsys_id and set
- * up for use. If the subsystem is built-in anyway, work is delegated to the
- * simpler cgroup_init_subsys.
- */
-int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
-{
-       struct cgroup_subsys_state *css;
-       int i, ret;
-       struct hlist_node *tmp;
-       struct css_set *cset;
-       unsigned long key;
-
-       /* check name and function validity */
-       if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
-           ss->css_alloc == NULL || ss->css_free == NULL)
-               return -EINVAL;
-
-       /*
-        * we don't support callbacks in modular subsystems. this check is
-        * before the ss->module check for consistency; a subsystem that could
-        * be a module should still have no callbacks even if the user isn't
-        * compiling it as one.
-        */
-       if (ss->fork || ss->exit)
-               return -EINVAL;
-
-       /*
-        * an optionally modular subsystem is built-in: we want to do nothing,
-        * since cgroup_init_subsys will have already taken care of it.
-        */
-       if (ss->module == NULL) {
-               /* a sanity check */
-               BUG_ON(cgroup_subsys[ss->subsys_id] != ss);
-               return 0;
-       }
-
-       /* init base cftset */
-       cgroup_init_cftsets(ss);
-
-       mutex_lock(&cgroup_mutex);
-       cgroup_subsys[ss->subsys_id] = ss;
-
-       /*
-        * no ss->css_alloc seems to need anything important in the ss
-        * struct, so this can happen first (i.e. before the dummy root
-        * attachment).
-        */
-       css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss));
-       if (IS_ERR(css)) {
-               /* failure case - need to deassign the cgroup_subsys[] slot. */
-               cgroup_subsys[ss->subsys_id] = NULL;
-               mutex_unlock(&cgroup_mutex);
-               return PTR_ERR(css);
-       }
-
-       list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
-       ss->root = &cgroup_dummy_root;
-
-       /* our new subsystem will be attached to the dummy hierarchy. */
-       init_css(css, ss, cgroup_dummy_top);
-
-       /*
-        * Now we need to entangle the css into the existing css_sets. unlike
-        * in cgroup_init_subsys, there are now multiple css_sets, so each one
-        * will need a new pointer to it; done by iterating the css_set_table.
-        * furthermore, modifying the existing css_sets will corrupt the hash
-        * table state, so each changed css_set will need its hash recomputed.
-        * this is all done under the css_set_lock.
-        */
-       write_lock(&css_set_lock);
-       hash_for_each_safe(css_set_table, i, tmp, cset, hlist) {
-               /* skip entries that we already rehashed */
-               if (cset->subsys[ss->subsys_id])
-                       continue;
-               /* remove existing entry */
-               hash_del(&cset->hlist);
-               /* set new value */
-               cset->subsys[ss->subsys_id] = css;
-               /* recompute hash and restore entry */
-               key = css_set_hash(cset->subsys);
-               hash_add(css_set_table, &cset->hlist, key);
-       }
-       write_unlock(&css_set_lock);
-
-       ret = online_css(css);
-       if (ret)
-               goto err_unload;
-
-       /* success! */
-       mutex_unlock(&cgroup_mutex);
-       return 0;
-
-err_unload:
-       mutex_unlock(&cgroup_mutex);
-       /* @ss can't be mounted here as try_module_get() would fail */
-       cgroup_unload_subsys(ss);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(cgroup_load_subsys);
-
-/**
- * cgroup_unload_subsys: unload a modular subsystem
- * @ss: the subsystem to unload
- *
- * This function should be called in a modular subsystem's exitcall. When this
- * function is invoked, the refcount on the subsystem's module will be 0, so
- * the subsystem will not be attached to any hierarchy.
- */
-void cgroup_unload_subsys(struct cgroup_subsys *ss)
-{
-       struct cgrp_cset_link *link;
-
-       BUG_ON(ss->module == NULL);
-
-       /*
-        * we shouldn't be called if the subsystem is in use, and the use of
-        * try_module_get() in rebind_subsystems() should ensure that it
-        * doesn't start being used while we're killing it off.
-        */
-       BUG_ON(ss->root != &cgroup_dummy_root);
-
-       mutex_lock(&cgroup_mutex);
-
-       offline_css(cgroup_css(cgroup_dummy_top, ss));
-
-       /* deassign the subsys_id */
-       cgroup_subsys[ss->subsys_id] = NULL;
-
-       /* remove subsystem from the dummy root's list of subsystems */
-       list_del_init(&ss->sibling);
-
-       /*
-        * disentangle the css from all css_sets attached to the dummy
-        * top. as in loading, we need to pay our respects to the hashtable
-        * gods.
-        */
-       write_lock(&css_set_lock);
-       list_for_each_entry(link, &cgroup_dummy_top->cset_links, cset_link) {
-               struct css_set *cset = link->cset;
-               unsigned long key;
-
-               hash_del(&cset->hlist);
-               cset->subsys[ss->subsys_id] = NULL;
-               key = css_set_hash(cset->subsys);
-               hash_add(css_set_table, &cset->hlist, key);
-       }
-       write_unlock(&css_set_lock);
-
-       /*
-        * remove subsystem's css from the cgroup_dummy_top and free it -
-        * need to free before marking as null because ss->css_free needs
-        * the cgrp->subsys pointer to find their state.
-        */
-       ss->css_free(cgroup_css(cgroup_dummy_top, ss));
-       RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL);
-
-       mutex_unlock(&cgroup_mutex);
+       mutex_unlock(&cgroup_tree_mutex);
 }
-EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
 
 /**
  * cgroup_init_early - cgroup initialization at system boot
@@ -5011,17 +4070,16 @@ int __init cgroup_init_early(void)
        list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links);
        list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links);
 
-       /* at bootup time, we don't worry about modular subsystems */
-       for_each_builtin_subsys(ss, i) {
-               BUG_ON(!ss->name);
-               BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
-               BUG_ON(!ss->css_alloc);
-               BUG_ON(!ss->css_free);
-               if (ss->subsys_id != i) {
-                       printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
-                              ss->name, ss->subsys_id);
-                       BUG();
-               }
+       for_each_subsys(ss, i) {
+               WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
+                    "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n",
+                    i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
+                    ss->id, ss->name);
+               WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
+                    "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
+
+               ss->id = i;
+               ss->name = cgroup_subsys_name[i];
 
                if (ss->early_init)
                        cgroup_init_subsys(ss);
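
The per-subsystem .name and .subsys_id fields are gone; ss->id and ss->name are assigned here from a generated table. A sketch of how such a table is produced with the SUBSYS() x-macro over cgroup_subsys.h, assuming the layout this series adopts:

/* expand linux/cgroup_subsys.h once per desired view of the list */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

The debug_cgrp_subsys rename at the end of this patch follows the same <name>_cgrp_subsys convention the macro relies on.
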
@@ -5041,18 +4099,22 @@ int __init cgroup_init(void)
        unsigned long key;
        int i, err;
 
-       err = bdi_init(&cgroup_backing_dev_info);
-       if (err)
-               return err;
+       BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
 
-       for_each_builtin_subsys(ss, i) {
+       for_each_subsys(ss, i) {
                if (!ss->early_init)
                        cgroup_init_subsys(ss);
+
+               /*
+                * cftype registration needs kmalloc and can't be done
+                * during early_init.  Register base cftypes separately.
+                */
+               if (ss->base_cftypes)
+                       WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes));
        }
 
        /* allocate id for the dummy hierarchy */
        mutex_lock(&cgroup_mutex);
-       mutex_lock(&cgroup_root_mutex);
 
        /* Add init_css_set to the hash table */
        key = css_set_hash(init_css_set.subsys);
@@ -5064,28 +4126,20 @@ int __init cgroup_init(void)
                        0, 1, GFP_KERNEL);
        BUG_ON(err < 0);
 
-       mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
 
        cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
-       if (!cgroup_kobj) {
-               err = -ENOMEM;
-               goto out;
-       }
+       if (!cgroup_kobj)
+               return -ENOMEM;
 
        err = register_filesystem(&cgroup_fs_type);
        if (err < 0) {
                kobject_put(cgroup_kobj);
-               goto out;
+               return err;
        }
 
        proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
-
-out:
-       if (err)
-               bdi_destroy(&cgroup_backing_dev_info);
-
-       return err;
+       return 0;
 }
 
 static int __init cgroup_wq_init(void)
@@ -5093,13 +4147,26 @@ static int __init cgroup_wq_init(void)
        /*
         * There isn't much point in executing destruction path in
         * parallel.  Good chunk is serialized with cgroup_mutex anyway.
-        * Use 1 for @max_active.
+        *
+        * XXX: Must be ordered to make sure parent is offlined after
+        * children.  The ordering requirement is for memcg where a
+        * parent's offline may wait for a child's, leading to deadlock.  In
+        * the long term, this should be fixed from memcg side.
         *
         * We would prefer to do this in cgroup_init() above, but that
         * is called before init_workqueues(): so leave this until after.
         */
-       cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+       cgroup_destroy_wq = alloc_ordered_workqueue("cgroup_destroy", 0);
        BUG_ON(!cgroup_destroy_wq);
+
+       /*
+        * Used to destroy pidlists; kept separate to serve as a flush domain.
+        * Cap @max_active to 1 too.
+        */
+       cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
+                                                   0, 1);
+       BUG_ON(!cgroup_pidlist_destroy_wq);
+
        return 0;
 }
 core_initcall(cgroup_wq_init);
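
An ordered workqueue runs at most one work item at a time, strictly in queueing order, so a child cgroup queued for destruction before its parent is also offlined before it. Minimal usage sketch of alloc_ordered_workqueue(); demo_* is illustrative:

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
	/* never runs concurrently with other items on demo_wq */
}
static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	demo_wq = alloc_ordered_workqueue("demo", 0);
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_work);
	return 0;
}
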
@@ -5121,12 +4188,12 @@ int proc_cgroup_show(struct seq_file *m, void *v)
 {
        struct pid *pid;
        struct task_struct *tsk;
-       char *buf;
+       char *buf, *path;
        int retval;
        struct cgroupfs_root *root;
 
        retval = -ENOMEM;
-       buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       buf = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!buf)
                goto out;
 
@@ -5143,20 +4210,23 @@ int proc_cgroup_show(struct seq_file *m, void *v)
        for_each_active_root(root) {
                struct cgroup_subsys *ss;
                struct cgroup *cgrp;
-               int count = 0;
+               int ssid, count = 0;
 
                seq_printf(m, "%d:", root->hierarchy_id);
-               for_each_root_subsys(root, ss)
-                       seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
+               for_each_subsys(ss, ssid)
+                       if (root->subsys_mask & (1 << ssid))
+                               seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
                if (strlen(root->name))
                        seq_printf(m, "%sname=%s", count ? "," : "",
                                   root->name);
                seq_putc(m, ':');
                cgrp = task_cgroup_from_root(tsk, root);
-               retval = cgroup_path(cgrp, buf, PAGE_SIZE);
-               if (retval < 0)
+               path = cgroup_path(cgrp, buf, PATH_MAX);
+               if (!path) {
+                       retval = -ENAMETOOLONG;
                        goto out_unlock;
-               seq_puts(m, buf);
+               }
+               seq_puts(m, path);
                seq_putc(m, '\n');
        }
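
As the hunk above shows, cgroup_path() now fills @buf and returns a pointer to the start of the path within it, or NULL when the buffer is too small, instead of an int error. A sketch of the new calling convention, assuming that return contract; demo_print_path() is illustrative:

static int demo_print_path(struct cgroup *cgrp)
{
	char *buf, *path;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	path = cgroup_path(cgrp, buf, PATH_MAX);	/* NULL on overflow */
	if (!path) {
		kfree(buf);
		return -ENAMETOOLONG;
	}

	pr_info("cgroup path: %s\n", path);
	kfree(buf);
	return 0;
}

The release_agent path further down follows the same shape.
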
 
@@ -5186,7 +4256,7 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v)
        for_each_subsys(ss, i)
                seq_printf(m, "%s\t%d\t%d\t%d\n",
                           ss->name, ss->root->hierarchy_id,
-                          ss->root->number_of_cgroups, !ss->disabled);
+                          atomic_read(&ss->root->nr_cgrps), !ss->disabled);
 
        mutex_unlock(&cgroup_mutex);
        return 0;
@@ -5270,15 +4340,7 @@ void cgroup_post_fork(struct task_struct *child)
         * and addition to css_set.
         */
        if (need_forkexit_callback) {
-               /*
-                * fork/exit callbacks are supported only for builtin
-                * subsystems, and the builtin section of the subsys
-                * array is immutable, so we don't need to lock the
-                * subsys array here. On the other hand, modular section
-                * of the array can be freed at module unload, so we
-                * can't touch that.
-                */
-               for_each_builtin_subsys(ss, i)
+               for_each_subsys(ss, i)
                        if (ss->fork)
                                ss->fork(child);
        }
@@ -5343,11 +4405,8 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
        RCU_INIT_POINTER(tsk->cgroups, &init_css_set);
 
        if (run_callbacks && need_forkexit_callback) {
-               /*
-                * fork/exit callbacks are supported only for builtin
-                * subsystems, see cgroup_post_fork() for details.
-                */
-               for_each_builtin_subsys(ss, i) {
+               /* see cgroup_post_fork() for details */
+               for_each_subsys(ss, i) {
                        if (ss->exit) {
                                struct cgroup_subsys_state *old_css = cset->subsys[i];
                                struct cgroup_subsys_state *css = task_css(tsk, i);
@@ -5415,16 +4474,17 @@ static void cgroup_release_agent(struct work_struct *work)
        while (!list_empty(&release_list)) {
                char *argv[3], *envp[3];
                int i;
-               char *pathbuf = NULL, *agentbuf = NULL;
+               char *pathbuf = NULL, *agentbuf = NULL, *path;
                struct cgroup *cgrp = list_entry(release_list.next,
                                                    struct cgroup,
                                                    release_list);
                list_del_init(&cgrp->release_list);
                raw_spin_unlock(&release_list_lock);
-               pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
                if (!pathbuf)
                        goto continue_free;
-               if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
+               path = cgroup_path(cgrp, pathbuf, PATH_MAX);
+               if (!path)
                        goto continue_free;
                agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
                if (!agentbuf)
@@ -5432,7 +4492,7 @@ static void cgroup_release_agent(struct work_struct *work)
 
                i = 0;
                argv[i++] = agentbuf;
-               argv[i++] = pathbuf;
+               argv[i++] = path;
                argv[i] = NULL;
 
                i = 0;
@@ -5466,11 +4526,7 @@ static int __init cgroup_disable(char *str)
                if (!*token)
                        continue;
 
-               /*
-                * cgroup_disable, being at boot time, can't know about
-                * module subsystems, so we don't worry about them.
-                */
-               for_each_builtin_subsys(ss, i) {
+               for_each_subsys(ss, i) {
                        if (!strcmp(token, ss->name)) {
                                ss->disabled = 1;
                                printk(KERN_INFO "Disabling %s control group"
@@ -5484,28 +4540,42 @@ static int __init cgroup_disable(char *str)
 __setup("cgroup_disable=", cgroup_disable);
 
 /**
- * css_from_dir - get corresponding css from the dentry of a cgroup dir
+ * css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir
  * @dentry: directory dentry of interest
  * @ss: subsystem of interest
  *
- * Must be called under RCU read lock.  The caller is responsible for
- * pinning the returned css if it needs to be accessed outside the RCU
- * critical section.
+ * If @dentry is a directory for a cgroup which has @ss enabled on it, try
+ * to get the corresponding css and return it.  If such a css doesn't exist
+ * or can't be pinned, an ERR_PTR value is returned.
  */
-struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
-                                        struct cgroup_subsys *ss)
+struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
+                                               struct cgroup_subsys *ss)
 {
+       struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
+       struct cgroup_subsys_state *css = NULL;
        struct cgroup *cgrp;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
-
        /* is @dentry a cgroup dir? */
-       if (!dentry->d_inode ||
-           dentry->d_inode->i_op != &cgroup_dir_inode_operations)
+       if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
+           kernfs_type(kn) != KERNFS_DIR)
                return ERR_PTR(-EBADF);
 
-       cgrp = __d_cgrp(dentry);
-       return cgroup_css(cgrp, ss) ?: ERR_PTR(-ENOENT);
+       rcu_read_lock();
+
+       /*
+        * This path doesn't originate from kernfs and @kn could already
+        * have been or be removed at any point.  @kn->priv is RCU
+        * protected for this access.  See cgroup_destroy_locked() for details.
+        */
+       cgrp = rcu_dereference(kn->priv);
+       if (cgrp)
+               css = cgroup_css(cgrp, ss);
+
+       if (!css || !css_tryget(css))
+               css = ERR_PTR(-ENOENT);
+
+       rcu_read_unlock();
+       return css;
 }
 
 /**
@@ -5520,9 +4590,7 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
 {
        struct cgroup *cgrp;
 
-       rcu_lockdep_assert(rcu_read_lock_held() ||
-                          lockdep_is_held(&cgroup_mutex),
-                          "css_from_id() needs proper protection");
+       cgroup_assert_mutexes_or_rcu_locked();
 
        cgrp = idr_find(&ss->root->cgroup_idr, id);
        if (cgrp)
@@ -5570,36 +4638,41 @@ static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
        return count;
 }
 
-static int current_css_set_cg_links_read(struct cgroup_subsys_state *css,
-                                        struct cftype *cft,
-                                        struct seq_file *seq)
+static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
 {
        struct cgrp_cset_link *link;
        struct css_set *cset;
+       char *name_buf;
+
+       name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
+       if (!name_buf)
+               return -ENOMEM;
 
        read_lock(&css_set_lock);
        rcu_read_lock();
        cset = rcu_dereference(current->cgroups);
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;
-               const char *name;
+               const char *name = "?";
+
+               if (c != cgroup_dummy_top) {
+                       cgroup_name(c, name_buf, NAME_MAX + 1);
+                       name = name_buf;
+               }
 
-               if (c->dentry)
-                       name = c->dentry->d_name.name;
-               else
-                       name = "?";
                seq_printf(seq, "Root %d group %s\n",
                           c->root->hierarchy_id, name);
        }
        rcu_read_unlock();
        read_unlock(&css_set_lock);
+       kfree(name_buf);
        return 0;
 }
 
 #define MAX_TASKS_SHOWN_PER_CSS 25
-static int cgroup_css_links_read(struct cgroup_subsys_state *css,
-                                struct cftype *cft, struct seq_file *seq)
+static int cgroup_css_links_read(struct seq_file *seq, void *v)
 {
+       struct cgroup_subsys_state *css = seq_css(seq);
        struct cgrp_cset_link *link;
 
        read_lock(&css_set_lock);
@@ -5645,12 +4718,12 @@ static struct cftype debug_files[] =  {
 
        {
                .name = "current_css_set_cg_links",
-               .read_seq_string = current_css_set_cg_links_read,
+               .seq_show = current_css_set_cg_links_read,
        },
 
        {
                .name = "cgroup_css_links",
-               .read_seq_string = cgroup_css_links_read,
+               .seq_show = cgroup_css_links_read,
        },
 
        {
@@ -5661,11 +4734,9 @@ static struct cftype debug_files[] =  {
        { }     /* terminate */
 };
 
-struct cgroup_subsys debug_subsys = {
-       .name = "debug",
+struct cgroup_subsys debug_cgrp_subsys = {
        .css_alloc = debug_css_alloc,
        .css_free = debug_css_free,
-       .subsys_id = debug_subsys_id,
        .base_cftypes = debug_files,
 };
 #endif /* CONFIG_CGROUP_DEBUG */