return NULL;
spin_lock_init(&blkg->stats_lock);
- rcu_assign_pointer(blkg->q, q);
+ blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
blkg->refcnt = 1;
hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
list_add(&blkg->q_node, &q->blkg_list);
- q->nr_blkgs++;
spin_unlock(&blkcg->lock);
out:
list_del_init(&blkg->q_node);
hlist_del_init_rcu(&blkg->blkcg_node);
- WARN_ON_ONCE(q->nr_blkgs <= 0);
- q->nr_blkgs--;
-
/*
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
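
The two hunks above change how a group is wired up: blkg->q becomes an ordinary pointer stored under blkcg->lock while the group is linked onto both lists, and the per-queue nr_blkgs counter that used to track undestroyed groups is dropped along with its WARN_ON_ONCE. A tiny invented reader, only to spell out the rule the rest of the patch leans on (blkg_queue_example is not a function from the tree, just an illustration):

	/*
	 * Invented illustration: with RCU gone, whoever reads blkg->q must
	 * hold one of the two locks that keep the group on its lists.
	 */
	static struct request_queue *blkg_queue_example(struct blkio_cgroup *blkcg,
							struct blkio_group *blkg)
	{
		lockdep_assert_held(&blkcg->lock);	/* or q->queue_lock on the queue side */
		return blkg->q;				/* plain load, no rcu_dereference() */
	}
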
struct hlist_node *n;
uint64_t cgroup_total = 0;
- rcu_read_lock();
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ spin_lock_irq(&blkcg->lock);
+
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
const char *dname = blkg_dev_name(blkg);
int plid = BLKIOFILE_POLICY(cft->private);
cgroup_total += blkio_get_stat_cpu(blkg, plid,
cb, dname, type);
} else {
- spin_lock_irq(&blkg->stats_lock);
+ spin_lock(&blkg->stats_lock);
cgroup_total += blkio_get_stat(blkg, plid,
cb, dname, type);
- spin_unlock_irq(&blkg->stats_lock);
+ spin_unlock(&blkg->stats_lock);
}
}
if (show_total)
cb->fill(cb, "Total", cgroup_total);
- rcu_read_unlock();
+
+ spin_unlock_irq(&blkcg->lock);
return 0;
}
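
Read in one piece, the stats path now walks the group list under spin_lock_irq(&blkcg->lock) rather than rcu_read_lock(), which is also why the nested per-group stats_lock drops its _irq suffix: interrupts are already off for the whole traversal. A condensed sketch of the resulting loop, folded together from the hunk's context and '+' lines (the blkio_get_stat_cpu branch and error checks are omitted for brevity):

	spin_lock_irq(&blkcg->lock);		/* pins blkcg->blkg_list, IRQs off */

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		spin_lock(&blkg->stats_lock);	/* plain variant: IRQs already disabled */
		cgroup_total += blkio_get_stat(blkg, plid, cb, dname, type);
		spin_unlock(&blkg->stats_lock);
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	spin_unlock_irq(&blkcg->lock);
	return 0;
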
{
struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
- rcu_read_lock();
spin_lock_irq(&blkcg->lock);
while (!hlist_empty(&blkcg->blkg_list)) {
struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
struct blkio_group, blkcg_node);
- struct request_queue *q = rcu_dereference(blkg->q);
+ struct request_queue *q = blkg->q;
if (spin_trylock(q->queue_lock)) {
blkg_destroy(blkg);
spin_unlock(q->queue_lock);
} else {
spin_unlock_irq(&blkcg->lock);
- rcu_read_unlock();
cpu_relax();
- rcu_read_lock();
spin_lock(&blkcg->lock);
}
}
spin_unlock_irq(&blkcg->lock);
- rcu_read_unlock();
return 0;
}
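
The destruction loop above also shows why plain locking is enough: the group's queue pointer is read while blkcg->lock is held, and queue_lock is only ever trylocked from this context, backing off when it is contended. A minimal, generic sketch of that back-off pattern under an assumed "inner lock is normally taken first" ordering; outer, inner, victims and destroy_victim are invented names, and the re-acquire is written as spin_lock_irq for IRQ-state symmetry:

	struct victim {
		struct list_head node;		/* linkage protected by outer */
		spinlock_t inner;		/* the lock other paths take first */
	};

	spin_lock_irq(&outer);
	while (!list_empty(&victims)) {
		struct victim *v = list_first_entry(&victims, struct victim, node);

		if (spin_trylock(&v->inner)) {
			destroy_victim(v);	/* unlinks v from &victims */
			spin_unlock(&v->inner);
		} else {
			/* Can't spin for inner while holding outer: drop, breathe, retry. */
			spin_unlock_irq(&outer);
			cpu_relax();
			spin_lock_irq(&outer);
		}
	}
	spin_unlock_irq(&outer);
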
void blk_throtl_exit(struct request_queue *q)
{
- struct throtl_data *td = q->td;
- bool wait;
-
- BUG_ON(!td);
-
+ BUG_ON(!q->td);
throtl_shutdown_wq(q);
-
- /* If there are other groups */
- spin_lock_irq(q->queue_lock);
- wait = q->nr_blkgs;
- spin_unlock_irq(q->queue_lock);
-
- /*
- * Wait for tg_to_blkg(tg)->q accessors to exit their grace periods.
- * Do this wait only if there are other undestroyed groups out
- * there (other than root group). This can happen if cgroup deletion
- * path claimed the responsibility of cleaning up a group before
- * queue cleanup code get to the group.
- *
- * Do not call synchronize_rcu() unconditionally as there are drivers
- * which create/delete request queue hundreds of times during scan/boot
- * and synchronize_rcu() can take significant time and slow down boot.
- */
- if (wait)
- synchronize_rcu();
-
- /*
- * Just being safe to make sure after previous flush if some body did
- * update limits through cgroup and another work got queued, cancel
- * it.
- */
- throtl_shutdown_wq(q);
-
kfree(q->td);
}
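
With the nr_blkgs sampling and the conditional synchronize_rcu() gone (the deleted comment explains they existed only so accessors of still-unlinked groups could finish their RCU grace periods without paying that cost on every queue teardown), the function also loses the local td and the second throtl_shutdown_wq() call. Reassembled from the hunk's context and '+' lines, blk_throtl_exit() should now read roughly:

	void blk_throtl_exit(struct request_queue *q)
	{
		BUG_ON(!q->td);
		throtl_shutdown_wq(q);	/* stop the pending throttle work */
		kfree(q->td);
	}
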
{
struct cfq_data *cfqd = e->elevator_data;
struct request_queue *q = cfqd->queue;
- bool wait = false;
cfq_shutdown_timer_wq(cfqd);
spin_unlock_irq(q->queue_lock);
-#ifdef CONFIG_BLK_CGROUP
- /*
- * If there are groups which we could not unlink from blkcg list,
- * wait for a rcu period for them to be freed.
- */
- spin_lock_irq(q->queue_lock);
- wait = q->nr_blkgs;
- spin_unlock_irq(q->queue_lock);
-#endif
cfq_shutdown_timer_wq(cfqd);
- /*
- * Wait for cfqg->blkg->key accessors to exit their grace periods.
- * Do this wait only if there are other unlinked groups out
- * there. This can happen if cgroup deletion path claimed the
- * responsibility of cleaning up a group before queue cleanup code
- * get to the group.
- *
- * Do not call synchronize_rcu() unconditionally as there are drivers
- * which create/delete request queue hundreds of times during scan/boot
- * and synchronize_rcu() can take significant time and slow down boot.
- */
- if (wait)
- synchronize_rcu();
-
#ifndef CONFIG_CFQ_GROUP_IOSCHED
kfree(cfqd->root_group);
#endif
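
cfq_exit_queue() gets the same treatment: the nr_blkgs sampling under queue_lock and the conditional synchronize_rcu() wait for blkg accessors disappear, leaving only the shutdown calls and, for the non-group-scheduling build, the root_group free visible in this hunk. Reassembled from what the hunk shows, the surviving tail is roughly as follows (the elided middle of the function is not reconstructed here):

	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);
	...
	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

#ifndef CONFIG_CFQ_GROUP_IOSCHED
	kfree(cfqd->root_group);
#endif
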