[NET_SCHED]: Eliminate qdisc_tree_lock
author     Patrick McHardy <kaber@trash.net>
           Tue, 17 Apr 2007 00:02:10 +0000 (17:02 -0700)
committer  David S. Miller <davem@sunset.davemloft.net>
           Thu, 26 Apr 2007 05:29:07 +0000 (22:29 -0700)
Since we're now holding the rtnl during the entire dump operation, we
can remove qdisc_tree_lock, whose only purpose is to protect dump
callbacks from concurrent changes to the qdisc tree.
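
To make the resulting rule concrete, here is a minimal sketch of the dump-side
pattern (illustrative only, not part of the patch; example_dump_walk is a
made-up name). Because the netlink dump now keeps the RTNL held across the
whole operation, and every change to the qdisc tree is also made under the
RTNL, walking dev->qdisc_list needs no additional lock:

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>
    #include <net/pkt_sched.h>

    /* Illustrative only: walk the qdisc list the way a dump callback would. */
    static int example_dump_walk(struct net_device *dev)
    {
            struct Qdisc *q;

            ASSERT_RTNL();          /* dumps and tree updates both hold the RTNL */

            list_for_each_entry(q, &dev->qdisc_list, list) {
                    /* ... fill one netlink message per qdisc ... */
            }
            return 0;
    }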

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/pkt_sched.h
net/sched/cls_api.c
net/sched/sch_api.c
net/sched/sch_generic.c

include/net/pkt_sched.h
index b2cc9a8ed4e7eda826ad4919d9f0f395a5eac296..5754d53d9efcbfc35164dc4ca3d9f14bbaec05cc 100644 (file)
@@ -13,8 +13,6 @@ struct qdisc_walker
        int     (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
 };
 
-extern rwlock_t qdisc_tree_lock;
-
 #define QDISC_ALIGNTO          32
 #define QDISC_ALIGN(len)       (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))
 
net/sched/cls_api.c
index ca3da5013b7a5431721fc19ef27b8e62bafa19a4..ebf94edf0478b6517d7b22b95e3cbb225a6229e1 100644 (file)
@@ -400,7 +400,6 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
        if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
                return skb->len;
 
-       read_lock(&qdisc_tree_lock);
        if (!tcm->tcm_parent)
                q = dev->qdisc_sleeping;
        else
@@ -457,7 +456,6 @@ errout:
        if (cl)
                cops->put(q, cl);
 out:
-       read_unlock(&qdisc_tree_lock);
        dev_put(dev);
        return skb->len;
 }
net/sched/sch_api.c
index 2e863bdaa9a1fdf64b39035ed071c90b73801be0..0ce6914f598126f025757b60d1ea86d543908b03 100644 (file)
@@ -191,7 +191,7 @@ int unregister_qdisc(struct Qdisc_ops *qops)
    (root qdisc, all its children, children of children etc.)
  */
 
-static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
        struct Qdisc *q;
 
@@ -202,16 +202,6 @@ static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
        return NULL;
 }
 
-struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
-{
-       struct Qdisc *q;
-
-       read_lock(&qdisc_tree_lock);
-       q = __qdisc_lookup(dev, handle);
-       read_unlock(&qdisc_tree_lock);
-       return q;
-}
-
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 {
        unsigned long cl;
@@ -405,7 +395,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
        if (n == 0)
                return;
        while ((parentid = sch->parent)) {
-               sch = __qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+               sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
                cops = sch->ops->cl_ops;
                if (cops->qlen_notify) {
                        cl = cops->get(sch, parentid);
@@ -905,7 +895,6 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
                if (idx > s_idx)
                        s_q_idx = 0;
-               read_lock(&qdisc_tree_lock);
                q_idx = 0;
                list_for_each_entry(q, &dev->qdisc_list, list) {
                        if (q_idx < s_q_idx) {
@@ -913,13 +902,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
                                continue;
                        }
                        if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
-                                         cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
-                               read_unlock(&qdisc_tree_lock);
+                                         cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                                goto done;
-                       }
                        q_idx++;
                }
-               read_unlock(&qdisc_tree_lock);
        }
 
 done:
@@ -1142,7 +1128,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
        s_t = cb->args[0];
        t = 0;
 
-       read_lock(&qdisc_tree_lock);
        list_for_each_entry(q, &dev->qdisc_list, list) {
                if (t < s_t || !q->ops->cl_ops ||
                    (tcm->tcm_parent &&
@@ -1164,7 +1149,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
                        break;
                t++;
        }
-       read_unlock(&qdisc_tree_lock);
 
        cb->args[0] = t;
 
net/sched/sch_generic.c
index 52eb3439d7c6bf561dc88fca37fdaea0e8a1da4b..1894eb72f6cf8ffc60761fed6de0251470cce630 100644 (file)
 
 /* Main transmission queue. */
 
-/* Main qdisc structure lock.
-
-   However, modifications
-   to data, participating in scheduling must be additionally
-   protected with dev->queue_lock spinlock.
-
-   The idea is the following:
-   - enqueue, dequeue are serialized via top level device
-     spinlock dev->queue_lock.
-   - tree walking is protected by read_lock(qdisc_tree_lock)
-     and this lock is used only in process context.
-   - updates to tree are made only under rtnl semaphore,
-     hence this lock may be made without local bh disabling.
-
-   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
+/* Modifications to data participating in scheduling must be protected with
+ * dev->queue_lock spinlock.
+ *
+ * The idea is the following:
+ * - enqueue, dequeue are serialized via top level device
+ *   spinlock dev->queue_lock.
+ * - updates to tree and tree walking are only done under the rtnl mutex.
  */
-DEFINE_RWLOCK(qdisc_tree_lock);
 
 void qdisc_lock_tree(struct net_device *dev)
 {
-       write_lock(&qdisc_tree_lock);
        spin_lock_bh(&dev->queue_lock);
 }
 
 void qdisc_unlock_tree(struct net_device *dev)
 {
        spin_unlock_bh(&dev->queue_lock);
-       write_unlock(&qdisc_tree_lock);
 }
 
 /*
@@ -528,15 +517,11 @@ void dev_activate(struct net_device *dev)
                                printk(KERN_INFO "%s: activation failed\n", dev->name);
                                return;
                        }
-                       write_lock(&qdisc_tree_lock);
                        list_add_tail(&qdisc->list, &dev->qdisc_list);
-                       write_unlock(&qdisc_tree_lock);
                } else {
                        qdisc =  &noqueue_qdisc;
                }
-               write_lock(&qdisc_tree_lock);
                dev->qdisc_sleeping = qdisc;
-               write_unlock(&qdisc_tree_lock);
        }
 
        if (!netif_carrier_ok(dev))
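
The comment added to sch_generic.c above summarizes the scheme. As a rough
illustration of the update side (a simplified sketch loosely following the
existing dev_graft_qdisc() path, not code from this patch), a configuration
change runs under the RTNL and takes dev->queue_lock via qdisc_lock_tree()
only around the pointer swap the fast path can observe:

    /*
     * Illustrative sketch: swap in a new sleeping qdisc.  The RTNL
     * serializes this against other tree updates and against dumps;
     * qdisc_lock_tree() now only takes dev->queue_lock (BH disabled)
     * to keep enqueue/dequeue away while the pointers change.
     */
    static struct Qdisc *example_swap_sleeping_qdisc(struct net_device *dev,
                                                     struct Qdisc *qdisc)
    {
            struct Qdisc *oqdisc;

            ASSERT_RTNL();

            qdisc_lock_tree(dev);
            oqdisc = dev->qdisc_sleeping;
            if (oqdisc)
                    qdisc_reset(oqdisc);    /* drop any queued packets */
            dev->qdisc_sleeping = qdisc;
            dev->qdisc = &noop_qdisc;
            qdisc_unlock_tree(dev);

            return oqdisc;                  /* caller disposes of the old qdisc */
    }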