* read mostly part
*/
struct net_device *dev;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
struct Qdisc *qdisc_sleeping;
#ifdef CONFIG_SYSFS
struct kobject kobj;
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
void __netif_schedule(struct Qdisc *q);
-
-static inline void netif_schedule_queue(struct netdev_queue *txq)
-{
- if (!(txq->state & QUEUE_STATE_ANY_XOFF))
- __netif_schedule(txq->qdisc);
-}
+void netif_schedule_queue(struct netdev_queue *txq);
static inline void netif_tx_schedule_all(struct net_device *dev)
{
}
}
-static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
-{
- if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
- __netif_schedule(dev_queue->qdisc);
-}
+void netif_tx_wake_queue(struct netdev_queue *dev_queue);
/**
* netif_wake_queue - restart transmit
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
-/**
- * netif_wake_subqueue - allow sending packets on subqueue
- * @dev: network device
- * @queue_index: sub queue index
- *
- * Resume individual transmit queue of a device with multiple transmit queues.
- */
-static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
-{
- struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
- if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
- __netif_schedule(txq->qdisc);
-}
+void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
- return qdisc->dev_queue->qdisc;
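+ /* callers hold either rcu_read_lock() or RTNL; the _rtnl flavor allows both */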
+ struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
+
+ return q;
}
static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
struct Qdisc *qdisc;
for (; i < dev->num_tx_queues; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
qdisc_reset(qdisc);
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
unsigned int i;
+
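+ /* hold the RCU read lock so each queue's qdisc cannot be freed mid-scan */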
+ rcu_read_lock();
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- const struct Qdisc *q = txq->qdisc;
+ const struct Qdisc *q = rcu_dereference(txq->qdisc);
- if (q->q.qlen)
+ if (q->q.qlen) {
+ rcu_read_unlock();
return false;
+ }
}
+ rcu_read_unlock();
return true;
}
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
unsigned int i;
+
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- if (txq->qdisc != txq->qdisc_sleeping)
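+ /* the pointer is compared, never dereferenced: rcu_access_pointer() suffices */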
+ if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
return true;
}
return false;
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
unsigned int i;
+
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- if (txq->qdisc != &noop_qdisc)
+ if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
return false;
}
return true;
return (struct dev_kfree_skb_cb *)skb->cb;
}
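+/**
+ * netif_schedule_queue - schedule a transmit queue for servicing
+ * @txq: the transmit queue to kick
+ *
+ * Schedule @txq's qdisc for transmit unless the queue is currently
+ * stopped (an XOFF state bit is set).
+ */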
+void netif_schedule_queue(struct netdev_queue *txq)
+{
+ rcu_read_lock();
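+ /* a stopped (XOFF) queue must not be scheduled; it is kicked again on wake */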
+ if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
+ struct Qdisc *q = rcu_dereference(txq->qdisc);
+
+ __netif_schedule(q);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(netif_schedule_queue);
+
+/**
+ * netif_wake_subqueue - allow sending packets on subqueue
+ * @dev: network device
+ * @queue_index: sub queue index
+ *
+ * Resume an individual transmit queue of a device with multiple transmit queues.
+ */
+void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
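+ /* test_and_clear_bit() ensures only one waker reschedules the qdisc */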
+ if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
+ struct Qdisc *q;
+
+ rcu_read_lock();
+ q = rcu_dereference(txq->qdisc);
+ __netif_schedule(q);
+ rcu_read_unlock();
+ }
+}
+EXPORT_SYMBOL(netif_wake_subqueue);
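+
+/*
+ * Typical driver usage (a sketch: my_ring_full() and my_ring_has_room()
+ * are illustrative placeholders, not a real API). The xmit path stops a
+ * subqueue when its TX ring fills, and the TX completion path wakes it
+ * once descriptors are reclaimed:
+ *
+ *   if (my_ring_full(ring))
+ *       netif_stop_subqueue(dev, ring->index);
+ *   ...
+ *   if (my_ring_has_room(ring))
+ *       netif_wake_subqueue(dev, ring->index);
+ */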
+
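+/**
+ * netif_tx_wake_queue - restart transmit on a queue
+ * @dev_queue: the transmit queue to wake
+ *
+ * Clear the driver XOFF bit and, if the queue was stopped, reschedule
+ * its qdisc so queued packets are sent.
+ */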
+void netif_tx_wake_queue(struct netdev_queue *dev_queue)
+{
+ if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
+ struct Qdisc *q;
+
+ rcu_read_lock();
+ q = rcu_dereference(dev_queue->qdisc);
+ __netif_schedule(q);
+ rcu_read_unlock();
+ }
+}
+EXPORT_SYMBOL(netif_tx_wake_queue);
+
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
unsigned long flags;
skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
- q = rxq->qdisc;
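+ /* ing_filter() runs under rcu_read_lock() taken in the receive path */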
+ q = rcu_dereference(rxq->qdisc);
if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
{
struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
- if (!rxq || rxq->qdisc == &noop_qdisc)
+ if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
goto out;
if (*pt_prev) {
struct Qdisc *qdisc_default = _qdisc_default;
struct Qdisc *qdisc;
- qdisc = dev_queue->qdisc;
+ qdisc = rtnl_dereference(dev_queue->qdisc);
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
{
struct Qdisc *qdisc = _qdisc;
- dev_queue->qdisc = qdisc;
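+ /* publish the new qdisc; pairs with rcu_dereference() on the read side */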
+ rcu_assign_pointer(dev_queue->qdisc, qdisc);
dev_queue->qdisc_sleeping = qdisc;
}
memset(&sch->qstats, 0, sizeof(sch->qstats));
for (i = 0; i < dev->num_tx_queues; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
spin_lock_bh(qdisc_lock(qdisc));
sch->q.qlen += qdisc->q.qlen;
sch->bstats.bytes += qdisc->bstats.bytes;
spin_unlock_bh(d->lock);
for (i = tc.offset; i < tc.offset + tc.count; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ struct netdev_queue *q = netdev_get_tx_queue(dev, i);
+
+ qdisc = rtnl_dereference(q->qdisc);
spin_lock_bh(qdisc_lock(qdisc));
bstats.bytes += qdisc->bstats.bytes;
bstats.packets += qdisc->bstats.packets;
struct teql_sched_data *dat = qdisc_priv(sch);
struct netdev_queue *dat_queue;
struct sk_buff *skb;
+ struct Qdisc *q;
skb = __skb_dequeue(&dat->q);
dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
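+ /* dequeue is called with BH disabled, hence rcu_dereference_bh() */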
+ q = rcu_dereference_bh(dat_queue->qdisc);
+
if (skb == NULL) {
- struct net_device *m = qdisc_dev(dat_queue->qdisc);
+ struct net_device *m = qdisc_dev(q);
if (m) {
dat->m->slaves = sch;
netif_wake_queue(m);
} else {
qdisc_bstats_update(sch, skb);
}
- sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
+ sch->q.qlen = dat->q.qlen + q->q.qlen;
return skb;
}
txq = netdev_get_tx_queue(master->dev, 0);
master->slaves = NULL;
- root_lock = qdisc_root_sleeping_lock(txq->qdisc);
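+ /* qdisc ->destroy() runs with RTNL held, so rtnl_dereference() is safe */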
+ root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc));
spin_lock_bh(root_lock);
- qdisc_reset(txq->qdisc);
+ qdisc_reset(rtnl_dereference(txq->qdisc));
spin_unlock_bh(root_lock);
}
}
struct dst_entry *dst = skb_dst(skb);
int res;
- if (txq->qdisc == &noop_qdisc)
+ if (rcu_access_pointer(txq->qdisc) == &noop_qdisc)
return -ENODEV;
if (!dev->header_ops || !dst)