From: Eric Dumazet
Date: Wed, 12 Jan 2011 12:13:14 +0000 (+0000)
Subject: net: remove dev_txq_stats_fold()
X-Git-Tag: firefly_0821_release~7613^2~2412^2~186
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=1ac9ad1394fa542ac7ae0dc943ee3cda678799fa;p=firefly-linux-kernel-4.4.55.git

net: remove dev_txq_stats_fold()

After recent changes (percpu stats on vlan/tunnels...), we no longer need
the per struct netdev_queue tx_bytes/tx_packets/tx_dropped counters.

The only remaining users are ixgbe, sch_teql, gianfar and macvlan:

1) ixgbe can be converted to use its existing tx_ring counters.

2) macvlan incremented txq->tx_dropped; it can use the
   dev->stats.tx_dropped counter instead.

3) sch_teql: almost a revert of ab35cd4b8f42 ("Use net_device internal
   stats"). Now that we have ndo_get_stats64(), use it, even for
   "unsigned long" fields (no need to bring back a struct
   net_device_stats).

4) gianfar adds a stats structure per tx queue to hold
   tx_bytes/tx_packets.

This removes a lockdep warning (and possible lockup) in the rndis gadget,
which calls dev_get_stats() from hard IRQ context.

Ref: http://www.spinics.net/lists/netdev/msg149202.html
Reported-by: Neil Jones
Signed-off-by: Eric Dumazet
CC: Jarek Poplawski
CC: Alexander Duyck
CC: Jeff Kirsher
CC: Sandeep Gopalpet
CC: Michal Nazarewicz
Signed-off-by: David S. Miller
---
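A minimal sketch of the ndo_get_stats64 pattern the conversions below rely
on: fold per-queue software counters in the driver's own stats callback.
The driver name "foo" and its private structures are hypothetical and are
not part of this patch:

/* Hypothetical driver "foo": fold per-queue software counters into
 * rtnl_link_stats64 from ->ndo_get_stats64 instead of relying on
 * netdev_queue::tx_bytes/tx_packets.
 */
#include <linux/netdevice.h>

struct foo_txq_stats {
	unsigned long tx_packets;
	unsigned long tx_bytes;
};

struct foo_priv {
	unsigned int num_tx_queues;
	struct foo_txq_stats *txq_stats;	/* one entry per TX queue */
};

static struct rtnl_link_stats64 *foo_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct foo_priv *priv = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		stats->tx_packets += priv->txq_stats[i].tx_packets;
		stats->tx_bytes += priv->txq_stats[i].tx_bytes;
	}
	return stats;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_stats64 = foo_get_stats64,
	/* other callbacks omitted */
};

The teql and ixgbe hunks below follow the same shape; ixgbe additionally
wraps its 64-bit ring counters in u64_stats_sync so 32-bit readers get a
consistent snapshot.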
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 45c4b7bfcf39..f1d4b450e797 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -433,7 +433,6 @@ static void gfar_init_mac(struct net_device *ndev)
 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct netdev_queue *txq;
 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
 	unsigned long tx_packets = 0, tx_bytes = 0;
 	int i = 0;
@@ -449,9 +448,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 	dev->stats.rx_dropped = rx_dropped;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
-		txq = netdev_get_tx_queue(dev, i);
-		tx_bytes += txq->tx_bytes;
-		tx_packets += txq->tx_packets;
+		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+		tx_packets += priv->tx_queue[i]->stats.tx_packets;
 	}
 
 	dev->stats.tx_bytes = tx_bytes;
@@ -2108,8 +2106,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Update transmit stats */
-	txq->tx_bytes += skb->len;
-	txq->tx_packets ++;
+	tx_queue->stats.tx_bytes += skb->len;
+	tx_queue->stats.tx_packets++;
 
 	txbdp = txbdp_start = tx_queue->cur_tx;
 	lstatus = txbdp->lstatus;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 68984eb88ae0..54de4135e932 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -907,12 +907,21 @@ enum {
 	MQ_MG_MODE
 };
 
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+};
+
 /**
  *	struct gfar_priv_tx_q - per tx queue structure
  *	@txlock: per queue tx spin lock
  *	@tx_skbuff:skb pointers
  *	@skb_curtx: to be used skb pointer
  *	@skb_dirtytx:the last used skb pointer
+ *	@stats: bytes/packets stats
  *	@qindex: index of this queue
  *	@dev: back pointer to the dev structure
  *	@grp: back pointer to the group to which this queue belongs
@@ -934,6 +943,7 @@ struct gfar_priv_tx_q {
 	struct txbd8 *tx_bd_base;
 	struct txbd8 *cur_tx;
 	struct txbd8 *dirty_tx;
+	struct tx_q_stats stats;
 	struct net_device *dev;
 	struct gfar_priv_grp *grp;
 	u16 skb_curtx;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a060610a42db..602078b84892 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -6667,8 +6667,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
-	struct net_device *netdev = tx_ring->netdev;
-	struct netdev_queue *txq;
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
@@ -6765,9 +6763,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	/* add the ATR filter if ATR is on */
 	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
 		ixgbe_atr(tx_ring, skb, tx_flags, protocol);
-	txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
-	txq->tx_bytes += skb->len;
-	txq->tx_packets++;
 	ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
@@ -6925,8 +6920,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int i;
 
-	/* accurate rx/tx bytes/packets stats */
-	dev_txq_stats_fold(netdev, stats);
 	rcu_read_lock();
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
@@ -6943,6 +6936,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 			stats->rx_bytes += bytes;
 		}
 	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			stats->tx_packets += packets;
+			stats->tx_bytes += bytes;
+		}
+	}
 	rcu_read_unlock();
 	/* following stats updated by ixgbe_watchdog_task() */
 	stats->multicast = netdev->stats.multicast;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 21845affea13..5933621ac3ff 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -585,7 +585,7 @@ err:
 	rcu_read_lock_bh();
 	vlan = rcu_dereference(q->vlan);
 	if (vlan)
-		netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+		vlan->dev->stats.tx_dropped++;
 	rcu_read_unlock_bh();
 
 	return err;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index be4957cf6511..d971346b0340 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -520,9 +520,6 @@ struct netdev_queue {
 	 * please use this field instead of dev->trans_start
 	 */
 	unsigned long		trans_start;
-	u64			tx_bytes;
-	u64			tx_packets;
-	u64			tx_dropped;
 } ____cacheline_aligned_in_smp;
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -2265,8 +2262,6 @@ extern void dev_load(struct net *net, const char *name);
 extern void dev_mcast_init(void);
 extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 					       struct rtnl_link_stats64 *storage);
-extern void dev_txq_stats_fold(const struct net_device *dev,
-			       struct rtnl_link_stats64 *stats);
 
 extern int netdev_max_backlog;
 extern int netdev_tstamp_prequeue;
diff --git a/net/core/dev.c b/net/core/dev.c
index a3ef808b5e36..83507c265e48 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5523,34 +5523,6 @@ void netdev_run_todo(void)
 	}
 }
 
-/**
- * dev_txq_stats_fold - fold tx_queues stats
- * @dev: device to get statistics from
- * @stats: struct rtnl_link_stats64 to hold results
- */
-void dev_txq_stats_fold(const struct net_device *dev,
-			struct rtnl_link_stats64 *stats)
-{
-	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
-	unsigned int i;
-	struct netdev_queue *txq;
-
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		txq = netdev_get_tx_queue(dev, i);
-		spin_lock_bh(&txq->_xmit_lock);
-		tx_bytes += txq->tx_bytes;
-		tx_packets += txq->tx_packets;
-		tx_dropped += txq->tx_dropped;
-		spin_unlock_bh(&txq->_xmit_lock);
-	}
-	if (tx_bytes || tx_packets || tx_dropped) {
-		stats->tx_bytes = tx_bytes;
-		stats->tx_packets = tx_packets;
-		stats->tx_dropped = tx_dropped;
-	}
-}
-EXPORT_SYMBOL(dev_txq_stats_fold);
-
 /* Convert net_device_stats to rtnl_link_stats64. They have the same
  * fields in the same order, with only the type differing.
  */
@@ -5594,7 +5566,6 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
 	} else {
 		netdev_stats_to_stats64(storage, &dev->stats);
-		dev_txq_stats_fold(dev, storage);
 	}
 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
 	return storage;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index af9360d1f6eb..84ce48eadff4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -59,6 +59,10 @@ struct teql_master
 	struct net_device *dev;
 	struct Qdisc *slaves;
 	struct list_head master_list;
+	unsigned long tx_bytes;
+	unsigned long tx_packets;
+	unsigned long tx_errors;
+	unsigned long tx_dropped;
 };
 
 struct teql_sched_data
@@ -274,7 +278,6 @@ static inline int teql_resolve(struct sk_buff *skb,
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct teql_master *master = netdev_priv(dev);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	struct Qdisc *start, *q;
 	int busy;
 	int nores;
@@ -314,8 +317,8 @@ restart:
 				__netif_tx_unlock(slave_txq);
 				master->slaves = NEXT_SLAVE(q);
 				netif_wake_queue(dev);
-				txq->tx_packets++;
-				txq->tx_bytes += length;
+				master->tx_packets++;
+				master->tx_bytes += length;
 				return NETDEV_TX_OK;
 			}
 			__netif_tx_unlock(slave_txq);
@@ -342,10 +345,10 @@ restart:
 		netif_stop_queue(dev);
 		return NETDEV_TX_BUSY;
 	}
-	dev->stats.tx_errors++;
+	master->tx_errors++;
 
 drop:
-	txq->tx_dropped++;
+	master->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -398,6 +401,18 @@ static int teql_master_close(struct net_device *dev)
 	return 0;
 }
 
+static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+						     struct rtnl_link_stats64 *stats)
+{
+	struct teql_master *m = netdev_priv(dev);
+
+	stats->tx_packets = m->tx_packets;
+	stats->tx_bytes = m->tx_bytes;
+	stats->tx_errors = m->tx_errors;
+	stats->tx_dropped = m->tx_dropped;
+	return stats;
+}
+
 static int teql_master_mtu(struct net_device *dev, int new_mtu)
 {
 	struct teql_master *m = netdev_priv(dev);
@@ -422,6 +437,7 @@ static const struct net_device_ops teql_netdev_ops = {
 	.ndo_open	= teql_master_open,
 	.ndo_stop	= teql_master_close,
 	.ndo_start_xmit	= teql_master_xmit,
+	.ndo_get_stats64 = teql_master_stats64,
 	.ndo_change_mtu	= teql_master_mtu,
 };
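
The ixgbe hunk above reads its 64-bit ring counters through the
u64_stats_sync fetch/retry loop. For completeness, a sketch of the matching
writer side, with a hypothetical ring structure (ixgbe already maintains
these counters; this is not code from the patch):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct bar_ring {			/* hypothetical ring, mirroring the layout ixgbe uses */
	struct u64_stats_sync syncp;
	struct {
		u64 packets;
		u64 bytes;
	} stats;
};

/* Called from the TX completion path: update the 64-bit counters inside
 * a u64_stats_update_begin/end section so readers never see a torn value.
 */
static void bar_update_tx_stats(struct bar_ring *ring,
				unsigned int packets, unsigned int bytes)
{
	u64_stats_update_begin(&ring->syncp);
	ring->stats.packets += packets;
	ring->stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);
}

On 64-bit kernels the begin/end pair compiles away; on 32-bit SMP it
provides the seqcount that the fetch/retry loop in ixgbe_get_stats64()
depends on.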