/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

/* Driver-private headers providing struct bnad, BNAD_NAME, the BFI_*
 * constants and the bna_* accessors used below.
 */
#include "bna.h"
#include "bnad.h"

#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS 3
#define BNAD_NUM_RXQ_COUNTERS 6
#define BNAD_NUM_TXQ_COUNTERS 5
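
/*
 * BNAD_ETHTOOL_STATS_NUM counts only the fixed prefix of the ethtool
 * stats buffer: the generic rtnl_link_stats64 counters, the
 * driver-private bnad_drv_stats counters, and the hardware counters
 * that precede the per-function rxf/txf blocks in struct
 * bfi_enet_stats.  The per-TXF/RXF/CQ/RXQ/TXQ counters are appended
 * dynamically; see bnad_get_stats_count_locked() below.
 */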
#define BNAD_ETHTOOL_STATS_NUM						\
	(sizeof(struct rtnl_link_stats64) / sizeof(u64) +		\
	sizeof(struct bnad_drv_stats) / sizeof(u64) +			\
	offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))

static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
	"tx_heartbeat_errors",
	"netif_queue_stopped",
	"rxp_info_alloc_failed",
	"tx_unmap_q_alloc_failed",
	"rx_unmap_q_alloc_failed",
	"mac_frame_1024_1518",
	"mac_frame_1518_1522",
	"mac_rx_control_frames",
	"mac_rx_unknown_opcode",
	"mac_rx_alignment_error",
	"mac_rx_frame_length_error",
	"mac_rx_carrier_sense_error",
	"mac_tx_excessive_deferral",
	"mac_tx_single_collision",
127 "mac_tx_muliple_collision",
128 "mac_tx_late_collision",
129 "mac_tx_excessive_collision",
130 "mac_tx_total_collision",
131 "mac_tx_pause_honored",
135 "mac_tx_control_frame",
148 "bpc_tx_zero_pause_0",
149 "bpc_tx_zero_pause_1",
150 "bpc_tx_zero_pause_2",
151 "bpc_tx_zero_pause_3",
152 "bpc_tx_zero_pause_4",
153 "bpc_tx_zero_pause_5",
154 "bpc_tx_zero_pause_6",
155 "bpc_tx_zero_pause_7",
156 "bpc_tx_first_pause_0",
157 "bpc_tx_first_pause_1",
158 "bpc_tx_first_pause_2",
159 "bpc_tx_first_pause_3",
160 "bpc_tx_first_pause_4",
161 "bpc_tx_first_pause_5",
162 "bpc_tx_first_pause_6",
163 "bpc_tx_first_pause_7",
173 "bpc_rx_zero_pause_0",
174 "bpc_rx_zero_pause_1",
175 "bpc_rx_zero_pause_2",
176 "bpc_rx_zero_pause_3",
177 "bpc_rx_zero_pause_4",
178 "bpc_rx_zero_pause_5",
179 "bpc_rx_zero_pause_6",
180 "bpc_rx_zero_pause_7",
181 "bpc_rx_first_pause_0",
182 "bpc_rx_first_pause_1",
183 "bpc_rx_first_pause_2",
184 "bpc_rx_first_pause_3",
185 "bpc_rx_first_pause_4",
186 "bpc_rx_first_pause_5",
187 "bpc_rx_first_pause_6",
188 "bpc_rx_first_pause_7",
192 "rad_rx_vlan_frames",
194 "rad_rx_ucast_octets",
197 "rad_rx_mcast_octets",
200 "rad_rx_bcast_octets",
204 "fc_rx_ucast_octets",
207 "fc_rx_mcast_octets",
210 "fc_rx_bcast_octets",
214 "fc_tx_ucast_octets",
217 "fc_tx_mcast_octets",
220 "fc_tx_bcast_octets",
223 "fc_tx_parity_errors",
225 "fc_tx_fid_parity_errors",
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported |= SUPPORTED_FIBRE;
	cmd->advertising |= ADVERTISED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(cmd, SPEED_10000);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	cmd->transceiver = XCVR_EXTERNAL;

	return 0;
}
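
/*
 * Only the fixed 10 Gb/s full-duplex setting is accepted; any other
 * request, including enabling autonegotiation, fails with -EOPNOTSUPP.
 */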
static int
bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	/* 10G full duplex setting supported only */
	if (cmd->autoneg == AUTONEG_ENABLE)
		return -EOPNOTSUPP;

	if (ethtool_cmd_speed(cmd) == SPEED_10000 &&
	    cmd->duplex == DUPLEX_FULL)
		return 0;

	return -EOPNOTSUPP;
}
static void
bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bfa_ioc_attr *ioc_attr;
	unsigned long flags;

	strcpy(drvinfo->driver, BNAD_NAME);
	strcpy(drvinfo->version, BNAD_VERSION);

	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (ioc_attr) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
			sizeof(drvinfo->fw_version) - 1);
		kfree(ioc_attr);
	}

	strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
}
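
/* Wake-on-LAN is not supported; report no WoL capabilities so that
 * "ethtool <ifname>" shows "Supports Wake-on: d".
 */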
static void
bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
{
	wolinfo->supported = 0;
	wolinfo->wolopts = 0;
}

static int
bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	/* Lock required to access bnad->bna_lock-protected state */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	coalesce->use_adaptive_rx_coalesce =
		(bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
				      BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
				      BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;

	return 0;
}
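
/*
 * Example usage (the interface name "eth0" is illustrative):
 *
 *   ethtool -C eth0 adaptive-rx on		(enable DIM)
 *   ethtool -C eth0 adaptive-rx off rx-usecs 60 tx-usecs 60
 *
 * Requested usec values are converted by integer division into
 * BFI_COALESCING_TIMER_UNIT ticks, so they are effectively rounded
 * down to a multiple of the timer unit.
 */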
static int
bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;
	int dim_timer_del = 0;

	if (coalesce->rx_coalesce_usecs == 0 ||
	    coalesce->rx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	if (coalesce->tx_coalesce_usecs == 0 ||
	    coalesce->tx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	/*
	 * Do not need to store rx_coalesce_usecs here
	 * Every time DIM is disabled, we can get it from the
	 * stack.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (coalesce->use_adaptive_rx_coalesce) {
		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
			bnad_dim_timer_start(bnad);
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
			dim_timer_del = bnad_dim_timer_running(bnad);
			if (dim_timer_del) {
				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
					  &bnad->run_flags);
				spin_unlock_irqrestore(&bnad->bna_lock, flags);
				del_timer_sync(&bnad->dim_timer);
				spin_lock_irqsave(&bnad->bna_lock, flags);
			}
			bnad_rx_coalescing_timeo_set(bnad);
		}
	}
	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;
		bnad_tx_coalescing_timeo_set(bnad);
	}

	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;

		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
			bnad_rx_coalescing_timeo_set(bnad);
	}

	/* Add Tx Inter-pkt DMA count? */

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return 0;
}
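
/*
 * Ring sizes are reported in descriptors.  The Rx maximum is expressed
 * per queue, hence the division by bnad_rxqs_per_cq; mini and jumbo
 * rings do not exist on this hardware.
 */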
static void
bnad_get_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
	ringparam->rx_mini_max_pending = 0;
	ringparam->rx_jumbo_max_pending = 0;
	ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;

	ringparam->rx_pending = bnad->rxq_depth;
	ringparam->rx_mini_pending = 0;
	ringparam->rx_jumbo_pending = 0;
	ringparam->tx_pending = bnad->txq_depth;
}
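
/*
 * New depths must be powers of two within [BNAD_MIN_Q_DEPTH,
 * BNAD_MAX_Q_DEPTH] (Rx capped further by bnad_rxqs_per_cq).  Changing
 * a depth tears down and re-creates the affected Tx/Rx objects, so
 * traffic is briefly disrupted, e.g.:
 *
 *   ethtool -G eth0 rx <N> tx <N>
 */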
static int
bnad_set_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	int i, current_err, err = 0;
	struct bnad *bnad = netdev_priv(netdev);

	mutex_lock(&bnad->conf_mutex);
	if (ringparam->rx_pending == bnad->rxq_depth &&
	    ringparam->tx_pending == bnad->txq_depth) {
		mutex_unlock(&bnad->conf_mutex);
		return 0;
	}

	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
	    !BNA_POWER_OF_2(ringparam->rx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}
	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->tx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}

	if (ringparam->rx_pending != bnad->rxq_depth) {
		bnad->rxq_depth = ringparam->rx_pending;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			bnad_cleanup_rx(bnad, i);
			current_err = bnad_setup_rx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}
	if (ringparam->tx_pending != bnad->txq_depth) {
		bnad->txq_depth = ringparam->tx_pending;
		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			bnad_cleanup_tx(bnad, i);
			current_err = bnad_setup_tx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}

	mutex_unlock(&bnad->conf_mutex);
	return err;
}

static void
bnad_get_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	pauseparam->autoneg = 0;
	pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
	pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
}
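
/*
 * Pause autonegotiation is not supported; Rx/Tx pause settings are
 * programmed directly into the enet object when they change, e.g.:
 *
 *   ethtool -A eth0 rx on tx on
 */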
static int
bnad_set_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	unsigned long flags;

	if (pauseparam->autoneg == AUTONEG_ENABLE)
		return -EOPNOTSUPP;

	mutex_lock(&bnad->conf_mutex);
	if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
	    pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
		pause_config.rx_pause = pauseparam->rx_pause;
		pause_config.tx_pause = pauseparam->tx_pause;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
	mutex_unlock(&bnad->conf_mutex);
	return 0;
}
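
/*
 * The order of strings emitted here must match both the counter layout
 * produced by bnad_get_ethtool_stats() and the total computed by
 * bnad_get_stats_count_locked(); ethtool pairs names and values purely
 * by position.
 */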
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, q_num;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
			BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
				 ETH_GSTRING_LEN));
			memcpy(string, bnad_net_stats_strings[i],
			       ETH_GSTRING_LEN);
			string += ETH_GSTRING_LEN;
		}
		bmap = bna_tx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "txf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_errors", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_mac_sa", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}
		bmap = bna_rx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "rxf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_frame_drops", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}
		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "cq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_hw_producer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}
		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "rxq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_packets_with_error",
					q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_allocbuf_failed", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
				if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1]->rxq) {
					sprintf(string, "rxq%d_packets", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_bytes", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string,
						"rxq%d_packets_with_error",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_allocbuf_failed",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_producer_index",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_consumer_index",
						q_num);
					string += ETH_GSTRING_LEN;
					q_num++;
				}
			}
		}
		q_num = 0;
		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			for (j = 0; j < bnad->num_txq_per_tx; j++) {
				sprintf(string, "txq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_hw_consumer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}
		break;

	default:
		break;
	}

	mutex_unlock(&bnad->conf_mutex);
}
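
/*
 * Counts the stats the strings above describe: the fixed prefix plus
 * one counter block per active TxF/RxF (each set bit in the rid mask)
 * and per configured CQ, RXQ and TXQ.  The "_locked" suffix refers to
 * the locking the ethtool core already provides (handlers are invoked
 * under RTNL), presumably why no extra lock is taken here.
 */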
static int
bnad_get_stats_count_locked(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, count, rxf_active_num = 0, txf_active_num = 0;
	u32 bmap;

	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			txf_active_num++;
		bmap >>= 1;
	}
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			rxf_active_num++;
		bmap >>= 1;
	}
	count = BNAD_ETHTOOL_STATS_NUM +
		txf_active_num * BNAD_NUM_TXF_COUNTERS +
		rxf_active_num * BNAD_NUM_RXF_COUNTERS;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
		count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
				count += BNAD_NUM_RXQ_COUNTERS;
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
	}
	return count;
}
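
/*
 * Appends the per-queue counters to buf starting at index bi, in the
 * same CQ -> RXQ -> TXQ order used by bnad_get_strings(), and returns
 * the updated index.  The CQ consumer index is not tracked by the
 * driver and is reported as 0.
 */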
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
	int i, j;
	struct bna_rcb *rcb = NULL;
	struct bna_tcb *tcb = NULL;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
						ccb->producer_index;
				buf[bi++] = 0; /* ccb->consumer_index */
				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
						ccb->hw_producer_index);
			}
	}
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[0]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[0];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[1];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
			}
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			if (bnad->tx_info[i].tcb[j] &&
			    bnad->tx_info[i].tcb[j]->txq) {
				tcb = bnad->tx_info[i].tcb[j];
				buf[bi++] = tcb->txq->tx_packets;
				buf[bi++] = tcb->txq->tx_bytes;
				buf[bi++] = tcb->producer_index;
				buf[bi++] = tcb->consumer_index;
				buf[bi++] = *(tcb->hw_consumer_index);
			}
	}
	return bi;
}
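
/*
 * If the ring configuration changed between the ethtool core's
 * get_sset_count() call and this one, the buffer size no longer
 * matches; bail out rather than over- or under-fill it.
 */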
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi;
	unsigned long flags;
	struct rtnl_link_stats64 *net_stats64;
	u64 *stats64;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/*
	 * Use bna_lock to sync reads from bna_stats, which is written
	 * under the same lock.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bi = 0;
	memset(buf, 0, stats->n_stats * sizeof(u64));

	net_stats64 = (struct rtnl_link_stats64 *)buf;
	bnad_netdev_qstats_fill(bnad, net_stats64);
	bnad_netdev_hwstats_fill(bnad, net_stats64);

	bi = sizeof(*net_stats64) / sizeof(u64);

	/* Get netif_queue_stopped from stack */
	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

	/* Fill driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
	stats64 = (u64 *)&bnad->stats.bna_stats->hw_stats;
	for (i = 0;
	     i < offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64);
	     i++)
		buf[bi++] = stats64[i];

	/* Fill txf stats into ethtool buffers */
	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill rxf stats into ethtool buffers */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill per Q stats into ethtool buffers */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
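
/* Number of strings/stats for ETH_SS_STATS; this sizes the buffer that
 * "ethtool -S <ifname>" retrieves.
 */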
static int
bnad_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return bnad_get_stats_count_locked(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops bnad_ethtool_ops = {
	.get_settings = bnad_get_settings,
	.set_settings = bnad_set_settings,
	.get_drvinfo = bnad_get_drvinfo,
	.get_wol = bnad_get_wol,
	.get_link = ethtool_op_get_link,
	.get_coalesce = bnad_get_coalesce,
	.set_coalesce = bnad_set_coalesce,
	.get_ringparam = bnad_get_ringparam,
	.set_ringparam = bnad_set_ringparam,
	.get_pauseparam = bnad_get_pauseparam,
	.set_pauseparam = bnad_set_pauseparam,
	.get_strings = bnad_get_strings,
	.get_ethtool_stats = bnad_get_ethtool_stats,
	.get_sset_count = bnad_get_sset_count,
};
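
/*
 * Hooks the ops table above into the net_device; called from the
 * driver's netdev initialization path.
 */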
void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
}