2 * Copyright (C) 2005 - 2013 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
20 #include <linux/ethtool.h>
/* Describes one exported ethtool statistic: the display string plus the
 * category/size/offset needed to fetch its value from the matching stats
 * structure at runtime.
 */
22 struct be_ethtool_stat {
23 char desc[ETH_GSTRING_LEN];
/* Stat categories: per-TX-queue, per-RX-queue, and adapter-wide driver stats */
29 enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
/* Expands to "size, offset" of @field within @_struct */
30 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 offsetof(_struct, field)
/* Initializer helpers: stringified stat name, category, and field info */
32 #define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
33 FIELDINFO(struct be_tx_stats, field)
34 #define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
35 FIELDINFO(struct be_rx_stats, field)
36 #define DRVSTAT_INFO(field) #field, DRVSTAT,\
37 FIELDINFO(struct be_drv_stats, field)
/* Adapter-wide statistics sourced from struct be_drv_stats */
39 static const struct be_ethtool_stat et_stats[] = {
40 {DRVSTAT_INFO(rx_crc_errors)},
41 {DRVSTAT_INFO(rx_alignment_symbol_errors)},
42 {DRVSTAT_INFO(rx_pause_frames)},
43 {DRVSTAT_INFO(rx_control_frames)},
44 /* Received packets dropped when the Ethernet length field
45 * is not equal to the actual Ethernet data length.
47 {DRVSTAT_INFO(rx_in_range_errors)},
48 /* Received packets dropped when their length field is >= 1501 bytes
51 {DRVSTAT_INFO(rx_out_range_errors)},
52 /* Received packets dropped when they are longer than 9216 bytes */
53 {DRVSTAT_INFO(rx_frame_too_long)},
54 /* Received packets dropped when they don't pass the unicast or
55 * multicast address filtering.
57 {DRVSTAT_INFO(rx_address_filtered)},
58 /* Received packets dropped when IP packet length field is less than
59 * the IP header length field.
61 {DRVSTAT_INFO(rx_dropped_too_small)},
62 /* Received packets dropped when IP length field is greater than
63 * the actual packet length.
65 {DRVSTAT_INFO(rx_dropped_too_short)},
66 /* Received packets dropped when the IP header length field is less
69 {DRVSTAT_INFO(rx_dropped_header_too_small)},
70 /* Received packets dropped when the TCP header length field is less
71 * than 5 or the TCP header length + IP header length is more
72 * than IP packet length.
74 {DRVSTAT_INFO(rx_dropped_tcp_length)},
75 {DRVSTAT_INFO(rx_dropped_runt)},
76 /* Number of received packets dropped when a fifo for descriptors going
77 * into the packet demux block overflows. In normal operation, this
78 * fifo must never overflow.
80 {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
81 {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
82 {DRVSTAT_INFO(rx_ip_checksum_errs)},
83 {DRVSTAT_INFO(rx_tcp_checksum_errs)},
84 {DRVSTAT_INFO(rx_udp_checksum_errs)},
85 {DRVSTAT_INFO(tx_pauseframes)},
86 {DRVSTAT_INFO(tx_controlframes)},
87 {DRVSTAT_INFO(rx_priority_pause_frames)},
88 {DRVSTAT_INFO(tx_priority_pauseframes)},
89 /* Received packets dropped when an internal fifo going into
90 * main packet buffer tank (PMEM) overflows.
92 {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
93 {DRVSTAT_INFO(jabber_events)},
94 /* Received packets dropped due to lack of available HW packet buffers
95 * used to temporarily hold the received packets.
97 {DRVSTAT_INFO(rx_drops_no_pbuf)},
98 /* Received packets dropped due to input receive buffer
99 * descriptor fifo overflowing.
101 {DRVSTAT_INFO(rx_drops_no_erx_descr)},
102 /* Packets dropped because the internal FIFO to the offloaded TCP
103 * receive processing block is full. This could happen only for
104 * offloaded iSCSI or FCoE traffic.
106 {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
107 /* Received packets dropped when they need more than 8
108 * receive buffers. This cannot happen as the driver configures
109 * 2048 byte receive buffers.
111 {DRVSTAT_INFO(rx_drops_too_many_frags)},
112 {DRVSTAT_INFO(forwarded_packets)},
113 /* Received packets dropped when the frame length
114 * is more than 9018 bytes
116 {DRVSTAT_INFO(rx_drops_mtu)},
117 /* Number of packets dropped due to random early drop function */
118 {DRVSTAT_INFO(eth_red_drops)},
119 {DRVSTAT_INFO(be_on_die_temperature)}
121 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
123 /* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
124 * are first and second members respectively.
126 static const struct be_ethtool_stat et_rx_stats[] = {
127 {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
128 {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
129 {DRVSTAT_RX_INFO(rx_compl)},
130 {DRVSTAT_RX_INFO(rx_mcast_pkts)},
131 /* Number of page allocation failures while posting receive buffers
134 {DRVSTAT_RX_INFO(rx_post_fail)},
135 /* Received packets dropped due to skb allocation failure */
136 {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
137 /* Received packets dropped due to lack of available fetched buffers
138 * posted by the driver.
140 {DRVSTAT_RX_INFO(rx_drops_no_frags)}
142 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
144 /* Stats related to multi TX queues: get_stats routine assumes compl is the
147 static const struct be_ethtool_stat et_tx_stats[] = {
148 {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
149 {DRVSTAT_TX_INFO(tx_bytes)},
150 {DRVSTAT_TX_INFO(tx_pkts)},
151 /* Number of skbs queued for transmission by the driver */
152 {DRVSTAT_TX_INFO(tx_reqs)},
153 /* Number of TX work request blocks DMAed to HW */
154 {DRVSTAT_TX_INFO(tx_wrbs)},
155 /* Number of times the TX queue was stopped due to lack
156 * of spaces in the TXQ.
158 {DRVSTAT_TX_INFO(tx_stops)},
159 /* Pkts dropped in the driver's transmit path */
160 {DRVSTAT_TX_INFO(tx_drv_drops)}
162 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
/* Names reported for ethtool self-test results (ETH_SS_TEST string set) */
164 static const char et_self_tests[][ETH_GSTRING_LEN] = {
167 "External Loopback test",
172 #define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
/* Loopback modes passed to firmware for the self tests */
173 #define BE_MAC_LOOPBACK 0x0
174 #define BE_PHY_LOOPBACK 0x1
175 #define BE_ONE_PORT_EXT_LOOPBACK 0x2
176 #define BE_NO_LOOPBACK 0xff
/* ethtool get_drvinfo: report driver name/version, firmware version and
 * PCI bus info. When the running FW version differs from the one on flash,
 * both are reported as "running [on-flash]".
 */
178 static void be_get_drvinfo(struct net_device *netdev,
179 struct ethtool_drvinfo *drvinfo)
181 struct be_adapter *adapter = netdev_priv(netdev);
183 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
184 strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
/* Running FW matches the on-flash image: report a single version string */
185 if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
186 strlcpy(drvinfo->fw_version, adapter->fw_ver,
187 sizeof(drvinfo->fw_version));
189 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
190 "%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
192 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
193 sizeof(drvinfo->bus_info));
194 drvinfo->testinfo_len = 0;
195 drvinfo->regdump_len = 0;
196 drvinfo->eedump_len = 0;
/* Query the length of @file_name stored on a Lancer chip: a read-object
 * command with data_offset and data_size both 0 returns only the length.
 */
200 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
202 u32 data_read = 0, eof;
204 struct be_dma_mem data_len_cmd;
207 memset(&data_len_cmd, 0, sizeof(data_len_cmd));
208 /* data_offset and data_size should be 0 to get reg len */
209 status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
210 file_name, &data_read, &eof, &addn_status);
/* Read up to @buf_len bytes of @file_name from a Lancer chip into @buf,
 * using a single DMA-coherent bounce buffer and issuing read-object
 * commands of at most LANCER_READ_FILE_CHUNK bytes until EOF or @buf_len.
 */
216 lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
217 u32 buf_len, void *buf)
219 struct be_dma_mem read_cmd;
220 u32 read_len = 0, total_read_len = 0, chunk_size;
225 read_cmd.size = LANCER_READ_FILE_CHUNK;
226 read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
230 dev_err(&adapter->pdev->dev,
231 "Memory allocation failure while reading dump\n");
235 while ((total_read_len < buf_len) && !eof) {
236 chunk_size = min_t(u32, (buf_len - total_read_len),
237 LANCER_READ_FILE_CHUNK);
/* Read-object requires 4-byte-aligned transfer sizes */
238 chunk_size = ALIGN(chunk_size, 4);
239 status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
240 total_read_len, file_name, &read_len,
243 memcpy(buf + total_read_len, read_cmd.va, read_len);
244 total_read_len += read_len;
245 eof &= LANCER_READ_FILE_EOF_MASK;
251 pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
/* ethtool get_regs_len: register-dump size. Requires full privileges;
 * on Lancer the size is the FW dump file length, otherwise it is queried
 * from firmware via be_cmd_get_reg_len().
 */
258 be_get_reg_len(struct net_device *netdev)
260 struct be_adapter *adapter = netdev_priv(netdev);
263 if (!check_privilege(adapter, MAX_PRIVILEGES))
266 if (be_physfn(adapter)) {
267 if (lancer_chip(adapter))
268 log_size = lancer_cmd_get_file_len(adapter,
269 LANCER_FW_DUMP_FILE);
271 be_cmd_get_reg_len(adapter, &log_size);
/* ethtool get_regs: fill @buf with the register dump (PF only). On Lancer
 * the dump is read from the FW dump file; otherwise fetched via mailbox.
 */
277 be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
279 struct be_adapter *adapter = netdev_priv(netdev);
281 if (be_physfn(adapter)) {
282 memset(buf, 0, regs->len);
283 if (lancer_chip(adapter))
284 lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
287 be_cmd_get_regs(adapter, regs->len, buf);
/* ethtool get_coalesce: report interrupt-coalescing settings. Only the
 * first AIC object is consulted — all event queues share one config,
 * and the same values are reported for both RX and TX fields.
 */
291 static int be_get_coalesce(struct net_device *netdev,
292 struct ethtool_coalesce *et)
294 struct be_adapter *adapter = netdev_priv(netdev);
295 struct be_aic_obj *aic = &adapter->aic_obj[0];
298 et->rx_coalesce_usecs = aic->prev_eqd;
299 et->rx_coalesce_usecs_high = aic->max_eqd;
300 et->rx_coalesce_usecs_low = aic->min_eqd;
302 et->tx_coalesce_usecs = aic->prev_eqd;
303 et->tx_coalesce_usecs_high = aic->max_eqd;
304 et->tx_coalesce_usecs_low = aic->min_eqd;
306 et->use_adaptive_rx_coalesce = aic->enable;
307 et->use_adaptive_tx_coalesce = aic->enable;
312 /* TX attributes are ignored. Only RX attributes are considered
313 * eqd cmd is issued in the worker thread.
315 static int be_set_coalesce(struct net_device *netdev,
316 struct ethtool_coalesce *et)
318 struct be_adapter *adapter = netdev_priv(netdev);
319 struct be_aic_obj *aic = &adapter->aic_obj[0];
320 struct be_eq_obj *eqo;
/* Apply the same RX coalescing parameters to every event queue's AIC
 * object; values are clamped to BE_MAX_EQD and kept internally
 * consistent (min <= eqd <= max).
 */
323 for_all_evt_queues(adapter, eqo, i) {
324 aic->enable = et->use_adaptive_rx_coalesce;
325 aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
326 aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
327 aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
328 aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
/* ethtool get_ethtool_stats: copy driver-wide stats, then per-RX-queue and
 * per-TX-queue stats into @data, in the same order as the string tables.
 * u64_stats sync sections guard the 64-bit counters against torn reads.
 */
336 be_get_ethtool_stats(struct net_device *netdev,
337 struct ethtool_stats *stats, uint64_t *data)
339 struct be_adapter *adapter = netdev_priv(netdev);
340 struct be_rx_obj *rxo;
341 struct be_tx_obj *txo;
343 unsigned int i, j, base = 0, start;
/* Adapter-wide stats from struct be_drv_stats, located via et_stats[] */
345 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
346 p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
349 base += ETHTOOL_STATS_NUM;
351 for_all_rx_queues(adapter, rxo, j) {
352 struct be_rx_stats *stats = rx_stats(rxo);
/* rx_bytes/rx_pkts must be the first two members — see et_rx_stats */
355 start = u64_stats_fetch_begin_bh(&stats->sync);
356 data[base] = stats->rx_bytes;
357 data[base + 1] = stats->rx_pkts;
358 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
/* Remaining per-RX-queue stats are read as u32 via table offsets */
360 for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
361 p = (u8 *)stats + et_rx_stats[i].offset;
362 data[base + i] = *(u32 *)p;
364 base += ETHTOOL_RXSTATS_NUM;
367 for_all_tx_queues(adapter, txo, j) {
368 struct be_tx_stats *stats = tx_stats(txo);
/* tx_compl uses its own sync object and must be the first member */
371 start = u64_stats_fetch_begin_bh(&stats->sync_compl);
372 data[base] = stats->tx_compl;
373 } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
376 start = u64_stats_fetch_begin_bh(&stats->sync);
377 for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
378 p = (u8 *)stats + et_tx_stats[i].offset;
/* TX stats may be u64 or u32; the table records each field's size */
380 (et_tx_stats[i].size == sizeof(u64)) ?
381 *(u64 *)p : *(u32 *)p;
383 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
384 base += ETHTOOL_TXSTATS_NUM;
/* ethtool get_strings: emit stat names (driver-wide, then per-RX-queue,
 * then per-TX-queue) or self-test names, matching get_ethtool_stats /
 * self_test ordering exactly.
 */
389 be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
392 struct be_adapter *adapter = netdev_priv(netdev);
397 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
398 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
399 data += ETH_GSTRING_LEN;
/* Per-queue stat names are prefixed with their queue index */
401 for (i = 0; i < adapter->num_rx_qs; i++) {
402 for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
403 sprintf(data, "rxq%d: %s", i,
404 et_rx_stats[j].desc);
405 data += ETH_GSTRING_LEN;
408 for (i = 0; i < adapter->num_tx_qs; i++) {
409 for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
410 sprintf(data, "txq%d: %s", i,
411 et_tx_stats[j].desc);
412 data += ETH_GSTRING_LEN;
417 for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
418 memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
419 data += ETH_GSTRING_LEN;
/* ethtool get_sset_count: number of strings in the requested string set.
 * The stats count must match what be_get_stat_strings() emits.
 */
425 static int be_get_sset_count(struct net_device *netdev, int stringset)
427 struct be_adapter *adapter = netdev_priv(netdev);
431 return ETHTOOL_TESTS_NUM;
433 return ETHTOOL_STATS_NUM +
434 adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
435 adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
/* Map a firmware PHY interface type to an ethtool PORT_* value.
 * For SFP+ a non-zero DAC cable length indicates a direct-attach copper
 * (PORT_DA) link rather than fibre.
 */
441 static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
446 case PHY_TYPE_BASET_1GB:
447 case PHY_TYPE_BASEX_1GB:
451 case PHY_TYPE_SFP_PLUS_10GB:
452 port = dac_cable_len ? PORT_DA : PORT_FIBRE;
454 case PHY_TYPE_XFP_10GB:
455 case PHY_TYPE_SFP_1GB:
458 case PHY_TYPE_BASET_10GB:
/* Convert a firmware PHY interface type plus a bitmask of firmware speed
 * capabilities (BE_SUPPORTED_SPEED_*) into an ethtool SUPPORTED_* bitmask.
 */
468 static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
473 case PHY_TYPE_BASET_1GB:
474 case PHY_TYPE_BASEX_1GB:
477 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
478 val |= SUPPORTED_1000baseT_Full;
479 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
480 val |= SUPPORTED_100baseT_Full;
481 if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
482 val |= SUPPORTED_10baseT_Full;
/* Backplane PHYs (KX4/KR) */
484 case PHY_TYPE_KX4_10GB:
485 val |= SUPPORTED_Backplane;
486 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
487 val |= SUPPORTED_1000baseKX_Full;
488 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
489 val |= SUPPORTED_10000baseKX4_Full;
491 case PHY_TYPE_KR_10GB:
492 val |= SUPPORTED_Backplane |
493 SUPPORTED_10000baseKR_Full;
/* Pluggable optics / direct-attach */
495 case PHY_TYPE_SFP_PLUS_10GB:
496 case PHY_TYPE_XFP_10GB:
497 case PHY_TYPE_SFP_1GB:
498 val |= SUPPORTED_FIBRE;
499 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
500 val |= SUPPORTED_10000baseT_Full;
501 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
502 val |= SUPPORTED_1000baseT_Full;
504 case PHY_TYPE_BASET_10GB:
506 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
507 val |= SUPPORTED_10000baseT_Full;
508 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
509 val |= SUPPORTED_1000baseT_Full;
510 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
511 val |= SUPPORTED_100baseT_Full;
/* Pause-frame advertisement is supported only on SFP+/XFP interfaces */
520 bool be_pause_supported(struct be_adapter *adapter)
522 return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
523 adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
/* ethtool get_settings: report link speed/duplex and PHY capabilities.
 * PHY info is queried from firmware only once (when phy.link_speed < 0);
 * the results are cached in adapter->phy and replayed on later calls.
 */
527 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
529 struct be_adapter *adapter = netdev_priv(netdev);
/* First call: query link status and PHY capabilities from firmware */
538 if (adapter->phy.link_speed < 0) {
539 status = be_cmd_link_status_query(adapter, &link_speed,
542 be_link_status_update(adapter, link_status);
543 ethtool_cmd_speed_set(ecmd, link_speed);
545 status = be_cmd_get_phy_info(adapter);
547 interface_type = adapter->phy.interface_type;
548 auto_speeds = adapter->phy.auto_speeds_supported;
549 fixed_speeds = adapter->phy.fixed_speeds_supported;
550 dac_cable_len = adapter->phy.dac_cable_len;
553 convert_to_et_setting(interface_type,
557 convert_to_et_setting(interface_type,
560 ecmd->port = be_get_port_type(interface_type,
/* Autoneg is advertised only if FW reports auto-negotiable speeds */
563 if (adapter->phy.auto_speeds_supported) {
564 ecmd->supported |= SUPPORTED_Autoneg;
565 ecmd->autoneg = AUTONEG_ENABLE;
566 ecmd->advertising |= ADVERTISED_Autoneg;
569 ecmd->supported |= SUPPORTED_Pause;
570 if (be_pause_supported(adapter))
571 ecmd->advertising |= ADVERTISED_Pause;
573 switch (adapter->phy.interface_type) {
574 case PHY_TYPE_KR_10GB:
575 case PHY_TYPE_KX4_10GB:
576 ecmd->transceiver = XCVR_INTERNAL;
579 ecmd->transceiver = XCVR_EXTERNAL;
/* PHY query failed: report a generic, fixed-speed dummy transceiver */
583 ecmd->port = PORT_OTHER;
584 ecmd->autoneg = AUTONEG_DISABLE;
585 ecmd->transceiver = XCVR_DUMMY1;
588 /* Save for future use */
589 adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
590 adapter->phy.port_type = ecmd->port;
591 adapter->phy.transceiver = ecmd->transceiver;
592 adapter->phy.autoneg = ecmd->autoneg;
593 adapter->phy.advertising = ecmd->advertising;
594 adapter->phy.supported = ecmd->supported;
/* Subsequent calls: replay the cached PHY information */
596 ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
597 ecmd->port = adapter->phy.port_type;
598 ecmd->transceiver = adapter->phy.transceiver;
599 ecmd->autoneg = adapter->phy.autoneg;
600 ecmd->advertising = adapter->phy.advertising;
601 ecmd->supported = adapter->phy.supported;
604 ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
605 ecmd->phy_address = adapter->port_num;
/* ethtool get_ringparam: ring sizes are fixed at init time, so current
 * and maximum pending counts are reported as the same value.
 */
610 static void be_get_ringparam(struct net_device *netdev,
611 struct ethtool_ringparam *ring)
613 struct be_adapter *adapter = netdev_priv(netdev);
615 ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
616 ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
/* ethtool get_pauseparam: query current flow-control state from firmware */
620 be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
622 struct be_adapter *adapter = netdev_priv(netdev);
624 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
625 ecmd->autoneg = adapter->phy.fc_autoneg;
/* ethtool set_pauseparam: program TX/RX flow control via firmware.
 * Changing flow-control autoneg is not supported.
 */
629 be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
631 struct be_adapter *adapter = netdev_priv(netdev);
634 if (ecmd->autoneg != adapter->phy.fc_autoneg)
636 adapter->tx_fc = ecmd->tx_pause;
637 adapter->rx_fc = ecmd->rx_pause;
639 status = be_cmd_set_flow_control(adapter,
640 adapter->tx_fc, adapter->rx_fc);
642 dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
/* ethtool set_phys_id: blink the port beacon LED for identification.
 * The original beacon state is saved on ACTIVE and restored on INACTIVE.
 */
648 be_set_phys_id(struct net_device *netdev,
649 enum ethtool_phys_id_state state)
651 struct be_adapter *adapter = netdev_priv(netdev);
654 case ETHTOOL_ID_ACTIVE:
655 be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
656 &adapter->beacon_state);
657 return 1; /* cycle on/off once per second */
660 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
661 BEACON_STATE_ENABLED);
665 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
666 BEACON_STATE_DISABLED);
/* Restore the beacon state saved when identification started */
669 case ETHTOOL_ID_INACTIVE:
670 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
671 adapter->beacon_state);
/* ethtool set_dump: trigger a firmware dump (Lancer chips only).
 * Refuses if a previous dump has not yet been cleared.
 */
677 static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
679 struct be_adapter *adapter = netdev_priv(netdev);
680 struct device *dev = &adapter->pdev->dev;
683 if (!lancer_chip(adapter)) {
684 dev_err(dev, "FW dump not supported\n");
688 if (dump_present(adapter)) {
689 dev_err(dev, "Previous dump not cleared, not forcing dump\n");
693 switch (dump->flag) {
694 case LANCER_INITIATE_FW_DUMP:
695 status = lancer_initiate_dump(adapter);
697 dev_info(dev, "F/w dump initiated successfully\n");
700 dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag);
/* ethtool get_wol: report Wake-on-LAN capability and current setting.
 * Only magic-packet wake is supported; no SecureOn password.
 */
707 be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
709 struct be_adapter *adapter = netdev_priv(netdev);
711 if (be_is_wol_supported(adapter)) {
712 wol->supported |= WAKE_MAGIC;
714 wol->wolopts |= WAKE_MAGIC;
717 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: enable/disable magic-packet Wake-on-LAN. Any other
 * wake option is rejected.
 */
721 be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
723 struct be_adapter *adapter = netdev_priv(netdev);
725 if (wol->wolopts & ~WAKE_MAGIC)
728 if (!be_is_wol_supported(adapter)) {
729 dev_warn(&adapter->pdev->dev, "WOL not supported\n");
733 if (wol->wolopts & WAKE_MAGIC)
736 adapter->wol = false;
/* Self-test helper: run the firmware DDR DMA test twice, once with each
 * of two complementary 64-bit bit patterns, using a DMA-coherent buffer.
 */
742 be_test_ddr_dma(struct be_adapter *adapter)
745 struct be_dma_mem ddrdma_cmd;
746 static const u64 pattern[2] = {
747 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
750 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
751 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
752 &ddrdma_cmd.dma, GFP_KERNEL);
756 for (i = 0; i < 2; i++) {
757 ret = be_cmd_ddr_dma_test(adapter, pattern[i],
764 dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
/* Self-test helper: enable the requested loopback mode, run the firmware
 * loopback test, then restore normal (no-loopback) operation.
 */
769 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
772 be_cmd_set_loopback(adapter, adapter->hba_port_num,
774 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
777 be_cmd_set_loopback(adapter, adapter->hba_port_num,
/* ethtool self_test: run offline loopback tests (MAC, PHY, external),
 * a DDR DMA test (non-Lancer only), and a link-status check. Results go
 * into @data in et_self_tests[] order; failures set ETH_TEST_FL_FAILED.
 */
783 be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
785 struct be_adapter *adapter = netdev_priv(netdev);
789 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
790 dev_err(&adapter->pdev->dev, "Self test not supported\n");
791 test->flags |= ETH_TEST_FL_FAILED;
795 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
/* Loopback tests are disruptive and run only in offline mode */
797 if (test->flags & ETH_TEST_FL_OFFLINE) {
798 if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
800 test->flags |= ETH_TEST_FL_FAILED;
802 if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
804 test->flags |= ETH_TEST_FL_FAILED;
806 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
808 test->flags |= ETH_TEST_FL_FAILED;
812 if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
814 test->flags |= ETH_TEST_FL_FAILED;
817 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
819 test->flags |= ETH_TEST_FL_FAILED;
821 } else if (!link_status) {
822 test->flags |= ETH_TEST_FL_FAILED;
/* ethtool flash_device: flash firmware from the file named in efl->data */
828 be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
830 struct be_adapter *adapter = netdev_priv(netdev);
832 return be_load_fw(adapter, efl->data);
/* ethtool get_eeprom_len: EEPROM/VPD size. Requires full privileges.
 * On Lancer the size is the VPD file length (PF or VF file); other chips
 * use the fixed serial-EEPROM read length.
 */
836 be_get_eeprom_len(struct net_device *netdev)
838 struct be_adapter *adapter = netdev_priv(netdev);
840 if (!check_privilege(adapter, MAX_PRIVILEGES))
843 if (lancer_chip(adapter)) {
844 if (be_physfn(adapter))
845 return lancer_cmd_get_file_len(adapter,
848 return lancer_cmd_get_file_len(adapter,
851 return BE_READ_SEEPROM_LEN;
/* ethtool get_eeprom: read EEPROM/VPD contents. On Lancer this reads the
 * VPD file (PF or VF variant); other chips issue a seeprom-read mailbox
 * command into a DMA-coherent buffer and copy out the requested window.
 */
856 be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
859 struct be_adapter *adapter = netdev_priv(netdev);
860 struct be_dma_mem eeprom_cmd;
861 struct be_cmd_resp_seeprom_read *resp;
867 if (lancer_chip(adapter)) {
868 if (be_physfn(adapter))
869 return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
872 return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
/* Magic identifies the device: vendor ID in low 16 bits, device ID high */
876 eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
878 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
879 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
880 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
881 &eeprom_cmd.dma, GFP_KERNEL);
886 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
889 resp = eeprom_cmd.va;
890 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
892 dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
/* ethtool get_msglevel: report the driver message-enable mask.
 * Not supported on Lancer chips.
 */
898 static u32 be_get_msg_level(struct net_device *netdev)
900 struct be_adapter *adapter = netdev_priv(netdev);
902 if (lancer_chip(adapter)) {
903 dev_err(&adapter->pdev->dev, "Operation not supported\n");
907 return adapter->msg_enable;
/* Program the firmware's extended-FAT trace level: fetch current FAT
 * capabilities into a DMA buffer, update the debug level for the matching
 * mode across all modules, then write the configuration back.
 */
910 static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
912 struct be_dma_mem extfat_cmd;
913 struct be_fat_conf_params *cfgs;
917 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
918 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
919 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
921 if (!extfat_cmd.va) {
922 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
926 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
/* Config parameters follow the response header in the DMA buffer */
928 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
929 sizeof(struct be_cmd_resp_hdr));
930 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
931 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
932 for (j = 0; j < num_modes; j++) {
933 if (cfgs->module[i].trace_lvl[j].mode ==
935 cfgs->module[i].trace_lvl[j].dbg_lvl =
939 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
942 dev_err(&adapter->pdev->dev,
943 "Message level set failed\n");
945 dev_err(&adapter->pdev->dev, "Message level get failed\n");
948 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
/* ethtool set_msglevel: update the driver message mask; when the
 * NETIF_MSG_HW bit toggles, adjust the firmware log level accordingly.
 * Not supported on Lancer chips.
 */
954 static void be_set_msg_level(struct net_device *netdev, u32 level)
956 struct be_adapter *adapter = netdev_priv(netdev);
958 if (lancer_chip(adapter)) {
959 dev_err(&adapter->pdev->dev, "Operation not supported\n");
963 if (adapter->msg_enable == level)
/* Only the HW bit is mirrored into firmware logging */
966 if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
967 be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
968 FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
969 adapter->msg_enable = level;
/* Translate the adapter's RSS_ENABLE_* flags for @flow_type into the
 * ethtool RXH_* bitmask (IP src/dst always when the L3 flag is set, plus
 * L4 ports when the matching TCP/UDP flag is set).
 */
974 static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
980 if (adapter->rss_flags & RSS_ENABLE_IPV4)
981 data |= RXH_IP_DST | RXH_IP_SRC;
982 if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
983 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
986 if (adapter->rss_flags & RSS_ENABLE_IPV4)
987 data |= RXH_IP_DST | RXH_IP_SRC;
988 if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
989 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
992 if (adapter->rss_flags & RSS_ENABLE_IPV6)
993 data |= RXH_IP_DST | RXH_IP_SRC;
994 if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
995 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
998 if (adapter->rss_flags & RSS_ENABLE_IPV6)
999 data |= RXH_IP_DST | RXH_IP_SRC;
1000 if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
1001 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* ethtool get_rxnfc: report RSS hash options or RX ring count. Fails when
 * the adapter runs with a single RX queue (flow hashing disabled).
 */
1008 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1011 struct be_adapter *adapter = netdev_priv(netdev);
1013 if (!be_multi_rxq(adapter)) {
1014 dev_info(&adapter->pdev->dev,
1015 "ethtool::get_rxnfc: RX flow hashing is disabled\n");
1021 cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
1023 case ETHTOOL_GRXRINGS:
/* RSS queues exclude the default (non-RSS) queue */
1024 cmd->data = adapter->num_rx_qs - 1;
/* Apply new RSS hash options from ethtool: only "L3 only" or "L3+L4" are
 * accepted per flow type. The resulting flag set is reprogrammed into
 * firmware along with a rebuilt 128-entry RSS indirection table that
 * round-robins across the RSS-capable RX queues.
 */
1033 static int be_set_rss_hash_opts(struct be_adapter *adapter,
1034 struct ethtool_rxnfc *cmd)
1036 struct be_rx_obj *rxo;
1037 int status = 0, i, j;
1039 u32 rss_flags = adapter->rss_flags;
/* Only L3 hashing, or L3+L4 hashing, is configurable */
1041 if (cmd->data != L3_RSS_FLAGS &&
1042 cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
1045 switch (cmd->flow_type) {
1047 if (cmd->data == L3_RSS_FLAGS)
1048 rss_flags &= ~RSS_ENABLE_TCP_IPV4;
1049 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1050 rss_flags |= RSS_ENABLE_IPV4 |
1051 RSS_ENABLE_TCP_IPV4;
1054 if (cmd->data == L3_RSS_FLAGS)
1055 rss_flags &= ~RSS_ENABLE_TCP_IPV6;
1056 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1057 rss_flags |= RSS_ENABLE_IPV6 |
1058 RSS_ENABLE_TCP_IPV6;
1061 if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1065 if (cmd->data == L3_RSS_FLAGS)
1066 rss_flags &= ~RSS_ENABLE_UDP_IPV4;
1067 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1068 rss_flags |= RSS_ENABLE_IPV4 |
1069 RSS_ENABLE_UDP_IPV4;
1072 if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1076 if (cmd->data == L3_RSS_FLAGS)
1077 rss_flags &= ~RSS_ENABLE_UDP_IPV6;
1078 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1079 rss_flags |= RSS_ENABLE_IPV6 |
1080 RSS_ENABLE_UDP_IPV6;
/* No change requested: nothing to program */
1086 if (rss_flags == adapter->rss_flags)
1089 if (be_multi_rxq(adapter)) {
/* Fill the 128-entry indirection table round-robin over RSS queues */
1090 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
1091 for_all_rss_queues(adapter, rxo, i) {
1094 rsstable[j + i] = rxo->rss_id;
1098 status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
1100 adapter->rss_flags = rss_flags;
/* ethtool set_rxnfc: dispatch RX flow-classification requests; only RSS
 * hash configuration is handled, and only when multiple RX queues exist.
 */
1105 static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1107 struct be_adapter *adapter = netdev_priv(netdev);
1110 if (!be_multi_rxq(adapter)) {
1111 dev_err(&adapter->pdev->dev,
1112 "ethtool::set_rxnfc: RX flow hashing is disabled\n");
1118 status = be_set_rss_hash_opts(adapter, cmd);
/* ethtool get_channels: report current and maximum combined channel
 * (event queue) counts.
 */
1127 static void be_get_channels(struct net_device *netdev,
1128 struct ethtool_channels *ch)
1130 struct be_adapter *adapter = netdev_priv(netdev);
1132 ch->combined_count = adapter->num_evt_qs;
1133 ch->max_combined = be_max_qs(adapter);
/* ethtool set_channels: only combined channels are supported; separate
 * rx/tx/other counts are rejected. The queue set is rebuilt to match.
 */
1136 static int be_set_channels(struct net_device *netdev,
1137 struct ethtool_channels *ch)
1139 struct be_adapter *adapter = netdev_priv(netdev);
1141 if (ch->rx_count || ch->tx_count || ch->other_count ||
1142 !ch->combined_count || ch->combined_count > be_max_qs(adapter))
1145 adapter->cfg_num_qs = ch->combined_count;
1147 return be_update_queues(adapter);
/* ethtool operations exported by the benet driver */
1150 const struct ethtool_ops be_ethtool_ops = {
1151 .get_settings = be_get_settings,
1152 .get_drvinfo = be_get_drvinfo,
1153 .get_wol = be_get_wol,
1154 .set_wol = be_set_wol,
1155 .get_link = ethtool_op_get_link,
1156 .get_eeprom_len = be_get_eeprom_len,
1157 .get_eeprom = be_read_eeprom,
1158 .get_coalesce = be_get_coalesce,
1159 .set_coalesce = be_set_coalesce,
1160 .get_ringparam = be_get_ringparam,
1161 .get_pauseparam = be_get_pauseparam,
1162 .set_pauseparam = be_set_pauseparam,
1163 .get_strings = be_get_stat_strings,
1164 .set_phys_id = be_set_phys_id,
1165 .set_dump = be_set_dump,
1166 .get_msglevel = be_get_msg_level,
1167 .set_msglevel = be_set_msg_level,
1168 .get_sset_count = be_get_sset_count,
1169 .get_ethtool_stats = be_get_ethtool_stats,
1170 .get_regs_len = be_get_reg_len,
1171 .get_regs = be_get_regs,
1172 .flash_device = be_do_flash,
1173 .self_test = be_self_test,
1174 .get_rxnfc = be_get_rxnfc,
1175 .set_rxnfc = be_set_rxnfc,
1176 .get_channels = be_get_channels,
1177 .set_channels = be_set_channels