2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38 #include <linux/module.h>
39 #include <linux/moduleparam.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/debugfs.h>
46 #include <linux/ethtool.h>
48 #include "t4vf_common.h"
49 #include "t4vf_defs.h"
51 #include "../cxgb4/t4_regs.h"
52 #include "../cxgb4/t4_msg.h"
55 * Generic information about the driver.
57 #define DRV_VERSION "1.0.0"
58 #define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
66 * Default ethtool "message level" for adapters.
68 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
69 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
70 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
72 static int dflt_msg_enable = DFLT_MSG_ENABLE;
74 module_param(dflt_msg_enable, int, 0644);
75 MODULE_PARM_DESC(dflt_msg_enable,
76 "default adapter ethtool message level bitmap");
79 * The driver uses the best interrupt scheme available on a platform in the
80 * order MSI-X then MSI. This parameter determines which of these schemes the
81 * driver may consider as follows:
83 * msi = 2: choose from among MSI-X and MSI
84 * msi = 1: only consider MSI interrupts
86 * Note that unlike the Physical Function driver, this Virtual Function driver
87 * does _not_ support legacy INTx interrupts (this limitation is mandated by
88 * the PCI-E SR-IOV standard).
92 #define MSI_DEFAULT MSI_MSIX
94 static int msi = MSI_DEFAULT;
96 module_param(msi, int, 0644);
97 MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
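/*
 * Illustrative usage (a sketch, not from the original source): given the
 * mapping described above, "modprobe cxgb4vf msi=1" restricts the driver
 * to plain MSI, while the default (msi=2) lets it try MSI-X first and fall
 * back to MSI.  There is no INTx fallback for the Virtual Function.
 */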
100 * Fundamental constants.
101 * ======================
105 MAX_TXQ_ENTRIES = 16384,
106 MAX_RSPQ_ENTRIES = 16384,
107 MAX_RX_BUFFERS = 16384,
109 MIN_TXQ_ENTRIES = 32,
110 MIN_RSPQ_ENTRIES = 128,
114 * For purposes of manipulating the Free List size we need to
115 * recognize that Free Lists are actually Egress Queues (the host
116 * produces free buffers which the hardware consumes), that Egress Queue
117 * indices are measured in units of Egress Context Units (bytes), and free
118 * list entries are 64-bit PCI DMA addresses. And since the state of
119 * the Producer Index == the Consumer Index implies an EMPTY list, we
120 * always have at least one Egress Unit's worth of Free List entries
121 * unused. See sge.c for more details ...
123 EQ_UNIT = SGE_EQ_IDXSIZE,
124 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
125 MIN_FL_RESID = FL_PER_EQ_UNIT,
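/*
 * Worked example (illustrative, assuming SGE_EQ_IDXSIZE is 64 bytes):
 * EQ_UNIT = 64 and each Free List entry is an 8-byte (__be64) DMA address,
 * so FL_PER_EQ_UNIT = 64 / 8 = 8 and MIN_FL_RESID = 8.  A Free List thus
 * always keeps at least 8 entries unused so that a completely full list
 * can be distinguished from an empty one (PIDX == CIDX).
 */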
129 * Global driver state.
130 * ====================
133 static struct dentry *cxgb4vf_debugfs_root;
136 * OS "Callback" functions.
137 * ========================
141 * The link status has changed on the indicated "port" (Virtual Interface).
143 void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
145 struct net_device *dev = adapter->port[pidx];
148 * If the port is disabled or the current recorded "link up"
149 * status matches the new status, just return.
151 if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
155 * Tell the OS that the link status has changed and print a short
156 * informative message on the console about the event.
161 const struct port_info *pi = netdev_priv(dev);
163 netif_carrier_on(dev);
165 switch (pi->link_cfg.speed) {
183 switch (pi->link_cfg.fc) {
192 case PAUSE_RX|PAUSE_TX:
201 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
203 netif_carrier_off(dev);
204 netdev_info(dev, "link down\n");
209 * Net device operations.
210 * ======================
217 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
220 static int link_start(struct net_device *dev)
223 struct port_info *pi = netdev_priv(dev);
226 * We do not set address filters or promiscuity here; the stack does
227 * that explicitly. We just enable VLAN acceleration.
229 ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
232 ret = t4vf_change_mac(pi->adapter, pi->viid,
233 pi->xact_addr_filt, dev->dev_addr, true);
235 pi->xact_addr_filt = ret;
241 * We don't need to actually "start the link" itself since the
242 * firmware will do that for us when the first Virtual Interface
243 * is enabled on a port.
246 ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
251 * Name the MSI-X interrupts.
253 static void name_msix_vecs(struct adapter *adapter)
255 int namelen = sizeof(adapter->msix_info[0].desc) - 1;
261 snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
262 "%s-FWeventq", adapter->name);
263 adapter->msix_info[MSIX_FW].desc[namelen] = 0;
268 for_each_port(adapter, pidx) {
269 struct net_device *dev = adapter->port[pidx];
270 const struct port_info *pi = netdev_priv(dev);
273 for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
274 snprintf(adapter->msix_info[msi].desc, namelen,
275 "%s-%d", dev->name, qs);
276 adapter->msix_info[msi].desc[namelen] = 0;
282 * Request all of our MSI-X resources.
284 static int request_msix_queue_irqs(struct adapter *adapter)
286 struct sge *s = &adapter->sge;
292 err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
293 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
301 for_each_ethrxq(s, rxq) {
302 err = request_irq(adapter->msix_info[msi].vec,
303 t4vf_sge_intr_msix, 0,
304 adapter->msix_info[msi].desc,
305 &s->ethrxq[rxq].rspq);
314 free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
315 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
320 * Free our MSI-X resources.
322 static void free_msix_queue_irqs(struct adapter *adapter)
324 struct sge *s = &adapter->sge;
327 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
329 for_each_ethrxq(s, rxq)
330 free_irq(adapter->msix_info[msi++].vec,
331 &s->ethrxq[rxq].rspq);
335 * Turn on NAPI and start up interrupts on a response queue.
337 static void qenable(struct sge_rspq *rspq)
339 napi_enable(&rspq->napi);
342 * 0-increment the Going To Sleep register to start the timer and
345 t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
347 SEINTARM(rspq->intr_params) |
348 INGRESSQID(rspq->cntxt_id));
352 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
354 static void enable_rx(struct adapter *adapter)
357 struct sge *s = &adapter->sge;
359 for_each_ethrxq(s, rxq)
360 qenable(&s->ethrxq[rxq].rspq);
361 qenable(&s->fw_evtq);
364 * The interrupt queue doesn't use NAPI so we do the 0-increment of
365 * its Going To Sleep register here to get it started.
367 if (adapter->flags & USING_MSI)
368 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
370 SEINTARM(s->intrq.intr_params) |
371 INGRESSQID(s->intrq.cntxt_id));
376 * Wait until all NAPI handlers are descheduled.
378 static void quiesce_rx(struct adapter *adapter)
380 struct sge *s = &adapter->sge;
383 for_each_ethrxq(s, rxq)
384 napi_disable(&s->ethrxq[rxq].rspq.napi);
385 napi_disable(&s->fw_evtq.napi);
389 * Response queue handler for the firmware event queue.
391 static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
392 const struct pkt_gl *gl)
395 * Extract response opcode and get pointer to CPL message body.
397 struct adapter *adapter = rspq->adapter;
398 u8 opcode = ((const struct rss_header *)rsp)->opcode;
399 void *cpl = (void *)(rsp + 1);
404 * We've received an asynchronous message from the firmware.
406 const struct cpl_fw6_msg *fw_msg = cpl;
407 if (fw_msg->type == FW6_TYPE_CMD_RPL)
408 t4vf_handle_fw_rpl(adapter, fw_msg->data);
412 case CPL_SGE_EGR_UPDATE: {
414 * We've received an Egress Queue Status Update message. We
415 * get these if the SGE is configured to send them when the
416 * firmware passes certain points in processing our TX
417 * Ethernet Queue or if we make an explicit request for one.
418 * We use these updates to determine when we may need to
419 * restart a TX Ethernet Queue which was stopped for lack of
420 * free TX Queue Descriptors ...
422 const struct cpl_sge_egr_update *p = cpl;
423 unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
424 struct sge *s = &adapter->sge;
426 struct sge_eth_txq *txq;
430 * Perform sanity checking on the Queue ID to make sure it
431 * really refers to one of our TX Ethernet Egress Queues which
432 * is active and matches the queue's ID. None of these error
433 * conditions should ever happen, so we may want to make them
434 * fatal and/or conditional on DEBUG.
436 eq_idx = EQ_IDX(s, qid);
437 if (unlikely(eq_idx >= MAX_EGRQ)) {
438 dev_err(adapter->pdev_dev,
439 "Egress Update QID %d out of range\n", qid);
442 tq = s->egr_map[eq_idx];
443 if (unlikely(tq == NULL)) {
444 dev_err(adapter->pdev_dev,
445 "Egress Update QID %d TXQ=NULL\n", qid);
448 txq = container_of(tq, struct sge_eth_txq, q);
449 if (unlikely(tq->abs_id != qid)) {
450 dev_err(adapter->pdev_dev,
451 "Egress Update QID %d refers to TXQ %d\n",
457 * Restart a stopped TX Queue which has less than half of its
461 netif_tx_wake_queue(txq->txq);
466 dev_err(adapter->pdev_dev,
467 "unexpected CPL %#x on FW event queue\n", opcode);
474 * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues
475 * to use and initialize them. We support multiple "Queue Sets" per port if
476 * we have MSI-X, otherwise just one queue set per port.
478 static int setup_sge_queues(struct adapter *adapter)
480 struct sge *s = &adapter->sge;
484 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
487 bitmap_zero(s->starving_fl, MAX_EGRQ);
490 * If we're using MSI interrupt mode we need to set up a "forwarded
491 * interrupt" queue which we'll associate with our MSI vector. The rest
492 * of the ingress queues will be set up to forward their interrupts to
493 * this queue ... This must be first since t4vf_sge_alloc_rxq() uses
494 * the intrq's queue ID as the interrupt forwarding queue for the
495 * subsequent calls ...
497 if (adapter->flags & USING_MSI) {
498 err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
499 adapter->port[0], 0, NULL, NULL);
501 goto err_free_queues;
505 * Allocate our ingress queue for asynchronous firmware messages.
507 err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
508 MSIX_FW, NULL, fwevtq_handler);
510 goto err_free_queues;
513 * Allocate each "port"'s initial Queue Sets. These can be changed
514 * later on ... up to the point where any interface on the adapter is
515 * brought up, at which point lots of things get nailed down
519 for_each_port(adapter, pidx) {
520 struct net_device *dev = adapter->port[pidx];
521 struct port_info *pi = netdev_priv(dev);
522 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
523 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
526 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
527 err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
529 &rxq->fl, t4vf_ethrx_handler);
531 goto err_free_queues;
533 err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
534 netdev_get_tx_queue(dev, qs),
535 s->fw_evtq.cntxt_id);
537 goto err_free_queues;
540 memset(&rxq->stats, 0, sizeof(rxq->stats));
545 * Create the reverse mappings for the queues.
547 s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
548 s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
549 IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
550 for_each_port(adapter, pidx) {
551 struct net_device *dev = adapter->port[pidx];
552 struct port_info *pi = netdev_priv(dev);
553 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
554 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
557 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
558 IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
559 EQ_MAP(s, txq->q.abs_id) = &txq->q;
562 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
563 * for Free Lists but since all of the Egress Queues
564 * (including Free Lists) have Relative Queue IDs
565 * which are computed as Absolute - Base Queue ID, we
566 * can synthesize the Absolute Queue IDs for the Free
567 * Lists. This is useful for debugging purposes when
568 * we want to dump Queue Contexts via the PF Driver.
570 rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
571 EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
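/*
 * Illustrative example (hypothetical queue numbers): if the first TX queue
 * has hardware Context ID 0 but Absolute Queue ID 72, then egr_base is 72
 * and a Free List with Context ID 5 gets the synthesized Absolute Queue ID
 * 5 + 72 = 77, which is the ID the PF Driver would use when dumping that
 * queue's context.
 */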
577 t4vf_free_sge_resources(adapter);
582 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
583 * queues. We configure the RSS CPU lookup table to distribute to the number
584 * of HW receive queues, and the response queue lookup table to narrow that
585 * down to the response queues actually configured for each "port" (Virtual
586 * Interface). We always configure the RSS mapping for all ports since the
587 * mapping table has plenty of entries.
589 static int setup_rss(struct adapter *adapter)
593 for_each_port(adapter, pidx) {
594 struct port_info *pi = adap2pinfo(adapter, pidx);
595 struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
596 u16 rss[MAX_PORT_QSETS];
599 for (qs = 0; qs < pi->nqsets; qs++)
600 rss[qs] = rxq[qs].rspq.abs_id;
602 err = t4vf_config_rss_range(adapter, pi->viid,
603 0, pi->rss_size, rss, pi->nqsets);
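/*
 * Illustrative note (a sketch of the expected behaviour, not from the
 * original source): the call above fills pi->rss_size slots of the VI's
 * RSS lookup table by cycling through the nqsets Response Queue IDs
 * collected in rss[].  E.g. with 4 Queue Sets and a 64-entry table, slot i
 * ends up holding rss[i % 4], spreading hashed ingress traffic evenly
 * across the 4 queues.
 */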
608 * Perform Global RSS Mode-specific initialization.
610 switch (adapter->params.rss.mode) {
611 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
613 * If Tunnel All Lookup isn't specified in the global
614 * RSS Configuration, then we need to specify a
615 * default Ingress Queue for any ingress packets which
616 * aren't hashed. We'll use our first ingress queue
619 if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
620 union rss_vi_config config;
621 err = t4vf_read_rss_vi_config(adapter,
626 config.basicvirtual.defaultq =
628 err = t4vf_write_rss_vi_config(adapter,
642 * Bring the adapter up. Called whenever we go from no "ports" open to having
643 * one open. This function performs the actions necessary to make an adapter
644 * operational, such as completing the initialization of HW modules, and
645 * enabling interrupts. Must be called with the rtnl lock held. (Note that
646 * this is called "cxgb_up" in the PF Driver.)
648 static int adapter_up(struct adapter *adapter)
653 * If this is the first time we've been called, perform basic
654 * adapter setup. Once we've done this, many of our adapter
655 * parameters can no longer be changed ...
657 if ((adapter->flags & FULL_INIT_DONE) == 0) {
658 err = setup_sge_queues(adapter);
661 err = setup_rss(adapter);
663 t4vf_free_sge_resources(adapter);
667 if (adapter->flags & USING_MSIX)
668 name_msix_vecs(adapter);
669 adapter->flags |= FULL_INIT_DONE;
673 * Acquire our interrupt resources. We only support MSI-X and MSI.
675 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
676 if (adapter->flags & USING_MSIX)
677 err = request_msix_queue_irqs(adapter);
679 err = request_irq(adapter->pdev->irq,
680 t4vf_intr_handler(adapter), 0,
681 adapter->name, adapter);
683 dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
689 * Enable NAPI ingress processing and return success.
692 t4vf_sge_start(adapter);
697 * Bring the adapter down. Called whenever the last "port" (Virtual
698 * Interface) is closed. (Note that this routine is called "cxgb_down" in the PF
701 static void adapter_down(struct adapter *adapter)
704 * Free interrupt resources.
706 if (adapter->flags & USING_MSIX)
707 free_msix_queue_irqs(adapter);
709 free_irq(adapter->pdev->irq, adapter);
712 * Wait for NAPI handlers to finish.
718 * Start up a net device.
720 static int cxgb4vf_open(struct net_device *dev)
723 struct port_info *pi = netdev_priv(dev);
724 struct adapter *adapter = pi->adapter;
727 * If this is the first interface that we're opening on the "adapter",
728 * bring the "adapter" up now.
730 if (adapter->open_device_map == 0) {
731 err = adapter_up(adapter);
737 * Record that this interface is up and start everything up ...
739 netif_set_real_num_tx_queues(dev, pi->nqsets);
740 err = netif_set_real_num_rx_queues(dev, pi->nqsets);
743 err = link_start(dev);
747 netif_tx_start_all_queues(dev);
748 set_bit(pi->port_id, &adapter->open_device_map);
752 if (adapter->open_device_map == 0)
753 adapter_down(adapter);
758 * Shut down a net device. This routine is called "cxgb_close" in the PF
761 static int cxgb4vf_stop(struct net_device *dev)
763 struct port_info *pi = netdev_priv(dev);
764 struct adapter *adapter = pi->adapter;
766 netif_tx_stop_all_queues(dev);
767 netif_carrier_off(dev);
768 t4vf_enable_vi(adapter, pi->viid, false, false);
769 pi->link_cfg.link_ok = 0;
771 clear_bit(pi->port_id, &adapter->open_device_map);
772 if (adapter->open_device_map == 0)
773 adapter_down(adapter);
778 * Translate our basic statistics into the standard "ifconfig" statistics.
780 static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
782 struct t4vf_port_stats stats;
783 struct port_info *pi = netdev2pinfo(dev);
784 struct adapter *adapter = pi->adapter;
785 struct net_device_stats *ns = &dev->stats;
788 spin_lock(&adapter->stats_lock);
789 err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
790 spin_unlock(&adapter->stats_lock);
792 memset(ns, 0, sizeof(*ns));
796 ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
797 stats.tx_ucast_bytes + stats.tx_offload_bytes);
798 ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
799 stats.tx_ucast_frames + stats.tx_offload_frames);
800 ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
801 stats.rx_ucast_bytes);
802 ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
803 stats.rx_ucast_frames);
804 ns->multicast = stats.rx_mcast_frames;
805 ns->tx_errors = stats.tx_drop_frames;
806 ns->rx_errors = stats.rx_err_frames;
812 * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
813 * at a specified offset within the list, into an array of address pointers and
814 * return the number collected.
816 static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
819 unsigned int maxaddrs)
821 unsigned int index = 0;
822 unsigned int naddr = 0;
823 const struct netdev_hw_addr *ha;
825 for_each_dev_addr(dev, ha)
826 if (index++ >= offset) {
827 addr[naddr++] = ha->addr;
828 if (naddr >= maxaddrs)
835 * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
836 * at a specified offset within the list, into an array of address pointers and
837 * return the number collected.
839 static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
842 unsigned int maxaddrs)
844 unsigned int index = 0;
845 unsigned int naddr = 0;
846 const struct netdev_hw_addr *ha;
848 netdev_for_each_mc_addr(ha, dev)
849 if (index++ >= offset) {
850 addr[naddr++] = ha->addr;
851 if (naddr >= maxaddrs)
858 * Configure the exact and hash address filters to handle a port's multicast
859 * and secondary unicast MAC addresses.
861 static int set_addr_filters(const struct net_device *dev, bool sleep)
866 unsigned int offset, naddr;
869 const struct port_info *pi = netdev_priv(dev);
871 /* first do the secondary unicast addresses */
872 for (offset = 0; ; offset += naddr) {
873 naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
878 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
879 naddr, addr, NULL, &uhash, sleep);
886 /* next set up the multicast addresses */
887 for (offset = 0; ; offset += naddr) {
888 naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
893 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
894 naddr, addr, NULL, &mhash, sleep);
900 return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
901 uhash | mhash, sleep);
905 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
906 * If @mtu is -1 it is left unchanged.
908 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
911 struct port_info *pi = netdev_priv(dev);
913 ret = set_addr_filters(dev, sleep_ok);
915 ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
916 (dev->flags & IFF_PROMISC) != 0,
917 (dev->flags & IFF_ALLMULTI) != 0,
923 * Set the current receive modes on the device.
925 static void cxgb4vf_set_rxmode(struct net_device *dev)
927 /* unfortunately we can't return errors to the stack */
928 set_rxmode(dev, -1, false);
932 * Find the entry in the interrupt holdoff timer value array which comes
933 * closest to the specified interrupt holdoff value.
935 static int closest_timer(const struct sge *s, int us)
937 int i, timer_idx = 0, min_delta = INT_MAX;
939 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
940 int delta = us - s->timer_val[i];
943 if (delta < min_delta) {
951 static int closest_thres(const struct sge *s, int thres)
953 int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
955 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
956 delta = thres - s->counter_val[i];
959 if (delta < min_delta) {
968 * Return a queue's interrupt hold-off time in us. 0 means no timer.
970 static unsigned int qtimer_val(const struct adapter *adapter,
971 const struct sge_rspq *rspq)
973 unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
975 return timer_idx < SGE_NTIMERS
976 ? adapter->sge.timer_val[timer_idx]
981 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
982 * @adapter: the adapter
983 * @rspq: the RX response queue
984 * @us: the hold-off time in us, or 0 to disable timer
985 * @cnt: the hold-off packet count, or 0 to disable counter
987 * Sets an RX response queue's interrupt hold-off time and packet count.
988 * At least one of the two needs to be enabled for the queue to generate
991 static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
992 unsigned int us, unsigned int cnt)
994 unsigned int timer_idx;
997 * If both the interrupt holdoff timer and count are specified as
998 * zero, default to a holdoff count of 1 ...
1000 if ((us | cnt) == 0)
1004 * If an interrupt holdoff count has been specified, then find the
1005 * closest configured holdoff count and use that. If the response
1006 * queue has already been created, then update its queue context
1013 pktcnt_idx = closest_thres(&adapter->sge, cnt);
1014 if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
1015 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1017 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1018 FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
1019 err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
1023 rspq->pktcnt_idx = pktcnt_idx;
1027 * Compute the closest holdoff timer index from the supplied holdoff
1030 timer_idx = (us == 0
1031 ? SGE_TIMER_RSTRT_CNTR
1032 : closest_timer(&adapter->sge, us));
1035 * Update the response queue's interrupt coalescing parameters and
1038 rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
1039 (cnt > 0 ? QINTR_CNT_EN : 0));
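/*
 * Illustrative usage (a sketch, not from the original source): a request
 * like "ethtool -C ethX rx-usecs 50 rx-frames 16" reaches this routine
 * with us=50 and cnt=16; we pick the closest entries in the adapter's
 * timer_val[] and counter_val[] tables rather than the exact values, so
 * "ethtool -c ethX" may report slightly different effective settings.
 */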
1044 * Return a version number to identify the type of adapter. The scheme is:
1045 * - bits 0..9: chip version
1046 * - bits 10..15: chip revision
1048 static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1051 * Chip version 4, revision 0x3f (cxgb4vf).
1053 return 4 | (0x3f << 10);
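/*
 * Worked example: 4 | (0x3f << 10) == 0xfc04, so register dumps from this
 * driver carry version 0xfc04, which decodes per the scheme above as chip
 * version 4 (bits 0..9) and revision 0x3f (bits 10..15).
 */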
1057 * Execute the specified ioctl command.
1059 static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1065 * The VF Driver doesn't have access to any of the other
1066 * common Ethernet device ioctl()'s (like reading/writing
1067 * PHY registers, etc.
1078 * Change the device's MTU.
1080 static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1083 struct port_info *pi = netdev_priv(dev);
1085 /* accommodate SACK */
1089 ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1090 -1, -1, -1, -1, true);
1096 static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
1097 netdev_features_t features)
1100 * Since there is no support for separate rx/tx vlan accel
1101 * enable/disable, make sure the tx flag is always in the same state as rx.
1103 if (features & NETIF_F_HW_VLAN_RX)
1104 features |= NETIF_F_HW_VLAN_TX;
1106 features &= ~NETIF_F_HW_VLAN_TX;
1111 static int cxgb4vf_set_features(struct net_device *dev,
1112 netdev_features_t features)
1114 struct port_info *pi = netdev_priv(dev);
1115 netdev_features_t changed = dev->features ^ features;
1117 if (changed & NETIF_F_HW_VLAN_RX)
1118 t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
1119 features & NETIF_F_HW_VLAN_TX, 0);
1125 * Change the device's MAC address.
1127 static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1130 struct sockaddr *addr = _addr;
1131 struct port_info *pi = netdev_priv(dev);
1133 if (!is_valid_ether_addr(addr->sa_data))
1134 return -EADDRNOTAVAIL;
1136 ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
1137 addr->sa_data, true);
1141 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1142 pi->xact_addr_filt = ret;
1146 #ifdef CONFIG_NET_POLL_CONTROLLER
1148 * Poll all of our receive queues. This is called outside of normal interrupt
1151 static void cxgb4vf_poll_controller(struct net_device *dev)
1153 struct port_info *pi = netdev_priv(dev);
1154 struct adapter *adapter = pi->adapter;
1156 if (adapter->flags & USING_MSIX) {
1157 struct sge_eth_rxq *rxq;
1160 rxq = &adapter->sge.ethrxq[pi->first_qset];
1161 for (nqsets = pi->nqsets; nqsets; nqsets--) {
1162 t4vf_sge_intr_msix(0, &rxq->rspq);
1166 t4vf_intr_handler(adapter)(0, adapter);
1171 * Ethtool operations.
1172 * ===================
1174 * Note that we don't support any ethtool operations which change the physical
1175 * state of the port to which we're linked.
1179 * Return current port link settings.
1181 static int cxgb4vf_get_settings(struct net_device *dev,
1182 struct ethtool_cmd *cmd)
1184 const struct port_info *pi = netdev_priv(dev);
1186 cmd->supported = pi->link_cfg.supported;
1187 cmd->advertising = pi->link_cfg.advertising;
1188 ethtool_cmd_speed_set(cmd,
1189 netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
1190 cmd->duplex = DUPLEX_FULL;
1192 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1193 cmd->phy_address = pi->port_id;
1194 cmd->transceiver = XCVR_EXTERNAL;
1195 cmd->autoneg = pi->link_cfg.autoneg;
1202 * Return our driver information.
1204 static void cxgb4vf_get_drvinfo(struct net_device *dev,
1205 struct ethtool_drvinfo *drvinfo)
1207 struct adapter *adapter = netdev2adap(dev);
1209 strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
1210 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
1211 strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
1212 sizeof(drvinfo->bus_info));
1213 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1214 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1215 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
1216 FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev),
1217 FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev),
1218 FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev),
1219 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev),
1220 FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev),
1221 FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev),
1222 FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev));
1226 * Return current adapter message level.
1228 static u32 cxgb4vf_get_msglevel(struct net_device *dev)
1230 return netdev2adap(dev)->msg_enable;
1234 * Set current adapter message level.
1236 static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
1238 netdev2adap(dev)->msg_enable = msglevel;
1242 * Return the device's current Queue Set ring size parameters along with the
1243 * allowed maximum values. Since ethtool doesn't understand the concept of
1244 * multi-queue devices, we just return the current values associated with the
1247 static void cxgb4vf_get_ringparam(struct net_device *dev,
1248 struct ethtool_ringparam *rp)
1250 const struct port_info *pi = netdev_priv(dev);
1251 const struct sge *s = &pi->adapter->sge;
1253 rp->rx_max_pending = MAX_RX_BUFFERS;
1254 rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1255 rp->rx_jumbo_max_pending = 0;
1256 rp->tx_max_pending = MAX_TXQ_ENTRIES;
1258 rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1259 rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1260 rp->rx_jumbo_pending = 0;
1261 rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
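/*
 * Illustrative note (assuming MIN_FL_RESID is 8, i.e. a 64-byte Egress
 * Queue Unit of 8-byte addresses): a Free List allocated with 1024
 * hardware entries is reported to ethtool as rx_pending = 1024 - 8 = 1016,
 * and cxgb4vf_set_ringparam() below adds MIN_FL_RESID back on before
 * storing the requested Free List size.
 */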
1265 * Set the Queue Set ring size parameters for the device. Again, since
1266 * ethtool doesn't allow for the concept of multiple queues per device, we'll
1267 * apply these new values across all of the Queue Sets associated with the
1268 * device -- after vetting them of course!
1270 static int cxgb4vf_set_ringparam(struct net_device *dev,
1271 struct ethtool_ringparam *rp)
1273 const struct port_info *pi = netdev_priv(dev);
1274 struct adapter *adapter = pi->adapter;
1275 struct sge *s = &adapter->sge;
1278 if (rp->rx_pending > MAX_RX_BUFFERS ||
1279 rp->rx_jumbo_pending ||
1280 rp->tx_pending > MAX_TXQ_ENTRIES ||
1281 rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1282 rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1283 rp->rx_pending < MIN_FL_ENTRIES ||
1284 rp->tx_pending < MIN_TXQ_ENTRIES)
1287 if (adapter->flags & FULL_INIT_DONE)
1290 for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1291 s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1292 s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1293 s->ethtxq[qs].q.size = rp->tx_pending;
1299 * Return the interrupt holdoff timer and count for the first Queue Set on the
1300 * device. Our extension ioctl() (the cxgbtool interface) allows the
1301 * interrupt holdoff timer to be read on all of the device's Queue Sets.
1303 static int cxgb4vf_get_coalesce(struct net_device *dev,
1304 struct ethtool_coalesce *coalesce)
1306 const struct port_info *pi = netdev_priv(dev);
1307 const struct adapter *adapter = pi->adapter;
1308 const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1310 coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1311 coalesce->rx_max_coalesced_frames =
1312 ((rspq->intr_params & QINTR_CNT_EN)
1313 ? adapter->sge.counter_val[rspq->pktcnt_idx]
1319 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
1320 * interface. Our extension ioctl() (the cxgbtool interface) allows us to set
1321 * the interrupt holdoff timer on any of the device's Queue Sets.
1323 static int cxgb4vf_set_coalesce(struct net_device *dev,
1324 struct ethtool_coalesce *coalesce)
1326 const struct port_info *pi = netdev_priv(dev);
1327 struct adapter *adapter = pi->adapter;
1329 return set_rxq_intr_params(adapter,
1330 &adapter->sge.ethrxq[pi->first_qset].rspq,
1331 coalesce->rx_coalesce_usecs,
1332 coalesce->rx_max_coalesced_frames);
1336 * Report current port link pause parameter settings.
1338 static void cxgb4vf_get_pauseparam(struct net_device *dev,
1339 struct ethtool_pauseparam *pauseparam)
1341 struct port_info *pi = netdev_priv(dev);
1343 pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1344 pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
1345 pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
1349 * Identify the port by blinking the port's LED.
1351 static int cxgb4vf_phys_id(struct net_device *dev,
1352 enum ethtool_phys_id_state state)
1355 struct port_info *pi = netdev_priv(dev);
1357 if (state == ETHTOOL_ID_ACTIVE)
1359 else if (state == ETHTOOL_ID_INACTIVE)
1364 return t4vf_identify_port(pi->adapter, pi->viid, val);
1368 * Port stats maintained per queue of the port.
1370 struct queue_port_stats {
1381 * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that
1382 * these need to match the order of statistics returned by
1383 * t4vf_get_port_stats().
1385 static const char stats_strings[][ETH_GSTRING_LEN] = {
1387 * These must match the layout of the t4vf_port_stats structure.
1389 "TxBroadcastBytes ",
1390 "TxBroadcastFrames ",
1391 "TxMulticastBytes ",
1392 "TxMulticastFrames ",
1398 "RxBroadcastBytes ",
1399 "RxBroadcastFrames ",
1400 "RxMulticastBytes ",
1401 "RxMulticastFrames ",
1407 * These are accumulated per-queue statistics and must match the
1408 * order of the fields in the queue_port_stats structure.
1420 * Return the number of statistics in the specified statistics set.
1422 static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1426 return ARRAY_SIZE(stats_strings);
1434 * Return the strings for the specified statistics set.
1436 static void cxgb4vf_get_strings(struct net_device *dev,
1442 memcpy(data, stats_strings, sizeof(stats_strings));
1448 * Small utility routine to accumulate queue statistics across the queues of
1451 static void collect_sge_port_stats(const struct adapter *adapter,
1452 const struct port_info *pi,
1453 struct queue_port_stats *stats)
1455 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1456 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1459 memset(stats, 0, sizeof(*stats));
1460 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1461 stats->tso += txq->tso;
1462 stats->tx_csum += txq->tx_cso;
1463 stats->rx_csum += rxq->stats.rx_cso;
1464 stats->vlan_ex += rxq->stats.vlan_ex;
1465 stats->vlan_ins += txq->vlan_ins;
1466 stats->lro_pkts += rxq->stats.lro_pkts;
1467 stats->lro_merged += rxq->stats.lro_merged;
1472 * Return the ETH_SS_STATS statistics set.
1474 static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
1475 struct ethtool_stats *stats,
1478 struct port_info *pi = netdev2pinfo(dev);
1479 struct adapter *adapter = pi->adapter;
1480 int err = t4vf_get_port_stats(adapter, pi->pidx,
1481 (struct t4vf_port_stats *)data);
1483 memset(data, 0, sizeof(struct t4vf_port_stats));
1485 data += sizeof(struct t4vf_port_stats) / sizeof(u64);
1486 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1490 * Return the size of our register map.
1492 static int cxgb4vf_get_regs_len(struct net_device *dev)
1494 return T4VF_REGMAP_SIZE;
1498 * Dump a block of registers, start to end inclusive, into a buffer.
1500 static void reg_block_dump(struct adapter *adapter, void *regbuf,
1501 unsigned int start, unsigned int end)
1503 u32 *bp = regbuf + start - T4VF_REGMAP_START;
1505 for ( ; start <= end; start += sizeof(u32)) {
1507 * Avoid reading the Mailbox Control register since that
1508 * can trigger a Mailbox Ownership Arbitration cycle and
1509 * interfere with communication with the firmware.
1511 if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
1514 *bp++ = t4_read_reg(adapter, start);
1519 * Copy our entire register map into the provided buffer.
1521 static void cxgb4vf_get_regs(struct net_device *dev,
1522 struct ethtool_regs *regs,
1525 struct adapter *adapter = netdev2adap(dev);
1527 regs->version = mk_adap_vers(adapter);
1530 * Fill in register buffer with our register map.
1532 memset(regbuf, 0, T4VF_REGMAP_SIZE);
1534 reg_block_dump(adapter, regbuf,
1535 T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
1536 T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
1537 reg_block_dump(adapter, regbuf,
1538 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1539 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1540 reg_block_dump(adapter, regbuf,
1541 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1542 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
1543 reg_block_dump(adapter, regbuf,
1544 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1545 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
1547 reg_block_dump(adapter, regbuf,
1548 T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
1549 T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
1553 * Report current Wake On LAN settings.
1555 static void cxgb4vf_get_wol(struct net_device *dev,
1556 struct ethtool_wolinfo *wol)
1560 memset(&wol->sopass, 0, sizeof(wol->sopass));
1564 * TCP Segmentation Offload flags which we support.
1566 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1568 static const struct ethtool_ops cxgb4vf_ethtool_ops = {
1569 .get_settings = cxgb4vf_get_settings,
1570 .get_drvinfo = cxgb4vf_get_drvinfo,
1571 .get_msglevel = cxgb4vf_get_msglevel,
1572 .set_msglevel = cxgb4vf_set_msglevel,
1573 .get_ringparam = cxgb4vf_get_ringparam,
1574 .set_ringparam = cxgb4vf_set_ringparam,
1575 .get_coalesce = cxgb4vf_get_coalesce,
1576 .set_coalesce = cxgb4vf_set_coalesce,
1577 .get_pauseparam = cxgb4vf_get_pauseparam,
1578 .get_link = ethtool_op_get_link,
1579 .get_strings = cxgb4vf_get_strings,
1580 .set_phys_id = cxgb4vf_phys_id,
1581 .get_sset_count = cxgb4vf_get_sset_count,
1582 .get_ethtool_stats = cxgb4vf_get_ethtool_stats,
1583 .get_regs_len = cxgb4vf_get_regs_len,
1584 .get_regs = cxgb4vf_get_regs,
1585 .get_wol = cxgb4vf_get_wol,
1589 * /sys/kernel/debug/cxgb4vf support code and data.
1590 * ================================================
1594 * Show SGE Queue Set information. We display QPL Queue Sets per line.
1598 static int sge_qinfo_show(struct seq_file *seq, void *v)
1600 struct adapter *adapter = seq->private;
1601 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1602 int qs, r = (uintptr_t)v - 1;
1605 seq_putc(seq, '\n');
1607 #define S3(fmt_spec, s, v) \
1609 seq_printf(seq, "%-12s", s); \
1610 for (qs = 0; qs < n; ++qs) \
1611 seq_printf(seq, " %16" fmt_spec, v); \
1612 seq_putc(seq, '\n'); \
1614 #define S(s, v) S3("s", s, v)
1615 #define T(s, v) S3("u", s, txq[qs].v)
1616 #define R(s, v) S3("u", s, rxq[qs].v)
1618 if (r < eth_entries) {
1619 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1620 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1621 int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1623 S("QType:", "Ethernet");
1625 (rxq[qs].rspq.netdev
1626 ? rxq[qs].rspq.netdev->name
1629 (rxq[qs].rspq.netdev
1630 ? ((struct port_info *)
1631 netdev_priv(rxq[qs].rspq.netdev))->port_id
1633 T("TxQ ID:", q.abs_id);
1634 T("TxQ size:", q.size);
1635 T("TxQ inuse:", q.in_use);
1636 T("TxQ PIdx:", q.pidx);
1637 T("TxQ CIdx:", q.cidx);
1638 R("RspQ ID:", rspq.abs_id);
1639 R("RspQ size:", rspq.size);
1640 R("RspQE size:", rspq.iqe_len);
1641 S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
1642 S3("u", "Intr pktcnt:",
1643 adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
1644 R("RspQ CIdx:", rspq.cidx);
1645 R("RspQ Gen:", rspq.gen);
1646 R("FL ID:", fl.abs_id);
1647 R("FL size:", fl.size - MIN_FL_RESID);
1648 R("FL avail:", fl.avail);
1649 R("FL PIdx:", fl.pidx);
1650 R("FL CIdx:", fl.cidx);
1656 const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
1658 seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
1659 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
1660 seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1661 qtimer_val(adapter, evtq));
1662 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1663 adapter->sge.counter_val[evtq->pktcnt_idx]);
1664 seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
1665 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
1666 } else if (r == 1) {
1667 const struct sge_rspq *intrq = &adapter->sge.intrq;
1669 seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
1670 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
1671 seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1672 qtimer_val(adapter, intrq));
1673 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1674 adapter->sge.counter_val[intrq->pktcnt_idx]);
1675 seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
1676 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
1688 * Return the number of "entries" in our "file". We group the multi-Queue
1689 * sections with QPL Queue Sets per "entry". The sections of the output are:
1691 * Ethernet RX/TX Queue Sets
1692 * Firmware Event Queue
1693 * Forwarded Interrupt Queue (if in MSI mode)
1695 static int sge_queue_entries(const struct adapter *adapter)
1697 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
1698 ((adapter->flags & USING_MSI) != 0);
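/*
 * Worked example (hypothetical numbers, assuming QPL is 4): with 6
 * Ethernet Queue Sets the "file" has DIV_ROUND_UP(6, 4) = 2 Ethernet
 * entries, plus 1 for the firmware event queue, plus 1 more for the
 * forwarded interrupt queue if the adapter is in MSI mode -- 3 or 4
 * entries in total.
 */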
1701 static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
1703 int entries = sge_queue_entries(seq->private);
1705 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1708 static void sge_queue_stop(struct seq_file *seq, void *v)
1712 static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
1714 int entries = sge_queue_entries(seq->private);
1717 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1720 static const struct seq_operations sge_qinfo_seq_ops = {
1721 .start = sge_queue_start,
1722 .next = sge_queue_next,
1723 .stop = sge_queue_stop,
1724 .show = sge_qinfo_show
1727 static int sge_qinfo_open(struct inode *inode, struct file *file)
1729 int res = seq_open(file, &sge_qinfo_seq_ops);
1732 struct seq_file *seq = file->private_data;
1733 seq->private = inode->i_private;
1738 static const struct file_operations sge_qinfo_debugfs_fops = {
1739 .owner = THIS_MODULE,
1740 .open = sge_qinfo_open,
1742 .llseek = seq_lseek,
1743 .release = seq_release,
1747 * Show SGE Queue Set statistics. We display QPL Queue Sets per line.
1751 static int sge_qstats_show(struct seq_file *seq, void *v)
1753 struct adapter *adapter = seq->private;
1754 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1755 int qs, r = (uintptr_t)v - 1;
1758 seq_putc(seq, '\n');
1760 #define S3(fmt, s, v) \
1762 seq_printf(seq, "%-16s", s); \
1763 for (qs = 0; qs < n; ++qs) \
1764 seq_printf(seq, " %8" fmt, v); \
1765 seq_putc(seq, '\n'); \
1767 #define S(s, v) S3("s", s, v)
1769 #define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
1770 #define T(s, v) T3("lu", s, v)
1772 #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
1773 #define R(s, v) R3("lu", s, v)
1775 if (r < eth_entries) {
1776 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1777 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1778 int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1780 S("QType:", "Ethernet");
1782 (rxq[qs].rspq.netdev
1783 ? rxq[qs].rspq.netdev->name
1785 R3("u", "RspQNullInts:", rspq.unhandled_irqs);
1786 R("RxPackets:", stats.pkts);
1787 R("RxCSO:", stats.rx_cso);
1788 R("VLANxtract:", stats.vlan_ex);
1789 R("LROmerged:", stats.lro_merged);
1790 R("LROpackets:", stats.lro_pkts);
1791 R("RxDrops:", stats.rx_drops);
1793 T("TxCSO:", tx_cso);
1794 T("VLANins:", vlan_ins);
1795 T("TxQFull:", q.stops);
1796 T("TxQRestarts:", q.restarts);
1797 T("TxMapErr:", mapping_err);
1798 R("FLAllocErr:", fl.alloc_failed);
1799 R("FLLrgAlcErr:", fl.large_alloc_failed);
1800 R("FLStarving:", fl.starving);
1806 const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
1808 seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
1809 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
1810 evtq->unhandled_irqs);
1811 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
1812 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
1813 } else if (r == 1) {
1814 const struct sge_rspq *intrq = &adapter->sge.intrq;
1816 seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
1817 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
1818 intrq->unhandled_irqs);
1819 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
1820 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
1834 * Return the number of "entries" in our "file". We group the multi-Queue
1835 * sections with QPL Queue Sets per "entry". The sections of the output are:
1837 * Ethernet RX/TX Queue Sets
1838 * Firmware Event Queue
1839 * Forwarded Interrupt Queue (if in MSI mode)
1841 static int sge_qstats_entries(const struct adapter *adapter)
1843 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
1844 ((adapter->flags & USING_MSI) != 0);
1847 static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
1849 int entries = sge_qstats_entries(seq->private);
1851 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1854 static void sge_qstats_stop(struct seq_file *seq, void *v)
1858 static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
1860 int entries = sge_qstats_entries(seq->private);
1863 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1866 static const struct seq_operations sge_qstats_seq_ops = {
1867 .start = sge_qstats_start,
1868 .next = sge_qstats_next,
1869 .stop = sge_qstats_stop,
1870 .show = sge_qstats_show
1873 static int sge_qstats_open(struct inode *inode, struct file *file)
1875 int res = seq_open(file, &sge_qstats_seq_ops);
1878 struct seq_file *seq = file->private_data;
1879 seq->private = inode->i_private;
1884 static const struct file_operations sge_qstats_proc_fops = {
1885 .owner = THIS_MODULE,
1886 .open = sge_qstats_open,
1888 .llseek = seq_lseek,
1889 .release = seq_release,
1893 * Show PCI-E SR-IOV Virtual Function Resource Limits.
1895 static int resources_show(struct seq_file *seq, void *v)
1897 struct adapter *adapter = seq->private;
1898 struct vf_resources *vfres = &adapter->params.vfres;
1900 #define S(desc, fmt, var) \
1901 seq_printf(seq, "%-60s " fmt "\n", \
1902 desc " (" #var "):", vfres->var)
1904 S("Virtual Interfaces", "%d", nvi);
1905 S("Egress Queues", "%d", neq);
1906 S("Ethernet Control", "%d", nethctrl);
1907 S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
1908 S("Ingress Queues", "%d", niq);
1909 S("Traffic Class", "%d", tc);
1910 S("Port Access Rights Mask", "%#x", pmask);
1911 S("MAC Address Filters", "%d", nexactf);
1912 S("Firmware Command Read Capabilities", "%#x", r_caps);
1913 S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
1920 static int resources_open(struct inode *inode, struct file *file)
1922 return single_open(file, resources_show, inode->i_private);
1925 static const struct file_operations resources_proc_fops = {
1926 .owner = THIS_MODULE,
1927 .open = resources_open,
1929 .llseek = seq_lseek,
1930 .release = single_release,
1934 * Show Virtual Interfaces.
1936 static int interfaces_show(struct seq_file *seq, void *v)
1938 if (v == SEQ_START_TOKEN) {
1939 seq_puts(seq, "Interface Port VIID\n");
1941 struct adapter *adapter = seq->private;
1942 int pidx = (uintptr_t)v - 2;
1943 struct net_device *dev = adapter->port[pidx];
1944 struct port_info *pi = netdev_priv(dev);
1946 seq_printf(seq, "%9s %4d %#5x\n",
1947 dev->name, pi->port_id, pi->viid);
1952 static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
1954 return pos <= adapter->params.nports
1955 ? (void *)(uintptr_t)(pos + 1)
1959 static void *interfaces_start(struct seq_file *seq, loff_t *pos)
1962 ? interfaces_get_idx(seq->private, *pos)
1966 static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
1969 return interfaces_get_idx(seq->private, *pos);
1972 static void interfaces_stop(struct seq_file *seq, void *v)
1976 static const struct seq_operations interfaces_seq_ops = {
1977 .start = interfaces_start,
1978 .next = interfaces_next,
1979 .stop = interfaces_stop,
1980 .show = interfaces_show
1983 static int interfaces_open(struct inode *inode, struct file *file)
1985 int res = seq_open(file, &interfaces_seq_ops);
1988 struct seq_file *seq = file->private_data;
1989 seq->private = inode->i_private;
1994 static const struct file_operations interfaces_proc_fops = {
1995 .owner = THIS_MODULE,
1996 .open = interfaces_open,
1998 .llseek = seq_lseek,
1999 .release = seq_release,
2003 * /sys/kernel/debug/cxgb4vf/ files list.
2005 struct cxgb4vf_debugfs_entry {
2006 const char *name; /* name of debugfs node */
2007 umode_t mode; /* file system mode */
2008 const struct file_operations *fops;
2011 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2012 { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
2013 { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2014 { "resources", S_IRUGO, &resources_proc_fops },
2015 { "interfaces", S_IRUGO, &interfaces_proc_fops },
2019 * Module and device initialization and cleanup code.
2020 * ==================================================
2024 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2025 * directory (debugfs_root) has already been set up.
2027 static int setup_debugfs(struct adapter *adapter)
2031 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2034 * Debugfs support is best effort.
2036 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2037 (void)debugfs_create_file(debugfs_files[i].name,
2038 debugfs_files[i].mode,
2039 adapter->debugfs_root,
2041 debugfs_files[i].fops);
2047 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2048 * it to our caller to tear down the directory (debugfs_root).
2050 static void cleanup_debugfs(struct adapter *adapter)
2052 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2055 * Unlike our sister routine cleanup_proc(), we don't need to remove
2056 * individual entries because a call will be made to
2057 * debugfs_remove_recursive(). We just need to clean up any ancillary
2064 * Perform early "adapter" initialization. This is where we discover what
2065 * adapter parameters we're going to be using and initialize basic adapter
2068 static int adap_init0(struct adapter *adapter)
2070 struct vf_resources *vfres = &adapter->params.vfres;
2071 struct sge_params *sge_params = &adapter->params.sge;
2072 struct sge *s = &adapter->sge;
2073 unsigned int ethqsets;
2077 * Wait for the device to become ready before proceeding ...
2079 err = t4vf_wait_dev_ready(adapter);
2081 dev_err(adapter->pdev_dev, "device didn't become ready:"
2087 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2088 * 2.6.31 and later we can't call pci_reset_function() in order to
2089 * issue an FLR because of a self-deadlock on the device semaphore.
2090 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2091 * cases where they're needed -- for instance, some versions of KVM
2092 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2093 * use the firmware based reset in order to reset any per function
2096 err = t4vf_fw_reset(adapter);
2098 dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2103 * Grab basic operational parameters. These will predominantly have
2104 * been set up by the Physical Function Driver or will be hard coded
2105 * into the adapter. We just have to live with them ... Note that
2106 * we _must_ get our VPD parameters before our SGE parameters because
2107 * we need to know the adapter's core clock from the VPD in order to
2108 * properly decode the SGE Timer Values.
2110 err = t4vf_get_dev_params(adapter);
2112 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2113 " device parameters: err=%d\n", err);
2116 err = t4vf_get_vpd_params(adapter);
2118 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2119 " VPD parameters: err=%d\n", err);
2122 err = t4vf_get_sge_params(adapter);
2124 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2125 " SGE parameters: err=%d\n", err);
2128 err = t4vf_get_rss_glb_config(adapter);
2130 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2131 " RSS parameters: err=%d\n", err);
2134 if (adapter->params.rss.mode !=
2135 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2136 dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2137 " mode %d\n", adapter->params.rss.mode);
2140 err = t4vf_sge_init(adapter);
2142 dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2148 * Retrieve our RX interrupt holdoff timer values and counter
2149 * threshold values from the SGE parameters.
2151 s->timer_val[0] = core_ticks_to_us(adapter,
2152 TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
2153 s->timer_val[1] = core_ticks_to_us(adapter,
2154 TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
2155 s->timer_val[2] = core_ticks_to_us(adapter,
2156 TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
2157 s->timer_val[3] = core_ticks_to_us(adapter,
2158 TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
2159 s->timer_val[4] = core_ticks_to_us(adapter,
2160 TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
2161 s->timer_val[5] = core_ticks_to_us(adapter,
2162 TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
2165 THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
2167 THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
2169 THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
2171 THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
2174 * Grab our Virtual Interface resource allocation, extract the
2175 * features that we're interested in and do a bit of sanity testing on
2178 err = t4vf_get_vfres(adapter);
2180 dev_err(adapter->pdev_dev, "unable to get virtual interface"
2181 " resources: err=%d\n", err);
2186 * The number of "ports" which we support is equal to the number of
2187 * Virtual Interfaces with which we've been provisioned.
2189 adapter->params.nports = vfres->nvi;
2190 if (adapter->params.nports > MAX_NPORTS) {
2191 dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
2192 " virtual interfaces\n", MAX_NPORTS,
2193 adapter->params.nports);
2194 adapter->params.nports = MAX_NPORTS;
2198 * We need to reserve a number of the ingress queues with Free List
2199 * and Interrupt capabilities for special interrupt purposes (like
2200 * asynchronous firmware messages, or forwarded interrupts if we're
2201 * using MSI). The rest of the FL/Intr-capable ingress queues will be
2202 * matched up one-for-one with Ethernet/Control egress queues in order
2203 * to form "Queue Sets" which will be apportioned among the "ports".
2204 * For each Queue Set, we'll need the ability to allocate two Egress
2205 * Contexts -- one for the Ingress Queue Free List and one for the TX
2208 ethqsets = vfres->niqflint - INGQ_EXTRAS;
2209 if (vfres->nethctrl != ethqsets) {
2210 dev_warn(adapter->pdev_dev, "unequal number of [available]"
2211 " ingress/egress queues (%d/%d); using minimum for"
2212 " number of Queue Sets\n", ethqsets, vfres->nethctrl);
2213 ethqsets = min(vfres->nethctrl, ethqsets);
2215 if (vfres->neq < ethqsets*2) {
2216 dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
2217 " to support Queue Sets (%d); reducing allowed Queue"
2218 " Sets\n", vfres->neq, ethqsets);
2219 ethqsets = vfres->neq/2;
2221 if (ethqsets > MAX_ETH_QSETS) {
2222 dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
2223 " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets);
2224 ethqsets = MAX_ETH_QSETS;
2226 if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
2227 dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
2228 " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
2230 adapter->sge.max_ethqsets = ethqsets;
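/*
 * Worked example (hypothetical provisioning): with niqflint = 18,
 * INGQ_EXTRAS = 2, nethctrl = 16 and neq = 32, we start from 18 - 2 = 16
 * candidate Queue Sets, which nethctrl also supports, and the 32 Egress
 * Contexts cover exactly 16 Queue Sets (one Free List plus one TX queue
 * each), so max_ethqsets ends up as 16 (subject to the MAX_ETH_QSETS cap).
 */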
2233 * Check for various parameter sanity issues. Most checks simply
2234 * result in us using fewer resources than our provisioning but we
2235 * do need at least one "port" with which to work ...
2237 if (adapter->sge.max_ethqsets < adapter->params.nports) {
2238 dev_warn(adapter->pdev_dev, "only using %d of %d available"
2239 " virtual interfaces (too few Queue Sets)\n",
2240 adapter->sge.max_ethqsets, adapter->params.nports);
2241 adapter->params.nports = adapter->sge.max_ethqsets;
2243 if (adapter->params.nports == 0) {
2244 dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2251 static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2252 u8 pkt_cnt_idx, unsigned int size,
2253 unsigned int iqe_size)
2255 rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
2256 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
2257 rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2260 rspq->iqe_len = iqe_size;
2265 * Perform default configuration of DMA queues depending on the number and
2266 * type of ports we found and the number of available CPUs. Most settings can
2267 * be modified by the admin via ethtool and cxgbtool prior to the adapter
2268 * being brought up for the first time.
2270 static void cfg_queues(struct adapter *adapter)
2272 struct sge *s = &adapter->sge;
2273 int q10g, n10g, qidx, pidx, qs;
2277 * We should not be called till we know how many Queue Sets we can
2278 * support. In particular, this means that we need to know what kind
2279 * of interrupts we'll be using ...
2281 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2284 * Count the number of 10GbE Virtual Interfaces that we have.
	n10g = 0;
	for_each_port(adapter, pidx)
		n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
 * We default to 1 queue per non-10G port and up to # of cores queues
 * per 10G port.
2297 int n1g = (adapter->params.nports - n10g);
2298 q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2299 if (q10g > num_online_cpus())
2300 q10g = num_online_cpus();
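		/*
		 * As a purely hypothetical example of the distribution above:
		 * with one 10G port, one 1G port, max_ethqsets = 14 and 8
		 * online CPUs, the 1G port gets 1 Queue Set and the 10G port
		 * gets min((14 - 1) / 1, 8) = 8 Queue Sets.
		 */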
2304 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2305 * The layout will be established in setup_sge_queues() when the
 * adapter is brought up for the first time.
	qidx = 0;
	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
	s->ethqsets = qidx;
2319 * The Ingress Queue Entry Size for our various Response Queues needs
2320 * to be big enough to accommodate the largest message we can receive
 * from the chip/firmware; which is 64 bytes ...
	iqe_size = 64;
2326 * Set up default Queue Set parameters ... Start off with the
2327 * shortest interrupt holdoff timer.
2329 for (qs = 0; qs < s->max_ethqsets; qs++) {
2330 struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2331 struct sge_eth_txq *txq = &s->ethtxq[qs];
2333 init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2339 * The firmware event queue is used for link state changes and
2340 * notifications of TX DMA completions.
2342 init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2345 * The forwarded interrupt queue is used when we're in MSI interrupt
2346 * mode. In this mode all interrupts associated with RX queues will
2347 * be forwarded to a single queue which we'll associate with our MSI
2348 * interrupt vector. The messages dropped in the forwarded interrupt
2349 * queue will indicate which ingress queue needs servicing ... This
2350 * queue needs to be large enough to accommodate all of the ingress
2351 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2352 * from equalling the CIDX if every ingress queue has an outstanding
2353 * interrupt). The queue doesn't need to be any larger because no
 * ingress queue will ever have more than one outstanding interrupt at
 * any given time ...
	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
		  iqe_size);
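	/*
	 * For instance, if MSIX_ENTRIES were 32, the forwarded interrupt
	 * queue above would be sized at 33 entries: enough for every
	 * possible forwarding ingress queue to have one outstanding
	 * message, plus the extra slot that keeps the PIDX from catching
	 * up to the CIDX.
	 */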
2362 * Reduce the number of Ethernet queues across all ports to at most n.
2363 * n provides at least one queue per port.
2365 static void reduce_ethqs(struct adapter *adapter, int n)
2368 struct port_info *pi;
 * While we have too many active Ethernet Queue Sets, iterate across the
2372 * "ports" and reduce their individual Queue Set allocations.
2374 BUG_ON(n < adapter->params.nports);
2375 while (n < adapter->sge.ethqsets)
2376 for_each_port(adapter, i) {
2377 pi = adap2pinfo(adapter, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adapter->sge.ethqsets--;
				if (adapter->sge.ethqsets <= n)
					break;
			}
		}
2387 * Reassign the starting Queue Sets for each of the "ports" ...
	n = 0;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
2398 * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
2399 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2400 * need. Minimally we need one for every Virtual Interface plus those needed
2401 * for our "extras". Note that this process may lower the maximum number of
2402 * allowed Queue Sets ...
2404 static int enable_msix(struct adapter *adapter)
2406 int i, err, want, need;
2407 struct msix_entry entries[MSIX_ENTRIES];
2408 struct sge *s = &adapter->sge;
2410 for (i = 0; i < MSIX_ENTRIES; ++i)
2411 entries[i].entry = i;
2414 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2415 * plus those needed for our "extras" (for example, the firmware
2416 * message queue). We _need_ at least one "Queue Set" per Virtual
2417 * Interface plus those needed for our "extras". So now we get to see
2418 * if the song is right ...
2420 want = s->max_ethqsets + MSIX_EXTRAS;
2421 need = adapter->params.nports + MSIX_EXTRAS;
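	/*
	 * Hypothetically, with max_ethqsets = 8, two ports and
	 * MSIX_EXTRAS = 2, we'd ask for want = 10 vectors but settle for
	 * anything down to need = 4, trimming the Queue Sets below to match
	 * whatever we actually receive.
	 */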
	while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
		want = err;

	if (err == 0) {
		int nqsets = want - MSIX_EXTRAS;
2427 if (nqsets < s->max_ethqsets) {
2428 dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2429 " for %d Queue Sets\n", nqsets);
2430 s->max_ethqsets = nqsets;
2431 if (nqsets < s->ethqsets)
2432 reduce_ethqs(adapter, nqsets);
2434 for (i = 0; i < want; ++i)
2435 adapter->msix_info[i].vec = entries[i].vector;
2436 } else if (err > 0) {
2437 pci_disable_msix(adapter->pdev);
2438 dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
2439 " not using MSI-X\n", err);
2444 static const struct net_device_ops cxgb4vf_netdev_ops = {
2445 .ndo_open = cxgb4vf_open,
2446 .ndo_stop = cxgb4vf_stop,
2447 .ndo_start_xmit = t4vf_eth_xmit,
2448 .ndo_get_stats = cxgb4vf_get_stats,
2449 .ndo_set_rx_mode = cxgb4vf_set_rxmode,
2450 .ndo_set_mac_address = cxgb4vf_set_mac_addr,
2451 .ndo_validate_addr = eth_validate_addr,
2452 .ndo_do_ioctl = cxgb4vf_do_ioctl,
2453 .ndo_change_mtu = cxgb4vf_change_mtu,
2454 .ndo_fix_features = cxgb4vf_fix_features,
2455 .ndo_set_features = cxgb4vf_set_features,
2456 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb4vf_poll_controller,
#endif
};
2462 * "Probe" a device: initialize a device and construct all kernel and driver
 * state needed to manage the device. This routine is called "init_one" in
 * the PF Driver ...
2466 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2467 const struct pci_device_id *ent)
2472 struct adapter *adapter;
2473 struct port_info *pi;
2474 struct net_device *netdev;
 * Print our driver banner the first time we're called to initialize a
 * device.
2480 pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
2483 * Initialize generic PCI device state.
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		return err;
	}
2492 * Reserve PCI resources for the device. If we can't get them some
2493 * other driver may have already claimed the device ...
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_disable_device;
	}
2502 * Set up our DMA mask: try for 64-bit address masking first and
2503 * fall back to 32-bit if we can't get 64 bits ...
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err == 0) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
				" coherent allocations\n");
			goto err_release_regions;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err != 0) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto err_release_regions;
		}
	}
2524 * Enable bus mastering for the device ...
2526 pci_set_master(pdev);
2529 * Allocate our adapter data structure and attach it to the device.
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto err_release_regions;
	}
2536 pci_set_drvdata(pdev, adapter);
2537 adapter->pdev = pdev;
2538 adapter->pdev_dev = &pdev->dev;
2541 * Initialize SMP data synchronization resources.
2543 spin_lock_init(&adapter->stats_lock);
2546 * Map our I/O registers in BAR0.
2548 adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto err_free_adapter;
	}
2556 * Initialize adapter level features.
2558 adapter->name = pci_name(pdev);
2559 adapter->msg_enable = dflt_msg_enable;
2560 err = adap_init0(adapter);
2565 * Allocate our "adapter ports" and stitch everything together.
2567 pmask = adapter->params.vfres.pmask;
2568 for_each_port(adapter, pidx) {
2572 * We simplistically allocate our virtual interfaces
2573 * sequentially across the port numbers to which we have
2574 * access rights. This should be configurable in some manner
2579 port_id = ffs(pmask) - 1;
2580 pmask &= ~(1 << port_id);
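		/*
		 * For example, if our port access mask were 0x6, the first
		 * pass here would pick port 1 (ffs(0x6) == 2) and clear its
		 * bit, leaving 0x4 so that the next pass picks port 2.
		 */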
2581 viid = t4vf_alloc_vi(adapter, port_id);
2583 dev_err(&pdev->dev, "cannot allocate VI for port %d:"
2584 " err=%d\n", port_id, viid);
2590 * Allocate our network device and stitch things together.
2592 netdev = alloc_etherdev_mq(sizeof(struct port_info),
2594 if (netdev == NULL) {
2595 t4vf_free_vi(adapter, viid);
2599 adapter->port[pidx] = netdev;
2600 SET_NETDEV_DEV(netdev, &pdev->dev);
2601 pi = netdev_priv(netdev);
2602 pi->adapter = adapter;
2604 pi->port_id = port_id;
2608 * Initialize the starting state of our "port" and register
2611 pi->xact_addr_filt = -1;
2612 netif_carrier_off(netdev);
2613 netdev->irq = pdev->irq;
2615 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
2616 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2617 NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
2618 netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_HIGHDMA;
2621 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX;
2623 netdev->features |= NETIF_F_HIGHDMA;
2625 netdev->priv_flags |= IFF_UNICAST_FLT;
2627 netdev->netdev_ops = &cxgb4vf_netdev_ops;
2628 SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
2631 * Initialize the hardware/software state for the port.
		err = t4vf_port_init(adapter, pidx);
		if (err) {
			dev_err(&pdev->dev, "cannot initialize port %d\n",
				pidx);
2642 * The "card" is now ready to go. If any errors occur during device
2643 * registration we do not fail the whole "card" but rather proceed
2644 * only with the ports we manage to register successfully. However we
2645 * must register at least one net device.
2647 for_each_port(adapter, pidx) {
2648 netdev = adapter->port[pidx];
		err = register_netdev(netdev);
		if (err) {
			dev_warn(&pdev->dev, "cannot register net device %s,"
				 " skipping\n", netdev->name);
			continue;
		}
		set_bit(pidx, &adapter->registered_device_map);
2661 if (adapter->registered_device_map == 0) {
2662 dev_err(&pdev->dev, "could not register any net devices\n");
2667 * Set up our debugfs entries.
2669 if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
2670 adapter->debugfs_root =
2671 debugfs_create_dir(pci_name(pdev),
2672 cxgb4vf_debugfs_root);
		if (IS_ERR_OR_NULL(adapter->debugfs_root))
			dev_warn(&pdev->dev, "could not create debugfs"
				 " directory");
		else
			setup_debugfs(adapter);
2681 * See what interrupts we'll be using. If we've been configured to
2682 * use MSI-X interrupts, try to enable them but fall back to using
2683 * MSI interrupts if we can't enable MSI-X interrupts. If we can't
2684 * get MSI interrupts we bail with the error.
2686 if (msi == MSI_MSIX && enable_msix(adapter) == 0)
2687 adapter->flags |= USING_MSIX;
	else {
		err = pci_enable_msi(pdev);
		if (err) {
			dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
				" err=%d\n",
				msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
			goto err_free_debugfs;
		}
		adapter->flags |= USING_MSI;
	}
2700 * Now that we know how many "ports" we have and what their types are,
2701 * and how many Queue Sets we can support, we can configure our queue
2704 cfg_queues(adapter);
2707 * Print a short notice on the existence and configuration of the new
2708 * VF network device ...
2710 for_each_port(adapter, pidx) {
2711 dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
2712 adapter->port[pidx]->name,
2713 (adapter->flags & USING_MSIX) ? "MSI-X" :
2714 (adapter->flags & USING_MSI) ? "MSI" : "");
2723 * Error recovery and exit code. Unwind state that's been created
2724 * so far and return the error.
2728 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2729 cleanup_debugfs(adapter);
2730 debugfs_remove_recursive(adapter->debugfs_root);
2734 for_each_port(adapter, pidx) {
2735 netdev = adapter->port[pidx];
2738 pi = netdev_priv(netdev);
2739 t4vf_free_vi(adapter, pi->viid);
2740 if (test_bit(pidx, &adapter->registered_device_map))
2741 unregister_netdev(netdev);
2742 free_netdev(netdev);
2746 iounmap(adapter->regs);
2750 pci_set_drvdata(pdev, NULL);
2752 err_release_regions:
2753 pci_release_regions(pdev);
2754 pci_set_drvdata(pdev, NULL);
2755 pci_clear_master(pdev);
2758 pci_disable_device(pdev);
2764 * "Remove" a device: tear down all kernel and driver state created in the
2765 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
2766 * that this is called "remove_one" in the PF Driver.)
2768 static void cxgb4vf_pci_remove(struct pci_dev *pdev)
2770 struct adapter *adapter = pci_get_drvdata(pdev);
2773 * Tear down driver state associated with device.
 * Stop all of our activity. Unregister the network devices,
2780 * disable interrupts, etc.
2782 for_each_port(adapter, pidx)
2783 if (test_bit(pidx, &adapter->registered_device_map))
2784 unregister_netdev(adapter->port[pidx]);
2785 t4vf_sge_stop(adapter);
2786 if (adapter->flags & USING_MSIX) {
2787 pci_disable_msix(adapter->pdev);
2788 adapter->flags &= ~USING_MSIX;
2789 } else if (adapter->flags & USING_MSI) {
2790 pci_disable_msi(adapter->pdev);
2791 adapter->flags &= ~USING_MSI;
2795 * Tear down our debugfs entries.
2797 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2798 cleanup_debugfs(adapter);
2799 debugfs_remove_recursive(adapter->debugfs_root);
2803 * Free all of the various resources which we've acquired ...
2805 t4vf_free_sge_resources(adapter);
2806 for_each_port(adapter, pidx) {
2807 struct net_device *netdev = adapter->port[pidx];
2808 struct port_info *pi;
2813 pi = netdev_priv(netdev);
2814 t4vf_free_vi(adapter, pi->viid);
2815 free_netdev(netdev);
2817 iounmap(adapter->regs);
2819 pci_set_drvdata(pdev, NULL);
2823 * Disable the device and release its PCI resources.
2825 pci_disable_device(pdev);
2826 pci_clear_master(pdev);
2827 pci_release_regions(pdev);
2831 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
2834 static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
2836 struct adapter *adapter;
2839 adapter = pci_get_drvdata(pdev);
2844 * Disable all Virtual Interfaces. This will shut down the
2845 * delivery of all ingress packets into the chip for these
2846 * Virtual Interfaces.
2848 for_each_port(adapter, pidx) {
2849 struct net_device *netdev;
2850 struct port_info *pi;
		if (!test_bit(pidx, &adapter->registered_device_map))
			continue;
2855 netdev = adapter->port[pidx];
2859 pi = netdev_priv(netdev);
2860 t4vf_enable_vi(adapter, pi->viid, false, false);
2864 * Free up all Queues which will prevent further DMA and
2865 * Interrupts allowing various internal pathways to drain.
2867 t4vf_free_sge_resources(adapter);
2871 * PCI Device registration data structures.
2873 #define CH_DEVICE(devid, idx) \
2874 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
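/*
 * For example, CH_DEVICE(0x4800, 0) expands to
 * { PCI_VENDOR_ID_CHELSIO, 0x4800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 * i.e. match a T440-dbg VF regardless of subsystem IDs, with driver
 * data 0.
 */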
2876 static struct pci_device_id cxgb4vf_pci_tbl[] = {
2877 CH_DEVICE(0xb000, 0), /* PE10K FPGA */
2878 CH_DEVICE(0x4800, 0), /* T440-dbg */
2879 CH_DEVICE(0x4801, 0), /* T420-cr */
2880 CH_DEVICE(0x4802, 0), /* T422-cr */
2881 CH_DEVICE(0x4803, 0), /* T440-cr */
2882 CH_DEVICE(0x4804, 0), /* T420-bch */
2883 CH_DEVICE(0x4805, 0), /* T440-bch */
2884 CH_DEVICE(0x4806, 0), /* T460-ch */
2885 CH_DEVICE(0x4807, 0), /* T420-so */
2886 CH_DEVICE(0x4808, 0), /* T420-cx */
2887 CH_DEVICE(0x4809, 0), /* T420-bt */
2888 CH_DEVICE(0x480a, 0), /* T404-bt */
2889 CH_DEVICE(0x480d, 0), /* T480-cr */
	CH_DEVICE(0x480e, 0),	/* T440-lp-cr */
	{ 0, }
};
2894 MODULE_DESCRIPTION(DRV_DESC);
2895 MODULE_AUTHOR("Chelsio Communications");
2896 MODULE_LICENSE("Dual BSD/GPL");
2897 MODULE_VERSION(DRV_VERSION);
2898 MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
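/*
 * PCI driver glue: binds the probe/remove/shutdown entry points above to
 * the device IDs listed in cxgb4vf_pci_tbl.
 */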
2900 static struct pci_driver cxgb4vf_driver = {
2901 .name = KBUILD_MODNAME,
2902 .id_table = cxgb4vf_pci_tbl,
2903 .probe = cxgb4vf_pci_probe,
2904 .remove = cxgb4vf_pci_remove,
	.shutdown = cxgb4vf_pci_shutdown,
};
2909 * Initialize global driver state.
2911 static int __init cxgb4vf_module_init(void)
2916 * Vet our module parameters.
2918 if (msi != MSI_MSIX && msi != MSI_MSI) {
2919 pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
			msi, MSI_MSIX, MSI_MSI);
		return -EINVAL;
	}
2924 /* Debugfs support is optional, just warn if this fails */
2925 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
2926 if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2927 pr_warn("could not create debugfs entry, continuing\n");
2929 ret = pci_register_driver(&cxgb4vf_driver);
2930 if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		debugfs_remove(cxgb4vf_debugfs_root);

	return ret;
}
2936 * Tear down global driver state.
2938 static void __exit cxgb4vf_module_exit(void)
2940 pci_unregister_driver(&cxgb4vf_driver);
2941 debugfs_remove(cxgb4vf_debugfs_root);
2944 module_init(cxgb4vf_module_init);
2945 module_exit(cxgb4vf_module_exit);