/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>
#define DRV_VERSION "1.3.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * free memory again.
 */
#define MAX_SGE_TIMERVAL 200U
enum {
	/*
	 * Physical Function provisioning constants.
	 */
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,

	/*
	 * Virtual Function provisioning constants.  We need two extra Ingress
	 * Queues with Interrupt capability to serve as the VF's Firmware
	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
	 * neither will have Free Lists associated with them.  For each
	 * Ethernet/Control Egress Queue and for each Free List, we need an
	 * Egress Context.
	 */
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PFs access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	for (;;) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask.
		 * Otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
		unsigned int pmask = portvec ^ (portvec & (portvec-1));
		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
	/*NOTREACHED*/
}
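/*
 * Worked example of the bit trick above (illustrative only): with
 * portvec = 0xA (ports 1 and 3 active), portvec & (portvec - 1) clears
 * the lowest set bit (0xA & 0x9 = 0x8), so the XOR isolates it:
 * pmask = 0xA ^ 0x8 = 0x2, i.e. the mask for port 1.
 */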
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),  /* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	{ 0, }
};
#define FW_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
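/*
 * Quick arithmetic check of the default (illustrative only): with the
 * offset set to 2, the 14-byte Ethernet header occupies buffer bytes
 * 2..15, so the IP header begins at byte 16, a 4-byte-aligned address
 * as required above.
 */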
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Since T5 has more PFs, we use the NUM_OF_PF_WITH_SRIOV_T5 macro as the
 * num_vf array size.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV_T5];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf,
		 "number of VFs for each of PFs 0-3 for T4 and PFs 0-7 for T5");
#endif
/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
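/*
 * Back-of-the-envelope check of the default selection (illustrative
 * only): 1 (IP Fragment) + 3 (MPS Match Type) + 8 (IP Protocol) +
 * 17 ([Inner] VLAN) + 3 (Port) + 1 (FCoE) = 33 bits, which fits within
 * the 36-bit compressed-header budget with 3 bits to spare.
 */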
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000:  s = "1000Mbps"; break;
		case SPEED_100:   s = "100Mbps"; break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
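/*
 * Note on the batching above (illustrative only): addr[] holds at most
 * seven pointers, so e.g. ten secondary unicast addresses are written as
 * a batch of seven followed by a batch of three.  "free" is true only
 * for the first t4_alloc_mac_filt() call, so exact-match filters written
 * by earlier batches are preserved by the later ones.
 */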
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

static struct workqueue_struct *workq;
/*
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */
	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	return 0;
}
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}
/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}
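/*
 * Example of the index-to-id mapping above (illustrative only): for a
 * port whose first queue set is 8 and queues[] = { 0, 1, 0, 1, ... },
 * the RSS slots alternate between the absolute IDs of ethrxq[8] and
 * ethrxq[9], spreading flows across the port's first two Rx queues.
 */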
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}
/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	u32 vers, exp_major;
	const struct fw_hdr *hdr;
	const struct firmware *fw;
	struct device *dev = adap->pdev_dev;
	char *fw_file_name;

	switch (CHELSIO_CHIP_VERSION(adap->chip)) {
	case CHELSIO_T4:
		fw_file_name = FW_FNAME;
		exp_major = FW_VERSION_MAJOR;
		break;
	case CHELSIO_T5:
		fw_file_name = FW5_FNAME;
		exp_major = FW_VERSION_MAJOR_T5;
		break;
	default:
		dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
		return -EINVAL;
	}

	ret = request_firmware(&fw, fw_file_name, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image %s, error %d\n",
			fw_file_name, ret);
		return ret;
	}

	hdr = (const struct fw_hdr *)fw->data;
	vers = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
		ret = -EINVAL;              /* wrong major version, won't do */
		goto out;
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
	    vers > adap->params.fw_vers) {
		dev_info(dev, "upgrading firmware ...\n");
		ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
				    /*force=*/false);
		if (!ret)
			dev_info(dev,
				 "firmware upgraded to version %pI4 from %s\n",
				 &hdr->fw_ver, fw_file_name);
		else
			dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
	} else {
		/*
		 * Tell our caller that we didn't upgrade the firmware.
		 */
		ret = -EINVAL;
	}

out:	release_firmware(fw);
	return ret;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}
static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	/* ... leading Tx octet/frame counters elided ... */
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",
	/* ... Tx error and pause counters elided ... */
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	/* ... Rx octet/frame counters elided ... */
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",
	/* ... Rx error and pause counters elided ... */
	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",
	/* ... per-queue stats strings elided ... */
	"WriteCoalSuccess   ",
	"WriteCoalFail      ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			"%u.%u.%u.%u, TP %u.%u.%u.%u",
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};
static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
		data++;
		*data = val2;
	} else {
		memset(data, 0, 2 * sizeof(u64));
	}
}
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->chip) |
		(CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int t4_reg_ranges[] = {
		/* ... T4 register address ranges elided ... */
	};

	static const unsigned int t5_reg_ranges[] = {
		/* ... T5 register address ranges elided ... */
	};

	int i;
	struct adapter *ap = netdev2adap(dev);
	static const unsigned int *reg_ranges;
	int arr_size = 0, buf_size = 0;

	if (is_t4(ap->chip)) {
		reg_ranges = &t4_reg_ranges[0];
		arr_size = ARRAY_SIZE(t4_reg_ranges);
		buf_size = T4_REGMAP_SIZE;
	} else {
		reg_ranges = &t5_reg_ranges[0];
		arr_size = ARRAY_SIZE(t5_reg_ranges);
		buf_size = T5_REGMAP_SIZE;
	}

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, buf_size);
	for (i = 0; i < arr_size; i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
		v |= SUPPORTED_FIBRE;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}
static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	return v;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_FIBRE;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static unsigned int speed_to_caps(int speed)
{
	if (speed == SPEED_100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == SPEED_1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == SPEED_10000)
		return FW_PORT_CAP_SPEED_10G;
	return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap) || (speed == SPEED_1000) ||
		    (speed == SPEED_10000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
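/*
 * Worked example (illustrative only): with the default SGE timers
 * { 5, 10, 20, 50, 100, 200 } us, closest_timer(s, 7) returns index 0
 * since |7 - 5| = 2 beats |7 - 10| = 3; ties keep the lower index
 * because only strictly smaller deltas replace the current match.
 */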
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adap: the adapter
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
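/*
 * Example usage (illustrative only): a caller wanting interrupts no more
 * often than every 50 us or every 8 packets, whichever comes first, would
 * call set_rxq_intr_params(adap, q, 50, 8); both values are snapped to
 * the nearest entries of sge.timer_val[] and sge.counter_val[] above.
 */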
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;
	int i;
	int r = 0;

	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
		q = &adap->sge.ethrxq[i].rspq;
		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
					c->rx_max_coalesced_frames);
		if (r) {
			dev_err(&dev->dev, "failed to set coalesce %d\n", r);
			break;
		}
	}
	return r;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	return 0;
}
/**
 *	eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [31K-A..31K)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
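/*
 * Worked example of the mapping above (illustrative only): with @fn = 1
 * and @sz = 1024, so A = 1024, physical address 0 maps to virtual 31744
 * (31K), physical 1024 maps to 31744 - 1024 + 1024 - 1024 = 30720
 * (31K - A), and physical 2048 maps to 2048 - 1024 - 1024 = 0, the
 * start of the flat region.
 */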
/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}
#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
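/*
 * Worked example of the alignment above (illustrative only): a write of
 * 5 bytes at EEPROM offset 7 yields aligned_offset = 4 and aligned_len =
 * (5 + 3 + 3) & ~3 = 8, i.e. the two words covering bytes 4..11; both
 * boundary words are read back first so the untouched bytes survive the
 * read-modify-write.
 */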
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	ret = t4_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
	return ret;
}
#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_RX;
	return err;
}
static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	for (i = 0; i < pi->rss_size; i++)
		pi->rss[i] = p[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh_indir    = get_rss_table,
	.set_rxfh_indir    = set_rss_table,
	.flash_device      = set_flash,
};
static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file_inode(file)->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct adapter *adap = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mem, pos, data, NULL);
		if (ret)
			return ret;

		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = simple_open,
	.read    = mem_read,
	.llseek  = default_llseek,
};
2874 static void add_debugfs_mem(struct adapter *adap, const char *name,
2875 unsigned int idx, unsigned int size_mb)
2879 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2880 (void *)adap + idx, &mem_debugfs_fops);
2881 if (de && de->d_inode)
2882 de->d_inode->i_size = size_mb << 20;
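/*
 * Note on the scheme above: add_debugfs_mem() tags the memory-region index
 * into the low two bits of the file's private data pointer
 * ((void *)adap + idx), and mem_read() recovers both pieces with
 * "mem = (uintptr_t)private_data & 3" and "adap = private_data - mem".
 * This is safe because the adapter structure is allocated with more than
 * 4-byte alignment, so those bits of the real pointer are always zero.
 */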
2885 static int setup_debugfs(struct adapter *adap)
2889 if (IS_ERR_OR_NULL(adap->debugfs_root))
2892 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2893 if (i & EDRAM0_ENABLE)
2894 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2895 if (i & EDRAM1_ENABLE)
2896 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2897 if (i & EXT_MEM_ENABLE)
2898 add_debugfs_mem(adap, "mc", MEM_MC,
2899 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2901 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2907 * upper-layer driver support
2911 * Allocate an active-open TID and set it to the supplied value.
2913 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2917 spin_lock_bh(&t->atid_lock);
2919 union aopen_entry *p = t->afree;
2921 atid = (p - t->atid_tab) + t->atid_base;
2926 spin_unlock_bh(&t->atid_lock);
2929 EXPORT_SYMBOL(cxgb4_alloc_atid);
2932 * Release an active-open TID.
2934 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2936 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2938 spin_lock_bh(&t->atid_lock);
2942 spin_unlock_bh(&t->atid_lock);
2944 EXPORT_SYMBOL(cxgb4_free_atid);
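/*
 * Implementation note: the ATID allocator relies on union aopen_entry,
 * whose storage doubles as a free-list "next" pointer while an entry is
 * free and as the caller's data pointer while it is allocated.
 * cxgb4_alloc_atid() pops the head of t->afree and derives the ATID from
 * the entry's offset within atid_tab (plus atid_base); cxgb4_free_atid()
 * pushes the entry back onto the free list.
 */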
2947 * Allocate a server TID and set it to the supplied value.
2949 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2953 spin_lock_bh(&t->stid_lock);
2954 if (family == PF_INET) {
2955 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2956 if (stid < t->nstids)
2957 __set_bit(stid, t->stid_bmap);
2961 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2966 t->stid_tab[stid].data = data;
2967 stid += t->stid_base;
2970 spin_unlock_bh(&t->stid_lock);
2973 EXPORT_SYMBOL(cxgb4_alloc_stid);
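/*
 * Note: stid allocation is address-family dependent. A PF_INET server
 * takes a single bit from stid_bmap, while the non-IPv4 path uses
 * bitmap_find_free_region(..., 2) to claim an aligned block of four
 * consecutive entries, the assumption being that an IPv6 server address
 * needs the lookup space of four IPv4 entries. cxgb4_free_stid() mirrors
 * this with __clear_bit() versus bitmap_release_region(..., 2).
 */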
2975 /* Allocate a server filter TID and set it to the supplied value.
2977 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
2981 spin_lock_bh(&t->stid_lock);
2982 if (family == PF_INET) {
2983 stid = find_next_zero_bit(t->stid_bmap,
2984 t->nstids + t->nsftids, t->nstids);
2985 if (stid < (t->nstids + t->nsftids))
2986 __set_bit(stid, t->stid_bmap);
2993 t->stid_tab[stid].data = data;
2994 stid += t->stid_base;
2997 spin_unlock_bh(&t->stid_lock);
3000 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3002 /* Release a server TID.
3004 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3006 stid -= t->stid_base;
3007 spin_lock_bh(&t->stid_lock);
3008 if (family == PF_INET)
3009 __clear_bit(stid, t->stid_bmap);
3011 bitmap_release_region(t->stid_bmap, stid, 2);
3012 t->stid_tab[stid].data = NULL;
3014 spin_unlock_bh(&t->stid_lock);
3016 EXPORT_SYMBOL(cxgb4_free_stid);
3019 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3021 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3024 struct cpl_tid_release *req;
3026 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3027 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3028 INIT_TP_WR(req, tid);
3029 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3033 * Queue a TID release request and if necessary schedule a work queue to process it.
3036 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3039 void **p = &t->tid_tab[tid];
3040 struct adapter *adap = container_of(t, struct adapter, tids);
3042 spin_lock_bh(&adap->tid_release_lock);
3043 *p = adap->tid_release_head;
3044 /* Low 2 bits encode the Tx channel number */
3045 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3046 if (!adap->tid_release_task_busy) {
3047 adap->tid_release_task_busy = true;
3048 queue_work(workq, &adap->tid_release_task);
3050 spin_unlock_bh(&adap->tid_release_lock);
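/*
 * Deferred releases are chained through the tid_tab[] slots themselves:
 * each pending slot stores the previous list head, and the Tx channel
 * number rides in the low two bits of tid_release_head (slot addresses
 * are pointer-aligned, so those bits are free). The work handler below
 * unpicks the tag, recovers the TID from the slot's offset within
 * tid_tab, and emits one CPL_TID_RELEASE per pending entry.
 */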
3054 * Process the list of pending TID release requests.
3056 static void process_tid_release_list(struct work_struct *work)
3058 struct sk_buff *skb;
3059 struct adapter *adap;
3061 adap = container_of(work, struct adapter, tid_release_task);
3063 spin_lock_bh(&adap->tid_release_lock);
3064 while (adap->tid_release_head) {
3065 void **p = adap->tid_release_head;
3066 unsigned int chan = (uintptr_t)p & 3;
3067 p = (void *)p - chan;
3069 adap->tid_release_head = *p;
3071 spin_unlock_bh(&adap->tid_release_lock);
3073 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3075 schedule_timeout_uninterruptible(1);
3077 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3078 t4_ofld_send(adap, skb);
3079 spin_lock_bh(&adap->tid_release_lock);
3081 adap->tid_release_task_busy = false;
3082 spin_unlock_bh(&adap->tid_release_lock);
3086 * Release a TID and inform HW. If we are unable to allocate the release
3087 * message we defer to a work queue.
3089 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3092 struct sk_buff *skb;
3093 struct adapter *adap = container_of(t, struct adapter, tids);
3095 old = t->tid_tab[tid];
3096 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3098 t->tid_tab[tid] = NULL;
3099 mk_tid_release(skb, chan, tid);
3100 t4_ofld_send(adap, skb);
3102 cxgb4_queue_tid_release(t, chan, tid);
3104 atomic_dec(&t->tids_in_use);
3106 EXPORT_SYMBOL(cxgb4_remove_tid);
3109 * Allocate and initialize the TID tables. Returns 0 on success.
3111 static int tid_init(struct tid_info *t)
3114 unsigned int stid_bmap_size;
3115 unsigned int natids = t->natids;
3117 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3118 size = t->ntids * sizeof(*t->tid_tab) +
3119 natids * sizeof(*t->atid_tab) +
3120 t->nstids * sizeof(*t->stid_tab) +
3121 t->nsftids * sizeof(*t->stid_tab) +
3122 stid_bmap_size * sizeof(long) +
3123 t->nftids * sizeof(*t->ftid_tab) +
3124 t->nsftids * sizeof(*t->ftid_tab);
3126 t->tid_tab = t4_alloc_mem(size);
3130 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3131 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3132 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3133 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3134 spin_lock_init(&t->stid_lock);
3135 spin_lock_init(&t->atid_lock);
3137 t->stids_in_use = 0;
3139 t->atids_in_use = 0;
3140 atomic_set(&t->tids_in_use, 0);
3142 /* Setup the free list for atid_tab and clear the stid bitmap. */
3145 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3146 t->afree = t->atid_tab;
3148 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
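/*
 * Layout note: everything above lives in a single t4_alloc_mem()
 * allocation that tid_init() carves up in order:
 *
 *	tid_tab | atid_tab | stid_tab (+ sftids) | stid_bmap | ftid_tab
 *
 * so freeing tid_tab releases every table at once.
 */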
3153 * cxgb4_create_server - create an IP server
3155 * @stid: the server TID
3156 * @sip: local IP address to bind server to
3157 * @sport: the server's TCP port
3158 * @queue: queue to direct messages from this server to
3160 * Create an IP server for the given port and address.
3161 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3163 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3164 __be32 sip, __be16 sport, __be16 vlan,
3168 struct sk_buff *skb;
3169 struct adapter *adap;
3170 struct cpl_pass_open_req *req;
3172 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3176 adap = netdev2adap(dev);
3177 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3179 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3180 req->local_port = sport;
3181 req->peer_port = htons(0);
3182 req->local_ip = sip;
3183 req->peer_ip = htonl(0);
3184 chan = rxq_to_chan(&adap->sge, queue);
3185 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3186 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3187 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3188 return t4_mgmt_tx(adap, skb);
3190 EXPORT_SYMBOL(cxgb4_create_server);
3193 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3194 * @mtus: the HW MTU table
3195 * @mtu: the target MTU
3196 * @idx: index of selected entry in the MTU table
3198 * Returns the index and the value in the HW MTU table that is closest to
3199 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3200 * table, in which case that smallest available value is selected.
3202 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3207 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3213 EXPORT_SYMBOL(cxgb4_best_mtu);
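/*
 * Worked example (table values illustrative, not the hardware defaults):
 * given an MTU table beginning { 88, 256, 512, 576, 808, 1024, 1280,
 * 1488, 1500, ... }, a call such as
 *
 *	unsigned int idx;
 *	unsigned int mtu = cxgb4_best_mtu(mtus, 1400, &idx);
 *
 * returns 1280 with idx at its slot, since the scan stops at the last
 * entry that does not exceed the target; a target below mtus[0] simply
 * selects mtus[0].
 */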
3216 * cxgb4_port_chan - get the HW channel of a port
3217 * @dev: the net device for the port
3219 * Return the HW Tx channel of the given port.
3221 unsigned int cxgb4_port_chan(const struct net_device *dev)
3223 return netdev2pinfo(dev)->tx_chan;
3225 EXPORT_SYMBOL(cxgb4_port_chan);
3227 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3229 struct adapter *adap = netdev2adap(dev);
3230 u32 v1, v2, lp_count, hp_count;
3232 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3233 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3234 if (is_t4(adap->chip)) {
3235 lp_count = G_LP_COUNT(v1);
3236 hp_count = G_HP_COUNT(v1);
3238 lp_count = G_LP_COUNT_T5(v1);
3239 hp_count = G_HP_COUNT_T5(v2);
3241 return lpfifo ? lp_count : hp_count;
3243 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3246 * cxgb4_port_viid - get the VI id of a port
3247 * @dev: the net device for the port
3249 * Return the VI id of the given port.
3251 unsigned int cxgb4_port_viid(const struct net_device *dev)
3253 return netdev2pinfo(dev)->viid;
3255 EXPORT_SYMBOL(cxgb4_port_viid);
3258 * cxgb4_port_idx - get the index of a port
3259 * @dev: the net device for the port
3261 * Return the index of the given port.
3263 unsigned int cxgb4_port_idx(const struct net_device *dev)
3265 return netdev2pinfo(dev)->port_id;
3267 EXPORT_SYMBOL(cxgb4_port_idx);
3269 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3270 struct tp_tcp_stats *v6)
3272 struct adapter *adap = pci_get_drvdata(pdev);
3274 spin_lock(&adap->stats_lock);
3275 t4_tp_get_tcp_stats(adap, v4, v6);
3276 spin_unlock(&adap->stats_lock);
3278 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3280 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3281 const unsigned int *pgsz_order)
3283 struct adapter *adap = netdev2adap(dev);
3285 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3286 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3287 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3288 HPZ3(pgsz_order[3]));
3290 EXPORT_SYMBOL(cxgb4_iscsi_init);
3292 int cxgb4_flush_eq_cache(struct net_device *dev)
3294 struct adapter *adap = netdev2adap(dev);
3297 ret = t4_fwaddrspace_write(adap, adap->mbox,
3298 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3301 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3303 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3305 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3309 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3311 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3312 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3317 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3320 struct adapter *adap = netdev2adap(dev);
3321 u16 hw_pidx, hw_cidx;
3324 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3328 if (pidx != hw_pidx) {
3331 if (pidx >= hw_pidx)
3332 delta = pidx - hw_pidx;
3334 delta = size - hw_pidx + pidx;
3336 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3337 QID(qid) | PIDX(delta));
3342 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
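/*
 * The delta written above is the number of descriptors by which the
 * hardware producer index trails the software one, with ring wrap handled
 * explicitly: if pidx >= hw_pidx the difference is direct, otherwise the
 * software index has wrapped and the gap is size - hw_pidx + pidx.
 * Writing QID(qid) | PIDX(delta) to the kernel doorbell replays the
 * doorbells the hardware missed.
 */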
3344 static struct pci_driver cxgb4_driver;
3346 static void check_neigh_update(struct neighbour *neigh)
3348 const struct device *parent;
3349 const struct net_device *netdev = neigh->dev;
3351 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3352 netdev = vlan_dev_real_dev(netdev);
3353 parent = netdev->dev.parent;
3354 if (parent && parent->driver == &cxgb4_driver.driver)
3355 t4_l2t_update(dev_get_drvdata(parent), neigh);
3358 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3362 case NETEVENT_NEIGH_UPDATE:
3363 check_neigh_update(data);
3365 case NETEVENT_REDIRECT:
3372 static bool netevent_registered;
3373 static struct notifier_block cxgb4_netevent_nb = {
3374 .notifier_call = netevent_cb
3377 static void drain_db_fifo(struct adapter *adap, int usecs)
3379 u32 v1, v2, lp_count, hp_count;
3382 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3383 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3384 if (is_t4(adap->chip)) {
3385 lp_count = G_LP_COUNT(v1);
3386 hp_count = G_HP_COUNT(v1);
3388 lp_count = G_LP_COUNT_T5(v1);
3389 hp_count = G_HP_COUNT_T5(v2);
3392 if (lp_count == 0 && hp_count == 0)
3394 set_current_state(TASK_UNINTERRUPTIBLE);
3395 schedule_timeout(usecs_to_jiffies(usecs));
3399 static void disable_txq_db(struct sge_txq *q)
3401 spin_lock_irq(&q->db_lock);
3403 spin_unlock_irq(&q->db_lock);
3406 static void enable_txq_db(struct sge_txq *q)
3408 spin_lock_irq(&q->db_lock);
3410 spin_unlock_irq(&q->db_lock);
3413 static void disable_dbs(struct adapter *adap)
3417 for_each_ethrxq(&adap->sge, i)
3418 disable_txq_db(&adap->sge.ethtxq[i].q);
3419 for_each_ofldrxq(&adap->sge, i)
3420 disable_txq_db(&adap->sge.ofldtxq[i].q);
3421 for_each_port(adap, i)
3422 disable_txq_db(&adap->sge.ctrlq[i].q);
3425 static void enable_dbs(struct adapter *adap)
3429 for_each_ethrxq(&adap->sge, i)
3430 enable_txq_db(&adap->sge.ethtxq[i].q);
3431 for_each_ofldrxq(&adap->sge, i)
3432 enable_txq_db(&adap->sge.ofldtxq[i].q);
3433 for_each_port(adap, i)
3434 enable_txq_db(&adap->sge.ctrlq[i].q);
3437 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3439 u16 hw_pidx, hw_cidx;
3442 spin_lock_bh(&q->db_lock);
3443 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3446 if (q->db_pidx != hw_pidx) {
3449 if (q->db_pidx >= hw_pidx)
3450 delta = q->db_pidx - hw_pidx;
3452 delta = q->size - hw_pidx + q->db_pidx;
3454 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3455 QID(q->cntxt_id) | PIDX(delta));
3459 spin_unlock_bh(&q->db_lock);
3461 CH_WARN(adap, "DB drop recovery failed.\n");
3463 static void recover_all_queues(struct adapter *adap)
3467 for_each_ethrxq(&adap->sge, i)
3468 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3469 for_each_ofldrxq(&adap->sge, i)
3470 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3471 for_each_port(adap, i)
3472 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3475 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3477 mutex_lock(&uld_mutex);
3478 if (adap->uld_handle[CXGB4_ULD_RDMA])
3479 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3481 mutex_unlock(&uld_mutex);
3484 static void process_db_full(struct work_struct *work)
3486 struct adapter *adap;
3488 adap = container_of(work, struct adapter, db_full_task);
3490 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3491 drain_db_fifo(adap, dbfifo_drain_delay);
3492 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3493 DBFIFO_HP_INT | DBFIFO_LP_INT,
3494 DBFIFO_HP_INT | DBFIFO_LP_INT);
3495 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3498 static void process_db_drop(struct work_struct *work)
3500 struct adapter *adap;
3502 adap = container_of(work, struct adapter, db_drop_task);
3504 if (is_t4(adap->chip)) {
3506 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3507 drain_db_fifo(adap, 1);
3508 recover_all_queues(adap);
3511 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3512 u16 qid = (dropped_db >> 15) & 0x1ffff;
3513 u16 pidx_inc = dropped_db & 0x1fff;
3515 unsigned short udb_density;
3516 unsigned long qpshift;
3520 dev_warn(adap->pdev_dev,
3521 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3523 (dropped_db >> 14) & 1,
3524 (dropped_db >> 13) & 1,
3527 drain_db_fifo(adap, 1);
3529 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3530 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3531 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3532 qpshift = PAGE_SHIFT - ilog2(udb_density);
3533 udb = qid << qpshift;
3535 page = udb / PAGE_SIZE;
3536 udb += (qid - (page * udb_density)) * 128;
3538 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3540 /* Re-enable BAR2 WC */
3541 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3544 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
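/*
 * Note on the T5 branch above: it rebuilds the user (BAR2) doorbell
 * address of the queue that dropped a doorbell. QUEUESPERPAGEPF0 gives
 * how many egress queues share one BAR2 page (udb_density), qpshift
 * converts a qid into a page-granular offset, and each queue then owns a
 * 128-byte doorbell region within its page; the kernel doorbell register
 * sits 8 bytes into that region, hence the writel() at bar2 + udb + 8.
 */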
3547 void t4_db_full(struct adapter *adap)
3549 if (is_t4(adap->chip)) {
3550 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3551 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3552 queue_work(workq, &adap->db_full_task);
3556 void t4_db_dropped(struct adapter *adap)
3558 if (is_t4(adap->chip))
3559 queue_work(workq, &adap->db_drop_task);
3562 static void uld_attach(struct adapter *adap, unsigned int uld)
3565 struct cxgb4_lld_info lli;
3568 lli.pdev = adap->pdev;
3569 lli.l2t = adap->l2t;
3570 lli.tids = &adap->tids;
3571 lli.ports = adap->port;
3572 lli.vr = &adap->vres;
3573 lli.mtus = adap->params.mtus;
3574 if (uld == CXGB4_ULD_RDMA) {
3575 lli.rxq_ids = adap->sge.rdma_rxq;
3576 lli.nrxq = adap->sge.rdmaqs;
3577 } else if (uld == CXGB4_ULD_ISCSI) {
3578 lli.rxq_ids = adap->sge.ofld_rxq;
3579 lli.nrxq = adap->sge.ofldqsets;
3581 lli.ntxq = adap->sge.ofldqsets;
3582 lli.nchan = adap->params.nports;
3583 lli.nports = adap->params.nports;
3584 lli.wr_cred = adap->params.ofldq_wr_cred;
3585 lli.adapter_type = adap->params.rev;
3586 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3587 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3588 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3590 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3591 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3593 lli.filt_mode = adap->filter_mode;
3594 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3595 for (i = 0; i < NCHAN; i++)
3597 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3598 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3599 lli.fw_vers = adap->params.fw_vers;
3600 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3601 lli.sge_pktshift = adap->sge.pktshift;
3602 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3604 handle = ulds[uld].add(&lli);
3605 if (IS_ERR(handle)) {
3606 dev_warn(adap->pdev_dev,
3607 "could not attach to the %s driver, error %ld\n",
3608 uld_str[uld], PTR_ERR(handle));
3612 adap->uld_handle[uld] = handle;
3614 if (!netevent_registered) {
3615 register_netevent_notifier(&cxgb4_netevent_nb);
3616 netevent_registered = true;
3619 if (adap->flags & FULL_INIT_DONE)
3620 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3623 static void attach_ulds(struct adapter *adap)
3627 mutex_lock(&uld_mutex);
3628 list_add_tail(&adap->list_node, &adapter_list);
3629 for (i = 0; i < CXGB4_ULD_MAX; i++)
3631 uld_attach(adap, i);
3632 mutex_unlock(&uld_mutex);
3635 static void detach_ulds(struct adapter *adap)
3639 mutex_lock(&uld_mutex);
3640 list_del(&adap->list_node);
3641 for (i = 0; i < CXGB4_ULD_MAX; i++)
3642 if (adap->uld_handle[i]) {
3643 ulds[i].state_change(adap->uld_handle[i],
3644 CXGB4_STATE_DETACH);
3645 adap->uld_handle[i] = NULL;
3647 if (netevent_registered && list_empty(&adapter_list)) {
3648 unregister_netevent_notifier(&cxgb4_netevent_nb);
3649 netevent_registered = false;
3651 mutex_unlock(&uld_mutex);
3654 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3658 mutex_lock(&uld_mutex);
3659 for (i = 0; i < CXGB4_ULD_MAX; i++)
3660 if (adap->uld_handle[i])
3661 ulds[i].state_change(adap->uld_handle[i], new_state);
3662 mutex_unlock(&uld_mutex);
3666 * cxgb4_register_uld - register an upper-layer driver
3667 * @type: the ULD type
3668 * @p: the ULD methods
3670 * Registers an upper-layer driver with this driver and notifies the ULD
3671 * about any presently available devices that support its type. Returns
3672 * %-EBUSY if a ULD of the same type is already registered.
3674 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3677 struct adapter *adap;
3679 if (type >= CXGB4_ULD_MAX)
3681 mutex_lock(&uld_mutex);
3682 if (ulds[type].add) {
3687 list_for_each_entry(adap, &adapter_list, list_node)
3688 uld_attach(adap, type);
3689 out: mutex_unlock(&uld_mutex);
3692 EXPORT_SYMBOL(cxgb4_register_uld);
3695 * cxgb4_unregister_uld - unregister an upper-layer driver
3696 * @type: the ULD type
3698 * Unregisters an existing upper-layer driver.
3700 int cxgb4_unregister_uld(enum cxgb4_uld type)
3702 struct adapter *adap;
3704 if (type >= CXGB4_ULD_MAX)
3706 mutex_lock(&uld_mutex);
3707 list_for_each_entry(adap, &adapter_list, list_node)
3708 adap->uld_handle[type] = NULL;
3709 ulds[type].add = NULL;
3710 mutex_unlock(&uld_mutex);
3713 EXPORT_SYMBOL(cxgb4_unregister_uld);
3716 * cxgb_up - enable the adapter
3717 * @adap: adapter being enabled
3719 * Called when the first port is enabled, this function performs the
3720 * actions necessary to make an adapter operational, such as completing
3721 * the initialization of HW modules, and enabling interrupts.
3723 * Must be called with the rtnl lock held.
3725 static int cxgb_up(struct adapter *adap)
3729 err = setup_sge_queues(adap);
3732 err = setup_rss(adap);
3736 if (adap->flags & USING_MSIX) {
3737 name_msix_vecs(adap);
3738 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
3739 adap->msix_info[0].desc, adap);
3743 err = request_msix_queue_irqs(adap);
3745 free_irq(adap->msix_info[0].vec, adap);
3749 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
3750 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
3751 adap->port[0]->name, adap);
3757 t4_intr_enable(adap);
3758 adap->flags |= FULL_INIT_DONE;
3759 notify_ulds(adap, CXGB4_STATE_UP);
3763 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
3765 t4_free_sge_resources(adap);
3769 static void cxgb_down(struct adapter *adapter)
3771 t4_intr_disable(adapter);
3772 cancel_work_sync(&adapter->tid_release_task);
3773 cancel_work_sync(&adapter->db_full_task);
3774 cancel_work_sync(&adapter->db_drop_task);
3775 adapter->tid_release_task_busy = false;
3776 adapter->tid_release_head = NULL;
3778 if (adapter->flags & USING_MSIX) {
3779 free_msix_queue_irqs(adapter);
3780 free_irq(adapter->msix_info[0].vec, adapter);
3782 free_irq(adapter->pdev->irq, adapter);
3783 quiesce_rx(adapter);
3784 t4_sge_stop(adapter);
3785 t4_free_sge_resources(adapter);
3786 adapter->flags &= ~FULL_INIT_DONE;
3790 * net_device operations
3792 static int cxgb_open(struct net_device *dev)
3795 struct port_info *pi = netdev_priv(dev);
3796 struct adapter *adapter = pi->adapter;
3798 netif_carrier_off(dev);
3800 if (!(adapter->flags & FULL_INIT_DONE)) {
3801 err = cxgb_up(adapter);
3806 err = link_start(dev);
3808 netif_tx_start_all_queues(dev);
3812 static int cxgb_close(struct net_device *dev)
3814 struct port_info *pi = netdev_priv(dev);
3815 struct adapter *adapter = pi->adapter;
3817 netif_tx_stop_all_queues(dev);
3818 netif_carrier_off(dev);
3819 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
3822 /* Return an error number if the indicated filter isn't writable ...
3824 static int writable_filter(struct filter_entry *f)
3834 /* Delete the filter at the specified index (if valid). This checks for all
3835 * the common problems with doing this, like the filter being locked or
3836 * currently pending in another operation, etc.
3838 static int delete_filter(struct adapter *adapter, unsigned int fidx)
3840 struct filter_entry *f;
3843 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
3846 f = &adapter->tids.ftid_tab[fidx];
3847 ret = writable_filter(f);
3851 return del_filter_wr(adapter, fidx);
3856 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
3857 __be32 sip, __be16 sport, __be16 vlan,
3858 unsigned int queue, unsigned char port, unsigned char mask)
3861 struct filter_entry *f;
3862 struct adapter *adap;
3866 adap = netdev2adap(dev);
3868 /* Adjust stid to correct filter index */
3869 stid -= adap->tids.nstids;
3870 stid += adap->tids.nftids;
3872 /* Check to make sure the filter requested is writable ...
3874 f = &adap->tids.ftid_tab[stid];
3875 ret = writable_filter(f);
3879 /* Clear out any old resources being used by the filter before
3880 * we start constructing the new filter.
3883 clear_filter(adap, f);
3885 /* Clear out filter specifications */
3886 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
3887 f->fs.val.lport = cpu_to_be16(sport);
3888 f->fs.mask.lport = ~0;
3890 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
3891 for (i = 0; i < 4; i++) {
3892 f->fs.val.lip[i] = val[i];
3893 f->fs.mask.lip[i] = ~0;
3895 if (adap->filter_mode & F_PORT) {
3896 f->fs.val.iport = port;
3897 f->fs.mask.iport = mask;
3903 /* Mark filter as locked */
3907 ret = set_filter_wr(adap, stid);
3909 clear_filter(adap, f);
3915 EXPORT_SYMBOL(cxgb4_create_server_filter);
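/*
 * Note on the index arithmetic above: server-filter "stids" are handed
 * out above the regular server TID range, so subtracting nstids and
 * adding nftids converts the caller's stid into the ftid_tab[] slot
 * backing the filter. cxgb4_remove_server_filter() below performs the
 * identical translation before deleting the entry.
 */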
3917 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
3918 unsigned int queue, bool ipv6)
3921 struct filter_entry *f;
3922 struct adapter *adap;
3924 adap = netdev2adap(dev);
3926 /* Adjust stid to correct filter index */
3927 stid -= adap->tids.nstids;
3928 stid += adap->tids.nftids;
3930 f = &adap->tids.ftid_tab[stid];
3931 /* Unlock the filter */
3934 ret = delete_filter(adap, stid);
3940 EXPORT_SYMBOL(cxgb4_remove_server_filter);
3942 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
3943 struct rtnl_link_stats64 *ns)
3945 struct port_stats stats;
3946 struct port_info *p = netdev_priv(dev);
3947 struct adapter *adapter = p->adapter;
3949 spin_lock(&adapter->stats_lock);
3950 t4_get_port_stats(adapter, p->tx_chan, &stats);
3951 spin_unlock(&adapter->stats_lock);
3953 ns->tx_bytes = stats.tx_octets;
3954 ns->tx_packets = stats.tx_frames;
3955 ns->rx_bytes = stats.rx_octets;
3956 ns->rx_packets = stats.rx_frames;
3957 ns->multicast = stats.rx_mcast_frames;
3959 /* detailed rx_errors */
3960 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3962 ns->rx_over_errors = 0;
3963 ns->rx_crc_errors = stats.rx_fcs_err;
3964 ns->rx_frame_errors = stats.rx_symbol_err;
3965 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
3966 stats.rx_ovflow2 + stats.rx_ovflow3 +
3967 stats.rx_trunc0 + stats.rx_trunc1 +
3968 stats.rx_trunc2 + stats.rx_trunc3;
3969 ns->rx_missed_errors = 0;
3971 /* detailed tx_errors */
3972 ns->tx_aborted_errors = 0;
3973 ns->tx_carrier_errors = 0;
3974 ns->tx_fifo_errors = 0;
3975 ns->tx_heartbeat_errors = 0;
3976 ns->tx_window_errors = 0;
3978 ns->tx_errors = stats.tx_error_frames;
3979 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3980 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3984 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3987 int ret = 0, prtad, devad;
3988 struct port_info *pi = netdev_priv(dev);
3989 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3993 if (pi->mdio_addr < 0)
3995 data->phy_id = pi->mdio_addr;
3999 if (mdio_phy_id_is_c45(data->phy_id)) {
4000 prtad = mdio_phy_id_prtad(data->phy_id);
4001 devad = mdio_phy_id_devad(data->phy_id);
4002 } else if (data->phy_id < 32) {
4003 prtad = data->phy_id;
4005 data->reg_num &= 0x1f;
4009 mbox = pi->adapter->fn;
4010 if (cmd == SIOCGMIIREG)
4011 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4012 data->reg_num, &data->val_out);
4014 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4015 data->reg_num, data->val_in);
4023 static void cxgb_set_rxmode(struct net_device *dev)
4025 /* unfortunately we can't return errors to the stack */
4026 set_rxmode(dev, -1, false);
4029 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4032 struct port_info *pi = netdev_priv(dev);
4034 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4036 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4043 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4046 struct sockaddr *addr = p;
4047 struct port_info *pi = netdev_priv(dev);
4049 if (!is_valid_ether_addr(addr->sa_data))
4050 return -EADDRNOTAVAIL;
4052 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4053 pi->xact_addr_filt, addr->sa_data, true, true);
4057 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4058 pi->xact_addr_filt = ret;
4062 #ifdef CONFIG_NET_POLL_CONTROLLER
4063 static void cxgb_netpoll(struct net_device *dev)
4065 struct port_info *pi = netdev_priv(dev);
4066 struct adapter *adap = pi->adapter;
4068 if (adap->flags & USING_MSIX) {
4070 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4072 for (i = pi->nqsets; i; i--, rx++)
4073 t4_sge_intr_msix(0, &rx->rspq);
4075 t4_intr_handler(adap)(0, adap);
4079 static const struct net_device_ops cxgb4_netdev_ops = {
4080 .ndo_open = cxgb_open,
4081 .ndo_stop = cxgb_close,
4082 .ndo_start_xmit = t4_eth_xmit,
4083 .ndo_get_stats64 = cxgb_get_stats,
4084 .ndo_set_rx_mode = cxgb_set_rxmode,
4085 .ndo_set_mac_address = cxgb_set_mac_addr,
4086 .ndo_set_features = cxgb_set_features,
4087 .ndo_validate_addr = eth_validate_addr,
4088 .ndo_do_ioctl = cxgb_ioctl,
4089 .ndo_change_mtu = cxgb_change_mtu,
4090 #ifdef CONFIG_NET_POLL_CONTROLLER
4091 .ndo_poll_controller = cxgb_netpoll,
4095 void t4_fatal_err(struct adapter *adap)
4097 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4098 t4_intr_disable(adap);
4099 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4102 static void setup_memwin(struct adapter *adap)
4106 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4107 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4108 (bar0 + MEMWIN0_BASE) | BIR(0) |
4109 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4110 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4111 (bar0 + MEMWIN1_BASE) | BIR(0) |
4112 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4113 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4114 (bar0 + MEMWIN2_BASE) | BIR(0) |
4115 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
4118 static void setup_memwin_rdma(struct adapter *adap)
4120 if (adap->vres.ocq.size) {
4121 unsigned int start, sz_kb;
4123 start = pci_resource_start(adap->pdev, 2) +
4124 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4125 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4127 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4128 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4130 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4131 adap->vres.ocq.start);
4133 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4137 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4142 /* get device capabilities */
4143 memset(c, 0, sizeof(*c));
4144 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4145 FW_CMD_REQUEST | FW_CMD_READ);
4146 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4147 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4151 /* select capabilities we'll be using */
4152 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4154 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4156 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4157 } else if (vf_acls) {
4158 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4161 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4162 FW_CMD_REQUEST | FW_CMD_WRITE);
4163 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4167 ret = t4_config_glbl_rss(adap, adap->fn,
4168 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4169 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4170 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4174 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4175 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4181 /* tweak some settings */
4182 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4183 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4184 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4185 v = t4_read_reg(adap, TP_PIO_DATA);
4186 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4188 /* first 4 Tx modulation queues point to consecutive Tx channels */
4189 adap->params.tp.tx_modq_map = 0xE4;
4190 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4191 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4193 /* associate each Tx modulation queue with consecutive Tx channels */
4195 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4196 &v, 1, A_TP_TX_SCHED_HDR);
4197 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4198 &v, 1, A_TP_TX_SCHED_FIFO);
4199 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4200 &v, 1, A_TP_TX_SCHED_PCMD);
4202 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4203 if (is_offload(adap)) {
4204 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4205 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4206 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4207 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4208 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4209 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4210 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4211 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4212 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4213 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4216 /* get basic stuff going */
4217 return t4_early_init(adap, adap->fn);
4221 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4223 #define MAX_ATIDS 8192U
4226 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4228 * If the firmware we're dealing with has Configuration File support, then
4229 * we use that to perform all configuration
4233 * Tweak configuration based on module parameters, etc. Most of these have
4234 * defaults assigned to them by Firmware Configuration Files (if we're using
4235 * them) but need to be explicitly set if we're using hard-coded
4236 * initialization. But even in the case of using Firmware Configuration
4237 * Files, we'd like to expose the ability to change these via module
4238 * parameters so these are essentially common tweaks/settings for
4239 * Configuration Files and hard-coded initialization ...
4241 static int adap_init0_tweaks(struct adapter *adapter)
4244 * Fix up various Host-Dependent Parameters like Page Size, Cache
4245 * Line Size, etc. The firmware default is for a 4KB Page Size and
4246 * 64B Cache Line Size ...
4248 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4251 * Process module parameters which affect early initialization.
4253 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4254 dev_err(&adapter->pdev->dev,
4255 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4259 t4_set_reg_field(adapter, SGE_CONTROL,
4261 PKTSHIFT(rx_dma_offset));
4264 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4265 * adds the pseudo header itself.
4267 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4268 CSUM_HAS_PSEUDO_HDR, 0);
4274 * Attempt to initialize the adapter via a Firmware Configuration File.
4276 static int adap_init0_config(struct adapter *adapter, int reset)
4278 struct fw_caps_config_cmd caps_cmd;
4279 const struct firmware *cf;
4280 unsigned long mtype = 0, maddr = 0;
4281 u32 finiver, finicsum, cfcsum;
4282 int ret, using_flash;
4283 char *fw_config_file, fw_config_file_path[256];
4286 * Reset device if necessary.
4289 ret = t4_fw_reset(adapter, adapter->mbox,
4290 PIORSTMODE | PIORST);
4296 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4297 * then use that. Otherwise, use the configuration file stored
4298 * in the adapter flash ...
4300 switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
4302 fw_config_file = FW_CFNAME;
4305 fw_config_file = FW5_CFNAME;
4308 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4309 adapter->pdev->device);
4314 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4317 mtype = FW_MEMTYPE_CF_FLASH;
4318 maddr = t4_flash_cfg_addr(adapter);
4320 u32 params[7], val[7];
4323 if (cf->size >= FLASH_CFG_MAX_SIZE)
4326 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4327 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4328 ret = t4_query_params(adapter, adapter->mbox,
4329 adapter->fn, 0, 1, params, val);
4332 * For t4_memory_write() below, addresses and
4333 * sizes have to be in terms of multiples of 4
4334 * bytes. So, if the Configuration File isn't
4335 * a multiple of 4 bytes in length we'll have
4336 * to write that out separately since we can't
4337 * guarantee that the bytes following the
4338 * residual byte in the buffer returned by
4339 * request_firmware() are zeroed out ...
4341 size_t resid = cf->size & 0x3;
4342 size_t size = cf->size & ~0x3;
4343 __be32 *data = (__be32 *)cf->data;
4345 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4346 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4348 ret = t4_memory_write(adapter, mtype, maddr,
4350 if (ret == 0 && resid != 0) {
4357 last.word = data[size >> 2];
4358 for (i = resid; i < 4; i++)
4360 ret = t4_memory_write(adapter, mtype,
4367 release_firmware(cf);
4373 * Issue a Capability Configuration command to the firmware to get it
4374 * to parse the Configuration File. We don't use t4_fw_config_file()
4375 * because we want the ability to modify various features after we've
4376 * processed the configuration file ...
4378 memset(&caps_cmd, 0, sizeof(caps_cmd));
4379 caps_cmd.op_to_write =
4380 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4383 caps_cmd.cfvalid_to_len16 =
4384 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4385 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4386 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4387 FW_LEN16(caps_cmd));
4388 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4393 finiver = ntohl(caps_cmd.finiver);
4394 finicsum = ntohl(caps_cmd.finicsum);
4395 cfcsum = ntohl(caps_cmd.cfcsum);
4396 if (finicsum != cfcsum)
4397 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4398 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4402 * And now tell the firmware to use the configuration we just loaded.
4404 caps_cmd.op_to_write =
4405 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4408 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4409 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4415 * Tweak configuration based on system architecture, module
4418 ret = adap_init0_tweaks(adapter);
4423 * And finally tell the firmware to initialize itself using the
4424 * parameters from the Configuration File.
4426 ret = t4_fw_initialize(adapter, adapter->mbox);
4430 sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
4432 * Return successfully and note that we're operating with parameters
4433 * not supplied by the driver, rather than from hard-wired
4434 * initialization constants buried in the driver.
4436 adapter->flags |= USING_SOFT_PARAMS;
4437 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4438 "Configuration File %s, version %#x, computed checksum %#x\n",
4441 : fw_config_file_path),
4446 * Something bad happened. Return the error ... (If the "error"
4447 * is that there's no Configuration File on the adapter we don't
4448 * want to issue a warning since this is fairly common.)
4452 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
4458 * Attempt to initialize the adapter via hard-coded, driver supplied
4461 static int adap_init0_no_config(struct adapter *adapter, int reset)
4463 struct sge *s = &adapter->sge;
4464 struct fw_caps_config_cmd caps_cmd;
4469 * Reset device if necessary
4472 ret = t4_fw_reset(adapter, adapter->mbox,
4473 PIORSTMODE | PIORST);
4479 * Get device capabilities and select which we'll be using.
4481 memset(&caps_cmd, 0, sizeof(caps_cmd));
4482 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4483 FW_CMD_REQUEST | FW_CMD_READ);
4484 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4485 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4490 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4492 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4494 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4495 } else if (vf_acls) {
4496 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4499 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4500 FW_CMD_REQUEST | FW_CMD_WRITE);
4501 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4507 * Tweak configuration based on system architecture, module
4510 ret = adap_init0_tweaks(adapter);
4515 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4516 * mode which maps each Virtual Interface to its own section of
4517 * the RSS Table and we turn on all map and hash enables ...
4519 adapter->flags |= RSS_TNLALLLOOKUP;
4520 ret = t4_config_glbl_rss(adapter, adapter->mbox,
4521 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4522 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4523 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4524 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4525 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4530 * Set up our own fundamental resource provisioning ...
4532 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4533 PFRES_NEQ, PFRES_NETHCTRL,
4534 PFRES_NIQFLINT, PFRES_NIQ,
4535 PFRES_TC, PFRES_NVI,
4536 FW_PFVF_CMD_CMASK_MASK,
4537 pfvfres_pmask(adapter, adapter->fn, 0),
4539 PFRES_R_CAPS, PFRES_WX_CAPS);
4544 * Perform low level SGE initialization. We need to do this before we
4545 * send the firmware the INITIALIZE command because that will cause
4546 * any other PF Drivers which are waiting for the Master
4547 * Initialization to proceed forward.
4549 for (i = 0; i < SGE_NTIMERS - 1; i++)
4550 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4551 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4552 s->counter_val[0] = 1;
4553 for (i = 1; i < SGE_NCOUNTERS; i++)
4554 s->counter_val[i] = min(intr_cnt[i - 1],
4555 THRESHOLD_0_GET(THRESHOLD_0_MASK));
4556 t4_sge_init(adapter);
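/*
 * The loops above seed the SGE interrupt hold-off machinery: timer_val[]
 * takes the intr_holdoff[] values clamped to MAX_SGE_TIMERVAL, with the
 * final slot pinned at that maximum, while counter_val[] holds the
 * packet-count thresholds, counter 0 being fixed at a single packet so a
 * queue can always ask for per-packet interrupts.
 */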
4558 #ifdef CONFIG_PCI_IOV
4560 * Provision resource limits for Virtual Functions. We currently
4561 * grant them all the same static resource limits except for the Port
4562 * Access Rights Mask which we're assigning based on the PF. All of
4563 * the static provisioning stuff for both the PF and VF really needs
4564 * to be managed in a persistent manner for each device which the
4565 * firmware controls.
4569 int max_no_pf = is_t4(adapter->chip) ? NUM_OF_PF_WITH_SRIOV_T4 :
4570 NUM_OF_PF_WITH_SRIOV_T5;
4572 for (pf = 0; pf < max_no_pf; pf++) {
4573 if (num_vf[pf] <= 0)
4576 /* VF numbering starts at 1! */
4577 for (vf = 1; vf <= num_vf[pf]; vf++) {
4578 ret = t4_cfg_pfvf(adapter, adapter->mbox,
4580 VFRES_NEQ, VFRES_NETHCTRL,
4581 VFRES_NIQFLINT, VFRES_NIQ,
4582 VFRES_TC, VFRES_NVI,
4583 FW_PFVF_CMD_CMASK_MASK,
4587 VFRES_R_CAPS, VFRES_WX_CAPS);
4589 dev_warn(adapter->pdev_dev,
4591 "provision pf/vf=%d/%d; "
4592 "err=%d\n", pf, vf, ret);
4599 * Set up the default filter mode. Later we'll want to implement this
4600 * via a firmware command, etc. ... This needs to be done before the
4601 * firmware initialization command ... If the selected set of fields
4602 * isn't equal to the default value, we'll need to make sure that the
4603 * field selections will fit in the 36-bit budget.
4605 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
4608 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4609 switch (tp_vlan_pri_map & (1 << j)) {
4611 /* compressed filter field not enabled */
4631 case ETHERTYPE_MASK:
4637 case MPSHITTYPE_MASK:
4640 case FRAGMENTATION_MASK:
4646 dev_err(adapter->pdev_dev,
4647 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
4648 " using %#x\n", tp_vlan_pri_map, bits,
4649 TP_VLAN_PRI_MAP_DEFAULT);
4650 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
4653 v = tp_vlan_pri_map;
4654 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
4655 &v, 1, TP_VLAN_PRI_MAP);
4658 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
4659 * to support any of the compressed filter fields above. Newer
4660 * versions of the firmware do this automatically but it doesn't hurt
4661 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
4662 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
4663 * since the firmware automatically turns this on and off when we have
4664 * a non-zero number of filters active (since it does have a
4665 * performance impact).
4667 if (tp_vlan_pri_map)
4668 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
4669 FIVETUPLELOOKUP_MASK,
4670 FIVETUPLELOOKUP_MASK);
4673 * Tweak some settings.
4675 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
4676 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
4677 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
4678 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
4681 * Get basic stuff going by issuing the Firmware Initialize command.
4682 * Note that this _must_ be after all PFVF commands ...
4684 ret = t4_fw_initialize(adapter, adapter->mbox);
4689 * Return successfully!
4691 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
4692 "driver parameters\n");
4696 * Something bad happened. Return the error ...
4703 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4705 static int adap_init0(struct adapter *adap)
4709 enum dev_state state;
4710 u32 params[7], val[7];
4711 struct fw_caps_config_cmd caps_cmd;
4715 * Contact FW, advertising Master capability (and potentially forcing
4716 * ourselves as the Master PF if our module parameter force_init is
4719 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
4720 force_init ? MASTER_MUST : MASTER_MAY,
4723 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4727 if (ret == adap->mbox)
4728 adap->flags |= MASTER_PF;
4729 if (force_init && state == DEV_STATE_INIT)
4730 state = DEV_STATE_UNINIT;
4733 * If we're the Master PF Driver and the device is uninitialized,
4734 * then let's consider upgrading the firmware ... (We always want
4735 * to check the firmware version number in order to A. get it for
4736 * later reporting and B. to warn if the currently loaded firmware
4737 * is excessively mismatched relative to the driver.)
4739 ret = t4_check_fw_version(adap);
4740 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4741 if (ret == -EINVAL || ret > 0) {
4742 if (upgrade_fw(adap) >= 0) {
4744 * Note that the chip was reset as part of the
4745 * firmware upgrade so we don't reset it again
4746 * below and grab the new firmware version.
4749 ret = t4_check_fw_version(adap);
4757 * Grab VPD parameters. This should be done after we establish a
4758 * connection to the firmware since some of the VPD parameters
4759 * (notably the Core Clock frequency) are retrieved via requests to
4760 * the firmware. On the other hand, we need these fairly early on
4761 * so we do this right after getting ahold of the firmware.
4763 ret = get_vpd_params(adap, &adap->params.vpd);
4768 * Find out what ports are available to us. Note that we need to do
4769 * this before calling adap_init0_no_config() since it needs nports
4773 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4774 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
4775 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
4779 adap->params.nports = hweight32(port_vec);
4780 adap->params.portvec = port_vec;
4783 * If the firmware is initialized already (and we're not forcing a
4784 * master initialization), note that we're living with existing
4785 * adapter parameters. Otherwise, it's time to try initializing the
4788 if (state == DEV_STATE_INIT) {
4789 dev_info(adap->pdev_dev, "Coming up as %s: "\
4790 "Adapter already initialized\n",
4791 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4792 adap->flags |= USING_SOFT_PARAMS;
4794 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4795 "Initializing adapter\n");
4798 * If the firmware doesn't support Configuration
4799 * Files, warn the user and exit,
4802 dev_warn(adap->pdev_dev, "Firmware doesn't support "
4803 "configuration file.\n");
4805 ret = adap_init0_no_config(adap, reset);
4808 * Find out whether we're dealing with a version of
4809 * the firmware which has configuration file support.
4811 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4812 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4813 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
4817 * If the firmware doesn't support Configuration
4818 * Files, use the old Driver-based, hard-wired
4819 * initialization. Otherwise, try using the
4820 * Configuration File support and fall back to the
4821 * Driver-based initialization if there's no
4822 * Configuration File found.
4825 ret = adap_init0_no_config(adap, reset);
4828 * The firmware provides us with a memory
4829 * buffer where we can load a Configuration
4830 * File from the host if we want to override
4831 * the Configuration File in flash.
4834 ret = adap_init0_config(adap, reset);
4835 if (ret == -ENOENT) {
4836 dev_info(adap->pdev_dev,
4837 "No Configuration File present "
4838 "on adapter. Using hard-wired "
4839 "configuration parameters.\n");
4840 ret = adap_init0_no_config(adap, reset);
4845 dev_err(adap->pdev_dev,
4846 "could not initialize adapter, error %d\n",
4853 * If we're living with non-hard-coded parameters (either from a
4854 * Firmware Configuration File or values programmed by a different PF
4855 * Driver), give the SGE code a chance to pull in anything that it
4856 * needs ... Note that this must be called after we retrieve our VPD
4857 * parameters in order to know how to convert core ticks to seconds.
4859 if (adap->flags & USING_SOFT_PARAMS) {
4860 ret = t4_sge_init(adap);
4865 if (is_bypass_device(adap->pdev->device))
4866 adap->params.bypass = 1;
4869 * Grab some of our basic fundamental operating parameters.
4871 #define FW_PARAM_DEV(param) \
4872 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4873 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4875 #define FW_PARAM_PFVF(param) \
4876 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4877 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
4878 FW_PARAMS_PARAM_Y(0) | \
4879 FW_PARAMS_PARAM_Z(0)
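/*
 * These helpers merely assemble 32-bit firmware parameter identifiers: a
 * mnemonic selecting the DEV or PFVF namespace plus an X (and, for PFVF,
 * zeroed Y/Z) field naming the parameter. For instance,
 * FW_PARAM_PFVF(L2T_START) yields the PFVF mnemonic combined with
 * FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_L2T_START), ready to be handed
 * to t4_query_params() as done just below.
 */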
4881 params[0] = FW_PARAM_PFVF(EQ_START);
4882 params[1] = FW_PARAM_PFVF(L2T_START);
4883 params[2] = FW_PARAM_PFVF(L2T_END);
4884 params[3] = FW_PARAM_PFVF(FILTER_START);
4885 params[4] = FW_PARAM_PFVF(FILTER_END);
4886 params[5] = FW_PARAM_PFVF(IQFLINT_START);
4887 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
4890 adap->sge.egr_start = val[0];
4891 adap->l2t_start = val[1];
4892 adap->l2t_end = val[2];
4893 adap->tids.ftid_base = val[3];
4894 adap->tids.nftids = val[4] - val[3] + 1;
4895 adap->sge.ingr_start = val[5];
4897 /* query params related to active filter region */
4898 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4899 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4900 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
4901 /* If the Active filter region is set we enable establishing
4902 * offload connections through firmware work requests
4904 if ((val[0] != val[1]) && (ret >= 0)) {
4905 adap->flags |= FW_OFLD_CONN;
4906 adap->tids.aftid_base = val[0];
4907 adap->tids.aftid_end = val[1];
4911 * Get device capabilities so we can determine what resources we need
4914 memset(&caps_cmd, 0, sizeof(caps_cmd));
4915 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4916 FW_CMD_REQUEST | FW_CMD_READ);
4917 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4918 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4923 if (caps_cmd.ofldcaps) {
4924 /* query offload-related parameters */
4925 params[0] = FW_PARAM_DEV(NTID);
4926 params[1] = FW_PARAM_PFVF(SERVER_START);
4927 params[2] = FW_PARAM_PFVF(SERVER_END);
4928 params[3] = FW_PARAM_PFVF(TDDP_START);
4929 params[4] = FW_PARAM_PFVF(TDDP_END);
4930 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4931 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
4935 adap->tids.ntids = val[0];
4936 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4937 adap->tids.stid_base = val[1];
4938 adap->tids.nstids = val[2] - val[1] + 1;
4940 * Set up the server filter region. Divide the available filter
4941 * region into two parts. Regular filters get 1/3rd and server
4942 * filters get the remaining 2/3rd. This is only enabled if the workaround
4944 * 1. For regular filters.
4945 * 2. Server filters: these are special filters which are used
4946 * to redirect SYN packets to the offload queue.
4948 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4949 adap->tids.sftid_base = adap->tids.ftid_base +
4950 DIV_ROUND_UP(adap->tids.nftids, 3);
4951 adap->tids.nsftids = adap->tids.nftids -
4952 DIV_ROUND_UP(adap->tids.nftids, 3);
4953 adap->tids.nftids = adap->tids.sftid_base -
4954 adap->tids.ftid_base;
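/*
 * Worked example of the split above (numbers illustrative): with
 * nftids = 492, DIV_ROUND_UP(492, 3) = 164, so 164 entries stay regular
 * filters (nftids becomes sftid_base - ftid_base = 164) and the
 * remaining 328 become server filters (nsftids = 492 - 164).
 */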
4956 adap->vres.ddp.start = val[3];
4957 adap->vres.ddp.size = val[4] - val[3] + 1;
4958 adap->params.ofldq_wr_cred = val[5];
4960 adap->params.offload = 1;
4962 if (caps_cmd.rdmacaps) {
4963 params[0] = FW_PARAM_PFVF(STAG_START);
4964 params[1] = FW_PARAM_PFVF(STAG_END);
4965 params[2] = FW_PARAM_PFVF(RQ_START);
4966 params[3] = FW_PARAM_PFVF(RQ_END);
4967 params[4] = FW_PARAM_PFVF(PBL_START);
4968 params[5] = FW_PARAM_PFVF(PBL_END);
4969 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
4973 adap->vres.stag.start = val[0];
4974 adap->vres.stag.size = val[1] - val[0] + 1;
4975 adap->vres.rq.start = val[2];
4976 adap->vres.rq.size = val[3] - val[2] + 1;
4977 adap->vres.pbl.start = val[4];
4978 adap->vres.pbl.size = val[5] - val[4] + 1;
4980 params[0] = FW_PARAM_PFVF(SQRQ_START);
4981 params[1] = FW_PARAM_PFVF(SQRQ_END);
4982 params[2] = FW_PARAM_PFVF(CQ_START);
4983 params[3] = FW_PARAM_PFVF(CQ_END);
4984 params[4] = FW_PARAM_PFVF(OCQ_START);
4985 params[5] = FW_PARAM_PFVF(OCQ_END);
4986 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
4989 adap->vres.qp.start = val[0];
4990 adap->vres.qp.size = val[1] - val[0] + 1;
4991 adap->vres.cq.start = val[2];
4992 adap->vres.cq.size = val[3] - val[2] + 1;
4993 adap->vres.ocq.start = val[4];
4994 adap->vres.ocq.size = val[5] - val[4] + 1;
4996 if (caps_cmd.iscsicaps) {
4997 params[0] = FW_PARAM_PFVF(ISCSI_START);
4998 params[1] = FW_PARAM_PFVF(ISCSI_END);
4999 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5003 adap->vres.iscsi.start = val[0];
5004 adap->vres.iscsi.size = val[1] - val[0] + 1;
5006 #undef FW_PARAM_PFVF
5010 * These are finalized by FW initialization, load their values now.
5012 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5013 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
5014 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
5015 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5016 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5017 adap->params.b_wnd);
5019 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5020 for (j = 0; j < NCHAN; j++)
5021 adap->params.tp.tx_modq[j] = j;
5023 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5024 &adap->filter_mode, 1,
5027 adap->flags |= FW_OK;
5031 * Something bad happened. If a command timed out or failed with EIO,
5032 * the firmware is not operating within its spec or something catastrophic
5033 * happened to the HW/FW; stop issuing commands.
5036 if (ret != -ETIMEDOUT && ret != -EIO)
5037 t4_fw_bye(adap, adap->mbox);
5043 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5044 pci_channel_state_t state)
5047 struct adapter *adap = pci_get_drvdata(pdev);
5053 adap->flags &= ~FW_OK;
5054 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5055 for_each_port(adap, i) {
5056 struct net_device *dev = adap->port[i];
5058 netif_device_detach(dev);
5059 netif_carrier_off(dev);
5061 if (adap->flags & FULL_INIT_DONE)
5064 pci_disable_device(pdev);
5065 out: return state == pci_channel_io_perm_failure ?
5066 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5069 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5072 struct fw_caps_config_cmd c;
5073 struct adapter *adap = pci_get_drvdata(pdev);
5076 pci_restore_state(pdev);
5077 pci_save_state(pdev);
5078 return PCI_ERS_RESULT_RECOVERED;
5081 if (pci_enable_device(pdev)) {
5082 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
5083 return PCI_ERS_RESULT_DISCONNECT;
5086 pci_set_master(pdev);
5087 pci_restore_state(pdev);
5088 pci_save_state(pdev);
5089 pci_cleanup_aer_uncorrect_error_status(pdev);
5091 if (t4_wait_dev_ready(adap) < 0)
5092 return PCI_ERS_RESULT_DISCONNECT;
5093 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
5094 return PCI_ERS_RESULT_DISCONNECT;
5095 adap->flags |= FW_OK;
5096 if (adap_init1(adap, &c))
5097 return PCI_ERS_RESULT_DISCONNECT;
5099 for_each_port(adap, i) {
5100 struct port_info *p = adap2pinfo(adap, i);
5102 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5105 return PCI_ERS_RESULT_DISCONNECT;
5107 p->xact_addr_filt = -1;
5110 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5111 adap->params.b_wnd);
5114 return PCI_ERS_RESULT_DISCONNECT;
5115 return PCI_ERS_RESULT_RECOVERED;
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;
	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};

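/*
 * Recovery flow: on a fatal PCI error the core calls error_detected()
 * first (quiesce the driver, detach the net devices), then slot_reset()
 * once the link has been reset (re-enable the function and re-contact the
 * firmware), and finally resume() to restart the ports.
 */
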
static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}

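/*
 * For illustration (not from the original source): init_rspq() only records
 * the holdoff parameters in software; they take effect when the queue is
 * later created in hardware.  E.g. init_rspq(&q, 6, 0, 512, 64) selects SGE
 * holdoff timer index 6 and enables packet-count thresholding with counter
 * index 0 (any pkt_cnt_idx >= SGE_NCOUNTERS disables the counter) for a
 * 512-entry queue with 64-byte entries.
 */
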
/*
 * Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings
 * can be modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to one queue set per non-10G port and up to one queue
	 * set per core for each 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

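	/*
	 * Worked example (illustrative numbers): with MAX_ETH_QSETS = 32 on
	 * a 2x10G + 2x1G card, the two 1G ports take one queue set each,
	 * leaving q10g = (32 - 2) / 2 = 15 per 10G port, which the cap above
	 * then reduces to netif_get_num_default_rss_queues() (at most 8).
	 */
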
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;	/* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use one queue/channel if all ports are up
		 * to 1G, otherwise we divide all available queues amongst
		 * the channels capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

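/*
 * Worked example (illustrative): with 4 ports at 4 queue sets each (16
 * total) and n = 10, the first pass strips one queue set from every port
 * (16 -> 12) and the second pass stops after ports 0 and 1 (12 -> 10),
 * leaving nqsets = 2, 2, 3, 3 and first_qset = 0, 2, 4, 7.
 */
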
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;	/* round down */
		}
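		/*
		 * Worked example (illustrative): a 2-port offload adapter
		 * with max_ethqsets = 8, rdmaqs = 2 and ofldqsets = 8 asks
		 * for want = 8 + 2 + 2 + 8 = 20 vectors and needs at least
		 * need = 2 + 2 + 4 = 8.  If only 14 are granted, Ethernet
		 * keeps its full 14 - 2 - 4 = 8 queue sets and the offload
		 * side gets ((14 - 2 - 8 - 2) / 2) * 2 = 2 queue sets.
		 */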
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

#undef EXTRA_VECS

static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}

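/*
 * For illustration: ethtool_rxfh_indir_default(j, n) is simply j % n, so a
 * port with nqsets = 4 gets the default RSS indirection table
 * 0, 1, 2, 3, 0, 1, 2, 3, ... across its rss_size slots.
 */
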
static void print_port_info(const struct net_device *dev)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
	};

	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (bufp != buf)
		--bufp;	/* drop the trailing '/' */
	sprintf(bufp, "BASE-%s", base[pi->port_type]);

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, E/C: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.ec);
}

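/*
 * A hypothetical example of the resulting log output (the port name, VPD id
 * and serial number below are made up):
 *
 *   eth0: Chelsio T420-CR rev 1 10GBASE-R SFP+ RNIC PCIe x8 5 GT/s MSI-X
 *   eth0: S/N: PT00000000, E/C: 0000000000000000
 */
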
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
#ifdef CONFIG_PCI_IOV
	unsigned int max_no_pf;
#endif

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;

	if (!is_t4(adapter->chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B long.  Write coalescing is enabled
		 * only when the number of egress queues per page (qpp) for
		 * this PF is no greater than the number of 128B segments
		 * that fit in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}

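	/*
	 * Illustration of the check above: with a 4KB PAGE_SIZE, num_seg =
	 * 4096 / 128 = 32, so the probe bails out if the SGE is configured
	 * for more than 32 egress queues per page for this PF, since write
	 * coalescing cannot work in that layout.
	 */
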
	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev,
			 "could not allocate TID table, continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	/* "adapter" is NULL when we arrive here via the early "goto sriov"
	 * above, so don't dereference it in that case; treat the device as
	 * T4 for the PF limit.
	 */
	max_no_pf = (adapter && !is_t4(adapter->chip)) ?
			NUM_OF_PF_WITH_SRIOV_T5 : NUM_OF_PF_WITH_SRIOV_T4;

	if (func < max_no_pf && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->chip))
		iounmap(adapter->bar2);
 out_unmap_bar0:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];

			for (i = 0; i < (adapter->tids.nftids +
					 adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->chip))
			iounmap(adapter->bar2);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0) {
		debugfs_remove(cxgb4_debugfs_root);
		destroy_workqueue(workq);	/* don't leak the workqueue */
	}
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);	/* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);