/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <asm/uaccess.h>
71 #define DRV_VERSION "2.0.0-ko"
72 #define DRV_DESC "Chelsio T4/T5 Network Driver"
/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * free resources ...
 */
#define MAX_SGE_TIMERVAL	200U
enum {
	/*
	 * Physical Function provisioning constants.
	 */
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
86 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
89 PFRES_NEQ = 256, /* # of egress queues */
90 PFRES_NIQ = 0, /* # of ingress queues */
91 PFRES_TC = 0, /* PCI-E traffic class */
92 PFRES_NEXACTF = 128, /* # of exact MPS filters */
94 PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
	/*
	 * Virtual Function provisioning constants.  We need two extra Ingress
100 * Queues with Interrupt capability to serve as the VF's Firmware
101 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
	 * neither will have Free Lists associated with them.  For each
	 * Ethernet/Control Egress Queue and for each Free List, we need an
	 * Egress Context.
	 */
106 VFRES_NPORTS = 1, /* # of "ports" per VF */
107 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
109 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
110 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
111 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
112 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
113 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
114 VFRES_TC = 0, /* PCI-E traffic class */
115 VFRES_NEXACTF = 16, /* # of exact MPS filters */
117 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
124 * static and likely not to be useful in the long run. We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PF's access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;
	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	while (1) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask.
		 * Otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
		unsigned int pmask = portvec ^ (portvec & (portvec-1));

		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
}
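/*
 * Side note (illustrative only, not driver logic): the expression
 * "portvec ^ (portvec & (portvec - 1))" above isolates the lowest set bit
 * of the port vector and is equivalent to the two's-complement idiom
 * "portvec & -portvec".  For example, for portvec = 0xa (binary 1010)
 * both forms yield 0x2 (binary 0010).
 */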
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
202 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
203 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
204 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
206 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
208 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
209 CH_DEVICE(0xa000, 0), /* PE10K */
210 CH_DEVICE(0x4001, -1),
211 CH_DEVICE(0x4002, -1),
212 CH_DEVICE(0x4003, -1),
213 CH_DEVICE(0x4004, -1),
214 CH_DEVICE(0x4005, -1),
215 CH_DEVICE(0x4006, -1),
216 CH_DEVICE(0x4007, -1),
217 CH_DEVICE(0x4008, -1),
218 CH_DEVICE(0x4009, -1),
219 CH_DEVICE(0x400a, -1),
220 CH_DEVICE(0x4401, 4),
221 CH_DEVICE(0x4402, 4),
222 CH_DEVICE(0x4403, 4),
223 CH_DEVICE(0x4404, 4),
224 CH_DEVICE(0x4405, 4),
225 CH_DEVICE(0x4406, 4),
226 CH_DEVICE(0x4407, 4),
227 CH_DEVICE(0x4408, 4),
228 CH_DEVICE(0x4409, 4),
229 CH_DEVICE(0x440a, 4),
230 CH_DEVICE(0x440d, 4),
231 CH_DEVICE(0x440e, 4),
232 CH_DEVICE(0x5001, 5),
233 CH_DEVICE(0x5002, 5),
234 CH_DEVICE(0x5003, 5),
235 CH_DEVICE(0x5004, 5),
236 CH_DEVICE(0x5005, 5),
237 CH_DEVICE(0x5006, 5),
238 CH_DEVICE(0x5007, 5),
239 CH_DEVICE(0x5008, 5),
240 CH_DEVICE(0x5009, 5),
241 CH_DEVICE(0x500A, 5),
242 CH_DEVICE(0x500B, 5),
243 CH_DEVICE(0x500C, 5),
244 CH_DEVICE(0x500D, 5),
245 CH_DEVICE(0x500E, 5),
246 CH_DEVICE(0x500F, 5),
247 CH_DEVICE(0x5010, 5),
248 CH_DEVICE(0x5011, 5),
249 CH_DEVICE(0x5012, 5),
250 CH_DEVICE(0x5013, 5),
251 CH_DEVICE(0x5401, 5),
252 CH_DEVICE(0x5402, 5),
253 CH_DEVICE(0x5403, 5),
254 CH_DEVICE(0x5404, 5),
255 CH_DEVICE(0x5405, 5),
256 CH_DEVICE(0x5406, 5),
257 CH_DEVICE(0x5407, 5),
258 CH_DEVICE(0x5408, 5),
259 CH_DEVICE(0x5409, 5),
260 CH_DEVICE(0x540A, 5),
261 CH_DEVICE(0x540B, 5),
262 CH_DEVICE(0x540C, 5),
263 CH_DEVICE(0x540D, 5),
264 CH_DEVICE(0x540E, 5),
265 CH_DEVICE(0x540F, 5),
266 CH_DEVICE(0x5410, 5),
267 CH_DEVICE(0x5411, 5),
268 CH_DEVICE(0x5412, 5),
269 CH_DEVICE(0x5413, 5),
273 #define FW_FNAME "cxgb4/t4fw.bin"
274 #define FW5_FNAME "cxgb4/t5fw.bin"
275 #define FW_CFNAME "cxgb4/t4-config.txt"
276 #define FW5_CFNAME "cxgb4/t5-config.txt"
278 MODULE_DESCRIPTION(DRV_DESC);
279 MODULE_AUTHOR("Chelsio Communications");
280 MODULE_LICENSE("Dual BSD/GPL");
281 MODULE_VERSION(DRV_VERSION);
282 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
283 MODULE_FIRMWARE(FW_FNAME);
284 MODULE_FIRMWARE(FW5_FNAME);
287 * Normally we're willing to become the firmware's Master PF but will be happy
288 * if another PF has already become the Master and initialized the adapter.
289 * Setting "force_init" will cause this driver to forcibly establish itself as
290 * the Master PF and initialize the adapter.
292 static uint force_init;
294 module_param(force_init, uint, 0644);
295 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
298 * Normally if the firmware we connect to has Configuration File support, we
299 * use that and only fall back to the old Driver-based initialization if the
300 * Configuration File fails for some reason. If force_old_init is set, then
301 * we'll always use the old Driver-based initialization sequence.
303 static uint force_old_init;
305 module_param(force_old_init, uint, 0644);
306 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
308 static int dflt_msg_enable = DFLT_MSG_ENABLE;
310 module_param(dflt_msg_enable, int, 0644);
311 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
314 * The driver uses the best interrupt scheme available on a platform in the
315 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
316 * of these schemes the driver may consider as follows:
318 * msi = 2: choose from among all three options
319 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
325 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
331 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
333 module_param_array(intr_holdoff, uint, NULL, 0644);
334 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
335 "0..4 in microseconds");
337 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
339 module_param_array(intr_cnt, uint, NULL, 0644);
340 MODULE_PARM_DESC(intr_cnt,
341 "thresholds 1..3 for queue interrupt packet counters");
344 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
345 * offset by 2 bytes in order to have the IP headers line up on 4-byte
346 * boundaries. This is a requirement for many architectures which will throw
347 * a machine check fault if an attempt is made to access one of the 4-byte IP
348 * header fields on a non-4-byte boundary. And it's a major performance issue
349 * even on some architectures which allow it like some implementations of the
350 * x86 ISA. However, some architectures don't mind this and for some very
351 * edge-case performance sensitive applications (like forwarding large volumes
352 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
355 static int rx_dma_offset = 2;
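/*
 * Worked example of the alignment argument above: an Ethernet header is
 * 14 bytes, so with a 2-byte DMA offset the IP header begins at byte
 * 2 + 14 = 16 of the buffer, a 4-byte boundary.  With a 0-byte offset it
 * would begin at byte 14, which is not 4-byte aligned.
 */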
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
360 module_param(vf_acls, bool, 0644);
361 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
366 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
368 module_param_array(num_vf, uint, NULL, 0644);
369 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
373 * The filter TCAM has a fixed portion and a variable portion. The fixed
374 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
375 * ports. The variable portion is 36 bits which can include things like Exact
376 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
377 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
378 * far exceed the 36-bit budget for this "compressed" header portion of the
379 * filter. Thus, we have a scarce resource which must be carefully managed.
381 * By default we set this up to mostly match the set of filter matching
382 * capabilities of T3 but with accommodations for some of T4's more
383 * interesting features:
 *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *     [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
389 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
390 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
396 module_param(tp_vlan_pri_map, uint, 0644);
397 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
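/*
 * Quick sanity check on the default selection above: the chosen fields sum
 * to 1 (IP Fragment) + 3 (MPS Match Type) + 8 (IP Protocol) +
 * 17 ([Inner] VLAN) + 3 (Port) + 1 (FCoE) = 33 bits, which fits within the
 * 36-bit budget of the compressed filter tuple.
 */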
399 static struct dentry *cxgb4_debugfs_root;
401 static LIST_HEAD(adapter_list);
402 static DEFINE_MUTEX(uld_mutex);
403 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
404 static const char *uld_str[] = { "RDMA", "iSCSI" };
406 static void link_report(struct net_device *dev)
408 if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
413 const char *s = "10Mbps";
414 const struct port_info *p = netdev_priv(dev);
416 switch (p->link_cfg.speed) {
428 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
433 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
435 struct net_device *dev = adapter->port[port_id];
437 /* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}
448 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
450 static const char *mod_str[] = {
451 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
454 const struct net_device *dev = adap->port[port_id];
455 const struct port_info *pi = netdev_priv(dev);
457 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
458 netdev_info(dev, "port module unplugged\n");
459 else if (pi->mod_type < ARRAY_SIZE(mod_str))
460 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
464 * Configure the exact and hash address filters to handle a port's multicast
465 * and secondary unicast MAC addresses.
467 static int set_addr_filters(const struct net_device *dev, bool sleep)
475 const struct netdev_hw_addr *ha;
476 int uc_cnt = netdev_uc_count(dev);
477 int mc_cnt = netdev_mc_count(dev);
478 const struct port_info *pi = netdev_priv(dev);
479 unsigned int mb = pi->adapter->fn;
481 /* first do the secondary unicast addresses */
482 netdev_for_each_uc_addr(ha, dev) {
483 addr[naddr++] = ha->addr;
484 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
485 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
486 naddr, addr, filt_idx, &uhash, sleep);
495 /* next set up the multicast addresses */
496 netdev_for_each_mc_addr(ha, dev) {
497 addr[naddr++] = ha->addr;
498 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
499 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
500 naddr, addr, filt_idx, &mhash, sleep);
	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
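/*
 * Note (illustrative reading of the code above): addresses are handed to
 * t4_alloc_mac_filt() in batches of up to ARRAY_SIZE(addr); whatever the
 * hardware cannot place in exact-match slots is accumulated into the
 * uhash/mhash hash-filter bit vectors and finally programmed through
 * t4_set_addr_hash().
 */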
513 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
514 module_param(dbfifo_int_thresh, int, 0644);
515 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
518 * usecs to sleep while draining the dbfifo
520 static int dbfifo_drain_delay = 1000;
521 module_param(dbfifo_drain_delay, int, 0644);
522 MODULE_PARM_DESC(dbfifo_drain_delay,
523 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
529 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
532 struct port_info *pi = netdev_priv(dev);
534 ret = set_addr_filters(dev, sleep_ok);
536 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
537 (dev->flags & IFF_PROMISC) ? 1 : 0,
538 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
543 static struct workqueue_struct *workq;
546 * link_start - enable a port
547 * @dev: the port to enable
549 * Performs the MAC and PHY actions needed to enable a port.
551 static int link_start(struct net_device *dev)
554 struct port_info *pi = netdev_priv(dev);
555 unsigned int mb = pi->adapter->fn;
558 * We do not set address filters and promiscuity here, the stack does
559 * that step explicitly.
561 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
562 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
564 ret = t4_change_mac(pi->adapter, mb, pi->viid,
565 pi->xact_addr_filt, dev->dev_addr, true,
568 pi->xact_addr_filt = ret;
573 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
576 ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
580 /* Clear a filter and release any of its resources that we own. This also
581 * clears the filter's "pending" status.
583 static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
586 * need to free any existing Layer Two Table (L2T) entries of the old
587 * filter rule. The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);
594 /* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
601 /* Handle a filter write/deletion reply.
603 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
605 unsigned int idx = GET_TID(rpl);
606 unsigned int nidx = idx - adap->tids.ftid_base;
608 struct filter_entry *f;
610 if (idx >= adap->tids.ftid_base && nidx <
611 (adap->tids.nftids + adap->tids.nsftids)) {
613 ret = GET_TCB_COOKIE(rpl->cookie);
614 f = &adap->tids.ftid_tab[idx];
616 if (ret == FW_FILTER_WR_FLT_DELETED) {
617 /* Clear the filter when we get confirmation from the
618 * hardware that the filter has been deleted.
620 clear_filter(adap, f);
621 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
625 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
626 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
627 f->pending = 0; /* asynchronous setup completed */
630 /* Something went wrong. Issue a warning about the
631 * problem and clear everything out.
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
640 /* Response queue handler for the FW event queue.
642 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
643 const struct pkt_gl *gl)
645 u8 opcode = ((const struct rss_header *)rsp)->opcode;
647 rsp++; /* skip RSS header */
648 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
649 const struct cpl_sge_egr_update *p = (void *)rsp;
650 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
653 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
655 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
656 struct sge_eth_txq *eq;
658 eq = container_of(txq, struct sge_eth_txq, q);
659 netif_tx_wake_queue(eq->txq);
661 struct sge_ofld_txq *oq;
663 oq = container_of(txq, struct sge_ofld_txq, q);
664 tasklet_schedule(&oq->qresume_tsk);
666 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
667 const struct cpl_fw6_msg *p = (void *)rsp;
670 t4_handle_fw_rpl(q->adap, p->data);
671 } else if (opcode == CPL_L2T_WRITE_RPL) {
672 const struct cpl_l2t_write_rpl *p = (void *)rsp;
674 do_l2t_write_rpl(q->adap, p);
675 } else if (opcode == CPL_SET_TCB_RPL) {
676 const struct cpl_set_tcb_rpl *p = (void *)rsp;
678 filter_rpl(q->adap, p);
680 dev_err(q->adap->pdev_dev,
681 "unexpected CPL %#x on FW event queue\n", opcode);
686 * uldrx_handler - response queue handler for ULD queues
687 * @q: the response queue that received the packet
688 * @rsp: the response queue descriptor holding the offload message
689 * @gl: the gather list of packet fragments
691 * Deliver an ingress offload packet to a ULD. All processing is done by
692 * the ULD, we just maintain statistics.
694 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
695 const struct pkt_gl *gl)
697 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
699 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
705 else if (gl == CXGB4_MSG_AN)
712 static void disable_msi(struct adapter *adapter)
714 if (adapter->flags & USING_MSIX) {
715 pci_disable_msix(adapter->pdev);
716 adapter->flags &= ~USING_MSIX;
717 } else if (adapter->flags & USING_MSI) {
718 pci_disable_msi(adapter->pdev);
719 adapter->flags &= ~USING_MSI;
724 * Interrupt handler for non-data events used with MSI-X.
726 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
728 struct adapter *adap = cookie;
730 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
733 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
735 t4_slow_intr_handler(adap);
740 * Name the MSI-X interrupts.
742 static void name_msix_vecs(struct adapter *adap)
744 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
746 /* non-data interrupts */
747 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
750 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
751 adap->port[0]->name);
753 /* Ethernet queues */
754 for_each_port(adap, j) {
755 struct net_device *d = adap->port[j];
756 const struct port_info *pi = netdev_priv(d);
758 for (i = 0; i < pi->nqsets; i++, msi_idx++)
759 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
764 for_each_ofldrxq(&adap->sge, i)
765 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
766 adap->port[0]->name, i);
768 for_each_rdmarxq(&adap->sge, i)
769 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
770 adap->port[0]->name, i);
773 static int request_msix_queue_irqs(struct adapter *adap)
775 struct sge *s = &adap->sge;
776 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
778 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
779 adap->msix_info[1].desc, &s->fw_evtq);
783 for_each_ethrxq(s, ethqidx) {
784 err = request_irq(adap->msix_info[msi_index].vec,
786 adap->msix_info[msi_index].desc,
787 &s->ethrxq[ethqidx].rspq);
792 for_each_ofldrxq(s, ofldqidx) {
793 err = request_irq(adap->msix_info[msi_index].vec,
795 adap->msix_info[msi_index].desc,
796 &s->ofldrxq[ofldqidx].rspq);
801 for_each_rdmarxq(s, rdmaqidx) {
802 err = request_irq(adap->msix_info[msi_index].vec,
804 adap->msix_info[msi_index].desc,
805 &s->rdmarxq[rdmaqidx].rspq);
813 while (--rdmaqidx >= 0)
814 free_irq(adap->msix_info[--msi_index].vec,
815 &s->rdmarxq[rdmaqidx].rspq);
816 while (--ofldqidx >= 0)
817 free_irq(adap->msix_info[--msi_index].vec,
818 &s->ofldrxq[ofldqidx].rspq);
819 while (--ethqidx >= 0)
820 free_irq(adap->msix_info[--msi_index].vec,
821 &s->ethrxq[ethqidx].rspq);
822 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
826 static void free_msix_queue_irqs(struct adapter *adap)
828 int i, msi_index = 2;
829 struct sge *s = &adap->sge;
831 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
832 for_each_ethrxq(s, i)
833 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
834 for_each_ofldrxq(s, i)
835 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
836 for_each_rdmarxq(s, i)
837 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
841 * write_rss - write the RSS table for a given port
843 * @queues: array of queue indices for RSS
845 * Sets up the portion of the HW RSS table for the port's VI to distribute
846 * packets to the Rx queues in @queues.
848 static int write_rss(const struct port_info *pi, const u16 *queues)
852 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
854 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
858 /* map the queue indices to queue ids */
859 for (i = 0; i < pi->rss_size; i++, queues++)
860 rss[i] = q[*queues].rspq.abs_id;
	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}
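/*
 * Illustrative example: with nqsets == 4 and an indirection table of
 * { 0, 1, 2, 3, 0, 1, ... }, each slot is resolved above through
 * q[*queues].rspq.abs_id, so the hardware table ends up holding the
 * absolute SGE queue ids of the port's four Rx queues in round-robin
 * order.
 */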
869 * setup_rss - configure RSS
872 * Sets up RSS for each port.
874 static int setup_rss(struct adapter *adap)
878 for_each_port(adap, i) {
879 const struct port_info *pi = adap2pinfo(adap, i);
881 err = write_rss(pi, pi->rss);
889 * Return the channel of the ingress queue with the given qid.
891 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
893 qid -= p->ingr_start;
894 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
898 * Wait until all NAPI handlers are descheduled.
900 static void quiesce_rx(struct adapter *adap)
904 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
905 struct sge_rspq *q = adap->sge.ingr_map[i];
908 napi_disable(&q->napi);
913 * Enable NAPI scheduling and interrupt generation for all Rx queues.
915 static void enable_rx(struct adapter *adap)
919 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
920 struct sge_rspq *q = adap->sge.ingr_map[i];
925 napi_enable(&q->napi);
926 /* 0-increment GTS to start the timer and enable interrupts */
927 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
928 SEINTARM(q->intr_params) |
929 INGRESSQID(q->cntxt_id));
934 * setup_sge_queues - configure SGE Tx/Rx/response queues
937 * Determines how many sets of SGE queues to use and initializes them.
938 * We support multiple queue sets per port if we have MSI-X, otherwise
939 * just one queue set per port.
941 static int setup_sge_queues(struct adapter *adap)
943 int err, msi_idx, i, j;
944 struct sge *s = &adap->sge;
946 bitmap_zero(s->starving_fl, MAX_EGRQ);
947 bitmap_zero(s->txq_maperr, MAX_EGRQ);
949 if (adap->flags & USING_MSIX)
950 msi_idx = 1; /* vector 0 is for non-queue interrupts */
952 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
956 msi_idx = -((int)s->intrq.abs_id + 1);
959 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
960 msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}
966 for_each_port(adap, i) {
967 struct net_device *dev = adap->port[i];
968 struct port_info *pi = netdev_priv(dev);
969 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
970 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
972 for (j = 0; j < pi->nqsets; j++, q++) {
975 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
981 memset(&q->stats, 0, sizeof(q->stats));
983 for (j = 0; j < pi->nqsets; j++, t++) {
984 err = t4_sge_alloc_eth_txq(adap, t, dev,
985 netdev_get_tx_queue(dev, j),
986 s->fw_evtq.cntxt_id);
992 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
993 for_each_ofldrxq(s, i) {
994 struct sge_ofld_rxq *q = &s->ofldrxq[i];
995 struct net_device *dev = adap->port[i / j];
999 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1000 &q->fl, uldrx_handler);
1003 memset(&q->stats, 0, sizeof(q->stats));
1004 s->ofld_rxq[i] = q->rspq.abs_id;
1005 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1006 s->fw_evtq.cntxt_id);
1011 for_each_rdmarxq(s, i) {
1012 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1016 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1017 msi_idx, &q->fl, uldrx_handler);
1020 memset(&q->stats, 0, sizeof(q->stats));
1021 s->rdma_rxq[i] = q->rspq.abs_id;
1024 for_each_port(adap, i) {
1026 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1027 * have RDMA queues, and that's the right value.
1029 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1030 s->fw_evtq.cntxt_id,
1031 s->rdmarxq[i].rspq.cntxt_id);
1036 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
1037 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1038 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1043 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
1044 * started but failed, and a negative errno if flash load couldn't start.
1046 static int upgrade_fw(struct adapter *adap)
1049 u32 vers, exp_major;
1050 const struct fw_hdr *hdr;
1051 const struct firmware *fw;
1052 struct device *dev = adap->pdev_dev;
1055 switch (CHELSIO_CHIP_VERSION(adap->chip)) {
1057 fw_file_name = FW_FNAME;
1058 exp_major = FW_VERSION_MAJOR;
1061 fw_file_name = FW5_FNAME;
1062 exp_major = FW_VERSION_MAJOR_T5;
1065 dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
	ret = request_firmware(&fw, fw_file_name, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image %s, error %d\n",
			fw_file_name, ret);
		return ret;
	}
1076 hdr = (const struct fw_hdr *)fw->data;
1077 vers = ntohl(hdr->fw_ver);
1078 if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
1079 ret = -EINVAL; /* wrong major version, won't do */
1084 * If the flash FW is unusable or we found something newer, load it.
1086 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
1087 vers > adap->params.fw_vers) {
1088 dev_info(dev, "upgrading firmware ...\n");
1089 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
1093 "firmware upgraded to version %pI4 from %s\n",
1094 &hdr->fw_ver, fw_file_name);
1096 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
1099 * Tell our caller that we didn't upgrade the firmware.
1104 out: release_firmware(fw);
1109 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1110 * The allocated memory is cleared.
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}
/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
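/*
 * Illustrative usage sketch (hypothetical caller, not driver code):
 *
 *	u64 *buf = t4_alloc_mem(len);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	t4_free_mem(buf);
 *
 * Because t4_free_mem() dispatches on is_vmalloc_addr(), callers never
 * need to remember which of the two allocators actually satisfied the
 * request.
 */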
1132 /* Send a Work Request to write the filter at a specified index. We construct
1133 * a Firmware Filter Work Request to have the work done and put the indicated
1134 * filter into "pending" mode which will prevent any further actions against
1135 * it till we get a reply from the firmware on the completion status of the
1138 static int set_filter_wr(struct adapter *adapter, int fidx)
1140 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1141 struct sk_buff *skb;
1142 struct fw_filter_wr *fwr;
1145 /* If the new filter requires loopback Destination MAC and/or VLAN
1146 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1149 if (f->fs.newdmac || f->fs.newvlan) {
1150 /* allocate L2T entry for new filter */
1151 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1154 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1155 f->fs.eport, f->fs.dmac)) {
1156 cxgb4_l2t_release(f->l2t);
1162 ftid = adapter->tids.ftid_base + fidx;
1164 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1165 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1166 memset(fwr, 0, sizeof(*fwr));
1168 /* It would be nice to put most of the following in t4_hw.c but most
1169 * of the work is translating the cxgbtool ch_filter_specification
1170 * into the Work Request and the definition of that structure is
1171 * currently in cxgbtool.h which isn't appropriate to pull into the
1172 * common code. We may eventually try to come up with a more neutral
1173 * filter specification structure but for now it's easiest to simply
1174 * put this fairly direct code in line ...
1176 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1177 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1179 htonl(V_FW_FILTER_WR_TID(ftid) |
1180 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1181 V_FW_FILTER_WR_NOREPLY(0) |
1182 V_FW_FILTER_WR_IQ(f->fs.iq));
1183 fwr->del_filter_to_l2tix =
1184 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1185 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1186 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1187 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1188 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1189 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1190 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1191 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1192 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1193 f->fs.newvlan == VLAN_REWRITE) |
1194 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1195 f->fs.newvlan == VLAN_REWRITE) |
1196 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1197 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1198 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1199 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1200 fwr->ethtype = htons(f->fs.val.ethtype);
1201 fwr->ethtypem = htons(f->fs.mask.ethtype);
1202 fwr->frag_to_ovlan_vldm =
1203 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1204 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1205 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1206 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1207 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1208 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1210 fwr->rx_chan_rx_rpl_iq =
1211 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1212 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1213 fwr->maci_to_matchtypem =
1214 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1215 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1216 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1217 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1218 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1219 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1220 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1221 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1222 fwr->ptcl = f->fs.val.proto;
1223 fwr->ptclm = f->fs.mask.proto;
1224 fwr->ttyp = f->fs.val.tos;
1225 fwr->ttypm = f->fs.mask.tos;
1226 fwr->ivlan = htons(f->fs.val.ivlan);
1227 fwr->ivlanm = htons(f->fs.mask.ivlan);
1228 fwr->ovlan = htons(f->fs.val.ovlan);
1229 fwr->ovlanm = htons(f->fs.mask.ovlan);
1230 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1231 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1232 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1233 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1234 fwr->lp = htons(f->fs.val.lport);
1235 fwr->lpm = htons(f->fs.mask.lport);
1236 fwr->fp = htons(f->fs.val.fport);
1237 fwr->fpm = htons(f->fs.mask.fport);
1239 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1241 /* Mark the filter as "pending" and ship off the Filter Work Request.
1242 * When we get the Work Request Reply we'll clear the pending status.
1245 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1246 t4_ofld_send(adapter, skb);
1250 /* Delete the filter at a specified index.
1252 static int del_filter_wr(struct adapter *adapter, int fidx)
1254 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1255 struct sk_buff *skb;
1256 struct fw_filter_wr *fwr;
1257 unsigned int len, ftid;
1260 ftid = adapter->tids.ftid_base + fidx;
1262 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1263 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1264 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1266 /* Mark the filter as "pending" and ship off the Filter Work Request.
1267 * When we get the Work Request Reply we'll clear the pending status.
1270 t4_mgmt_tx(adapter, skb);
1274 static inline int is_offload(const struct adapter *adap)
1276 return adap->params.offload;
1280 * Implementation of ethtool operations.
1283 static u32 get_msglevel(struct net_device *dev)
1285 return netdev2adap(dev)->msg_enable;
1288 static void set_msglevel(struct net_device *dev, u32 val)
1290 netdev2adap(dev)->msg_enable = val;
1293 static char stats_strings[][ETH_GSTRING_LEN] = {
1296 "TxBroadcastFrames ",
1297 "TxMulticastFrames ",
1303 "TxFrames128To255 ",
1304 "TxFrames256To511 ",
1305 "TxFrames512To1023 ",
1306 "TxFrames1024To1518 ",
1307 "TxFrames1519ToMax ",
1322 "RxBroadcastFrames ",
1323 "RxMulticastFrames ",
1335 "RxFrames128To255 ",
1336 "RxFrames256To511 ",
1337 "RxFrames512To1023 ",
1338 "RxFrames1024To1518 ",
1339 "RxFrames1519ToMax ",
1351 "RxBG0FramesDropped ",
1352 "RxBG1FramesDropped ",
1353 "RxBG2FramesDropped ",
1354 "RxBG3FramesDropped ",
1355 "RxBG0FramesTrunc ",
1356 "RxBG1FramesTrunc ",
1357 "RxBG2FramesTrunc ",
1358 "RxBG3FramesTrunc ",
1367 "WriteCoalSuccess ",
1371 static int get_sset_count(struct net_device *dev, int sset)
1375 return ARRAY_SIZE(stats_strings);
1381 #define T4_REGMAP_SIZE (160 * 1024)
1382 #define T5_REGMAP_SIZE (332 * 1024)
1384 static int get_regs_len(struct net_device *dev)
1386 struct adapter *adap = netdev2adap(dev);
1387 if (is_t4(adap->chip))
1388 return T4_REGMAP_SIZE;
1390 return T5_REGMAP_SIZE;
1393 static int get_eeprom_len(struct net_device *dev)
1398 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1400 struct adapter *adapter = netdev2adap(dev);
1402 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1403 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1404 strlcpy(info->bus_info, pci_name(adapter->pdev),
1405 sizeof(info->bus_info));
1407 if (adapter->params.fw_vers)
1408 snprintf(info->fw_version, sizeof(info->fw_version),
1409 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1410 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1411 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1412 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1413 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1414 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1415 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1416 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1417 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1420 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1422 if (stringset == ETH_SS_STATS)
1423 memcpy(data, stats_strings, sizeof(stats_strings));
1427 * port stats maintained per queue of the port. They should be in the same
1428 * order as in stats_strings above.
1430 struct queue_port_stats {
1440 static void collect_sge_port_stats(const struct adapter *adap,
1441 const struct port_info *p, struct queue_port_stats *s)
1444 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1445 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1447 memset(s, 0, sizeof(*s));
1448 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1450 s->tx_csum += tx->tx_cso;
1451 s->rx_csum += rx->stats.rx_cso;
1452 s->vlan_ex += rx->stats.vlan_ex;
1453 s->vlan_ins += tx->vlan_ins;
1454 s->gro_pkts += rx->stats.lro_pkts;
1455 s->gro_merged += rx->stats.lro_merged;
1459 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1462 struct port_info *pi = netdev_priv(dev);
1463 struct adapter *adapter = pi->adapter;
1466 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1468 data += sizeof(struct port_stats) / sizeof(u64);
1469 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1470 data += sizeof(struct queue_port_stats) / sizeof(u64);
1471 if (!is_t4(adapter->chip)) {
1472 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1473 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1474 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1475 *data = val1 - val2;
1480 memset(data, 0, 2 * sizeof(u64));
1486 * Return a version number to identify the type of adapter. The scheme is:
1487 * - bits 0..9: chip version
1488 * - bits 10..15: chip revision
1489 * - bits 16..23: register dump version
1491 static inline unsigned int mk_adap_vers(const struct adapter *ap)
	return CHELSIO_CHIP_VERSION(ap->chip) |
		(CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}
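/*
 * Worked example (illustrative; assumes CHELSIO_CHIP_VERSION() returns 4
 * for a T4 part): a T4 rev 1 adapter would report 4 | (1 << 10) | (1 << 16),
 * i.e. chip version in bits 0..9, revision in bits 10..15 and register
 * dump version 1 in bits 16..23.
 */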
1497 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1500 u32 *p = buf + start;
1502 for ( ; start <= end; start += sizeof(u32))
1503 *p++ = t4_read_reg(ap, start);
1506 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1509 static const unsigned int t4_reg_ranges[] = {
1729 static const unsigned int t5_reg_ranges[] = {
2157 struct adapter *ap = netdev2adap(dev);
2158 static const unsigned int *reg_ranges;
2159 int arr_size = 0, buf_size = 0;
2161 if (is_t4(ap->chip)) {
2162 reg_ranges = &t4_reg_ranges[0];
2163 arr_size = ARRAY_SIZE(t4_reg_ranges);
2164 buf_size = T4_REGMAP_SIZE;
2166 reg_ranges = &t5_reg_ranges[0];
2167 arr_size = ARRAY_SIZE(t5_reg_ranges);
2168 buf_size = T5_REGMAP_SIZE;
2171 regs->version = mk_adap_vers(ap);
2173 memset(buf, 0, buf_size);
2174 for (i = 0; i < arr_size; i += 2)
2175 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2178 static int restart_autoneg(struct net_device *dev)
2180 struct port_info *p = netdev_priv(dev);
2182 if (!netif_running(dev))
2184 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2186 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2190 static int identify_port(struct net_device *dev,
2191 enum ethtool_phys_id_state state)
2194 struct adapter *adap = netdev2adap(dev);
2196 if (state == ETHTOOL_ID_ACTIVE)
2198 else if (state == ETHTOOL_ID_INACTIVE)
2203 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2206 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2210 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2211 type == FW_PORT_TYPE_BT_XAUI) {
2213 if (caps & FW_PORT_CAP_SPEED_100M)
2214 v |= SUPPORTED_100baseT_Full;
2215 if (caps & FW_PORT_CAP_SPEED_1G)
2216 v |= SUPPORTED_1000baseT_Full;
2217 if (caps & FW_PORT_CAP_SPEED_10G)
2218 v |= SUPPORTED_10000baseT_Full;
2219 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2220 v |= SUPPORTED_Backplane;
2221 if (caps & FW_PORT_CAP_SPEED_1G)
2222 v |= SUPPORTED_1000baseKX_Full;
2223 if (caps & FW_PORT_CAP_SPEED_10G)
2224 v |= SUPPORTED_10000baseKX4_Full;
2225 } else if (type == FW_PORT_TYPE_KR)
2226 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2227 else if (type == FW_PORT_TYPE_BP_AP)
2228 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2229 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2230 else if (type == FW_PORT_TYPE_BP4_AP)
2231 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2232 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2233 SUPPORTED_10000baseKX4_Full;
2234 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2235 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2236 v |= SUPPORTED_FIBRE;
2238 if (caps & FW_PORT_CAP_ANEG)
2239 v |= SUPPORTED_Autoneg;
2243 static unsigned int to_fw_linkcaps(unsigned int caps)
2247 if (caps & ADVERTISED_100baseT_Full)
2248 v |= FW_PORT_CAP_SPEED_100M;
2249 if (caps & ADVERTISED_1000baseT_Full)
2250 v |= FW_PORT_CAP_SPEED_1G;
2251 if (caps & ADVERTISED_10000baseT_Full)
2252 v |= FW_PORT_CAP_SPEED_10G;
2256 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2258 const struct port_info *p = netdev_priv(dev);
2260 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2261 p->port_type == FW_PORT_TYPE_BT_XFI ||
2262 p->port_type == FW_PORT_TYPE_BT_XAUI)
2263 cmd->port = PORT_TP;
2264 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2265 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2266 cmd->port = PORT_FIBRE;
2267 else if (p->port_type == FW_PORT_TYPE_SFP) {
2268 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2269 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2270 cmd->port = PORT_DA;
2272 cmd->port = PORT_FIBRE;
2274 cmd->port = PORT_OTHER;
2276 if (p->mdio_addr >= 0) {
2277 cmd->phy_address = p->mdio_addr;
2278 cmd->transceiver = XCVR_EXTERNAL;
2279 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2280 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2282 cmd->phy_address = 0; /* not really, but no better option */
2283 cmd->transceiver = XCVR_INTERNAL;
2284 cmd->mdio_support = 0;
2287 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2288 cmd->advertising = from_fw_linkcaps(p->port_type,
2289 p->link_cfg.advertising);
2290 ethtool_cmd_speed_set(cmd,
2291 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2292 cmd->duplex = DUPLEX_FULL;
2293 cmd->autoneg = p->link_cfg.autoneg;
2299 static unsigned int speed_to_caps(int speed)
2301 if (speed == SPEED_100)
2302 return FW_PORT_CAP_SPEED_100M;
2303 if (speed == SPEED_1000)
2304 return FW_PORT_CAP_SPEED_1G;
2305 if (speed == SPEED_10000)
2306 return FW_PORT_CAP_SPEED_10G;
2310 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2313 struct port_info *p = netdev_priv(dev);
2314 struct link_config *lc = &p->link_cfg;
2315 u32 speed = ethtool_cmd_speed(cmd);
2317 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2320 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2322 * PHY offers a single speed. See if that's what's
2325 if (cmd->autoneg == AUTONEG_DISABLE &&
2326 (lc->supported & speed_to_caps(speed)))
2331 if (cmd->autoneg == AUTONEG_DISABLE) {
2332 cap = speed_to_caps(speed);
2334 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
2335 (speed == SPEED_10000))
2337 lc->requested_speed = cap;
2338 lc->advertising = 0;
2340 cap = to_fw_linkcaps(cmd->advertising);
2341 if (!(lc->supported & cap))
2343 lc->requested_speed = 0;
2344 lc->advertising = cap | FW_PORT_CAP_ANEG;
2346 lc->autoneg = cmd->autoneg;
2348 if (netif_running(dev))
2349 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2354 static void get_pauseparam(struct net_device *dev,
2355 struct ethtool_pauseparam *epause)
2357 struct port_info *p = netdev_priv(dev);
2359 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2360 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2361 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2364 static int set_pauseparam(struct net_device *dev,
2365 struct ethtool_pauseparam *epause)
2367 struct port_info *p = netdev_priv(dev);
2368 struct link_config *lc = &p->link_cfg;
2370 if (epause->autoneg == AUTONEG_DISABLE)
2371 lc->requested_fc = 0;
2372 else if (lc->supported & FW_PORT_CAP_ANEG)
2373 lc->requested_fc = PAUSE_AUTONEG;
2377 if (epause->rx_pause)
2378 lc->requested_fc |= PAUSE_RX;
2379 if (epause->tx_pause)
2380 lc->requested_fc |= PAUSE_TX;
2381 if (netif_running(dev))
2382 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2387 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2389 const struct port_info *pi = netdev_priv(dev);
2390 const struct sge *s = &pi->adapter->sge;
2392 e->rx_max_pending = MAX_RX_BUFFERS;
2393 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2394 e->rx_jumbo_max_pending = 0;
2395 e->tx_max_pending = MAX_TXQ_ENTRIES;
2397 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2398 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2399 e->rx_jumbo_pending = 0;
2400 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2403 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2406 const struct port_info *pi = netdev_priv(dev);
2407 struct adapter *adapter = pi->adapter;
2408 struct sge *s = &adapter->sge;
2410 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2411 e->tx_pending > MAX_TXQ_ENTRIES ||
2412 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2413 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2414 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2417 if (adapter->flags & FULL_INIT_DONE)
2420 for (i = 0; i < pi->nqsets; ++i) {
2421 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2422 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2423 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2428 static int closest_timer(const struct sge *s, int time)
2430 int i, delta, match = 0, min_delta = INT_MAX;
2432 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2433 delta = time - s->timer_val[i];
2436 if (delta < min_delta) {
2444 static int closest_thres(const struct sge *s, int thres)
2446 int i, delta, match = 0, min_delta = INT_MAX;
2448 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2449 delta = thres - s->counter_val[i];
2452 if (delta < min_delta) {
2461 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2463 static unsigned int qtimer_val(const struct adapter *adap,
2464 const struct sge_rspq *q)
2466 unsigned int idx = q->intr_params >> 1;
2468 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2472 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
2473 * @adap: the adapter
2475 * @us: the hold-off time in us, or 0 to disable timer
2476 * @cnt: the hold-off packet count, or 0 to disable counter
2478 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2479 * one of the two needs to be enabled for the queue to generate interrupts.
2481 static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2482 unsigned int us, unsigned int cnt)
2484 if ((us | cnt) == 0)
2491 new_idx = closest_thres(&adap->sge, cnt);
2492 if (q->desc && q->pktcnt_idx != new_idx) {
2493 /* the queue has already been created, update it */
2494 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2495 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2496 FW_PARAMS_PARAM_YZ(q->cntxt_id);
2497 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2502 q->pktcnt_idx = new_idx;
2505 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
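/*
 * Encoding note (illustrative): bit 0 of intr_params carries QINTR_CNT_EN
 * and the timer index occupies the bits above it, which is why
 * qtimer_val() above recovers the index with "q->intr_params >> 1".
 */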
2510 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2512 const struct port_info *pi = netdev_priv(dev);
2513 struct adapter *adap = pi->adapter;
2518 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2519 q = &adap->sge.ethrxq[i].rspq;
2520 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2521 c->rx_max_coalesced_frames);
2523 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2530 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2532 const struct port_info *pi = netdev_priv(dev);
2533 const struct adapter *adap = pi->adapter;
2534 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2536 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2537 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2538 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2543 * eeprom_ptov - translate a physical EEPROM address to virtual
2544 * @phys_addr: the physical EEPROM address
2545 * @fn: the PCI function number
2546 * @sz: size of function-specific area
2548 * Translate a physical EEPROM address to virtual. The first 1K is
2549 * accessed through virtual addresses starting at 31K, the rest is
2550 * accessed through virtual addresses starting at 0.
2552 * The mapping is as follows:
2553 * [0..1K) -> [31K..32K)
2554 * [1K..1K+A) -> [31K-A..31K)
2555 * [1K+A..ES) -> [0..ES-A-1K)
2557 * where A = @fn * @sz, and ES = EEPROM size.
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
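/*
 * Worked example of the mapping above (illustrative): with A = fn * sz,
 * physical address 0 maps to virtual 31K, physical 1K maps to virtual
 * 31K - A (the start of the relocated function-private area), and
 * physical 1K + A maps to virtual 0.
 */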
2572 * The next two routines implement eeprom read/write from physical addresses.
2574 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2576 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2579 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2580 return vaddr < 0 ? vaddr : 0;
2583 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2585 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2588 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2589 return vaddr < 0 ? vaddr : 0;
2592 #define EEPROM_MAGIC 0x38E2F10C
2594 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2598 struct adapter *adapter = netdev2adap(dev);
2600 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2604 e->magic = EEPROM_MAGIC;
2605 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2606 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2609 memcpy(data, buf + e->offset, e->len);
2614 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2619 u32 aligned_offset, aligned_len, *p;
2620 struct adapter *adapter = netdev2adap(dev);
2622 if (eeprom->magic != EEPROM_MAGIC)
2625 aligned_offset = eeprom->offset & ~3;
2626 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2628 if (adapter->fn > 0) {
2629 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2631 if (aligned_offset < start ||
2632 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2636 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2638 * RMW possibly needed for first or last words.
2640 buf = kmalloc(aligned_len, GFP_KERNEL);
2643 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2644 if (!err && aligned_len > 4)
2645 err = eeprom_rd_phys(adapter,
2646 aligned_offset + aligned_len - 4,
2647 (u32 *)&buf[aligned_len - 4]);
2650 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2654 err = t4_seeprom_wp(adapter, false);
2658 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2659 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2660 aligned_offset += 4;
2664 err = t4_seeprom_wp(adapter, true);
2671 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2674 const struct firmware *fw;
2675 struct adapter *adap = netdev2adap(netdev);
2677 ef->data[sizeof(ef->data) - 1] = '\0';
2678 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2682 ret = t4_load_fw(adap, fw->data, fw->size);
2683 release_firmware(fw);
2685 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2689 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2690 #define BCAST_CRC 0xa0ccc1a6
2692 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2694 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2695 wol->wolopts = netdev2adap(dev)->wol;
2696 memset(&wol->sopass, 0, sizeof(wol->sopass));
2699 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2702 struct port_info *pi = netdev_priv(dev);
2704 if (wol->wolopts & ~WOL_SUPPORTED)
2706 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2707 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2708 if (wol->wolopts & WAKE_BCAST) {
2709 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2712 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2713 ~6ULL, ~0ULL, BCAST_CRC, true);
2715 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2719 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2721 const struct port_info *pi = netdev_priv(dev);
2722 netdev_features_t changed = dev->features ^ features;
2725 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2728 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2730 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2732 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2736 static u32 get_rss_table_size(struct net_device *dev)
2738 const struct port_info *pi = netdev_priv(dev);
2740 return pi->rss_size;
2743 static int get_rss_table(struct net_device *dev, u32 *p)
2745 const struct port_info *pi = netdev_priv(dev);
2746 unsigned int n = pi->rss_size;
2753 static int set_rss_table(struct net_device *dev, const u32 *p)
2756 struct port_info *pi = netdev_priv(dev);
2758 for (i = 0; i < pi->rss_size; i++)
2760 if (pi->adapter->flags & FULL_INIT_DONE)
2761 return write_rss(pi, pi->rss);
2765 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2768 const struct port_info *pi = netdev_priv(dev);
2770 switch (info->cmd) {
2771 case ETHTOOL_GRXFH: {
2772 unsigned int v = pi->rss_mode;
2775 switch (info->flow_type) {
2777 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2778 info->data = RXH_IP_SRC | RXH_IP_DST |
2779 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2780 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2781 info->data = RXH_IP_SRC | RXH_IP_DST;
2784 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2785 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2786 info->data = RXH_IP_SRC | RXH_IP_DST |
2787 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2788 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2789 info->data = RXH_IP_SRC | RXH_IP_DST;
2792 case AH_ESP_V4_FLOW:
2794 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2795 info->data = RXH_IP_SRC | RXH_IP_DST;
2798 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2799 info->data = RXH_IP_SRC | RXH_IP_DST |
2800 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2801 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2802 info->data = RXH_IP_SRC | RXH_IP_DST;
2805 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2806 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2807 info->data = RXH_IP_SRC | RXH_IP_DST |
2808 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2809 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2810 info->data = RXH_IP_SRC | RXH_IP_DST;
2813 case AH_ESP_V6_FLOW:
2815 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2816 info->data = RXH_IP_SRC | RXH_IP_DST;
2821 case ETHTOOL_GRXRINGS:
2822 info->data = pi->nqsets;
2828 static const struct ethtool_ops cxgb_ethtool_ops = {
2829 .get_settings = get_settings,
2830 .set_settings = set_settings,
2831 .get_drvinfo = get_drvinfo,
2832 .get_msglevel = get_msglevel,
2833 .set_msglevel = set_msglevel,
2834 .get_ringparam = get_sge_param,
2835 .set_ringparam = set_sge_param,
2836 .get_coalesce = get_coalesce,
2837 .set_coalesce = set_coalesce,
2838 .get_eeprom_len = get_eeprom_len,
2839 .get_eeprom = get_eeprom,
2840 .set_eeprom = set_eeprom,
2841 .get_pauseparam = get_pauseparam,
2842 .set_pauseparam = set_pauseparam,
2843 .get_link = ethtool_op_get_link,
2844 .get_strings = get_strings,
2845 .set_phys_id = identify_port,
2846 .nway_reset = restart_autoneg,
2847 .get_sset_count = get_sset_count,
2848 .get_ethtool_stats = get_stats,
2849 .get_regs_len = get_regs_len,
2850 .get_regs = get_regs,
2853 .get_rxnfc = get_rxnfc,
2854 .get_rxfh_indir_size = get_rss_table_size,
2855 .get_rxfh_indir = get_rss_table,
2856 .set_rxfh_indir = set_rss_table,
2857 .flash_device = set_flash,
2863 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2867 loff_t avail = file_inode(file)->i_size;
2868 unsigned int mem = (uintptr_t)file->private_data & 3;
2869 struct adapter *adap = file->private_data - mem;
2875 if (count > avail - pos)
2876 count = avail - pos;
2883 if ((mem == MEM_MC) || (mem == MEM_MC1))
2884 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2886 ret = t4_edc_read(adap, mem, pos, data, NULL);
2890 ofst = pos % sizeof(data);
2891 len = min(count, sizeof(data) - ofst);
2892 if (copy_to_user(buf, (u8 *)data + ofst, len))
2899 count = pos - *ppos;
2904 static const struct file_operations mem_debugfs_fops = {
2905 .owner = THIS_MODULE,
2906 .open = simple_open,
2908 .llseek = default_llseek,
2911 static void add_debugfs_mem(struct adapter *adap, const char *name,
2912 unsigned int idx, unsigned int size_mb)
2916 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2917 (void *)adap + idx, &mem_debugfs_fops);
2918 if (de && de->d_inode)
2919 de->d_inode->i_size = size_mb << 20;
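/*
 * Illustrative sketch (not part of the driver): mem_read() above relies on
 * the low two bits of its private_data cookie being free because struct
 * adapter is at least 4-byte aligned; add_debugfs_mem() adds the memory-type
 * index (0-3) onto the pointer, which is equivalent to OR-ing it into the
 * alignment bits. A hypothetical decode helper would look like this:
 */
static inline struct adapter *mem_cookie_decode(void *cookie,
						unsigned int *memtype)
{
	*memtype = (uintptr_t)cookie & 3;	/* low 2 bits hold the index */
	return cookie - *memtype;		/* strip bits to recover adap */
}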
2922 static int setup_debugfs(struct adapter *adap)
2927 if (IS_ERR_OR_NULL(adap->debugfs_root))
2930 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2931 if (i & EDRAM0_ENABLE) {
2932 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2933 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2935 if (i & EDRAM1_ENABLE) {
2936 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2937 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2939 if (is_t4(adap->chip)) {
2940 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2941 if (i & EXT_MEM_ENABLE)
2942 add_debugfs_mem(adap, "mc", MEM_MC,
2943 EXT_MEM_SIZE_GET(size));
2945 if (i & EXT_MEM_ENABLE) {
2946 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2947 add_debugfs_mem(adap, "mc0", MEM_MC0,
2948 EXT_MEM_SIZE_GET(size));
2950 if (i & EXT_MEM1_ENABLE) {
2951 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2952 add_debugfs_mem(adap, "mc1", MEM_MC1,
2953 EXT_MEM_SIZE_GET(size));
2957 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2963 * upper-layer driver support
2967 * Allocate an active-open TID and set it to the supplied value.
2969 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2973 spin_lock_bh(&t->atid_lock);
2975 union aopen_entry *p = t->afree;
2977 atid = (p - t->atid_tab) + t->atid_base;
2982 spin_unlock_bh(&t->atid_lock);
2985 EXPORT_SYMBOL(cxgb4_alloc_atid);
2988 * Release an active-open TID.
2990 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2992 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2994 spin_lock_bh(&t->atid_lock);
2998 spin_unlock_bh(&t->atid_lock);
3000 EXPORT_SYMBOL(cxgb4_free_atid);
3003 * Allocate a server TID and set it to the supplied value.
3005 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3009 spin_lock_bh(&t->stid_lock);
3010 if (family == PF_INET) {
3011 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3012 if (stid < t->nstids)
3013 __set_bit(stid, t->stid_bmap);
3017 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3022 t->stid_tab[stid].data = data;
3023 stid += t->stid_base;
3026 spin_unlock_bh(&t->stid_lock);
3029 EXPORT_SYMBOL(cxgb4_alloc_stid);
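/*
 * Note on sizing (an illustrative reading of the bitmap calls above): an
 * IPv4 server consumes a single stid, claimed with find_first_zero_bit()
 * and __set_bit(), while a non-IPv4 server reserves an order-2 region --
 * bitmap_find_free_region(..., 2) grabs four naturally aligned consecutive
 * stids, presumably because an IPv6 address needs four times the lookup
 * space of an IPv4 address.
 */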
3031 /* Allocate a server filter TID and set it to the supplied value.
3033 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3037 spin_lock_bh(&t->stid_lock);
3038 if (family == PF_INET) {
3039 stid = find_next_zero_bit(t->stid_bmap,
3040 t->nstids + t->nsftids, t->nstids);
3041 if (stid < (t->nstids + t->nsftids))
3042 __set_bit(stid, t->stid_bmap);
3049 t->stid_tab[stid].data = data;
3050 stid += t->stid_base;
3053 spin_unlock_bh(&t->stid_lock);
3056 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3058 /* Release a server TID.
3060 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3062 stid -= t->stid_base;
3063 spin_lock_bh(&t->stid_lock);
3064 if (family == PF_INET)
3065 __clear_bit(stid, t->stid_bmap);
3067 bitmap_release_region(t->stid_bmap, stid, 2);
3068 t->stid_tab[stid].data = NULL;
3070 spin_unlock_bh(&t->stid_lock);
3072 EXPORT_SYMBOL(cxgb4_free_stid);
3075 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3077 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3080 struct cpl_tid_release *req;
3082 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3083 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3084 INIT_TP_WR(req, tid);
3085 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3089 * Queue a TID release request and, if necessary, schedule a work queue to
3090 * process it.
3092 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3095 void **p = &t->tid_tab[tid];
3096 struct adapter *adap = container_of(t, struct adapter, tids);
3098 spin_lock_bh(&adap->tid_release_lock);
3099 *p = adap->tid_release_head;
3100 /* Low 2 bits encode the Tx channel number */
3101 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3102 if (!adap->tid_release_task_busy) {
3103 adap->tid_release_task_busy = true;
3104 queue_work(workq, &adap->tid_release_task);
3106 spin_unlock_bh(&adap->tid_release_lock);
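/*
 * Sketch of the deferred-release encoding used above (illustrative, not new
 * driver logic): the pending list is threaded through the tid_tab[] slots
 * themselves, with the Tx channel (0-3) stashed in the low two bits of each
 * link, which are free because the slots are pointer aligned:
 *
 *	void **p = &t->tid_tab[tid];
 *	*p = adap->tid_release_head;		// link in the old head
 *	adap->tid_release_head = (void **)((uintptr_t)p | chan);
 *
 * process_tid_release_list() reverses this with "chan = (uintptr_t)p & 3"
 * and "p = (void *)p - chan" before computing the tid from the slot address.
 */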
3110 * Process the list of pending TID release requests.
3112 static void process_tid_release_list(struct work_struct *work)
3114 struct sk_buff *skb;
3115 struct adapter *adap;
3117 adap = container_of(work, struct adapter, tid_release_task);
3119 spin_lock_bh(&adap->tid_release_lock);
3120 while (adap->tid_release_head) {
3121 void **p = adap->tid_release_head;
3122 unsigned int chan = (uintptr_t)p & 3;
3123 p = (void *)p - chan;
3125 adap->tid_release_head = *p;
3127 spin_unlock_bh(&adap->tid_release_lock);
3129 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3131 schedule_timeout_uninterruptible(1);
3133 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3134 t4_ofld_send(adap, skb);
3135 spin_lock_bh(&adap->tid_release_lock);
3137 adap->tid_release_task_busy = false;
3138 spin_unlock_bh(&adap->tid_release_lock);
3142 * Release a TID and inform HW. If we are unable to allocate the release
3143 * message we defer to a work queue.
3145 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3148 struct sk_buff *skb;
3149 struct adapter *adap = container_of(t, struct adapter, tids);
3151 old = t->tid_tab[tid];
3152 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3154 t->tid_tab[tid] = NULL;
3155 mk_tid_release(skb, chan, tid);
3156 t4_ofld_send(adap, skb);
3158 cxgb4_queue_tid_release(t, chan, tid);
3160 atomic_dec(&t->tids_in_use);
3162 EXPORT_SYMBOL(cxgb4_remove_tid);
3165 * Allocate and initialize the TID tables. Returns 0 on success.
3167 static int tid_init(struct tid_info *t)
3170 unsigned int stid_bmap_size;
3171 unsigned int natids = t->natids;
3173 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3174 size = t->ntids * sizeof(*t->tid_tab) +
3175 natids * sizeof(*t->atid_tab) +
3176 t->nstids * sizeof(*t->stid_tab) +
3177 t->nsftids * sizeof(*t->stid_tab) +
3178 stid_bmap_size * sizeof(long) +
3179 t->nftids * sizeof(*t->ftid_tab) +
3180 t->nsftids * sizeof(*t->ftid_tab);
3182 t->tid_tab = t4_alloc_mem(size);
3186 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3187 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3188 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3189 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3190 spin_lock_init(&t->stid_lock);
3191 spin_lock_init(&t->atid_lock);
3193 t->stids_in_use = 0;
3195 t->atids_in_use = 0;
3196 atomic_set(&t->tids_in_use, 0);
3198 /* Set up the free list for atid_tab and clear the stid bitmap. */
3201 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3202 t->afree = t->atid_tab;
3204 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
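/*
 * Layout of the single t4_alloc_mem() block carved up above (illustrative;
 * the offsets follow directly from the size arithmetic in this function):
 *
 *	tid_tab[ntids]			connection TIDs
 *	atid_tab[natids]		active-open TIDs, kept on a free list
 *	stid_tab[nstids + nsftids]	server and server-filter TIDs
 *	stid_bmap[...]			allocation bitmap for stid_tab
 *	ftid_tab[nftids + nsftids]	filter entries
 *
 * One contiguous allocation means a single free releases every table.
 */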
3209 * cxgb4_create_server - create an IP server
3211 * @stid: the server TID
3212 * @sip: local IP address to bind server to
3213 * @sport: the server's TCP port
3214 * @queue: queue to direct messages from this server to
3216 * Create an IP server for the given port and address.
3217 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3219 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3220 __be32 sip, __be16 sport, __be16 vlan,
3224 struct sk_buff *skb;
3225 struct adapter *adap;
3226 struct cpl_pass_open_req *req;
3228 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3232 adap = netdev2adap(dev);
3233 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3235 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3236 req->local_port = sport;
3237 req->peer_port = htons(0);
3238 req->local_ip = sip;
3239 req->peer_ip = htonl(0);
3240 chan = rxq_to_chan(&adap->sge, queue);
3241 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3242 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3243 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3244 return t4_mgmt_tx(adap, skb);
3246 EXPORT_SYMBOL(cxgb4_create_server);
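/*
 * Minimal usage sketch for an upper-layer driver (illustrative only: error
 * handling is elided and "my_ctx"/"rxq_id" are placeholder names):
 *
 *	int stid = cxgb4_alloc_stid(tids, PF_INET, my_ctx);
 *	if (stid >= 0)
 *		err = cxgb4_create_server(netdev, stid, sip, htons(80),
 *					  0, rxq_id);
 *
 * Because peer_port/peer_ip are left as zero above, the resulting
 * CPL_PASS_OPEN_REQ matches connections from any remote peer.
 */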
3249 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3250 * @mtus: the HW MTU table
3251 * @mtu: the target MTU
3252 * @idx: index of selected entry in the MTU table
3254 * Returns the index and the value in the HW MTU table that is closest to
3255 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3256 * table, in which case the smallest available value is selected.
3258 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3263 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3269 EXPORT_SYMBOL(cxgb4_best_mtu);
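/*
 * Worked example (illustrative): if the HW MTU table contains entries such
 * as ..., 1488, 1500, ..., then a target of 1400 stops the scan at the
 * largest entry <= 1400, while a target smaller than mtus[0] leaves i == 0
 * and returns the smallest value the table offers:
 *
 *	unsigned int idx;
 *	unsigned int m = cxgb4_best_mtu(adap->params.mtus, 1400, &idx);
 *	// m == adap->params.mtus[idx]
 */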
3272 * cxgb4_port_chan - get the HW channel of a port
3273 * @dev: the net device for the port
3275 * Return the HW Tx channel of the given port.
3277 unsigned int cxgb4_port_chan(const struct net_device *dev)
3279 return netdev2pinfo(dev)->tx_chan;
3281 EXPORT_SYMBOL(cxgb4_port_chan);
3283 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3285 struct adapter *adap = netdev2adap(dev);
3286 u32 v1, v2, lp_count, hp_count;
3288 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3289 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3290 if (is_t4(adap->chip)) {
3291 lp_count = G_LP_COUNT(v1);
3292 hp_count = G_HP_COUNT(v1);
3294 lp_count = G_LP_COUNT_T5(v1);
3295 hp_count = G_HP_COUNT_T5(v2);
3297 return lpfifo ? lp_count : hp_count;
3299 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3302 * cxgb4_port_viid - get the VI id of a port
3303 * @dev: the net device for the port
3305 * Return the VI id of the given port.
3307 unsigned int cxgb4_port_viid(const struct net_device *dev)
3309 return netdev2pinfo(dev)->viid;
3311 EXPORT_SYMBOL(cxgb4_port_viid);
3314 * cxgb4_port_idx - get the index of a port
3315 * @dev: the net device for the port
3317 * Return the index of the given port.
3319 unsigned int cxgb4_port_idx(const struct net_device *dev)
3321 return netdev2pinfo(dev)->port_id;
3323 EXPORT_SYMBOL(cxgb4_port_idx);
3325 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3326 struct tp_tcp_stats *v6)
3328 struct adapter *adap = pci_get_drvdata(pdev);
3330 spin_lock(&adap->stats_lock);
3331 t4_tp_get_tcp_stats(adap, v4, v6);
3332 spin_unlock(&adap->stats_lock);
3334 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3336 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3337 const unsigned int *pgsz_order)
3339 struct adapter *adap = netdev2adap(dev);
3341 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3342 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3343 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3344 HPZ3(pgsz_order[3]));
3346 EXPORT_SYMBOL(cxgb4_iscsi_init);
3348 int cxgb4_flush_eq_cache(struct net_device *dev)
3350 struct adapter *adap = netdev2adap(dev);
3353 ret = t4_fwaddrspace_write(adap, adap->mbox,
3354 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3357 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3359 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3361 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3365 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3367 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3368 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3373 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3376 struct adapter *adap = netdev2adap(dev);
3377 u16 hw_pidx, hw_cidx;
3380 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3384 if (pidx != hw_pidx) {
3387 if (pidx >= hw_pidx)
3388 delta = pidx - hw_pidx;
3390 delta = size - hw_pidx + pidx;
3392 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3393 QID(qid) | PIDX(delta));
3398 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
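/*
 * Worked example of the resync above (illustrative): the egress queue is a
 * ring of "size" entries, so the doorbell delta is the forward distance
 * from the hardware pidx to the software pidx, modulo the ring size:
 *
 *	size = 1024, hw_pidx = 1000, pidx = 10
 *		=> delta = size - hw_pidx + pidx = 34
 *
 * Writing QID(qid) | PIDX(delta) to SGE_PF_KDOORBELL advances the hardware
 * producer index by exactly the entries it missed.
 */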
3400 void cxgb4_disable_db_coalescing(struct net_device *dev)
3402 struct adapter *adap;
3404 adap = netdev2adap(dev);
3405 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3408 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3410 void cxgb4_enable_db_coalescing(struct net_device *dev)
3412 struct adapter *adap;
3414 adap = netdev2adap(dev);
3415 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3417 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3419 static struct pci_driver cxgb4_driver;
3421 static void check_neigh_update(struct neighbour *neigh)
3423 const struct device *parent;
3424 const struct net_device *netdev = neigh->dev;
3426 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3427 netdev = vlan_dev_real_dev(netdev);
3428 parent = netdev->dev.parent;
3429 if (parent && parent->driver == &cxgb4_driver.driver)
3430 t4_l2t_update(dev_get_drvdata(parent), neigh);
3433 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3437 case NETEVENT_NEIGH_UPDATE:
3438 check_neigh_update(data);
3440 case NETEVENT_REDIRECT:
3447 static bool netevent_registered;
3448 static struct notifier_block cxgb4_netevent_nb = {
3449 .notifier_call = netevent_cb
3452 static void drain_db_fifo(struct adapter *adap, int usecs)
3454 u32 v1, v2, lp_count, hp_count;
3457 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3458 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3459 if (is_t4(adap->chip)) {
3460 lp_count = G_LP_COUNT(v1);
3461 hp_count = G_HP_COUNT(v1);
3463 lp_count = G_LP_COUNT_T5(v1);
3464 hp_count = G_HP_COUNT_T5(v2);
3467 if (lp_count == 0 && hp_count == 0)
3469 set_current_state(TASK_UNINTERRUPTIBLE);
3470 schedule_timeout(usecs_to_jiffies(usecs));
3474 static void disable_txq_db(struct sge_txq *q)
3476 spin_lock_irq(&q->db_lock);
3478 spin_unlock_irq(&q->db_lock);
3481 static void enable_txq_db(struct sge_txq *q)
3483 spin_lock_irq(&q->db_lock);
3485 spin_unlock_irq(&q->db_lock);
3488 static void disable_dbs(struct adapter *adap)
3492 for_each_ethrxq(&adap->sge, i)
3493 disable_txq_db(&adap->sge.ethtxq[i].q);
3494 for_each_ofldrxq(&adap->sge, i)
3495 disable_txq_db(&adap->sge.ofldtxq[i].q);
3496 for_each_port(adap, i)
3497 disable_txq_db(&adap->sge.ctrlq[i].q);
3500 static void enable_dbs(struct adapter *adap)
3504 for_each_ethrxq(&adap->sge, i)
3505 enable_txq_db(&adap->sge.ethtxq[i].q);
3506 for_each_ofldrxq(&adap->sge, i)
3507 enable_txq_db(&adap->sge.ofldtxq[i].q);
3508 for_each_port(adap, i)
3509 enable_txq_db(&adap->sge.ctrlq[i].q);
3512 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3514 u16 hw_pidx, hw_cidx;
3517 spin_lock_bh(&q->db_lock);
3518 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3521 if (q->db_pidx != hw_pidx) {
3524 if (q->db_pidx >= hw_pidx)
3525 delta = q->db_pidx - hw_pidx;
3527 delta = q->size - hw_pidx + q->db_pidx;
3529 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3530 QID(q->cntxt_id) | PIDX(delta));
3534 spin_unlock_bh(&q->db_lock);
3536 CH_WARN(adap, "DB drop recovery failed.\n");
3538 static void recover_all_queues(struct adapter *adap)
3542 for_each_ethrxq(&adap->sge, i)
3543 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3544 for_each_ofldrxq(&adap->sge, i)
3545 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3546 for_each_port(adap, i)
3547 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3550 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3552 mutex_lock(&uld_mutex);
3553 if (adap->uld_handle[CXGB4_ULD_RDMA])
3554 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3556 mutex_unlock(&uld_mutex);
3559 static void process_db_full(struct work_struct *work)
3561 struct adapter *adap;
3563 adap = container_of(work, struct adapter, db_full_task);
3565 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3566 drain_db_fifo(adap, dbfifo_drain_delay);
3567 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3568 DBFIFO_HP_INT | DBFIFO_LP_INT,
3569 DBFIFO_HP_INT | DBFIFO_LP_INT);
3570 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3573 static void process_db_drop(struct work_struct *work)
3575 struct adapter *adap;
3577 adap = container_of(work, struct adapter, db_drop_task);
3579 if (is_t4(adap->chip)) {
3581 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3582 drain_db_fifo(adap, 1);
3583 recover_all_queues(adap);
3586 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3587 u16 qid = (dropped_db >> 15) & 0x1ffff;
3588 u16 pidx_inc = dropped_db & 0x1fff;
3590 unsigned short udb_density;
3591 unsigned long qpshift;
3595 dev_warn(adap->pdev_dev,
3596 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3598 (dropped_db >> 14) & 1,
3599 (dropped_db >> 13) & 1,
3602 drain_db_fifo(adap, 1);
3604 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3605 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3606 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3607 qpshift = PAGE_SHIFT - ilog2(udb_density);
3608 udb = qid << qpshift;
3610 page = udb / PAGE_SIZE;
3611 udb += (qid - (page * udb_density)) * 128;
3613 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3615 /* Re-enable BAR2 WC */
3616 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3619 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
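/*
 * Worked example of the T5 BAR2 arithmetic above (illustrative, assuming
 * 4KB pages and udb_density == 2 user doorbell regions per page):
 *
 *	qpshift = PAGE_SHIFT - ilog2(2) = 11
 *	qid = 5:  udb  = 5 << 11           = 0x2800
 *	          page = 0x2800 / 4096     = 2
 *	          udb += (5 - 2 * 2) * 128  => 0x2880
 *
 * The writel() of PIDX(pidx_inc) at bar2 + udb + 8 then replays the
 * doorbell the hardware dropped.
 */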
3622 void t4_db_full(struct adapter *adap)
3624 if (is_t4(adap->chip)) {
3625 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3626 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3627 queue_work(workq, &adap->db_full_task);
3631 void t4_db_dropped(struct adapter *adap)
3633 if (is_t4(adap->chip))
3634 queue_work(workq, &adap->db_drop_task);
3637 static void uld_attach(struct adapter *adap, unsigned int uld)
3640 struct cxgb4_lld_info lli;
3643 lli.pdev = adap->pdev;
3644 lli.l2t = adap->l2t;
3645 lli.tids = &adap->tids;
3646 lli.ports = adap->port;
3647 lli.vr = &adap->vres;
3648 lli.mtus = adap->params.mtus;
3649 if (uld == CXGB4_ULD_RDMA) {
3650 lli.rxq_ids = adap->sge.rdma_rxq;
3651 lli.nrxq = adap->sge.rdmaqs;
3652 } else if (uld == CXGB4_ULD_ISCSI) {
3653 lli.rxq_ids = adap->sge.ofld_rxq;
3654 lli.nrxq = adap->sge.ofldqsets;
3656 lli.ntxq = adap->sge.ofldqsets;
3657 lli.nchan = adap->params.nports;
3658 lli.nports = adap->params.nports;
3659 lli.wr_cred = adap->params.ofldq_wr_cred;
3660 lli.adapter_type = adap->params.rev;
3661 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3662 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3663 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3665 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3666 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3668 lli.filt_mode = adap->filter_mode;
3669 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3670 for (i = 0; i < NCHAN; i++)
3672 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3673 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3674 lli.fw_vers = adap->params.fw_vers;
3675 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3676 lli.sge_pktshift = adap->sge.pktshift;
3677 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3679 handle = ulds[uld].add(&lli);
3680 if (IS_ERR(handle)) {
3681 dev_warn(adap->pdev_dev,
3682 "could not attach to the %s driver, error %ld\n",
3683 uld_str[uld], PTR_ERR(handle));
3687 adap->uld_handle[uld] = handle;
3689 if (!netevent_registered) {
3690 register_netevent_notifier(&cxgb4_netevent_nb);
3691 netevent_registered = true;
3694 if (adap->flags & FULL_INIT_DONE)
3695 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3698 static void attach_ulds(struct adapter *adap)
3702 mutex_lock(&uld_mutex);
3703 list_add_tail(&adap->list_node, &adapter_list);
3704 for (i = 0; i < CXGB4_ULD_MAX; i++)
3706 uld_attach(adap, i);
3707 mutex_unlock(&uld_mutex);
3710 static void detach_ulds(struct adapter *adap)
3714 mutex_lock(&uld_mutex);
3715 list_del(&adap->list_node);
3716 for (i = 0; i < CXGB4_ULD_MAX; i++)
3717 if (adap->uld_handle[i]) {
3718 ulds[i].state_change(adap->uld_handle[i],
3719 CXGB4_STATE_DETACH);
3720 adap->uld_handle[i] = NULL;
3722 if (netevent_registered && list_empty(&adapter_list)) {
3723 unregister_netevent_notifier(&cxgb4_netevent_nb);
3724 netevent_registered = false;
3726 mutex_unlock(&uld_mutex);
3729 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3733 mutex_lock(&uld_mutex);
3734 for (i = 0; i < CXGB4_ULD_MAX; i++)
3735 if (adap->uld_handle[i])
3736 ulds[i].state_change(adap->uld_handle[i], new_state);
3737 mutex_unlock(&uld_mutex);
3741 * cxgb4_register_uld - register an upper-layer driver
3742 * @type: the ULD type
3743 * @p: the ULD methods
3745 * Registers an upper-layer driver with this driver and notifies the ULD
3746 * about any presently available devices that support its type. Returns
3747 * %-EBUSY if a ULD of the same type is already registered.
3749 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3752 struct adapter *adap;
3754 if (type >= CXGB4_ULD_MAX)
3756 mutex_lock(&uld_mutex);
3757 if (ulds[type].add) {
3762 list_for_each_entry(adap, &adapter_list, list_node)
3763 uld_attach(adap, type);
3764 out: mutex_unlock(&uld_mutex);
3767 EXPORT_SYMBOL(cxgb4_register_uld);
3770 * cxgb4_unregister_uld - unregister an upper-layer driver
3771 * @type: the ULD type
3773 * Unregisters an existing upper-layer driver.
3775 int cxgb4_unregister_uld(enum cxgb4_uld type)
3777 struct adapter *adap;
3779 if (type >= CXGB4_ULD_MAX)
3781 mutex_lock(&uld_mutex);
3782 list_for_each_entry(adap, &adapter_list, list_node)
3783 adap->uld_handle[type] = NULL;
3784 ulds[type].add = NULL;
3785 mutex_unlock(&uld_mutex);
3788 EXPORT_SYMBOL(cxgb4_unregister_uld);
3791 * cxgb_up - enable the adapter
3792 * @adap: adapter being enabled
3794 * Called when the first port is enabled, this function performs the
3795 * actions necessary to make an adapter operational, such as completing
3796 * the initialization of HW modules, and enabling interrupts.
3798 * Must be called with the rtnl lock held.
3800 static int cxgb_up(struct adapter *adap)
3804 err = setup_sge_queues(adap);
3807 err = setup_rss(adap);
3811 if (adap->flags & USING_MSIX) {
3812 name_msix_vecs(adap);
3813 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
3814 adap->msix_info[0].desc, adap);
3818 err = request_msix_queue_irqs(adap);
3820 free_irq(adap->msix_info[0].vec, adap);
3824 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
3825 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
3826 adap->port[0]->name, adap);
3832 t4_intr_enable(adap);
3833 adap->flags |= FULL_INIT_DONE;
3834 notify_ulds(adap, CXGB4_STATE_UP);
3838 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
3840 t4_free_sge_resources(adap);
3844 static void cxgb_down(struct adapter *adapter)
3846 t4_intr_disable(adapter);
3847 cancel_work_sync(&adapter->tid_release_task);
3848 cancel_work_sync(&adapter->db_full_task);
3849 cancel_work_sync(&adapter->db_drop_task);
3850 adapter->tid_release_task_busy = false;
3851 adapter->tid_release_head = NULL;
3853 if (adapter->flags & USING_MSIX) {
3854 free_msix_queue_irqs(adapter);
3855 free_irq(adapter->msix_info[0].vec, adapter);
3857 free_irq(adapter->pdev->irq, adapter);
3858 quiesce_rx(adapter);
3859 t4_sge_stop(adapter);
3860 t4_free_sge_resources(adapter);
3861 adapter->flags &= ~FULL_INIT_DONE;
3865 * net_device operations
3867 static int cxgb_open(struct net_device *dev)
3870 struct port_info *pi = netdev_priv(dev);
3871 struct adapter *adapter = pi->adapter;
3873 netif_carrier_off(dev);
3875 if (!(adapter->flags & FULL_INIT_DONE)) {
3876 err = cxgb_up(adapter);
3881 err = link_start(dev);
3883 netif_tx_start_all_queues(dev);
3887 static int cxgb_close(struct net_device *dev)
3889 struct port_info *pi = netdev_priv(dev);
3890 struct adapter *adapter = pi->adapter;
3892 netif_tx_stop_all_queues(dev);
3893 netif_carrier_off(dev);
3894 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
3897 /* Return an error number if the indicated filter isn't writable ...
3899 static int writable_filter(struct filter_entry *f)
3909 /* Delete the filter at the specified index (if valid), checking for all the
3910 * common problems with doing this, such as the filter being locked or
3911 * currently pending in another operation.
3913 static int delete_filter(struct adapter *adapter, unsigned int fidx)
3915 struct filter_entry *f;
3918 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
3921 f = &adapter->tids.ftid_tab[fidx];
3922 ret = writable_filter(f);
3926 return del_filter_wr(adapter, fidx);
3931 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
3932 __be32 sip, __be16 sport, __be16 vlan,
3933 unsigned int queue, unsigned char port, unsigned char mask)
3936 struct filter_entry *f;
3937 struct adapter *adap;
3941 adap = netdev2adap(dev);
3943 /* Adjust stid to correct filter index */
3944 stid -= adap->tids.nstids;
3945 stid += adap->tids.nftids;
3947 /* Check to make sure the filter requested is writable ...
3949 f = &adap->tids.ftid_tab[stid];
3950 ret = writable_filter(f);
3954 /* Clear out any old resources being used by the filter before
3955 * we start constructing the new filter.
3958 clear_filter(adap, f);
3960 /* Clear out filter specifications */
3961 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
3962 f->fs.val.lport = cpu_to_be16(sport);
3963 f->fs.mask.lport = ~0;
3965 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
3966 for (i = 0; i < 4; i++) {
3967 f->fs.val.lip[i] = val[i];
3968 f->fs.mask.lip[i] = ~0;
3970 if (adap->filter_mode & F_PORT) {
3971 f->fs.val.iport = port;
3972 f->fs.mask.iport = mask;
3978 /* Mark filter as locked */
3982 ret = set_filter_wr(adap, stid);
3984 clear_filter(adap, f);
3990 EXPORT_SYMBOL(cxgb4_create_server_filter);
3992 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
3993 unsigned int queue, bool ipv6)
3996 struct filter_entry *f;
3997 struct adapter *adap;
3999 adap = netdev2adap(dev);
4001 /* Adjust stid to correct filter index */
4002 stid -= adap->tids.nstids;
4003 stid += adap->tids.nftids;
4005 f = &adap->tids.ftid_tab[stid];
4006 /* Unlock the filter */
4009 ret = delete_filter(adap, stid);
4015 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4017 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4018 struct rtnl_link_stats64 *ns)
4020 struct port_stats stats;
4021 struct port_info *p = netdev_priv(dev);
4022 struct adapter *adapter = p->adapter;
4024 spin_lock(&adapter->stats_lock);
4025 t4_get_port_stats(adapter, p->tx_chan, &stats);
4026 spin_unlock(&adapter->stats_lock);
4028 ns->tx_bytes = stats.tx_octets;
4029 ns->tx_packets = stats.tx_frames;
4030 ns->rx_bytes = stats.rx_octets;
4031 ns->rx_packets = stats.rx_frames;
4032 ns->multicast = stats.rx_mcast_frames;
4034 /* detailed rx_errors */
4035 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4037 ns->rx_over_errors = 0;
4038 ns->rx_crc_errors = stats.rx_fcs_err;
4039 ns->rx_frame_errors = stats.rx_symbol_err;
4040 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4041 stats.rx_ovflow2 + stats.rx_ovflow3 +
4042 stats.rx_trunc0 + stats.rx_trunc1 +
4043 stats.rx_trunc2 + stats.rx_trunc3;
4044 ns->rx_missed_errors = 0;
4046 /* detailed tx_errors */
4047 ns->tx_aborted_errors = 0;
4048 ns->tx_carrier_errors = 0;
4049 ns->tx_fifo_errors = 0;
4050 ns->tx_heartbeat_errors = 0;
4051 ns->tx_window_errors = 0;
4053 ns->tx_errors = stats.tx_error_frames;
4054 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4055 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4059 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4062 int ret = 0, prtad, devad;
4063 struct port_info *pi = netdev_priv(dev);
4064 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4068 if (pi->mdio_addr < 0)
4070 data->phy_id = pi->mdio_addr;
4074 if (mdio_phy_id_is_c45(data->phy_id)) {
4075 prtad = mdio_phy_id_prtad(data->phy_id);
4076 devad = mdio_phy_id_devad(data->phy_id);
4077 } else if (data->phy_id < 32) {
4078 prtad = data->phy_id;
4080 data->reg_num &= 0x1f;
4084 mbox = pi->adapter->fn;
4085 if (cmd == SIOCGMIIREG)
4086 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4087 data->reg_num, &data->val_out);
4089 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4090 data->reg_num, data->val_in);
4098 static void cxgb_set_rxmode(struct net_device *dev)
4100 /* unfortunately we can't return errors to the stack */
4101 set_rxmode(dev, -1, false);
4104 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4107 struct port_info *pi = netdev_priv(dev);
4109 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4111 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4118 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4121 struct sockaddr *addr = p;
4122 struct port_info *pi = netdev_priv(dev);
4124 if (!is_valid_ether_addr(addr->sa_data))
4125 return -EADDRNOTAVAIL;
4127 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4128 pi->xact_addr_filt, addr->sa_data, true, true);
4132 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4133 pi->xact_addr_filt = ret;
4137 #ifdef CONFIG_NET_POLL_CONTROLLER
4138 static void cxgb_netpoll(struct net_device *dev)
4140 struct port_info *pi = netdev_priv(dev);
4141 struct adapter *adap = pi->adapter;
4143 if (adap->flags & USING_MSIX) {
4145 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4147 for (i = pi->nqsets; i; i--, rx++)
4148 t4_sge_intr_msix(0, &rx->rspq);
4150 t4_intr_handler(adap)(0, adap);
4154 static const struct net_device_ops cxgb4_netdev_ops = {
4155 .ndo_open = cxgb_open,
4156 .ndo_stop = cxgb_close,
4157 .ndo_start_xmit = t4_eth_xmit,
4158 .ndo_get_stats64 = cxgb_get_stats,
4159 .ndo_set_rx_mode = cxgb_set_rxmode,
4160 .ndo_set_mac_address = cxgb_set_mac_addr,
4161 .ndo_set_features = cxgb_set_features,
4162 .ndo_validate_addr = eth_validate_addr,
4163 .ndo_do_ioctl = cxgb_ioctl,
4164 .ndo_change_mtu = cxgb_change_mtu,
4165 #ifdef CONFIG_NET_POLL_CONTROLLER
4166 .ndo_poll_controller = cxgb_netpoll,
4170 void t4_fatal_err(struct adapter *adap)
4172 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4173 t4_intr_disable(adap);
4174 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4177 static void setup_memwin(struct adapter *adap)
4179 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4181 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4182 if (is_t4(adap->chip)) {
4183 mem_win0_base = bar0 + MEMWIN0_BASE;
4184 mem_win1_base = bar0 + MEMWIN1_BASE;
4185 mem_win2_base = bar0 + MEMWIN2_BASE;
4187 /* For T5, only relative offset inside the PCIe BAR is passed */
4188 mem_win0_base = MEMWIN0_BASE;
4189 mem_win1_base = MEMWIN1_BASE_T5;
4190 mem_win2_base = MEMWIN2_BASE_T5;
4192 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4193 mem_win0_base | BIR(0) |
4194 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4195 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4196 mem_win1_base | BIR(0) |
4197 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4198 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4199 mem_win2_base | BIR(0) |
4200 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
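/*
 * Note on the encoding above (illustrative): the WINDOW() field stores
 * log2 of the aperture in 1KB units, so a 64KB window is programmed as
 * ilog2(65536) - 10 = 6 and a 2KB window as ilog2(2048) - 10 = 1, while
 * BIR() selects which PCI BAR the window decodes through.
 */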
4203 static void setup_memwin_rdma(struct adapter *adap)
4205 if (adap->vres.ocq.size) {
4206 unsigned int start, sz_kb;
4208 start = pci_resource_start(adap->pdev, 2) +
4209 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4210 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4212 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4213 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4215 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4216 adap->vres.ocq.start);
4218 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4222 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4227 /* get device capabilities */
4228 memset(c, 0, sizeof(*c));
4229 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4230 FW_CMD_REQUEST | FW_CMD_READ);
4231 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4232 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4236 /* select capabilities we'll be using */
4237 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4239 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4241 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4242 } else if (vf_acls) {
4243 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4246 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4247 FW_CMD_REQUEST | FW_CMD_WRITE);
4248 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4252 ret = t4_config_glbl_rss(adap, adap->fn,
4253 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4254 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4255 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4259 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4260 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4266 /* tweak some settings */
4267 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4268 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4269 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4270 v = t4_read_reg(adap, TP_PIO_DATA);
4271 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4273 /* first 4 Tx modulation queues point to consecutive Tx channels */
4274 adap->params.tp.tx_modq_map = 0xE4;
4275 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4276 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4278 /* associate each Tx modulation queue with consecutive Tx channels */
4280 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4281 &v, 1, A_TP_TX_SCHED_HDR);
4282 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4283 &v, 1, A_TP_TX_SCHED_FIFO);
4284 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4285 &v, 1, A_TP_TX_SCHED_PCMD);
4287 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4288 if (is_offload(adap)) {
4289 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4290 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4291 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4292 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4293 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4294 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4295 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4296 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4297 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4298 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4301 /* get basic stuff going */
4302 return t4_early_init(adap, adap->fn);
4306 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4308 #define MAX_ATIDS 8192U
4311 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4313 * If the firmware we're dealing with has Configuration File support, then
4314 * we use that to perform all configuration
4318 * Tweak configuration based on module parameters, etc. Most of these have
4319 * defaults assigned to them by Firmware Configuration Files (if we're using
4320 * them) but need to be explicitly set if we're using hard-coded
4321 * initialization. But even in the case of using Firmware Configuration
4322 * Files, we'd like to expose the ability to change these via module
4323 * parameters so these are essentially common tweaks/settings for
4324 * Configuration Files and hard-coded initialization ...
4326 static int adap_init0_tweaks(struct adapter *adapter)
4329 * Fix up various Host-Dependent Parameters like Page Size, Cache
4330 * Line Size, etc. The firmware default is for a 4KB Page Size and
4331 * 64B Cache Line Size ...
4333 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4336 * Process module parameters which affect early initialization.
4338 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4339 dev_err(&adapter->pdev->dev,
4340 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4344 t4_set_reg_field(adapter, SGE_CONTROL,
4346 PKTSHIFT(rx_dma_offset));
4349 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4350 * adds the pseudo header itself.
4352 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4353 CSUM_HAS_PSEUDO_HDR, 0);
4359 * Attempt to initialize the adapter via a Firmware Configuration File.
4361 static int adap_init0_config(struct adapter *adapter, int reset)
4363 struct fw_caps_config_cmd caps_cmd;
4364 const struct firmware *cf;
4365 unsigned long mtype = 0, maddr = 0;
4366 u32 finiver, finicsum, cfcsum;
4367 int ret, using_flash;
4368 char *fw_config_file, fw_config_file_path[256];
4371 * Reset device if necessary.
4374 ret = t4_fw_reset(adapter, adapter->mbox,
4375 PIORSTMODE | PIORST);
4381 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4382 * then use that. Otherwise, use the configuration file stored
4383 * in the adapter flash ...
4385 switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
4387 fw_config_file = FW_CFNAME;
4390 fw_config_file = FW5_CFNAME;
4393 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4394 adapter->pdev->device);
4399 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4402 mtype = FW_MEMTYPE_CF_FLASH;
4403 maddr = t4_flash_cfg_addr(adapter);
4405 u32 params[7], val[7];
4408 if (cf->size >= FLASH_CFG_MAX_SIZE)
4411 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4412 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4413 ret = t4_query_params(adapter, adapter->mbox,
4414 adapter->fn, 0, 1, params, val);
4417 * For t4_memory_write() below addresses and
4418 * sizes have to be in terms of multiples of 4
4419 * bytes. So, if the Configuration File isn't
4420 * a multiple of 4 bytes in length we'll have
4421 * to write that out separately since we can't
4422 * guarantee that the bytes following the
4423 * residual byte in the buffer returned by
4424 * request_firmware() are zeroed out ...
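 * For example (illustrative), a 103-byte Configuration File would be
 * written as a 100-byte aligned chunk (size = 103 & ~0x3) followed by
 * one final 32-bit word assembled from the resid = 103 & 0x3 = 3
 * leftover bytes with the fourth byte forced to zero.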
4426 size_t resid = cf->size & 0x3;
4427 size_t size = cf->size & ~0x3;
4428 __be32 *data = (__be32 *)cf->data;
4430 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4431 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4433 ret = t4_memory_write(adapter, mtype, maddr,
4435 if (ret == 0 && resid != 0) {
4442 last.word = data[size >> 2];
4443 for (i = resid; i < 4; i++)
4445 ret = t4_memory_write(adapter, mtype,
4452 release_firmware(cf);
4458 * Issue a Capability Configuration command to the firmware to get it
4459 * to parse the Configuration File. We don't use t4_fw_config_file()
4460 * because we want the ability to modify various features after we've
4461 * processed the configuration file ...
4463 memset(&caps_cmd, 0, sizeof(caps_cmd));
4464 caps_cmd.op_to_write =
4465 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4468 caps_cmd.cfvalid_to_len16 =
4469 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4470 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4471 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4472 FW_LEN16(caps_cmd));
4473 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4478 finiver = ntohl(caps_cmd.finiver);
4479 finicsum = ntohl(caps_cmd.finicsum);
4480 cfcsum = ntohl(caps_cmd.cfcsum);
4481 if (finicsum != cfcsum)
4482 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4483 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4487 * And now tell the firmware to use the configuration we just loaded.
4489 caps_cmd.op_to_write =
4490 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4493 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4494 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4500 * Tweak configuration based on system architecture, module
4503 ret = adap_init0_tweaks(adapter);
4508 * And finally tell the firmware to initialize itself using the
4509 * parameters from the Configuration File.
4511 ret = t4_fw_initialize(adapter, adapter->mbox);
4515 sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
4517 * Return successfully and note that we're operating with parameters
4518 * not supplied by the driver, rather than from hard-wired
4519 * initialization constants buried in the driver.
4521 adapter->flags |= USING_SOFT_PARAMS;
4522 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4523 "Configuration File %s, version %#x, computed checksum %#x\n",
4526 : fw_config_file_path),
4531 * Something bad happened. Return the error ... (If the "error"
4532 * is that there's no Configuration File on the adapter we don't
4533 * want to issue a warning since this is fairly common.)
4537 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
4543 * Attempt to initialize the adapter via hard-coded, driver-supplied
4544 * parameters.
4546 static int adap_init0_no_config(struct adapter *adapter, int reset)
4548 struct sge *s = &adapter->sge;
4549 struct fw_caps_config_cmd caps_cmd;
4554 * Reset device if necessary
4557 ret = t4_fw_reset(adapter, adapter->mbox,
4558 PIORSTMODE | PIORST);
4564 * Get device capabilities and select which we'll be using.
4566 memset(&caps_cmd, 0, sizeof(caps_cmd));
4567 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4568 FW_CMD_REQUEST | FW_CMD_READ);
4569 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4570 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4575 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4577 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4579 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4580 } else if (vf_acls) {
4581 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4584 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4585 FW_CMD_REQUEST | FW_CMD_WRITE);
4586 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4592 * Tweak configuration based on system architecture, module
4595 ret = adap_init0_tweaks(adapter);
4600 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4601 * mode which maps each Virtual Interface to its own section of
4602 * the RSS Table and we turn on all map and hash enables ...
4604 adapter->flags |= RSS_TNLALLLOOKUP;
4605 ret = t4_config_glbl_rss(adapter, adapter->mbox,
4606 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4607 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4608 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4609 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4610 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4615 * Set up our own fundamental resource provisioning ...
4617 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4618 PFRES_NEQ, PFRES_NETHCTRL,
4619 PFRES_NIQFLINT, PFRES_NIQ,
4620 PFRES_TC, PFRES_NVI,
4621 FW_PFVF_CMD_CMASK_MASK,
4622 pfvfres_pmask(adapter, adapter->fn, 0),
4624 PFRES_R_CAPS, PFRES_WX_CAPS);
4629 * Perform low level SGE initialization. We need to do this before we
4630 * send the firmware the INITIALIZE command because that will cause
4631 * any other PF Drivers which are waiting for the Master
4632 * Initialization to proceed forward.
4634 for (i = 0; i < SGE_NTIMERS - 1; i++)
4635 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4636 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4637 s->counter_val[0] = 1;
4638 for (i = 1; i < SGE_NCOUNTERS; i++)
4639 s->counter_val[i] = min(intr_cnt[i - 1],
4640 THRESHOLD_0_GET(THRESHOLD_0_MASK));
4641 t4_sge_init(adapter);
4643 #ifdef CONFIG_PCI_IOV
4645 * Provision resource limits for Virtual Functions. We currently
4646 * grant them all the same static resource limits except for the Port
4647 * Access Rights Mask which we're assigning based on the PF. All of
4648 * the static provisioning stuff for both the PF and VF really needs
4649 * to be managed in a persistent manner for each device which the
4650 * firmware controls.
4655 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
4656 if (num_vf[pf] <= 0)
4659 /* VF numbering starts at 1! */
4660 for (vf = 1; vf <= num_vf[pf]; vf++) {
4661 ret = t4_cfg_pfvf(adapter, adapter->mbox,
4663 VFRES_NEQ, VFRES_NETHCTRL,
4664 VFRES_NIQFLINT, VFRES_NIQ,
4665 VFRES_TC, VFRES_NVI,
4666 FW_PFVF_CMD_CMASK_MASK,
4670 VFRES_R_CAPS, VFRES_WX_CAPS);
4672 dev_warn(adapter->pdev_dev,
4674 "provision pf/vf=%d/%d; "
4675 "err=%d\n", pf, vf, ret);
4682 * Set up the default filter mode. Later we'll want to implement this
4683 * via a firmware command, etc. ... This needs to be done before the
4684 * firmware initialization command ... If the selected set of fields
4685 * isn't equal to the default value, we'll need to make sure that the
4686 * field selections will fit in the 36-bit budget.
4688 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
4691 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4692 switch (tp_vlan_pri_map & (1 << j)) {
4694 /* compressed filter field not enabled */
4714 case ETHERTYPE_MASK:
4720 case MPSHITTYPE_MASK:
4723 case FRAGMENTATION_MASK:
4729 dev_err(adapter->pdev_dev,
4730 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
4731 " using %#x\n", tp_vlan_pri_map, bits,
4732 TP_VLAN_PRI_MAP_DEFAULT);
4733 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
4736 v = tp_vlan_pri_map;
4737 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
4738 &v, 1, TP_VLAN_PRI_MAP);
4741 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
4742 * to support any of the compressed filter fields above. Newer
4743 * versions of the firmware do this automatically but it doesn't hurt
4744 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
4745 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
4746 * since the firmware automatically turns this on and off when we have
4747 * a non-zero number of filters active (since it does have a
4748 * performance impact).
4750 if (tp_vlan_pri_map)
4751 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
4752 FIVETUPLELOOKUP_MASK,
4753 FIVETUPLELOOKUP_MASK);
4756 * Tweak some settings.
4758 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
4759 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
4760 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
4761 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
4764 * Get basic stuff going by issuing the Firmware Initialize command.
4765 * Note that this _must_ be after all PFVF commands ...
4767 ret = t4_fw_initialize(adapter, adapter->mbox);
4772 * Return successfully!
4774 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
4775 "driver parameters\n");
4779 * Something bad happened. Return the error ...
4786 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4788 static int adap_init0(struct adapter *adap)
4792 enum dev_state state;
4793 u32 params[7], val[7];
4794 struct fw_caps_config_cmd caps_cmd;
4798 * Contact FW, advertising Master capability (and potentially forcing
4799 * ourselves as the Master PF if our module parameter force_init is
4800 * set).
4802 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
4803 force_init ? MASTER_MUST : MASTER_MAY,
4806 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4810 if (ret == adap->mbox)
4811 adap->flags |= MASTER_PF;
4812 if (force_init && state == DEV_STATE_INIT)
4813 state = DEV_STATE_UNINIT;
4816 * If we're the Master PF Driver and the device is uninitialized,
4817 * then let's consider upgrading the firmware ... (We always want
4818 * to check the firmware version number in order to A. get it for
4819 * later reporting and B. to warn if the currently loaded firmware
4820 * is excessively mismatched relative to the driver.)
4822 ret = t4_check_fw_version(adap);
4823 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4824 if (ret == -EINVAL || ret > 0) {
4825 if (upgrade_fw(adap) >= 0) {
4827 * Note that the chip was reset as part of the
4828 * firmware upgrade so we don't reset it again
4829 * below and grab the new firmware version.
4832 ret = t4_check_fw_version(adap);
4840 * Grab VPD parameters. This should be done after we establish a
4841 * connection to the firmware since some of the VPD parameters
4842 * (notably the Core Clock frequency) are retrieved via requests to
4843 * the firmware. On the other hand, we need these fairly early on
4844 * so we do this right after getting ahold of the firmware.
4846 ret = get_vpd_params(adap, &adap->params.vpd);
4851 * Find out what ports are available to us. Note that we need to do
4852 * this before calling adap_init0_no_config() since it needs nports
4856 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4857 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
4858 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
4862 adap->params.nports = hweight32(port_vec);
4863 adap->params.portvec = port_vec;
4866 * If the firmware is initialized already (and we're not forcing a
4867 * master initialization), note that we're living with existing
4868 * adapter parameters. Otherwise, it's time to try initializing the
4871 if (state == DEV_STATE_INIT) {
4872 dev_info(adap->pdev_dev, "Coming up as %s: "\
4873 "Adapter already initialized\n",
4874 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4875 adap->flags |= USING_SOFT_PARAMS;
4877 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4878 "Initializing adapter\n");
4881 * If the firmware doesn't support Configuration
4882 * Files, warn the user and fall back to hard-coded initialization.
4885 dev_warn(adap->pdev_dev, "Firmware doesn't support "
4886 "configuration file.\n");
4888 ret = adap_init0_no_config(adap, reset);
4891 * Find out whether we're dealing with a version of
4892 * the firmware which has configuration file support.
4894 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4895 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4896 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
4900 * If the firmware doesn't support Configuration
4901 * Files, use the old Driver-based, hard-wired
4902 * initialization. Otherwise, try using the
4903 * Configuration File support and fall back to the
4904 * Driver-based initialization if there's no
4905 * Configuration File found.
4908 ret = adap_init0_no_config(adap, reset);
4911 * The firmware provides us with a memory
4912 * buffer where we can load a Configuration
4913 * File from the host if we want to override
4914 * the Configuration File in flash.
4917 ret = adap_init0_config(adap, reset);
4918 if (ret == -ENOENT) {
4919 dev_info(adap->pdev_dev,
4920 "No Configuration File present "
4921 "on adapter. Using hard-wired "
4922 "configuration parameters.\n");
4923 ret = adap_init0_no_config(adap, reset);
4928 dev_err(adap->pdev_dev,
4929 "could not initialize adapter, error %d\n",
4936 * If we're living with non-hard-coded parameters (either from a
4937 * Firmware Configuration File or values programmed by a different PF
4938 * Driver), give the SGE code a chance to pull in anything that it
4939 * needs ... Note that this must be called after we retrieve our VPD
4940 * parameters in order to know how to convert core ticks to seconds.
4942 if (adap->flags & USING_SOFT_PARAMS) {
4943 ret = t4_sge_init(adap);
4948 if (is_bypass_device(adap->pdev->device))
4949 adap->params.bypass = 1;
4952 * Grab some of our basic fundamental operating parameters.
4954 #define FW_PARAM_DEV(param) \
4955 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4956 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4958 #define FW_PARAM_PFVF(param) \
4959 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4960 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
4961 FW_PARAMS_PARAM_Y(0) | \
4962 FW_PARAMS_PARAM_Z(0)
4964 params[0] = FW_PARAM_PFVF(EQ_START);
4965 params[1] = FW_PARAM_PFVF(L2T_START);
4966 params[2] = FW_PARAM_PFVF(L2T_END);
4967 params[3] = FW_PARAM_PFVF(FILTER_START);
4968 params[4] = FW_PARAM_PFVF(FILTER_END);
4969 params[5] = FW_PARAM_PFVF(IQFLINT_START);
4970 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
4973 adap->sge.egr_start = val[0];
4974 adap->l2t_start = val[1];
4975 adap->l2t_end = val[2];
4976 adap->tids.ftid_base = val[3];
4977 adap->tids.nftids = val[4] - val[3] + 1;
4978 adap->sge.ingr_start = val[5];
4980 /* query params related to active filter region */
4981 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4982 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4983 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
4984 /* If the active filter region is non-empty, we enable establishing
4985 * offload connections through firmware work requests.
4987 if ((val[0] != val[1]) && (ret >= 0)) {
4988 adap->flags |= FW_OFLD_CONN;
4989 adap->tids.aftid_base = val[0];
4990 adap->tids.aftid_end = val[1];
4994 * Get device capabilities so we can determine what resources we need
4995 * to manage.
4997 memset(&caps_cmd, 0, sizeof(caps_cmd));
4998 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4999 FW_CMD_REQUEST | FW_CMD_READ);
5000 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5001 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5006 if (caps_cmd.ofldcaps) {
5007 /* query offload-related parameters */
5008 params[0] = FW_PARAM_DEV(NTID);
5009 params[1] = FW_PARAM_PFVF(SERVER_START);
5010 params[2] = FW_PARAM_PFVF(SERVER_END);
5011 params[3] = FW_PARAM_PFVF(TDDP_START);
5012 params[4] = FW_PARAM_PFVF(TDDP_END);
5013 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5014 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5018 adap->tids.ntids = val[0];
5019 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5020 adap->tids.stid_base = val[1];
5021 adap->tids.nstids = val[2] - val[1] + 1;
5023 * Set up the server filter region. Divide the available filter
5024 * region into two parts: regular filters get 1/3rd and server
5025 * filters get 2/3rds. This is only enabled if the FW_OFLD_CONN
5026 * workaround path is enabled.
5027 * 1. Regular filters.
5028 * 2. Server filters: these are special filters used to redirect
5029 * SYN packets to the offload queue.
5031 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5032 adap->tids.sftid_base = adap->tids.ftid_base +
5033 DIV_ROUND_UP(adap->tids.nftids, 3);
5034 adap->tids.nsftids = adap->tids.nftids -
5035 DIV_ROUND_UP(adap->tids.nftids, 3);
5036 adap->tids.nftids = adap->tids.sftid_base -
5037 adap->tids.ftid_base;
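/* Worked example (illustrative): with nftids = 496 the split above gives
 * DIV_ROUND_UP(496, 3) = 166, so sftid_base = ftid_base + 166,
 * nsftids = 496 - 166 = 330 server filters, and nftids = 166 regular
 * filters -- the 1/3 : 2/3 division described in the comment.
 */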
5039 adap->vres.ddp.start = val[3];
5040 adap->vres.ddp.size = val[4] - val[3] + 1;
5041 adap->params.ofldq_wr_cred = val[5];
5043 adap->params.offload = 1;
5045 if (caps_cmd.rdmacaps) {
5046 params[0] = FW_PARAM_PFVF(STAG_START);
5047 params[1] = FW_PARAM_PFVF(STAG_END);
5048 params[2] = FW_PARAM_PFVF(RQ_START);
5049 params[3] = FW_PARAM_PFVF(RQ_END);
5050 params[4] = FW_PARAM_PFVF(PBL_START);
5051 params[5] = FW_PARAM_PFVF(PBL_END);
5052 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5056 adap->vres.stag.start = val[0];
5057 adap->vres.stag.size = val[1] - val[0] + 1;
5058 adap->vres.rq.start = val[2];
5059 adap->vres.rq.size = val[3] - val[2] + 1;
5060 adap->vres.pbl.start = val[4];
5061 adap->vres.pbl.size = val[5] - val[4] + 1;
5063 params[0] = FW_PARAM_PFVF(SQRQ_START);
5064 params[1] = FW_PARAM_PFVF(SQRQ_END);
5065 params[2] = FW_PARAM_PFVF(CQ_START);
5066 params[3] = FW_PARAM_PFVF(CQ_END);
5067 params[4] = FW_PARAM_PFVF(OCQ_START);
5068 params[5] = FW_PARAM_PFVF(OCQ_END);
5069 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
5072 adap->vres.qp.start = val[0];
5073 adap->vres.qp.size = val[1] - val[0] + 1;
5074 adap->vres.cq.start = val[2];
5075 adap->vres.cq.size = val[3] - val[2] + 1;
5076 adap->vres.ocq.start = val[4];
5077 adap->vres.ocq.size = val[5] - val[4] + 1;
5079 if (caps_cmd.iscsicaps) {
5080 params[0] = FW_PARAM_PFVF(ISCSI_START);
5081 params[1] = FW_PARAM_PFVF(ISCSI_END);
5082 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5086 adap->vres.iscsi.start = val[0];
5087 adap->vres.iscsi.size = val[1] - val[0] + 1;
5089 #undef FW_PARAM_PFVF
5093 * These are finalized by FW initialization, load their values now.
5095 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5096 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
5097 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
5098 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5099 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5100 adap->params.b_wnd);
	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (j = 0; j < NCHAN; j++)
		adap->params.tp.tx_modq[j] = j;

	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->filter_mode, 1,
			 TP_VLAN_PRI_MAP);

	adap->flags |= FW_OK;
	return 0;
	/*
	 * Something bad happened.  If a command timed out or failed with EIO,
	 * the FW is not operating within its spec or something catastrophic
	 * happened to the HW/FW; stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	pci_disable_device(pdev);
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}
static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};
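/*
 * Note on ordering: the PCI error-recovery core calls these hooks as
 * ->error_detected() (quiesce, choose DISCONNECT vs. NEED_RESET), then
 * ->slot_reset() after the link/slot has been reset, and finally
 * ->resume() once traffic may restart.  eeh_err_detected() therefore
 * only detaches and quiesces the ports, while eeh_slot_reset() rebuilds
 * enough adapter state (firmware hello, virtual interfaces, MTU table)
 * for eeh_resume() to bring the ports back up.
 */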
static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
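/*
 * Example: init_rspq(&q, 6, 0, 512, 64) programs a 512-entry response
 * queue with 64-byte entries using SGE holdoff timer index 6; because
 * pkt_cnt_idx 0 is a valid counter index (< SGE_NCOUNTERS), interrupt
 * packet-count thresholding is enabled as well.  Passing a pkt_cnt_idx
 * >= SGE_NCOUNTERS disables the packet-count trigger.
 */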
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
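/*
 * Worked example (assuming MAX_ETH_QSETS = 32; all values illustrative):
 * a 2x10G adapter on an 8-core machine gives n10g = 2, so
 * q10g = (32 - 0) / 2 = 16, which is then capped by
 * netif_get_num_default_rss_queues() (8 on such a machine), leaving
 * each 10G port with 8 queue sets and s->max_ethqsets = 16.
 */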
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
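/*
 * Example (illustrative): four ports holding {8, 8, 1, 1} queue sets
 * (18 total) reduced to n = 12 lose one queue set per eligible port per
 * round-robin pass, ending at {5, 5, 1, 1}; the second loop then
 * recomputes each port's first_qset so the ranges stay contiguous.
 */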
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;  /* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}
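/*
 * ethtool_rxfh_indir_default(j, n) is simply j % n, so each port's RSS
 * indirection table ends up striping flows evenly across its nqsets
 * queues (0, 1, ..., nqsets - 1, 0, 1, ...).
 */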
static void print_port_info(const struct net_device *dev)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
	};

	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (bufp != buf)
		--bufp;	/* drop the trailing '/' */
	sprintf(bufp, "BASE-%s", base[pi->port_type]);

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, E/C: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.ec);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
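/*
 * PCI_EXP_DEVCTL_RELAX_EN sets the "Enable Relaxed Ordering" bit in the
 * PCIe Device Control register, permitting the adapter to tag its DMA
 * writes with the relaxed-ordering attribute for better throughput on
 * hosts that honor it.
 */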
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;
	if (!is_t4(adapter->chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B.  Write coalescing is enabled only
		 * if the SGE_EGRESS_QUEUES_PER_PAGE_PF value for this PF's
		 * queues is no larger than the number of segments that fit
		 * in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}
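	/*
	 * Worked example: with 4KB pages, num_seg = 4096 / 128 = 32, so the
	 * check above rejects any firmware configuration that packs more
	 * than 32 egress queues into a page -- presumably because each
	 * queue could then no longer own a whole 128B write-coalescing
	 * segment of the write-combined BAR2 mapping.
	 */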
	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}
	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}
	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

out_free_dev:
	free_some_resources(adapter);
out_unmap_bar:
	if (!is_t4(adapter->chip))
		iounmap(adapter->bar2);
out_unmap_bar0:
	iounmap(adapter->regs);
out_free_adapter:
	kfree(adapter);
out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters still present.
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->chip))
			iounmap(adapter->bar2);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);