/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * free resources.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
        /*
         * Physical Function provisioning constants.
         */
        PFRES_NVI = 4,                  /* # of Virtual Interfaces */
        PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
        PFRES_NIQFLINT = 128,           /* # of ingress Qs w/ Free List(s)/intr */
        PFRES_NEQ = 256,                /* # of egress queues */
        PFRES_NIQ = 0,                  /* # of ingress queues */
        PFRES_TC = 0,                   /* PCI-E traffic class */
        PFRES_NEXACTF = 128,            /* # of exact MPS filters */

        PFRES_R_CAPS = FW_CMD_CAP_PF,
        PFRES_WX_CAPS = FW_CMD_CAP_PF,

        /*
         * Virtual Function provisioning constants.  We need two extra Ingress
         * Queues with Interrupt capability to serve as the VF's Firmware
         * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
         * neither will have Free Lists associated with them.  For each
         * Ethernet/Control Egress Queue and for each Free List, we need an
         * Egress Context.
         */
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs w/ Free List(s)/intr */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF
};

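/*
 * Worked example (illustration only): with VFRES_NQSETS = 2, each VF gets
 * VFRES_NIQFLINT = 2 + 2 = 4 interrupt-capable ingress queues (one per
 * Queue Set plus the Firmware Event Queue and the Forwarded Interrupt
 * Queue) and VFRES_NEQ = 2 * 2 = 4 egress contexts (one per Ethernet/
 * Control Egress Queue and one per Free List).
 */
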
/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PFs access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask.
                 * Otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec - 1));

                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
}

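/*
 * Illustration only, not part of the driver: portvec & (portvec - 1)
 * clears the lowest set bit, so XOR-ing that back against portvec leaves
 * exactly that bit.  E.g. for portvec = 0xC (ports 2 and 3 active):
 *
 *      portvec - 1             = 0xB
 *      portvec & (portvec - 1) = 0x8
 *      portvec ^ 0x8           = 0x4   (mask of the lowest active port)
 *
 * A minimal self-contained sketch of the same walk:
 */
#if 0   /* example only, never compiled */
static unsigned int nth_active_port_mask(unsigned int portvec,
                                         unsigned int portn)
{
        while (portvec) {
                unsigned int pmask = portvec ^ (portvec & (portvec - 1));

                if (portn == 0)
                        return pmask;   /* mask of the portn'th active port */
                portn--;
                portvec &= ~pmask;      /* drop that port, keep walking */
        }
        return 0;
}
#endif
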
enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
        CH_DEVICE(0x5001, 5),
        CH_DEVICE(0x5002, 5),
        CH_DEVICE(0x5003, 5),
        CH_DEVICE(0x5004, 5),
        CH_DEVICE(0x5005, 5),
        CH_DEVICE(0x5006, 5),
        CH_DEVICE(0x5007, 5),
        CH_DEVICE(0x5008, 5),
        CH_DEVICE(0x5009, 5),
        CH_DEVICE(0x500A, 5),
        CH_DEVICE(0x500B, 5),
        CH_DEVICE(0x500C, 5),
        CH_DEVICE(0x500D, 5),
        CH_DEVICE(0x500E, 5),
        CH_DEVICE(0x500F, 5),
        CH_DEVICE(0x5010, 5),
        CH_DEVICE(0x5011, 5),
        CH_DEVICE(0x5012, 5),
        CH_DEVICE(0x5013, 5),
        CH_DEVICE(0x5401, 5),
        CH_DEVICE(0x5402, 5),
        CH_DEVICE(0x5403, 5),
        CH_DEVICE(0x5404, 5),
        CH_DEVICE(0x5405, 5),
        CH_DEVICE(0x5406, 5),
        CH_DEVICE(0x5407, 5),
        CH_DEVICE(0x5408, 5),
        CH_DEVICE(0x5409, 5),
        CH_DEVICE(0x540A, 5),
        CH_DEVICE(0x540B, 5),
        CH_DEVICE(0x540C, 5),
        CH_DEVICE(0x540D, 5),
        CH_DEVICE(0x540E, 5),
        CH_DEVICE(0x540F, 5),
        CH_DEVICE(0x5410, 5),
        CH_DEVICE(0x5411, 5),
        CH_DEVICE(0x5412, 5),
        CH_DEVICE(0x5413, 5),
        { 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

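/*
 * Usage note (illustration only): these module parameters can be given at
 * load time, e.g.
 *
 *      modprobe cxgb4 msi=1 dflt_msg_enable=0x20
 *
 * and, since they are mode 0644, inspected or changed afterwards through
 * /sys/module/cxgb4/parameters/<name>.
 */
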
/*
 * Queue interrupt hold-off timer values.  Queues default to the first of
 * these upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

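/*
 * Worked example (illustration only): an Ethernet header is 14 bytes, so
 * with rx_dma_offset = 2 the IP header starts at byte 2 + 14 = 16 of the
 * buffer, a 4-byte aligned offset.  With rx_dma_offset = 0 it would start
 * at byte 14, which is only 2-byte aligned.
 */
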
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Since T5 has more PFs, we use the NUM_OF_PF_WITH_SRIOV_T5 macro as the
 * num_vf array size.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV_T5];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf,
                 "number of VFs for each of PFs 0-3 for T4 and PFs 0-7 for T5");
#endif

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
        TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
        TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
        TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");

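/*
 * Worked example (illustration only): the default tuple above costs
 * 1 (IP Fragment) + 3 (MPS Match Type) + 8 (IP Protocol) +
 * 17 ([Inner] VLAN) + 3 (Port) + 1 (FCoE) = 33 bits, which fits the
 * 36-bit budget; adding Ether Type (16 bits) on top would not.
 */
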
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10;  /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

static struct workqueue_struct *workq;

/*
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter have loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
           (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = GET_TCB_COOKIE(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */
        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
        return 0;
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}

/**
 *      write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if the flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        u32 vers, exp_major;
        const struct fw_hdr *hdr;
        const struct firmware *fw;
        struct device *dev = adap->pdev_dev;
        char *fw_file_name;

        switch (CHELSIO_CHIP_VERSION(adap->chip)) {
        case CHELSIO_T4:
                fw_file_name = FW_FNAME;
                exp_major = FW_VERSION_MAJOR;
                break;
        case CHELSIO_T5:
                fw_file_name = FW5_FNAME;
                exp_major = FW_VERSION_MAJOR_T5;
                break;
        default:
                dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
                return -EINVAL;
        }

        ret = request_firmware(&fw, fw_file_name, dev);
        if (ret < 0) {
                dev_err(dev, "unable to load firmware image %s, error %d\n",
                        fw_file_name, ret);
                return ret;
        }

        hdr = (const struct fw_hdr *)fw->data;
        vers = ntohl(hdr->fw_ver);
        if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
                ret = -EINVAL;          /* wrong major version, won't do */
                goto out;
        }

        /*
         * If the flash FW is unusable or we found something newer, load it.
         */
        if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
            vers > adap->params.fw_vers) {
                dev_info(dev, "upgrading firmware ...\n");
                ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
                                    false);
                if (!ret)
                        dev_info(dev,
                                 "firmware upgraded to version %pI4 from %s\n",
                                 &hdr->fw_ver, fw_file_name);
                else
                        dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
        } else {
                /*
                 * Tell our caller that we didn't upgrade the firmware.
                 */
                ret = -EINVAL;
        }

out:    release_firmware(fw);
        return ret;
}

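/*
 * Illustrative sketch (assumption: the 32-bit firmware version word packs
 * major/minor/micro/build one byte each, which is also why "%pI4" above
 * can print &hdr->fw_ver as a dotted quad):
 */
#if 0   /* example only, never compiled */
static void fw_vers_example(u32 vers)   /* e.g. vers = 0x01060402 */
{
        unsigned int major = (vers >> 24) & 0xff;       /* 1 */
        unsigned int minor = (vers >> 16) & 0xff;       /* 6 */
        unsigned int micro = (vers >>  8) & 0xff;       /* 4 */
        unsigned int build = vers & 0xff;               /* 2 */

        pr_info("firmware %u.%u.%u.%u\n", major, minor, micro, build);
}
#endif
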
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}

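/*
 * Usage sketch (illustration only; "nfilters" is a hypothetical count):
 * t4_alloc_mem()/t4_free_mem() pair up for tables that may be too large
 * for kmalloc(), such as a big filter table shadow.
 */
#if 0   /* example only, never compiled */
        struct filter_entry *tab;

        tab = t4_alloc_mem(nfilters * sizeof(*tab));    /* returns zeroed memory */
        if (!tab)
                return -ENOMEM;
        /* use tab, then release it */
        t4_free_mem(tab);       /* handles both kmalloc and vmalloc memory */
#endif
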
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter->l2t);
                if (f->l2t == NULL)
                        return -EAGAIN;
                if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
                                         f->fs.eport, f->fs.dmac)) {
                        cxgb4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(V_FW_FILTER_WR_TID(ftid) |
                      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                      V_FW_FILTER_WR_NOREPLY(0) |
                      V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                      V_FW_FILTER_WR_PRIO(f->fs.prio) |
                      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(V_FW_FILTER_WR_RX_CHAN(0) |
                      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}

static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxBroadcastFrames ",
        "TxMulticastFrames ",
        "TxFrames128To255 ",
        "TxFrames256To511 ",
        "TxFrames512To1023 ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax ",
        "RxBroadcastFrames ",
        "RxMulticastFrames ",
        "RxFrames128To255 ",
        "RxFrames256To511 ",
        "RxFrames512To1023 ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax ",
        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc ",
        "RxBG1FramesTrunc ",
        "RxBG2FramesTrunc ",
        "RxBG3FramesTrunc ",
        "WriteCoalSuccess ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);

        if (is_t4(adap->chip))
                return T4_REGMAP_SIZE;
        else
                return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));

        if (adapter->params.fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 val1, val2;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
        data += sizeof(struct queue_port_stats) / sizeof(u64);
        if (!is_t4(adapter->chip)) {
                t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
                val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
                val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
                *data = val1 - val2;
        } else {
                memset(data, 0, 2 * sizeof(u64));
        }
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return CHELSIO_CHIP_VERSION(ap->chip) |
                (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}

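/*
 * Worked example (illustration only, assuming T4's chip version code is 4):
 * a T4 rev 1 part with register dump version 1 yields
 *
 *      4 | (1 << 10) | (1 << 16) = 0x10404
 *
 * i.e. chip version in bits 0..9, revision in bits 10..15 and dump
 * version in bits 16..23.
 */
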
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
                           unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        static const unsigned int t4_reg_ranges[] = {
                /* ... T4 register address ranges elided ... */
        };

        static const unsigned int t5_reg_ranges[] = {
                /* ... T5 register address ranges elided ... */
        };

        int i;
        struct adapter *ap = netdev2adap(dev);
        static const unsigned int *reg_ranges;
        int arr_size = 0, buf_size = 0;

        if (is_t4(ap->chip)) {
                reg_ranges = &t4_reg_ranges[0];
                arr_size = ARRAY_SIZE(t4_reg_ranges);
                buf_size = T4_REGMAP_SIZE;
        } else {
                reg_ranges = &t5_reg_ranges[0];
                arr_size = ARRAY_SIZE(t5_reg_ranges);
                buf_size = T5_REGMAP_SIZE;
        }

        regs->version = mk_adap_vers(ap);

        memset(buf, 0, buf_size);
        for (i = 0; i < arr_size; i += 2)
                reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
        return 0;
}

static int identify_port(struct net_device *dev,
                         enum ethtool_phys_id_state state)
{
        unsigned int val;
        struct adapter *adap = netdev2adap(dev);

        if (state == ETHTOOL_ID_ACTIVE)
                val = 0xffff;
        else if (state == ETHTOOL_ID_INACTIVE)
                val = 0;
        else
                return -EINVAL;

        return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}

static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
        unsigned int v = 0;

        if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
            type == FW_PORT_TYPE_BT_XAUI) {
                v |= SUPPORTED_TP;
                if (caps & FW_PORT_CAP_SPEED_100M)
                        v |= SUPPORTED_100baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseT_Full;
        } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
                v |= SUPPORTED_Backplane;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseKX_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseKX4_Full;
        } else if (type == FW_PORT_TYPE_KR)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
        else if (type == FW_PORT_TYPE_BP_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
        else if (type == FW_PORT_TYPE_BP4_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
                     SUPPORTED_10000baseKX4_Full;
        else if (type == FW_PORT_TYPE_FIBER_XFI ||
                 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
                v |= SUPPORTED_FIBRE;

        if (caps & FW_PORT_CAP_ANEG)
                v |= SUPPORTED_Autoneg;
        return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
        unsigned int v = 0;

        if (caps & ADVERTISED_100baseT_Full)
                v |= FW_PORT_CAP_SPEED_100M;
        if (caps & ADVERTISED_1000baseT_Full)
                v |= FW_PORT_CAP_SPEED_1G;
        if (caps & ADVERTISED_10000baseT_Full)
                v |= FW_PORT_CAP_SPEED_10G;
        return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        const struct port_info *p = netdev_priv(dev);

        if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
            p->port_type == FW_PORT_TYPE_BT_XFI ||
            p->port_type == FW_PORT_TYPE_BT_XAUI)
                cmd->port = PORT_TP;
        else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
                 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
                cmd->port = PORT_FIBRE;
        else if (p->port_type == FW_PORT_TYPE_SFP) {
                if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
                    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
                        cmd->port = PORT_DA;
                else
                        cmd->port = PORT_FIBRE;
        } else
                cmd->port = PORT_OTHER;

        if (p->mdio_addr >= 0) {
                cmd->phy_address = p->mdio_addr;
                cmd->transceiver = XCVR_EXTERNAL;
                cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
                        MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
        } else {
                cmd->phy_address = 0;  /* not really, but no better option */
                cmd->transceiver = XCVR_INTERNAL;
                cmd->mdio_support = 0;
        }

        cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
        cmd->advertising = from_fw_linkcaps(p->port_type,
                                            p->link_cfg.advertising);
        ethtool_cmd_speed_set(cmd,
                              netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
        cmd->duplex = DUPLEX_FULL;
        cmd->autoneg = p->link_cfg.autoneg;
        return 0;
}

static unsigned int speed_to_caps(int speed)
{
        if (speed == SPEED_100)
                return FW_PORT_CAP_SPEED_100M;
        if (speed == SPEED_1000)
                return FW_PORT_CAP_SPEED_1G;
        if (speed == SPEED_10000)
                return FW_PORT_CAP_SPEED_10G;
        return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        unsigned int cap;
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;
        u32 speed = ethtool_cmd_speed(cmd);

        if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
                return -EINVAL;

        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
                /*
                 * PHY offers a single speed.  See if that's what's
                 * being requested.
                 */
                if (cmd->autoneg == AUTONEG_DISABLE &&
                    (lc->supported & speed_to_caps(speed)))
                        return 0;
                return -EINVAL;
        }

        if (cmd->autoneg == AUTONEG_DISABLE) {
                cap = speed_to_caps(speed);

                if (!(lc->supported & cap) || (speed == SPEED_1000) ||
                    (speed == SPEED_10000))
                        return -EINVAL;
                lc->requested_speed = cap;
                lc->advertising = 0;
        } else {
                cap = to_fw_linkcaps(cmd->advertising);
                if (!(lc->supported & cap))
                        return -EINVAL;
                lc->requested_speed = 0;
                lc->advertising = cap | FW_PORT_CAP_ANEG;
        }
        lc->autoneg = cmd->autoneg;

        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & FW_PORT_CAP_ANEG)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct sge *s = &pi->adapter->sge;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
        e->rx_jumbo_max_pending = 0;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
        e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
        e->rx_jumbo_pending = 0;
        e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < pi->nqsets; ++i) {
                s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
                s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
                s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
        }
        return 0;
}

static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

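/*
 * Worked example (illustration only): with timer_val = {5, 10, 20, 50,
 * 100, 200}, closest_timer(s, 15) computes |15 - v| for each entry; 10
 * and 20 tie at distance 5 and the strict '<' keeps the first hit, so
 * index 1 (10 us) is returned.
 */
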
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
                               const struct sge_rspq *q)
{
        unsigned int idx = q->intr_params >> 1;

        return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}

/**
 *      set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *      @adap: the adapter
 *      @q: the Rx queue
 *      @us: the hold-off time in us, or 0 to disable timer
 *      @cnt: the hold-off packet count, or 0 to disable counter
 *
 *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *      one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
                               unsigned int us, unsigned int cnt)
{
        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ(q->cntxt_id);
                        err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
                                            &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
        return 0;
}

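/*
 * Illustration of the encoding above (an assumption inferred from
 * qtimer_val() reading "intr_params >> 1"): bit 0 of intr_params is the
 * packet-counter enable (QINTR_CNT_EN) and the upper bits hold the
 * hold-off timer index, so timer index 3 with the counter enabled is
 * (3 << 1) | 1 = 7.
 */
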
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_rspq *q;
        int i;
        int r = 0;

        for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
                q = &adap->sge.ethrxq[i].rspq;
                r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
                        c->rx_max_coalesced_frames);
                if (r) {
                        dev_err(&dev->dev, "failed to set coalesce %d\n", r);
                        break;
                }
        }
        return r;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;
        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

        c->rx_coalesce_usecs = qtimer_val(adap, rq);
        c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
                adap->sge.counter_val[rq->pktcnt_idx] : 0;
        return 0;
}

/**
 *      eeprom_ptov - translate a physical EEPROM address to virtual
 *      @phys_addr: the physical EEPROM address
 *      @fn: the PCI function number
 *      @sz: size of function-specific area
 *
 *      Translate a physical EEPROM address to virtual.  The first 1K is
 *      accessed through virtual addresses starting at 31K, the rest is
 *      accessed through virtual addresses starting at 0.
 *
 *      The mapping is as follows:
 *      [0..1K) -> [31K..32K)
 *      [1K..1K+A) -> [31K-A..31K)
 *      [1K+A..ES) -> [0..ES-A-1K)
 *
 *      where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
        fn *= sz;
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);
        if (phys_addr < 1024 + fn)
                return 31744 - fn + phys_addr - 1024;
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024 - fn;
        return -EINVAL;
}

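/*
 * Worked example (illustration only, assuming EEPROMPFSIZE is 1K): with
 * fn = 1, A = 1K, so
 *
 *      eeprom_ptov(0)    = 31744       (31K, first physical word)
 *      eeprom_ptov(1024) = 30720       (31K - A, start of PF1's area)
 *      eeprom_ptov(2048) = 0           (start of the shared region)
 */
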
/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
        return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
        return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i, err = 0;
        struct adapter *adapter = netdev2adap(dev);

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = netdev2adap(dev);

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (adapter->fn > 0) {
                u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

                if (aligned_offset < start ||
                    aligned_offset + aligned_len > start + EEPROMPFSIZE)
                        return -EPERM;
        }

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                /*
                 * RMW possibly needed for first or last words.
                 */
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = eeprom_rd_phys(adapter,
                                             aligned_offset + aligned_len - 4,
                                             (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t4_seeprom_wp(adapter, false);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = eeprom_wr_phys(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t4_seeprom_wp(adapter, true);
out:
        if (buf != data)
                kfree(buf);
        return err;
}

static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
        int ret;
        const struct firmware *fw;
        struct adapter *adap = netdev2adap(netdev);

        ef->data[sizeof(ef->data) - 1] = '\0';
        ret = request_firmware(&fw, ef->data, adap->pdev_dev);
        if (ret < 0)
                return ret;

        ret = t4_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);
        if (!ret)
                dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
        return ret;
}

#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = WAKE_BCAST | WAKE_MAGIC;
        wol->wolopts = netdev2adap(dev)->wol;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        int err = 0;
        struct port_info *pi = netdev_priv(dev);

        if (wol->wolopts & ~WOL_SUPPORTED)
                return -EINVAL;
        t4_wol_magic_enable(pi->adapter, pi->tx_chan,
                            (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
        if (wol->wolopts & WAKE_BCAST) {
                err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
                                        ~0ULL, 0, false);
                if (!err)
                        err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
                                                ~6ULL, ~0ULL, BCAST_CRC, true);
        } else
                t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
        return err;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
        const struct port_info *pi = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;
        int err;

        if (!(changed & NETIF_F_HW_VLAN_RX))
                return 0;

        err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
                            -1, -1, -1,
                            !!(features & NETIF_F_HW_VLAN_RX), true);
        if (unlikely(err))
                dev->features = features ^ NETIF_F_HW_VLAN_RX;
        return err;
}

static u32 get_rss_table_size(struct net_device *dev)
{
        const struct port_info *pi = netdev_priv(dev);

        return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p)
{
        const struct port_info *pi = netdev_priv(dev);
        unsigned int n = pi->rss_size;

        while (n--)
                p[n] = pi->rss[n];
        return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p)
{
        unsigned int i;
        struct port_info *pi = netdev_priv(dev);

        for (i = 0; i < pi->rss_size; i++)
                pi->rss[i] = p[i];
        if (pi->adapter->flags & FULL_INIT_DONE)
                return write_rss(pi, pi->rss);
        return 0;
}

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                     u32 *rules)
{
        const struct port_info *pi = netdev_priv(dev);

        switch (info->cmd) {
        case ETHTOOL_GRXFH: {
                unsigned int v = pi->rss_mode;

                info->data = 0;
                switch (info->flow_type) {
                case TCP_V4_FLOW:
                        if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
                                info->data = RXH_IP_SRC | RXH_IP_DST |
                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
                        else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
                                info->data = RXH_IP_SRC | RXH_IP_DST;
                        break;
                case UDP_V4_FLOW:
                        if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
                            (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
                                info->data = RXH_IP_SRC | RXH_IP_DST |
                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
                        else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
                                info->data = RXH_IP_SRC | RXH_IP_DST;
                        break;
                case SCTP_V4_FLOW:
                case AH_ESP_V4_FLOW:
                case IPV4_FLOW:
                        if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
                                info->data = RXH_IP_SRC | RXH_IP_DST;
                        break;
                case TCP_V6_FLOW:
                        if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
                                info->data = RXH_IP_SRC | RXH_IP_DST |
                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
                        else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
                                info->data = RXH_IP_SRC | RXH_IP_DST;
                        break;
                case UDP_V6_FLOW:
                        if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
                            (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
                                info->data = RXH_IP_SRC | RXH_IP_DST |
                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
                        else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
                                info->data = RXH_IP_SRC | RXH_IP_DST;
                        break;
                case SCTP_V6_FLOW:
                case AH_ESP_V6_FLOW:
                case IPV6_FLOW:
                        if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
                                info->data = RXH_IP_SRC | RXH_IP_DST;
                        break;
                }
                return 0;
        }
        case ETHTOOL_GRXRINGS:
                info->data = pi->nqsets;
                return 0;
        }
        return -EOPNOTSUPP;
}

static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings      = get_settings,
        .set_settings      = set_settings,
        .get_drvinfo       = get_drvinfo,
        .get_msglevel      = get_msglevel,
        .set_msglevel      = set_msglevel,
        .get_ringparam     = get_sge_param,
        .set_ringparam     = set_sge_param,
        .get_coalesce      = get_coalesce,
        .set_coalesce      = set_coalesce,
        .get_eeprom_len    = get_eeprom_len,
        .get_eeprom        = get_eeprom,
        .set_eeprom        = set_eeprom,
        .get_pauseparam    = get_pauseparam,
        .set_pauseparam    = set_pauseparam,
        .get_link          = ethtool_op_get_link,
        .get_strings       = get_strings,
        .set_phys_id       = identify_port,
        .nway_reset        = restart_autoneg,
        .get_sset_count    = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len      = get_regs_len,
        .get_regs          = get_regs,
        .get_wol           = get_wol,
        .set_wol           = set_wol,
        .get_rxnfc         = get_rxnfc,
        .get_rxfh_indir_size = get_rss_table_size,
        .get_rxfh_indir    = get_rss_table,
        .set_rxfh_indir    = set_rss_table,
        .flash_device      = set_flash,
};

2864 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2868 loff_t avail = file_inode(file)->i_size;
2869 unsigned int mem = (uintptr_t)file->private_data & 3;
2870 struct adapter *adap = file->private_data - mem;
2876 if (count > avail - pos)
2877 count = avail - pos;
2884 if ((mem == MEM_MC) || (mem == MEM_MC1))
2885 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2887 ret = t4_edc_read(adap, mem, pos, data, NULL);
2891 ofst = pos % sizeof(data);
2892 len = min(count, sizeof(data) - ofst);
2893 if (copy_to_user(buf, (u8 *)data + ofst, len))
2900 count = pos - *ppos;
2905 static const struct file_operations mem_debugfs_fops = {
2906 .owner = THIS_MODULE,
2907 .open = simple_open,
2909 .llseek = default_llseek,
2912 static void add_debugfs_mem(struct adapter *adap, const char *name,
2913 unsigned int idx, unsigned int size_mb)
2917 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2918 (void *)adap + idx, &mem_debugfs_fops);
2919 if (de && de->d_inode)
2920 de->d_inode->i_size = size_mb << 20;
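/*
 * How the memory index travels to mem_read(): "adap" is at least 4-byte
 * aligned, so the low two bits of (void *)adap + idx are known to be zero
 * and can carry idx (MEM_EDC0, MEM_EDC1, MEM_MC, ...). mem_read() undoes
 * the tagging with:
 *
 *	unsigned int mem = (uintptr_t)file->private_data & 3;
 *	struct adapter *adap = file->private_data - mem;
 *
 * This avoids allocating a separate per-file context just to pair an
 * adapter with a memory index.
 */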
2923 static int setup_debugfs(struct adapter *adap)
2928 if (IS_ERR_OR_NULL(adap->debugfs_root))
2931 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2932 if (i & EDRAM0_ENABLE) {
2933 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2934 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2936 if (i & EDRAM1_ENABLE) {
2937 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2938 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2940 if (is_t4(adap->chip)) {
2941 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2942 if (i & EXT_MEM_ENABLE)
2943 add_debugfs_mem(adap, "mc", MEM_MC,
2944 EXT_MEM_SIZE_GET(size));
2946 if (i & EXT_MEM_ENABLE) {
2947 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2948 add_debugfs_mem(adap, "mc0", MEM_MC0,
2949 EXT_MEM_SIZE_GET(size));
2951 if (i & EXT_MEM1_ENABLE) {
2952 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2953 add_debugfs_mem(adap, "mc1", MEM_MC1,
2954 EXT_MEM_SIZE_GET(size));
2958 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2964 * upper-layer driver support
2968 * Allocate an active-open TID and set it to the supplied value.
2970 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2974 spin_lock_bh(&t->atid_lock);
2976 union aopen_entry *p = t->afree;
2978 atid = (p - t->atid_tab) + t->atid_base;
2983 spin_unlock_bh(&t->atid_lock);
2986 EXPORT_SYMBOL(cxgb4_alloc_atid);
2989 * Release an active-open TID.
2991 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2993 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2995 spin_lock_bh(&t->atid_lock);
2999 spin_unlock_bh(&t->atid_lock);
3001 EXPORT_SYMBOL(cxgb4_free_atid);
3004 * Allocate a server TID and set it to the supplied value.
3006 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3010 spin_lock_bh(&t->stid_lock);
3011 if (family == PF_INET) {
3012 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3013 if (stid < t->nstids)
3014 __set_bit(stid, t->stid_bmap);
3018 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3023 t->stid_tab[stid].data = data;
3024 stid += t->stid_base;
3027 spin_unlock_bh(&t->stid_lock);
3030 EXPORT_SYMBOL(cxgb4_alloc_stid);
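/*
 * Allocation granularity differs by address family: a PF_INET server
 * takes a single stid (one bitmap bit), while the non-IPv4 path uses
 * bitmap_find_free_region(..., 2), i.e. an order-2 region of four
 * consecutive stids. That is why cxgb4_free_stid() below releases either
 * one bit or a whole region depending on @family.
 */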
3032 /* Allocate a server filter TID and set it to the supplied value.
3034 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3038 spin_lock_bh(&t->stid_lock);
3039 if (family == PF_INET) {
3040 stid = find_next_zero_bit(t->stid_bmap,
3041 t->nstids + t->nsftids, t->nstids);
3042 if (stid < (t->nstids + t->nsftids))
3043 __set_bit(stid, t->stid_bmap);
3050 t->stid_tab[stid].data = data;
3051 stid += t->stid_base;
3054 spin_unlock_bh(&t->stid_lock);
3057 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3059 /* Release a server TID.
3061 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3063 stid -= t->stid_base;
3064 spin_lock_bh(&t->stid_lock);
3065 if (family == PF_INET)
3066 __clear_bit(stid, t->stid_bmap);
3068 bitmap_release_region(t->stid_bmap, stid, 2);
3069 t->stid_tab[stid].data = NULL;
3071 spin_unlock_bh(&t->stid_lock);
3073 EXPORT_SYMBOL(cxgb4_free_stid);
3076 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3078 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3081 struct cpl_tid_release *req;
3083 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3084 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3085 INIT_TP_WR(req, tid);
3086 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3090 * Queue a TID release request and if necessary schedule a work queue to
3091 * process it.
3093 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3096 void **p = &t->tid_tab[tid];
3097 struct adapter *adap = container_of(t, struct adapter, tids);
3099 spin_lock_bh(&adap->tid_release_lock);
3100 *p = adap->tid_release_head;
3101 /* Low 2 bits encode the Tx channel number */
3102 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3103 if (!adap->tid_release_task_busy) {
3104 adap->tid_release_task_busy = true;
3105 queue_work(workq, &adap->tid_release_task);
3107 spin_unlock_bh(&adap->tid_release_lock);
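/*
 * The pending-release list costs no extra memory: each dead TID's slot
 * in tid_tab[] is reused as the "next" link of a singly linked list
 * rooted at adap->tid_release_head, and because those slots are pointer
 * aligned the low two bits of each link are free to encode the Tx
 * channel for the eventual CPL_TID_RELEASE (recovered with "& 3" in
 * process_tid_release_list() below).
 */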
3111 * Process the list of pending TID release requests.
3113 static void process_tid_release_list(struct work_struct *work)
3115 struct sk_buff *skb;
3116 struct adapter *adap;
3118 adap = container_of(work, struct adapter, tid_release_task);
3120 spin_lock_bh(&adap->tid_release_lock);
3121 while (adap->tid_release_head) {
3122 void **p = adap->tid_release_head;
3123 unsigned int chan = (uintptr_t)p & 3;
3124 p = (void *)p - chan;
3126 adap->tid_release_head = *p;
3128 spin_unlock_bh(&adap->tid_release_lock);
3130 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3132 schedule_timeout_uninterruptible(1);
3134 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3135 t4_ofld_send(adap, skb);
3136 spin_lock_bh(&adap->tid_release_lock);
3138 adap->tid_release_task_busy = false;
3139 spin_unlock_bh(&adap->tid_release_lock);
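/*
 * Note the lock choreography above: tid_release_lock is dropped around
 * the skb allocation and t4_ofld_send() so the work item never sleeps or
 * transmits with the lock held, and the alloc_skb() retry loop with
 * schedule_timeout_uninterruptible(1) guarantees forward progress even
 * under memory pressure, since each iteration needs only one small skb.
 */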
3143 * Release a TID and inform HW. If we are unable to allocate the release
3144 * message we defer to a work queue.
3146 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3149 struct sk_buff *skb;
3150 struct adapter *adap = container_of(t, struct adapter, tids);
3152 old = t->tid_tab[tid];
3153 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3155 t->tid_tab[tid] = NULL;
3156 mk_tid_release(skb, chan, tid);
3157 t4_ofld_send(adap, skb);
3159 cxgb4_queue_tid_release(t, chan, tid);
3161 atomic_dec(&t->tids_in_use);
3163 EXPORT_SYMBOL(cxgb4_remove_tid);
3166 * Allocate and initialize the TID tables. Returns 0 on success.
3168 static int tid_init(struct tid_info *t)
3171 unsigned int stid_bmap_size;
3172 unsigned int natids = t->natids;
3174 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3175 size = t->ntids * sizeof(*t->tid_tab) +
3176 natids * sizeof(*t->atid_tab) +
3177 t->nstids * sizeof(*t->stid_tab) +
3178 t->nsftids * sizeof(*t->stid_tab) +
3179 stid_bmap_size * sizeof(long) +
3180 t->nftids * sizeof(*t->ftid_tab) +
3181 t->nsftids * sizeof(*t->ftid_tab);
3183 t->tid_tab = t4_alloc_mem(size);
3187 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3188 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3189 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3190 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3191 spin_lock_init(&t->stid_lock);
3192 spin_lock_init(&t->atid_lock);
3194 t->stids_in_use = 0;
3196 t->atids_in_use = 0;
3197 atomic_set(&t->tids_in_use, 0);
3199 /* Setup the free list for atid_tab and clear the stid bitmap. */
3202 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3203 t->afree = t->atid_tab;
3205 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
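/*
 * Layout of the single t4_alloc_mem() block carved up above (sizes in
 * array elements, not bytes):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] |
 *	ftid_tab[nftids + nsftids]
 *
 * One allocation keeps the tables physically adjacent and makes teardown
 * a single free.
 */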
3210 * cxgb4_create_server - create an IP server
3212 * @stid: the server TID
3213 * @sip: local IP address to bind server to
3214 * @sport: the server's TCP port
3215 * @queue: queue to direct messages from this server to
3217 * Create an IP server for the given port and address.
3218 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3220 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3221 __be32 sip, __be16 sport, __be16 vlan,
3225 struct sk_buff *skb;
3226 struct adapter *adap;
3227 struct cpl_pass_open_req *req;
3229 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3233 adap = netdev2adap(dev);
3234 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3236 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3237 req->local_port = sport;
3238 req->peer_port = htons(0);
3239 req->local_ip = sip;
3240 req->peer_ip = htonl(0);
3241 chan = rxq_to_chan(&adap->sge, queue);
3242 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3243 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3244 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3245 return t4_mgmt_tx(adap, skb);
3247 EXPORT_SYMBOL(cxgb4_create_server);
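/*
 * Hypothetical ULD usage (the context and queue names are illustrative
 * only): allocate a server TID, then bind a listener on 0.0.0.0:80 whose
 * SYNs are steered to ingress queue "rspq_id":
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, ctx);
 *	if (stid >= 0)
 *		ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
 *					  htons(80), 0, rspq_id);
 */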
3250 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3251 * @mtus: the HW MTU table
3252 * @mtu: the target MTU
3253 * @idx: index of selected entry in the MTU table
3255 * Returns the index and the value in the HW MTU table that is closest to
3256 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3257 * table, in which case that smallest available value is selected.
3259 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3264 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3270 EXPORT_SYMBOL(cxgb4_best_mtu);
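/*
 * Worked example (illustrative table values): if consecutive entries are
 * ..., 1280, 1488, ..., then @mtu = 1400 stops the scan at 1280, the
 * largest value that does not exceed 1400; an @mtu smaller than mtus[0]
 * never advances the scan, so entry 0 is returned.
 */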
3273 * cxgb4_port_chan - get the HW channel of a port
3274 * @dev: the net device for the port
3276 * Return the HW Tx channel of the given port.
3278 unsigned int cxgb4_port_chan(const struct net_device *dev)
3280 return netdev2pinfo(dev)->tx_chan;
3282 EXPORT_SYMBOL(cxgb4_port_chan);
3284 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3286 struct adapter *adap = netdev2adap(dev);
3287 u32 v1, v2, lp_count, hp_count;
3289 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3290 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3291 if (is_t4(adap->chip)) {
3292 lp_count = G_LP_COUNT(v1);
3293 hp_count = G_HP_COUNT(v1);
3295 lp_count = G_LP_COUNT_T5(v1);
3296 hp_count = G_HP_COUNT_T5(v2);
3298 return lpfifo ? lp_count : hp_count;
3300 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3303 * cxgb4_port_viid - get the VI id of a port
3304 * @dev: the net device for the port
3306 * Return the VI id of the given port.
3308 unsigned int cxgb4_port_viid(const struct net_device *dev)
3310 return netdev2pinfo(dev)->viid;
3312 EXPORT_SYMBOL(cxgb4_port_viid);
3315 * cxgb4_port_idx - get the index of a port
3316 * @dev: the net device for the port
3318 * Return the index of the given port.
3320 unsigned int cxgb4_port_idx(const struct net_device *dev)
3322 return netdev2pinfo(dev)->port_id;
3324 EXPORT_SYMBOL(cxgb4_port_idx);
3326 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3327 struct tp_tcp_stats *v6)
3329 struct adapter *adap = pci_get_drvdata(pdev);
3331 spin_lock(&adap->stats_lock);
3332 t4_tp_get_tcp_stats(adap, v4, v6);
3333 spin_unlock(&adap->stats_lock);
3335 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3337 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3338 const unsigned int *pgsz_order)
3340 struct adapter *adap = netdev2adap(dev);
3342 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3343 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3344 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3345 HPZ3(pgsz_order[3]));
3347 EXPORT_SYMBOL(cxgb4_iscsi_init);
3349 int cxgb4_flush_eq_cache(struct net_device *dev)
3351 struct adapter *adap = netdev2adap(dev);
3354 ret = t4_fwaddrspace_write(adap, adap->mbox,
3355 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3358 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3360 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3362 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3366 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3368 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3369 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3374 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3377 struct adapter *adap = netdev2adap(dev);
3378 u16 hw_pidx, hw_cidx;
3381 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3385 if (pidx != hw_pidx) {
3388 if (pidx >= hw_pidx)
3389 delta = pidx - hw_pidx;
3391 delta = size - hw_pidx + pidx;
3393 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3394 QID(qid) | PIDX(delta));
3399 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
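/*
 * Worked example of the wrap handling above: for a queue of size 1024
 * with hw_pidx = 1000 and a software pidx of 8, the producer has wrapped,
 * so delta = 1024 - 1000 + 8 = 32 descriptors, and a single doorbell
 * write of QID(qid) | PIDX(32) brings the hardware copy back in step.
 */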
3401 static struct pci_driver cxgb4_driver;
3403 static void check_neigh_update(struct neighbour *neigh)
3405 const struct device *parent;
3406 const struct net_device *netdev = neigh->dev;
3408 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3409 netdev = vlan_dev_real_dev(netdev);
3410 parent = netdev->dev.parent;
3411 if (parent && parent->driver == &cxgb4_driver.driver)
3412 t4_l2t_update(dev_get_drvdata(parent), neigh);
3415 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3419 case NETEVENT_NEIGH_UPDATE:
3420 check_neigh_update(data);
3422 case NETEVENT_REDIRECT:
3429 static bool netevent_registered;
3430 static struct notifier_block cxgb4_netevent_nb = {
3431 .notifier_call = netevent_cb
3434 static void drain_db_fifo(struct adapter *adap, int usecs)
3436 u32 v1, v2, lp_count, hp_count;
3439 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3440 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3441 if (is_t4(adap->chip)) {
3442 lp_count = G_LP_COUNT(v1);
3443 hp_count = G_HP_COUNT(v1);
3445 lp_count = G_LP_COUNT_T5(v1);
3446 hp_count = G_HP_COUNT_T5(v2);
3449 if (lp_count == 0 && hp_count == 0)
3451 set_current_state(TASK_UNINTERRUPTIBLE);
3452 schedule_timeout(usecs_to_jiffies(usecs));
3456 static void disable_txq_db(struct sge_txq *q)
3458 spin_lock_irq(&q->db_lock);
3460 spin_unlock_irq(&q->db_lock);
3463 static void enable_txq_db(struct sge_txq *q)
3465 spin_lock_irq(&q->db_lock);
3467 spin_unlock_irq(&q->db_lock);
3470 static void disable_dbs(struct adapter *adap)
3474 for_each_ethrxq(&adap->sge, i)
3475 disable_txq_db(&adap->sge.ethtxq[i].q);
3476 for_each_ofldrxq(&adap->sge, i)
3477 disable_txq_db(&adap->sge.ofldtxq[i].q);
3478 for_each_port(adap, i)
3479 disable_txq_db(&adap->sge.ctrlq[i].q);
3482 static void enable_dbs(struct adapter *adap)
3486 for_each_ethrxq(&adap->sge, i)
3487 enable_txq_db(&adap->sge.ethtxq[i].q);
3488 for_each_ofldrxq(&adap->sge, i)
3489 enable_txq_db(&adap->sge.ofldtxq[i].q);
3490 for_each_port(adap, i)
3491 enable_txq_db(&adap->sge.ctrlq[i].q);
3494 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3496 u16 hw_pidx, hw_cidx;
3499 spin_lock_bh(&q->db_lock);
3500 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3503 if (q->db_pidx != hw_pidx) {
3506 if (q->db_pidx >= hw_pidx)
3507 delta = q->db_pidx - hw_pidx;
3509 delta = q->size - hw_pidx + q->db_pidx;
3511 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3512 QID(q->cntxt_id) | PIDX(delta));
3516 spin_unlock_bh(&q->db_lock);
3518 CH_WARN(adap, "DB drop recovery failed.\n");
3520 static void recover_all_queues(struct adapter *adap)
3524 for_each_ethrxq(&adap->sge, i)
3525 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3526 for_each_ofldrxq(&adap->sge, i)
3527 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3528 for_each_port(adap, i)
3529 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3532 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3534 mutex_lock(&uld_mutex);
3535 if (adap->uld_handle[CXGB4_ULD_RDMA])
3536 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3538 mutex_unlock(&uld_mutex);
3541 static void process_db_full(struct work_struct *work)
3543 struct adapter *adap;
3545 adap = container_of(work, struct adapter, db_full_task);
3547 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3548 drain_db_fifo(adap, dbfifo_drain_delay);
3549 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3550 DBFIFO_HP_INT | DBFIFO_LP_INT,
3551 DBFIFO_HP_INT | DBFIFO_LP_INT);
3552 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3555 static void process_db_drop(struct work_struct *work)
3557 struct adapter *adap;
3559 adap = container_of(work, struct adapter, db_drop_task);
3561 if (is_t4(adap->chip)) {
3563 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3564 drain_db_fifo(adap, 1);
3565 recover_all_queues(adap);
3568 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3569 u32 qid = (dropped_db >> 15) & 0x1ffff;
3570 u16 pidx_inc = dropped_db & 0x1fff;
3572 unsigned short udb_density;
3573 unsigned long qpshift;
3577 dev_warn(adap->pdev_dev,
3578 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3580 (dropped_db >> 14) & 1,
3581 (dropped_db >> 13) & 1,
3584 drain_db_fifo(adap, 1);
3586 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3587 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3588 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3589 qpshift = PAGE_SHIFT - ilog2(udb_density);
3590 udb = qid << qpshift;
3592 page = udb / PAGE_SIZE;
3593 udb += (qid - (page * udb_density)) * 128;
3595 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3597 /* Re-enable BAR2 WC */
3598 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3601 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
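/*
 * Worked example of the BAR2 math above (illustrative numbers, assuming
 * 4KB pages and udb_density = 16 queues per page): qpshift = 12 - 4 = 8,
 * so for qid = 35, udb = 35 << 8 = 8960, page = 8960 / 4096 = 2, and
 * udb += (35 - 2 * 16) * 128 = 384, giving 9344. The kernel-mode doorbell
 * for that queue is then written at adap->bar2 + 9344 + 8.
 */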
3604 void t4_db_full(struct adapter *adap)
3606 if (is_t4(adap->chip)) {
3607 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3608 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3609 queue_work(workq, &adap->db_full_task);
3613 void t4_db_dropped(struct adapter *adap)
3615 if (is_t4(adap->chip))
3616 queue_work(workq, &adap->db_drop_task);
3619 static void uld_attach(struct adapter *adap, unsigned int uld)
3622 struct cxgb4_lld_info lli;
3625 lli.pdev = adap->pdev;
3626 lli.l2t = adap->l2t;
3627 lli.tids = &adap->tids;
3628 lli.ports = adap->port;
3629 lli.vr = &adap->vres;
3630 lli.mtus = adap->params.mtus;
3631 if (uld == CXGB4_ULD_RDMA) {
3632 lli.rxq_ids = adap->sge.rdma_rxq;
3633 lli.nrxq = adap->sge.rdmaqs;
3634 } else if (uld == CXGB4_ULD_ISCSI) {
3635 lli.rxq_ids = adap->sge.ofld_rxq;
3636 lli.nrxq = adap->sge.ofldqsets;
3638 lli.ntxq = adap->sge.ofldqsets;
3639 lli.nchan = adap->params.nports;
3640 lli.nports = adap->params.nports;
3641 lli.wr_cred = adap->params.ofldq_wr_cred;
3642 lli.adapter_type = adap->params.rev;
3643 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3644 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3645 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3647 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3648 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3650 lli.filt_mode = adap->filter_mode;
3651 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3652 for (i = 0; i < NCHAN; i++)
3654 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3655 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3656 lli.fw_vers = adap->params.fw_vers;
3657 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3658 lli.sge_pktshift = adap->sge.pktshift;
3659 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3661 handle = ulds[uld].add(&lli);
3662 if (IS_ERR(handle)) {
3663 dev_warn(adap->pdev_dev,
3664 "could not attach to the %s driver, error %ld\n",
3665 uld_str[uld], PTR_ERR(handle));
3669 adap->uld_handle[uld] = handle;
3671 if (!netevent_registered) {
3672 register_netevent_notifier(&cxgb4_netevent_nb);
3673 netevent_registered = true;
3676 if (adap->flags & FULL_INIT_DONE)
3677 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3680 static void attach_ulds(struct adapter *adap)
3684 mutex_lock(&uld_mutex);
3685 list_add_tail(&adap->list_node, &adapter_list);
3686 for (i = 0; i < CXGB4_ULD_MAX; i++)
3688 uld_attach(adap, i);
3689 mutex_unlock(&uld_mutex);
3692 static void detach_ulds(struct adapter *adap)
3696 mutex_lock(&uld_mutex);
3697 list_del(&adap->list_node);
3698 for (i = 0; i < CXGB4_ULD_MAX; i++)
3699 if (adap->uld_handle[i]) {
3700 ulds[i].state_change(adap->uld_handle[i],
3701 CXGB4_STATE_DETACH);
3702 adap->uld_handle[i] = NULL;
3704 if (netevent_registered && list_empty(&adapter_list)) {
3705 unregister_netevent_notifier(&cxgb4_netevent_nb);
3706 netevent_registered = false;
3708 mutex_unlock(&uld_mutex);
3711 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3715 mutex_lock(&uld_mutex);
3716 for (i = 0; i < CXGB4_ULD_MAX; i++)
3717 if (adap->uld_handle[i])
3718 ulds[i].state_change(adap->uld_handle[i], new_state);
3719 mutex_unlock(&uld_mutex);
3723 * cxgb4_register_uld - register an upper-layer driver
3724 * @type: the ULD type
3725 * @p: the ULD methods
3727 * Registers an upper-layer driver with this driver and notifies the ULD
3728 * about any presently available devices that support its type. Returns
3729 * %-EBUSY if a ULD of the same type is already registered.
3731 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3734 struct adapter *adap;
3736 if (type >= CXGB4_ULD_MAX)
3738 mutex_lock(&uld_mutex);
3739 if (ulds[type].add) {
3744 list_for_each_entry(adap, &adapter_list, list_node)
3745 uld_attach(adap, type);
3746 out: mutex_unlock(&uld_mutex);
3749 EXPORT_SYMBOL(cxgb4_register_uld);
3752 * cxgb4_unregister_uld - unregister an upper-layer driver
3753 * @type: the ULD type
3755 * Unregisters an existing upper-layer driver.
3757 int cxgb4_unregister_uld(enum cxgb4_uld type)
3759 struct adapter *adap;
3761 if (type >= CXGB4_ULD_MAX)
3763 mutex_lock(&uld_mutex);
3764 list_for_each_entry(adap, &adapter_list, list_node)
3765 adap->uld_handle[type] = NULL;
3766 ulds[type].add = NULL;
3767 mutex_unlock(&uld_mutex);
3770 EXPORT_SYMBOL(cxgb4_unregister_uld);
3773 * cxgb_up - enable the adapter
3774 * @adap: adapter being enabled
3776 * Called when the first port is enabled, this function performs the
3777 * actions necessary to make an adapter operational, such as completing
3778 * the initialization of HW modules, and enabling interrupts.
3780 * Must be called with the rtnl lock held.
3782 static int cxgb_up(struct adapter *adap)
3786 err = setup_sge_queues(adap);
3789 err = setup_rss(adap);
3793 if (adap->flags & USING_MSIX) {
3794 name_msix_vecs(adap);
3795 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
3796 adap->msix_info[0].desc, adap);
3800 err = request_msix_queue_irqs(adap);
3802 free_irq(adap->msix_info[0].vec, adap);
3806 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
3807 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
3808 adap->port[0]->name, adap);
3814 t4_intr_enable(adap);
3815 adap->flags |= FULL_INIT_DONE;
3816 notify_ulds(adap, CXGB4_STATE_UP);
3820 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
3822 t4_free_sge_resources(adap);
3826 static void cxgb_down(struct adapter *adapter)
3828 t4_intr_disable(adapter);
3829 cancel_work_sync(&adapter->tid_release_task);
3830 cancel_work_sync(&adapter->db_full_task);
3831 cancel_work_sync(&adapter->db_drop_task);
3832 adapter->tid_release_task_busy = false;
3833 adapter->tid_release_head = NULL;
3835 if (adapter->flags & USING_MSIX) {
3836 free_msix_queue_irqs(adapter);
3837 free_irq(adapter->msix_info[0].vec, adapter);
3839 free_irq(adapter->pdev->irq, adapter);
3840 quiesce_rx(adapter);
3841 t4_sge_stop(adapter);
3842 t4_free_sge_resources(adapter);
3843 adapter->flags &= ~FULL_INIT_DONE;
3847 * net_device operations
3849 static int cxgb_open(struct net_device *dev)
3852 struct port_info *pi = netdev_priv(dev);
3853 struct adapter *adapter = pi->adapter;
3855 netif_carrier_off(dev);
3857 if (!(adapter->flags & FULL_INIT_DONE)) {
3858 err = cxgb_up(adapter);
3863 err = link_start(dev);
3865 netif_tx_start_all_queues(dev);
3869 static int cxgb_close(struct net_device *dev)
3871 struct port_info *pi = netdev_priv(dev);
3872 struct adapter *adapter = pi->adapter;
3874 netif_tx_stop_all_queues(dev);
3875 netif_carrier_off(dev);
3876 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
3879 /* Return an error number if the indicated filter isn't writable ...
3881 static int writable_filter(struct filter_entry *f)
3891 /* Delete the filter at the specified index (if valid). The checks for all
3892 * the common problems with doing this (the filter being locked, currently
3893 * pending in another operation, etc.) are handled by writable_filter().
3895 static int delete_filter(struct adapter *adapter, unsigned int fidx)
3897 struct filter_entry *f;
3900 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
3903 f = &adapter->tids.ftid_tab[fidx];
3904 ret = writable_filter(f);
3908 return del_filter_wr(adapter, fidx);
3913 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
3914 __be32 sip, __be16 sport, __be16 vlan,
3915 unsigned int queue, unsigned char port, unsigned char mask)
3918 struct filter_entry *f;
3919 struct adapter *adap;
3923 adap = netdev2adap(dev);
3925 /* Adjust stid to correct filter index */
3926 stid -= adap->tids.nstids;
3927 stid += adap->tids.nftids;
3929 /* Check to make sure the filter requested is writable ...
3931 f = &adap->tids.ftid_tab[stid];
3932 ret = writable_filter(f);
3936 /* Clear out any old resources being used by the filter before
3937 * we start constructing the new filter.
3940 clear_filter(adap, f);
3942 /* Clear out filter specifications */
3943 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
3944 f->fs.val.lport = cpu_to_be16(sport);
3945 f->fs.mask.lport = ~0;
3947 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
3948 for (i = 0; i < 4; i++) {
3949 f->fs.val.lip[i] = val[i];
3950 f->fs.mask.lip[i] = ~0;
3952 if (adap->filter_mode & F_PORT) {
3953 f->fs.val.iport = port;
3954 f->fs.mask.iport = mask;
3960 /* Mark filter as locked */
3964 ret = set_filter_wr(adap, stid);
3966 clear_filter(adap, f);
3972 EXPORT_SYMBOL(cxgb4_create_server_filter);
3974 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
3975 unsigned int queue, bool ipv6)
3978 struct filter_entry *f;
3979 struct adapter *adap;
3981 adap = netdev2adap(dev);
3983 /* Adjust stid to correct filter index */
3984 stid -= adap->tids.nstids;
3985 stid += adap->tids.nftids;
3987 f = &adap->tids.ftid_tab[stid];
3988 /* Unlock the filter */
3991 ret = delete_filter(adap, stid);
3997 EXPORT_SYMBOL(cxgb4_remove_server_filter);
3999 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4000 struct rtnl_link_stats64 *ns)
4002 struct port_stats stats;
4003 struct port_info *p = netdev_priv(dev);
4004 struct adapter *adapter = p->adapter;
4006 spin_lock(&adapter->stats_lock);
4007 t4_get_port_stats(adapter, p->tx_chan, &stats);
4008 spin_unlock(&adapter->stats_lock);
4010 ns->tx_bytes = stats.tx_octets;
4011 ns->tx_packets = stats.tx_frames;
4012 ns->rx_bytes = stats.rx_octets;
4013 ns->rx_packets = stats.rx_frames;
4014 ns->multicast = stats.rx_mcast_frames;
4016 /* detailed rx_errors */
4017 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4019 ns->rx_over_errors = 0;
4020 ns->rx_crc_errors = stats.rx_fcs_err;
4021 ns->rx_frame_errors = stats.rx_symbol_err;
4022 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4023 stats.rx_ovflow2 + stats.rx_ovflow3 +
4024 stats.rx_trunc0 + stats.rx_trunc1 +
4025 stats.rx_trunc2 + stats.rx_trunc3;
4026 ns->rx_missed_errors = 0;
4028 /* detailed tx_errors */
4029 ns->tx_aborted_errors = 0;
4030 ns->tx_carrier_errors = 0;
4031 ns->tx_fifo_errors = 0;
4032 ns->tx_heartbeat_errors = 0;
4033 ns->tx_window_errors = 0;
4035 ns->tx_errors = stats.tx_error_frames;
4036 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4037 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4041 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4044 int ret = 0, prtad, devad;
4045 struct port_info *pi = netdev_priv(dev);
4046 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4050 if (pi->mdio_addr < 0)
4052 data->phy_id = pi->mdio_addr;
4056 if (mdio_phy_id_is_c45(data->phy_id)) {
4057 prtad = mdio_phy_id_prtad(data->phy_id);
4058 devad = mdio_phy_id_devad(data->phy_id);
4059 } else if (data->phy_id < 32) {
4060 prtad = data->phy_id;
4062 data->reg_num &= 0x1f;
4066 mbox = pi->adapter->fn;
4067 if (cmd == SIOCGMIIREG)
4068 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4069 data->reg_num, &data->val_out);
4071 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4072 data->reg_num, data->val_in);
4080 static void cxgb_set_rxmode(struct net_device *dev)
4082 /* unfortunately we can't return errors to the stack */
4083 set_rxmode(dev, -1, false);
4086 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4089 struct port_info *pi = netdev_priv(dev);
4091 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4093 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4100 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4103 struct sockaddr *addr = p;
4104 struct port_info *pi = netdev_priv(dev);
4106 if (!is_valid_ether_addr(addr->sa_data))
4107 return -EADDRNOTAVAIL;
4109 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4110 pi->xact_addr_filt, addr->sa_data, true, true);
4114 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4115 pi->xact_addr_filt = ret;
4119 #ifdef CONFIG_NET_POLL_CONTROLLER
4120 static void cxgb_netpoll(struct net_device *dev)
4122 struct port_info *pi = netdev_priv(dev);
4123 struct adapter *adap = pi->adapter;
4125 if (adap->flags & USING_MSIX) {
4127 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4129 for (i = pi->nqsets; i; i--, rx++)
4130 t4_sge_intr_msix(0, &rx->rspq);
4132 t4_intr_handler(adap)(0, adap);
4136 static const struct net_device_ops cxgb4_netdev_ops = {
4137 .ndo_open = cxgb_open,
4138 .ndo_stop = cxgb_close,
4139 .ndo_start_xmit = t4_eth_xmit,
4140 .ndo_get_stats64 = cxgb_get_stats,
4141 .ndo_set_rx_mode = cxgb_set_rxmode,
4142 .ndo_set_mac_address = cxgb_set_mac_addr,
4143 .ndo_set_features = cxgb_set_features,
4144 .ndo_validate_addr = eth_validate_addr,
4145 .ndo_do_ioctl = cxgb_ioctl,
4146 .ndo_change_mtu = cxgb_change_mtu,
4147 #ifdef CONFIG_NET_POLL_CONTROLLER
4148 .ndo_poll_controller = cxgb_netpoll,
4152 void t4_fatal_err(struct adapter *adap)
4154 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4155 t4_intr_disable(adap);
4156 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4159 static void setup_memwin(struct adapter *adap)
4161 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4163 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4164 if (is_t4(adap->chip)) {
4165 mem_win0_base = bar0 + MEMWIN0_BASE;
4166 mem_win1_base = bar0 + MEMWIN1_BASE;
4167 mem_win2_base = bar0 + MEMWIN2_BASE;
4169 /* For T5, only relative offset inside the PCIe BAR is passed */
4170 mem_win0_base = MEMWIN0_BASE;
4171 mem_win1_base = MEMWIN1_BASE_T5;
4172 mem_win2_base = MEMWIN2_BASE_T5;
4174 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4175 mem_win0_base | BIR(0) |
4176 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4177 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4178 mem_win1_base | BIR(0) |
4179 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4180 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4181 mem_win2_base | BIR(0) |
4182 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
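/*
 * Encoding note for the three writes above: each PCIE_MEM_ACCESS_BASE_WIN
 * register takes the window base (an absolute BAR0 address on T4, a
 * BAR-relative offset on T5), the BAR id in BIR(), and the aperture as a
 * power of two expressed relative to 1KB, hence WINDOW(ilog2(aperture) - 10);
 * e.g. a 64KB aperture encodes as WINDOW(6).
 */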
4185 static void setup_memwin_rdma(struct adapter *adap)
4187 if (adap->vres.ocq.size) {
4188 unsigned int start, sz_kb;
4190 start = pci_resource_start(adap->pdev, 2) +
4191 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4192 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4194 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4195 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4197 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4198 adap->vres.ocq.start);
4200 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4204 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4209 /* get device capabilities */
4210 memset(c, 0, sizeof(*c));
4211 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4212 FW_CMD_REQUEST | FW_CMD_READ);
4213 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4214 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4218 /* select capabilities we'll be using */
4219 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4221 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4223 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4224 } else if (vf_acls) {
4225 dev_err(adap->pdev_dev, "virtualization ACLs not supported\n");
4228 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4229 FW_CMD_REQUEST | FW_CMD_WRITE);
4230 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4234 ret = t4_config_glbl_rss(adap, adap->fn,
4235 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4236 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4237 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4241 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4242 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4248 /* tweak some settings */
4249 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4250 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4251 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4252 v = t4_read_reg(adap, TP_PIO_DATA);
4253 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4255 /* first 4 Tx modulation queues point to consecutive Tx channels */
4256 adap->params.tp.tx_modq_map = 0xE4;
4257 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4258 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4260 /* associate each Tx modulation queue with consecutive Tx channels */
4262 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4263 &v, 1, A_TP_TX_SCHED_HDR);
4264 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4265 &v, 1, A_TP_TX_SCHED_FIFO);
4266 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4267 &v, 1, A_TP_TX_SCHED_PCMD);
4269 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4270 if (is_offload(adap)) {
4271 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4272 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4273 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4274 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4275 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4276 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4277 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4278 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4279 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4280 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4283 /* get basic stuff going */
4284 return t4_early_init(adap, adap->fn);
4288 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4290 #define MAX_ATIDS 8192U
4293 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4295 * If the firmware we're dealing with has Configuration File support, then
4296 * we use that to perform all configuration.
4300 * Tweak configuration based on module parameters, etc. Most of these have
4301 * defaults assigned to them by Firmware Configuration Files (if we're using
4302 * them) but need to be explicitly set if we're using hard-coded
4303 * initialization. But even in the case of using Firmware Configuration
4304 * Files, we'd like to expose the ability to change these via module
4305 * parameters so these are essentially common tweaks/settings for
4306 * Configuration Files and hard-coded initialization ...
4308 static int adap_init0_tweaks(struct adapter *adapter)
4311 * Fix up various Host-Dependent Parameters like Page Size, Cache
4312 * Line Size, etc. The firmware default is for a 4KB Page Size and
4313 * 64B Cache Line Size ...
4315 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4318 * Process module parameters which affect early initialization.
4320 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4321 dev_err(&adapter->pdev->dev,
4322 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4326 t4_set_reg_field(adapter, SGE_CONTROL,
4328 PKTSHIFT(rx_dma_offset));
4331 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4332 * adds the pseudo header itself.
4334 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4335 CSUM_HAS_PSEUDO_HDR, 0);
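/*
 * Why 2 is the sane rx_dma_offset: pushing the packet two bytes into the
 * DMA buffer places the IP header on a 4-byte boundary after the 14-byte
 * Ethernet header (the usual NET_IP_ALIGN reasoning), while 0 remains
 * available for platforms that prefer aligned DMA over an aligned IP
 * header.
 */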
4341 * Attempt to initialize the adapter via a Firmware Configuration File.
4343 static int adap_init0_config(struct adapter *adapter, int reset)
4345 struct fw_caps_config_cmd caps_cmd;
4346 const struct firmware *cf;
4347 unsigned long mtype = 0, maddr = 0;
4348 u32 finiver, finicsum, cfcsum;
4349 int ret, using_flash;
4350 char *fw_config_file, fw_config_file_path[256];
4353 * Reset device if necessary.
4356 ret = t4_fw_reset(adapter, adapter->mbox,
4357 PIORSTMODE | PIORST);
4363 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4364 * then use that. Otherwise, use the configuration file stored
4365 * in the adapter flash ...
4367 switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
4369 fw_config_file = FW_CFNAME;
4372 fw_config_file = FW5_CFNAME;
4375 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4376 adapter->pdev->device);
4381 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4384 mtype = FW_MEMTYPE_CF_FLASH;
4385 maddr = t4_flash_cfg_addr(adapter);
4387 u32 params[7], val[7];
4390 if (cf->size >= FLASH_CFG_MAX_SIZE)
4393 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4394 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4395 ret = t4_query_params(adapter, adapter->mbox,
4396 adapter->fn, 0, 1, params, val);
4399 * For t4_memory_write() below addresses and
4400 * sizes have to be in terms of multiples of 4
4401 * bytes. So, if the Configuration File isn't
4402 * a multiple of 4 bytes in length we'll have
4403 * to write that out separately since we can't
4404 * guarantee that the bytes following the
4405 * residual byte in the buffer returned by
4406 * request_firmware() are zeroed out ...
4408 size_t resid = cf->size & 0x3;
4409 size_t size = cf->size & ~0x3;
4410 __be32 *data = (__be32 *)cf->data;
4412 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4413 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4415 ret = t4_memory_write(adapter, mtype, maddr,
4417 if (ret == 0 && resid != 0) {
4424 last.word = data[size >> 2];
4425 for (i = resid; i < 4; i++)
4427 ret = t4_memory_write(adapter, mtype,
4434 release_firmware(cf);
4440 * Issue a Capability Configuration command to the firmware to get it
4441 * to parse the Configuration File. We don't use t4_fw_config_file()
4442 * because we want the ability to modify various features after we've
4443 * processed the configuration file ...
4445 memset(&caps_cmd, 0, sizeof(caps_cmd));
4446 caps_cmd.op_to_write =
4447 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4450 caps_cmd.cfvalid_to_len16 =
4451 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4452 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4453 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4454 FW_LEN16(caps_cmd));
4455 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4460 finiver = ntohl(caps_cmd.finiver);
4461 finicsum = ntohl(caps_cmd.finicsum);
4462 cfcsum = ntohl(caps_cmd.cfcsum);
4463 if (finicsum != cfcsum)
4464 dev_warn(adapter->pdev_dev, "Configuration File checksum "
4465 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4469 * And now tell the firmware to use the configuration we just loaded.
4471 caps_cmd.op_to_write =
4472 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4475 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4476 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4482 * Tweak configuration based on system architecture, module
4485 ret = adap_init0_tweaks(adapter);
4490 * And finally tell the firmware to initialize itself using the
4491 * parameters from the Configuration File.
4493 ret = t4_fw_initialize(adapter, adapter->mbox);
4497 sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
4499 * Return successfully and note that we're operating with parameters
4500 * from the Configuration File rather than with the hard-wired
4501 * initialization constants buried in the driver.
4503 adapter->flags |= USING_SOFT_PARAMS;
4504 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "
4505 "Configuration File %s, version %#x, computed checksum %#x\n",
4508 : fw_config_file_path),
4513 * Something bad happened. Return the error ... (If the "error"
4514 * is that there's no Configuration File on the adapter we don't
4515 * want to issue a warning since this is fairly common.)
4519 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
4525 * Attempt to initialize the adapter via hard-coded, driver supplied
4526 * parameters.
4528 static int adap_init0_no_config(struct adapter *adapter, int reset)
4530 struct sge *s = &adapter->sge;
4531 struct fw_caps_config_cmd caps_cmd;
4536 * Reset device if necessary
4539 ret = t4_fw_reset(adapter, adapter->mbox,
4540 PIORSTMODE | PIORST);
4546 * Get device capabilities and select which we'll be using.
4548 memset(&caps_cmd, 0, sizeof(caps_cmd));
4549 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4550 FW_CMD_REQUEST | FW_CMD_READ);
4551 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4552 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4557 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4559 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4561 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4562 } else if (vf_acls) {
4563 dev_err(adapter->pdev_dev, "virtualization ACLs not supported\n");
4566 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4567 FW_CMD_REQUEST | FW_CMD_WRITE);
4568 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4574 * Tweak configuration based on system architecture, module
4577 ret = adap_init0_tweaks(adapter);
4582 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4583 * mode which maps each Virtual Interface to its own section of
4584 * the RSS Table and we turn on all map and hash enables ...
4586 adapter->flags |= RSS_TNLALLLOOKUP;
4587 ret = t4_config_glbl_rss(adapter, adapter->mbox,
4588 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4589 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4590 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4591 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4592 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4597 * Set up our own fundamental resource provisioning ...
4599 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4600 PFRES_NEQ, PFRES_NETHCTRL,
4601 PFRES_NIQFLINT, PFRES_NIQ,
4602 PFRES_TC, PFRES_NVI,
4603 FW_PFVF_CMD_CMASK_MASK,
4604 pfvfres_pmask(adapter, adapter->fn, 0),
4606 PFRES_R_CAPS, PFRES_WX_CAPS);
4611 * Perform low level SGE initialization. We need to do this before we
4612 * send the firmware the INITIALIZE command because that will cause
4613 * any other PF Drivers which are waiting for the Master
4614 * Initialization to proceed forward.
4616 for (i = 0; i < SGE_NTIMERS - 1; i++)
4617 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4618 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4619 s->counter_val[0] = 1;
4620 for (i = 1; i < SGE_NCOUNTERS; i++)
4621 s->counter_val[i] = min(intr_cnt[i - 1],
4622 THRESHOLD_0_GET(THRESHOLD_0_MASK));
4623 t4_sge_init(adapter);
4625 #ifdef CONFIG_PCI_IOV
4627 * Provision resource limits for Virtual Functions. We currently
4628 * grant them all the same static resource limits except for the Port
4629 * Access Rights Mask which we're assigning based on the PF. All of
4630 * the static provisioning stuff for both the PF and VF really needs
4631 * to be managed in a persistent manner for each device which the
4632 * firmware controls.
4636 int max_no_pf = is_t4(adapter->chip) ? NUM_OF_PF_WITH_SRIOV_T4 :
4637 NUM_OF_PF_WITH_SRIOV_T5;
4639 for (pf = 0; pf < max_no_pf; pf++) {
4640 if (num_vf[pf] <= 0)
4643 /* VF numbering starts at 1! */
4644 for (vf = 1; vf <= num_vf[pf]; vf++) {
4645 ret = t4_cfg_pfvf(adapter, adapter->mbox,
4647 VFRES_NEQ, VFRES_NETHCTRL,
4648 VFRES_NIQFLINT, VFRES_NIQ,
4649 VFRES_TC, VFRES_NVI,
4650 FW_PFVF_CMD_CMASK_MASK,
4654 VFRES_R_CAPS, VFRES_WX_CAPS);
4656 dev_warn(adapter->pdev_dev,
4658 "provision pf/vf=%d/%d; "
4659 "err=%d\n", pf, vf, ret);
4666 * Set up the default filter mode. Later we'll want to implement this
4667 * via a firmware command, etc. ... This needs to be done before the
4668 * firmware initialization command ... If the selected set of fields
4669 * isn't equal to the default value, we'll need to make sure that the
4670 * field selections will fit in the 36-bit budget.
4672 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
4675 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4676 switch (tp_vlan_pri_map & (1 << j)) {
4678 /* compressed filter field not enabled */
4698 case ETHERTYPE_MASK:
4704 case MPSHITTYPE_MASK:
4707 case FRAGMENTATION_MASK:
4713 dev_err(adapter->pdev_dev,
4714 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
4715 " using %#x\n", tp_vlan_pri_map, bits,
4716 TP_VLAN_PRI_MAP_DEFAULT);
4717 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
4720 v = tp_vlan_pri_map;
4721 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
4722 &v, 1, TP_VLAN_PRI_MAP);
4725 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
4726 * to support any of the compressed filter fields above. Newer
4727 * versions of the firmware do this automatically but it doesn't hurt
4728 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
4729 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
4730 * since the firmware automatically turns this on and off when we have
4731 * a non-zero number of filters active (since it does have a
4732 * performance impact).
4734 if (tp_vlan_pri_map)
4735 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
4736 FIVETUPLELOOKUP_MASK,
4737 FIVETUPLELOOKUP_MASK);
4740 * Tweak some settings.
4742 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
4743 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
4744 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
4745 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
4748 * Get basic stuff going by issuing the Firmware Initialize command.
4749 * Note that this _must_ be after all PFVF commands ...
4751 ret = t4_fw_initialize(adapter, adapter->mbox);
4756 * Return successfully!
4758 dev_info(adapter->pdev_dev, "Successfully configured using built-in "
4759 "driver parameters\n");
4763 * Something bad happened. Return the error ...
4770 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4772 static int adap_init0(struct adapter *adap)
4776 enum dev_state state;
4777 u32 params[7], val[7];
4778 struct fw_caps_config_cmd caps_cmd;
4782 * Contact FW, advertising Master capability (and potentially forcing
4783 * ourselves as the Master PF if our module parameter force_init is
4786 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
4787 force_init ? MASTER_MUST : MASTER_MAY,
4790 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4794 if (ret == adap->mbox)
4795 adap->flags |= MASTER_PF;
4796 if (force_init && state == DEV_STATE_INIT)
4797 state = DEV_STATE_UNINIT;
4800 * If we're the Master PF Driver and the device is uninitialized,
4801 * then let's consider upgrading the firmware ... (We always want
4802 * to check the firmware version number in order to A. get it for
4803 * later reporting and B. to warn if the currently loaded firmware
4804 * is excessively mismatched relative to the driver.)
4806 ret = t4_check_fw_version(adap);
4807 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4808 if (ret == -EINVAL || ret > 0) {
4809 if (upgrade_fw(adap) >= 0) {
4811 * Note that the chip was reset as part of the
4812 * firmware upgrade so we don't reset it again
4813 * below and grab the new firmware version.
4816 ret = t4_check_fw_version(adap);
4824 * Grab VPD parameters. This should be done after we establish a
4825 * connection to the firmware since some of the VPD parameters
4826 * (notably the Core Clock frequency) are retrieved via requests to
4827 * the firmware. On the other hand, we need these fairly early on
4828 * so we do this right after getting ahold of the firmware.
4830 ret = get_vpd_params(adap, &adap->params.vpd);
4835 * Find out what ports are available to us. Note that we need to do
4836 * this before calling adap_init0_no_config() since it needs nports
4840 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4841 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
4842 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
4846 adap->params.nports = hweight32(port_vec);
4847 adap->params.portvec = port_vec;
4850 * If the firmware is initialized already (and we're not forcing a
4851 * master initialization), note that we're living with existing
4852 * adapter parameters. Otherwise, it's time to try initializing the
4855 if (state == DEV_STATE_INIT) {
4856 dev_info(adap->pdev_dev, "Coming up as %s: "
4857 "Adapter already initialized\n",
4858 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4859 adap->flags |= USING_SOFT_PARAMS;
4861 dev_info(adap->pdev_dev, "Coming up as MASTER: "
4862 "Initializing adapter\n");
4865 * If the firmware doesn't support Configuration
4866 * Files, warn the user and fall back to hard-coded init.
4869 dev_warn(adap->pdev_dev, "Firmware doesn't support "
4870 "configuration file.\n");
4872 ret = adap_init0_no_config(adap, reset);
4875 * Find out whether we're dealing with a version of
4876 * the firmware which has configuration file support.
4878 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4879 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4880 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
4884 * If the firmware doesn't support Configuration
4885 * Files, use the old Driver-based, hard-wired
4886 * initialization. Otherwise, try using the
4887 * Configuration File support and fall back to the
4888 * Driver-based initialization if there's no
4889 * Configuration File found.
4892 ret = adap_init0_no_config(adap, reset);
4895 * The firmware provides us with a memory
4896 * buffer where we can load a Configuration
4897 * File from the host if we want to override
4898 * the Configuration File in flash.
4901 ret = adap_init0_config(adap, reset);
4902 if (ret == -ENOENT) {
4903 dev_info(adap->pdev_dev,
4904 "No Configuration File present "
4905 "on adapter. Using hard-wired "
4906 "configuration parameters.\n");
4907 ret = adap_init0_no_config(adap, reset);
4912 dev_err(adap->pdev_dev,
4913 "could not initialize adapter, error %d\n",
4920 * If we're living with non-hard-coded parameters (either from a
4921 * Firmware Configuration File or values programmed by a different PF
4922 * Driver), give the SGE code a chance to pull in anything that it
4923 * needs ... Note that this must be called after we retrieve our VPD
4924 * parameters in order to know how to convert core ticks to seconds.
4926 if (adap->flags & USING_SOFT_PARAMS) {
4927 ret = t4_sge_init(adap);
4932 if (is_bypass_device(adap->pdev->device))
4933 adap->params.bypass = 1;
4936 * Grab some of our basic fundamental operating parameters.
4938 #define FW_PARAM_DEV(param) \
4939 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4940 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4942 #define FW_PARAM_PFVF(param) \
4943 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4944 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
4945 FW_PARAMS_PARAM_Y(0) | \
4946 FW_PARAMS_PARAM_Z(0)
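/*
 * Example expansion: FW_PARAM_PFVF(L2T_START) builds a 32-bit parameter
 * id with mnemonic FW_PARAMS_MNEM_PFVF, X selecting the L2T_START
 * parameter, and Y/Z zeroed; handed to t4_query_params() below, each
 * such id comes back with its value in the matching val[] slot.
 */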
4948 params[0] = FW_PARAM_PFVF(EQ_START);
4949 params[1] = FW_PARAM_PFVF(L2T_START);
4950 params[2] = FW_PARAM_PFVF(L2T_END);
4951 params[3] = FW_PARAM_PFVF(FILTER_START);
4952 params[4] = FW_PARAM_PFVF(FILTER_END);
4953 params[5] = FW_PARAM_PFVF(IQFLINT_START);
4954 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
4957 adap->sge.egr_start = val[0];
4958 adap->l2t_start = val[1];
4959 adap->l2t_end = val[2];
4960 adap->tids.ftid_base = val[3];
4961 adap->tids.nftids = val[4] - val[3] + 1;
4962 adap->sge.ingr_start = val[5];
4964 /* query params related to active filter region */
4965 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4966 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4967 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
4968 /* If Active filter size is set we enable establishing
4969 * offload connection through firmware work request
4971 if ((val[0] != val[1]) && (ret >= 0)) {
4972 adap->flags |= FW_OFLD_CONN;
4973 adap->tids.aftid_base = val[0];
4974 adap->tids.aftid_end = val[1];
4978 * Get device capabilities so we can determine what resources we need
4981 memset(&caps_cmd, 0, sizeof(caps_cmd));
4982 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4983 FW_CMD_REQUEST | FW_CMD_READ);
4984 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4985 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4990 if (caps_cmd.ofldcaps) {
4991 /* query offload-related parameters */
4992 params[0] = FW_PARAM_DEV(NTID);
4993 params[1] = FW_PARAM_PFVF(SERVER_START);
4994 params[2] = FW_PARAM_PFVF(SERVER_END);
4995 params[3] = FW_PARAM_PFVF(TDDP_START);
4996 params[4] = FW_PARAM_PFVF(TDDP_END);
4997 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4998 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5002 adap->tids.ntids = val[0];
5003 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5004 adap->tids.stid_base = val[1];
5005 adap->tids.nstids = val[2] - val[1] + 1;
5007 * Setup server filter region. Divide the available filter
5008 * region into two parts. Regular filters get 1/3rd and server
5009 * filters get 2/3rd part. This is only enabled if the workaround
5010 * path is enabled.
5011 * 1. For regular filters.
5012 * 2. Server filters: these are special filters which are used
5013 * to redirect SYN packets to the offload queue.
5015 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5016 adap->tids.sftid_base = adap->tids.ftid_base +
5017 DIV_ROUND_UP(adap->tids.nftids, 3);
5018 adap->tids.nsftids = adap->tids.nftids -
5019 DIV_ROUND_UP(adap->tids.nftids, 3);
5020 adap->tids.nftids = adap->tids.sftid_base -
5021 adap->tids.ftid_base;
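/*
 * Worked split: with nftids = 496, DIV_ROUND_UP(496, 3) = 166, so
 * sftid_base = ftid_base + 166, nsftids = 496 - 166 = 330, and the
 * regular-filter count shrinks to sftid_base - ftid_base = 166, i.e.
 * roughly the 1/3 : 2/3 division described above.
 */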
5023 adap->vres.ddp.start = val[3];
5024 adap->vres.ddp.size = val[4] - val[3] + 1;
5025 adap->params.ofldq_wr_cred = val[5];
5027 adap->params.offload = 1;
5029 if (caps_cmd.rdmacaps) {
5030 params[0] = FW_PARAM_PFVF(STAG_START);
5031 params[1] = FW_PARAM_PFVF(STAG_END);
5032 params[2] = FW_PARAM_PFVF(RQ_START);
5033 params[3] = FW_PARAM_PFVF(RQ_END);
5034 params[4] = FW_PARAM_PFVF(PBL_START);
5035 params[5] = FW_PARAM_PFVF(PBL_END);
5036 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5040 adap->vres.stag.start = val[0];
5041 adap->vres.stag.size = val[1] - val[0] + 1;
5042 adap->vres.rq.start = val[2];
5043 adap->vres.rq.size = val[3] - val[2] + 1;
5044 adap->vres.pbl.start = val[4];
5045 adap->vres.pbl.size = val[5] - val[4] + 1;
5047 params[0] = FW_PARAM_PFVF(SQRQ_START);
5048 params[1] = FW_PARAM_PFVF(SQRQ_END);
5049 params[2] = FW_PARAM_PFVF(CQ_START);
5050 params[3] = FW_PARAM_PFVF(CQ_END);
5051 params[4] = FW_PARAM_PFVF(OCQ_START);
5052 params[5] = FW_PARAM_PFVF(OCQ_END);
5053 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5056 adap->vres.qp.start = val[0];
5057 adap->vres.qp.size = val[1] - val[0] + 1;
5058 adap->vres.cq.start = val[2];
5059 adap->vres.cq.size = val[3] - val[2] + 1;
5060 adap->vres.ocq.start = val[4];
5061 adap->vres.ocq.size = val[5] - val[4] + 1;
5063 if (caps_cmd.iscsicaps) {
5064 params[0] = FW_PARAM_PFVF(ISCSI_START);
5065 params[1] = FW_PARAM_PFVF(ISCSI_END);
5066 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5070 adap->vres.iscsi.start = val[0];
5071 adap->vres.iscsi.size = val[1] - val[0] + 1;
5073 #undef FW_PARAM_PFVF
5077 * These are finalized by FW initialization, load their values now.
5079 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5080 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
5081 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
5082 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5083 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5084 adap->params.b_wnd);
5086 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5087 for (j = 0; j < NCHAN; j++)
5088 adap->params.tp.tx_modq[j] = j;
5090 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5091 &adap->filter_mode, 1,
5094 adap->flags |= FW_OK;
5098 * Something bad happened. If a command timed out or failed with EIO,
5099 * the FW is not operating within its spec or something catastrophic
5100 * happened to the HW/FW, so stop issuing commands.
5103 if (ret != -ETIMEDOUT && ret != -EIO)
5104 t4_fw_bye(adap, adap->mbox);
/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	pci_disable_device(pdev);
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
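/*
 * After the platform resets the slot, the EEH core invokes the callback
 * below to bring the function back: re-enable the device, restore PCI
 * state, re-establish contact with the firmware, re-allocate a virtual
 * interface per port and reload the MTU table before restarting I/O.
 */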
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}
static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
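/*
 * Example (illustrative): init_rspq(&q, 0, 0, 1024, 64) programs a
 * 1024-entry response queue of 64-byte entries using holdoff timer
 * index 0.  A pkt_cnt_idx >= SGE_NCOUNTERS leaves the packet-count
 * threshold disabled, since QINTR_CNT_EN is only set for a valid
 * counter index.
 */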
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
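	/*
	 * Worked example (illustrative): a card with two 10G and two 1G
	 * ports and MAX_ETH_QSETS = 32 gives q10g = (32 - 2) / 2 = 15,
	 * capped to netif_get_num_default_rss_queues() (typically 8,
	 * fewer on small systems), so qidx ends up 8 + 8 + 1 + 1 = 18
	 * queue sets on an 8-or-more-core machine.
	 */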
	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
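/*
 * Example (illustrative): with per-port queue counts {8, 8, 1, 1} and
 * n = 10, the round-robin pass above trims one queue per multi-queue
 * port per iteration until the total reaches 10, ending at {4, 4, 1, 1};
 * the final loop then re-packs first_qset offsets to 0, 4, 8 and 9.
 */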
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;
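	/*
	 * Worked example (illustrative): a 2-port offload-capable adapter
	 * with max_ethqsets = 16, rdmaqs = 2 and ofldqsets = 8 asks for
	 * want = 16 + 2 + 2 + 8 = 28 vectors but can operate with as few
	 * as need = 2 + 2 + (2 * 2) = 8.
	 */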
	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;	/* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}
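/*
 * Note: ethtool_rxfh_indir_default(j, n) evaluates to j % n, so the RSS
 * indirection table above simply spreads buckets round-robin across the
 * port's queue sets, e.g. 0, 1, 2, 3, 0, 1, ... for nqsets = 4.
 */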
static void print_port_info(const struct net_device *dev)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
	};

	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", base[pi->port_type]);

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, E/C: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.ec);
}
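/*
 * A hypothetical 1G/10G SFP+ offload-capable port on a PCIe x8 Gen2
 * slot would log something like:
 *   eth0: Chelsio T420-CR rev 1 1000/10GBASE-R SFP+ RNIC PCIe x8 5 GT/s MSI-X
 */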
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
#ifdef CONFIG_PCI_IOV
	int max_no_pf;
#endif

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}
	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;
	if (!is_t4(adapter->chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for
		 * the queue is less than the number of segments that can be
		 * accommodated in a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
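		/*
		 * Example (illustrative): with 4 KB pages, num_seg =
		 * 4096 / 128 = 32, so at most 32 egress queues may share
		 * a page before write coalescing has to be rejected.
		 */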
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}
	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);
	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}
	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		err = 0;
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
	}
	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);
sriov:
#ifdef CONFIG_PCI_IOV
	/* On the "func != ent->driver_data" path we arrive here with
	 * adapter == NULL, so only consult the chip type when we have an
	 * adapter and fall back to the T4 limit otherwise.
	 */
	max_no_pf = (adapter && !is_t4(adapter->chip)) ?
			NUM_OF_PF_WITH_SRIOV_T5 : NUM_OF_PF_WITH_SRIOV_T4;
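	/* The num_vf module parameter array, indexed by PF, controls how
	 * many VFs to instantiate per function; e.g. (illustrative)
	 * "modprobe cxgb4 num_vf=4,0,0,0" would request four VFs on PF0.
	 */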
	if (func < max_no_pf && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->chip))
		iounmap(adapter->bar2);
 out_unmap_bar0:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					 adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->chip))
			iounmap(adapter->bar2);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
	.name        = KBUILD_MODNAME,
	.id_table    = cxgb4_pci_tbl,
	.probe       = init_one,
	.remove      = remove_one,
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0) {
		debugfs_remove(cxgb4_debugfs_root);
		destroy_workqueue(workq);
	}
	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);