/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_dcb.h"
#include "l2t.h"

#include <../drivers/net/bonding/bonding.h>
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U
enum {
        /*
         * Physical Function provisioning constants.
         */
        PFRES_NVI = 4,                  /* # of Virtual Interfaces */
        PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
        PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr
                                         */
        PFRES_NEQ = 256,                /* # of egress queues */
        PFRES_NIQ = 0,                  /* # of ingress queues */
        PFRES_TC = 0,                   /* PCI-E traffic class */
        PFRES_NEXACTF = 128,            /* # of exact MPS filters */

        PFRES_R_CAPS = FW_CMD_CAP_PF,
        PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
        /*
         * Virtual Function provisioning constants.  We need two extra Ingress
         * Queues with Interrupt capability to serve as the VF's Firmware
         * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
         * neither will have Free Lists associated with them.  For each
         * Ethernet/Control Egress Queue and for each Free List, we need an
         * Egress Context.
         */
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};
/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PF's access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        while (1) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask.
                 * Otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec-1));

                if (portn == 0)
                        return pmask;

                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}
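
/*
 * Worked example for the bit trick above (illustrative only): with
 * portvec = 0b1010,
 *
 *      portvec - 1             = 0b1001
 *      portvec & (portvec - 1) = 0b1000   (lowest set bit cleared)
 *      portvec ^ (the above)   = 0b0010   (lowest set bit isolated)
 *
 * so each loop iteration peels one active port off the vector, lowest
 * port number first.
 */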
enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES = 128,
        MIN_FL_ENTRIES = 16
};
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
static const struct pci_device_id cxgb4_pci_tbl[] = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x400d, -1),
        CH_DEVICE(0x400e, -1),
        CH_DEVICE(0x4080, -1),
        CH_DEVICE(0x4081, -1),
        CH_DEVICE(0x4082, -1),
        CH_DEVICE(0x4083, -1),
        CH_DEVICE(0x4084, -1),
        CH_DEVICE(0x4085, -1),
        CH_DEVICE(0x4086, -1),
        CH_DEVICE(0x4087, -1),
        CH_DEVICE(0x4088, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
        CH_DEVICE(0x4480, 4),
        CH_DEVICE(0x4481, 4),
        CH_DEVICE(0x4482, 4),
        CH_DEVICE(0x4483, 4),
        CH_DEVICE(0x4484, 4),
        CH_DEVICE(0x4485, 4),
        CH_DEVICE(0x4486, 4),
        CH_DEVICE(0x4487, 4),
        CH_DEVICE(0x4488, 4),
        CH_DEVICE(0x5001, 4),
        CH_DEVICE(0x5002, 4),
        CH_DEVICE(0x5003, 4),
        CH_DEVICE(0x5004, 4),
        CH_DEVICE(0x5005, 4),
        CH_DEVICE(0x5006, 4),
        CH_DEVICE(0x5007, 4),
        CH_DEVICE(0x5008, 4),
        CH_DEVICE(0x5009, 4),
        CH_DEVICE(0x500A, 4),
        CH_DEVICE(0x500B, 4),
        CH_DEVICE(0x500C, 4),
        CH_DEVICE(0x500D, 4),
        CH_DEVICE(0x500E, 4),
        CH_DEVICE(0x500F, 4),
        CH_DEVICE(0x5010, 4),
        CH_DEVICE(0x5011, 4),
        CH_DEVICE(0x5012, 4),
        CH_DEVICE(0x5013, 4),
        CH_DEVICE(0x5014, 4),
        CH_DEVICE(0x5015, 4),
        CH_DEVICE(0x5080, 4),
        CH_DEVICE(0x5081, 4),
        CH_DEVICE(0x5082, 4),
        CH_DEVICE(0x5083, 4),
        CH_DEVICE(0x5084, 4),
        CH_DEVICE(0x5085, 4),
        CH_DEVICE(0x5086, 4),
        CH_DEVICE(0x5087, 4),
        CH_DEVICE(0x5088, 4),
        CH_DEVICE(0x5401, 4),
        CH_DEVICE(0x5402, 4),
        CH_DEVICE(0x5403, 4),
        CH_DEVICE(0x5404, 4),
        CH_DEVICE(0x5405, 4),
        CH_DEVICE(0x5406, 4),
        CH_DEVICE(0x5407, 4),
        CH_DEVICE(0x5408, 4),
        CH_DEVICE(0x5409, 4),
        CH_DEVICE(0x540A, 4),
        CH_DEVICE(0x540B, 4),
        CH_DEVICE(0x540C, 4),
        CH_DEVICE(0x540D, 4),
        CH_DEVICE(0x540E, 4),
        CH_DEVICE(0x540F, 4),
        CH_DEVICE(0x5410, 4),
        CH_DEVICE(0x5411, 4),
        CH_DEVICE(0x5412, 4),
        CH_DEVICE(0x5413, 4),
        CH_DEVICE(0x5414, 4),
        CH_DEVICE(0x5415, 4),
        CH_DEVICE(0x5480, 4),
        CH_DEVICE(0x5481, 4),
        CH_DEVICE(0x5482, 4),
        CH_DEVICE(0x5483, 4),
        CH_DEVICE(0x5484, 4),
        CH_DEVICE(0x5485, 4),
        CH_DEVICE(0x5486, 4),
        CH_DEVICE(0x5487, 4),
        CH_DEVICE(0x5488, 4),
        { 0, }
};
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");
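
/*
 * Example usage (hypothetical values, shown purely for illustration): the
 * two arrays above are ordinary comma-separated module parameter arrays,
 * so both can be overridden when the module is loaded, e.g.:
 *
 *      modprobe cxgb4 intr_holdoff=3,10,25,50,100 intr_cnt=2,8,32
 */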
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
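
/*
 * Illustration of why the default offset is 2: an Ethernet header is 14
 * bytes, so with a 2-byte pad in front of it the IP header that follows
 * begins at byte 16 of the buffer, which is 4-byte aligned.  With an
 * offset of 0 the IP header would begin at byte 14 and every 4-byte IP
 * header field would straddle a 4-byte boundary.
 */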
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
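/*
 * Checking that default selection against the 36-bit budget quoted above:
 *
 *      IP Fragment (1) + MPS Match Type (3) + IP Protocol (8)
 *        + [Inner] VLAN (17) + Port (3) + FCoE (1) = 33 bits <= 36
 *
 * so the default configuration fits with three bits to spare.
 */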
enum {
        TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
        TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
        TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case 100:   s = "100Mbps";  break;
                case 1000:  s = "1000Mbps"; break;
                case 10000: s = "10Gbps";   break;
                case 40000: s = "40Gbps";   break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
        int i;

        /* We use a simple mapping of Port TX Queue Index to DCB
         * Priority when we're enabling DCB.
         */
        for (i = 0; i < pi->nqsets; i++, txq++) {
                u32 name, value;
                int err;

                name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
                        FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
                value = enable ? i : 0xffffffff;

                /* Since we can be called while atomic (from "interrupt
                 * level") we need to issue the Set Parameters Command
                 * without sleeping (timeout < 0).
                 */
                err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
                                            &name, &value);
                if (err)
                        dev_err(adap->pdev_dev,
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
                        txq->dcb_prio = value;
        }
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else {
#ifdef CONFIG_CHELSIO_T4_DCB
                        cxgb4_dcb_state_init(dev);
                        dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }

                link_report(dev);
        }
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}
/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
                                          true, CXGB4_DCB_ENABLED);

        return ret;
}
int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
        struct port_info *pi = netdev_priv(dev);

        return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
#else
        return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
        int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
        struct net_device *dev = adap->port[port];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;

        cxgb4_dcb_handle_fw_update(adap, pcmd);
        new_dcb_enabled = cxgb4_dcb_enabled(dev);

        /* If the DCB has become enabled or disabled on the port then we're
         * going to need to set up/tear down DCB Priority parameters for the
         * TX Queues associated with the port.
         */
        if (new_dcb_enabled != old_dcb_enabled)
                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter have loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
           (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = GET_TCB_COOKIE(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
           ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
                                , opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
                const struct fw_port_cmd *pcmd = (const void *)p->data;
                unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
                unsigned int action =
                        FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_GET_PORT_INFO) {
                        int port = FW_PORT_CMD_PORTID_GET(
                                        be32_to_cpu(pcmd->op_to_portid));
                        struct net_device *dev = q->adap->port[port];
                        int state_input = ((pcmd->u.info.dcbxdis_pkd &
                                            FW_PORT_CMD_DCBXDIS)
                                           ? CXGB4_DCB_INPUT_FW_DISABLED
                                           : CXGB4_DCB_INPUT_FW_ENABLED);

                        cxgb4_dcb_state_fsm(dev, state_input);
                }

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_L2_DCB_CFG)
                        dcb_rpl(q->adap, pcmd);
                else
#endif
                        if (p->type == 0)
                                t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}
/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}
static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);

        for_each_rdmaciq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
                         adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
        int msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmaciq(s, rdmaciqqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmaciq[rdmaciqqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaciqqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmaciq[rdmaciqqidx].rspq);
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
        for_each_rdmaciq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
 *      write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}
/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}
/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_rdmaciq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmaciq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_ciq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, is_t4(adap->params.chip) ?
                                MPS_TRC_RSS_CONTROL :
                                MPS_T5_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter->l2t);
                if (f->l2t == NULL)
                        return -EAGAIN;
                if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
                                         f->fs.eport, f->fs.dmac)) {
                        cxgb4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(V_FW_FILTER_WR_TID(ftid) |
                      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                      V_FW_FILTER_WR_NOREPLY(0) |
                      V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                      V_FW_FILTER_WR_PRIO(f->fs.prio) |
                      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(V_FW_FILTER_WR_RX_CHAN(0) |
                      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);

        memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                             void *accel_priv, select_queue_fallback_t fallback)
{
        int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
        /* If a Data Center Bridging has been successfully negotiated on this
         * link then we'll use the skb's priority to map it to a TX Queue.
         * The skb's priority is determined via the VLAN Tag Priority Code
         * Point field.
         */
        if (cxgb4_dcb_enabled(dev)) {
                u16 vlan_tci;
                int err;

                err = vlan_get_tag(skb, &vlan_tci);
                if (unlikely(err)) {
                        if (net_ratelimit())
                                netdev_warn(dev,
                                            "TX Packet without VLAN Tag on DCB Link\n");
                        txq = 0;
                } else {
                        txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                }
                return txq;
        }
#endif /* CONFIG_CHELSIO_T4_DCB */

        if (select_queue) {
                txq = (skb_rx_queue_recorded(skb)
                        ? skb_get_rx_queue(skb)
                        : smp_processor_id());

                while (unlikely(txq >= dev->real_num_tx_queues))
                        txq -= dev->real_num_tx_queues;

                return txq;
        }

        return fallback(dev, skb) % dev->real_num_tx_queues;
}
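
/* Worked example for the DCB branch above (illustrative only): a VLAN TCI
 * of 0xa005 carries Priority Code Point
 * (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT
 * = (0xa005 & 0xe000) >> 13 = 5, so such a packet would be mapped to
 * TX Queue 5.
 */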
static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxBroadcastFrames  ",
        "TxMulticastFrames  ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",
        "RxBroadcastFrames  ",
        "RxMulticastFrames  ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",
        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc   ",
        "RxBG1FramesTrunc   ",
        "RxBG2FramesTrunc   ",
        "RxBG3FramesTrunc   ",
        "WriteCoalSuccess   ",
        "WriteCoalFail      ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}
#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);

        if (is_t4(adap->params.chip))
                return T4_REGMAP_SIZE;
        else
                return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));

        if (adapter->params.fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}
/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 val1, val2;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
        data += sizeof(struct queue_port_stats) / sizeof(u64);
        if (!is_t4(adapter->params.chip)) {
                t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
                val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
                val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
                *data = val1 - val2;
                data++;
                *data = val2;
        } else {
                memset(data, 0, 2 * sizeof(u64));
        }
}
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return CHELSIO_CHIP_VERSION(ap->params.chip) |
                (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
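
/* Example of the encoding above (hypothetical field values): a chip with
 * version 5 and revision 1 yields 5 | (1 << 10) | (1 << 16) = 0x10405,
 * i.e. the version in bits 0..9, the revision in bits 10..15, and
 * register dump version 1 in bits 16..23.
 */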
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
                           unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = t4_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        static const unsigned int t4_reg_ranges[] = {
                /* (long table of T4 register address ranges elided) */
        };

        static const unsigned int t5_reg_ranges[] = {
                /* (long table of T5 register address ranges elided) */
        };

        int i;
        struct adapter *ap = netdev2adap(dev);
        static const unsigned int *reg_ranges;
        int arr_size = 0, buf_size = 0;

        if (is_t4(ap->params.chip)) {
                reg_ranges = &t4_reg_ranges[0];
                arr_size = ARRAY_SIZE(t4_reg_ranges);
                buf_size = T4_REGMAP_SIZE;
        } else {
                reg_ranges = &t5_reg_ranges[0];
                arr_size = ARRAY_SIZE(t5_reg_ranges);
                buf_size = T5_REGMAP_SIZE;
        }

        regs->version = mk_adap_vers(ap);

        memset(buf, 0, buf_size);
        for (i = 0; i < arr_size; i += 2)
                reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
        return 0;
}
static int identify_port(struct net_device *dev,
                         enum ethtool_phys_id_state state)
{
        unsigned int val;
        struct adapter *adap = netdev2adap(dev);

        if (state == ETHTOOL_ID_ACTIVE)
                val = 0xffff;
        else if (state == ETHTOOL_ID_INACTIVE)
                val = 0;
        else
                return -EINVAL;

        return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
        unsigned int v = 0;

        if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
            type == FW_PORT_TYPE_BT_XAUI) {
                v |= SUPPORTED_TP;
                if (caps & FW_PORT_CAP_SPEED_100M)
                        v |= SUPPORTED_100baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseT_Full;
        } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
                v |= SUPPORTED_Backplane;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseKX_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseKX4_Full;
        } else if (type == FW_PORT_TYPE_KR)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
        else if (type == FW_PORT_TYPE_BP_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
        else if (type == FW_PORT_TYPE_BP4_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
                     SUPPORTED_10000baseKX4_Full;
        else if (type == FW_PORT_TYPE_FIBER_XFI ||
                 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
                v |= SUPPORTED_FIBRE;
        else if (type == FW_PORT_TYPE_BP40_BA)
                v |= SUPPORTED_40000baseSR4_Full;

        if (caps & FW_PORT_CAP_ANEG)
                v |= SUPPORTED_Autoneg;
        return v;
}
static unsigned int to_fw_linkcaps(unsigned int caps)
{
        unsigned int v = 0;

        if (caps & ADVERTISED_100baseT_Full)
                v |= FW_PORT_CAP_SPEED_100M;
        if (caps & ADVERTISED_1000baseT_Full)
                v |= FW_PORT_CAP_SPEED_1G;
        if (caps & ADVERTISED_10000baseT_Full)
                v |= FW_PORT_CAP_SPEED_10G;
        if (caps & ADVERTISED_40000baseSR4_Full)
                v |= FW_PORT_CAP_SPEED_40G;
        return v;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        const struct port_info *p = netdev_priv(dev);

        if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
            p->port_type == FW_PORT_TYPE_BT_XFI ||
            p->port_type == FW_PORT_TYPE_BT_XAUI)
                cmd->port = PORT_TP;
        else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
                 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
                cmd->port = PORT_FIBRE;
        else if (p->port_type == FW_PORT_TYPE_SFP ||
                 p->port_type == FW_PORT_TYPE_QSFP_10G ||
                 p->port_type == FW_PORT_TYPE_QSFP) {
                if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
                    p->mod_type == FW_PORT_MOD_TYPE_SR ||
                    p->mod_type == FW_PORT_MOD_TYPE_ER ||
                    p->mod_type == FW_PORT_MOD_TYPE_LRM)
                        cmd->port = PORT_FIBRE;
                else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
                         p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
                        cmd->port = PORT_DA;
                else
                        cmd->port = PORT_OTHER;
        } else
                cmd->port = PORT_OTHER;

        if (p->mdio_addr >= 0) {
                cmd->phy_address = p->mdio_addr;
                cmd->transceiver = XCVR_EXTERNAL;
                cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
                        MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
        } else {
                cmd->phy_address = 0;  /* not really, but no better option */
                cmd->transceiver = XCVR_INTERNAL;
                cmd->mdio_support = 0;
        }

        cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
        cmd->advertising = from_fw_linkcaps(p->port_type,
                                            p->link_cfg.advertising);
        ethtool_cmd_speed_set(cmd,
                              netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
        cmd->duplex = DUPLEX_FULL;
        cmd->autoneg = p->link_cfg.autoneg;

        return 0;
}
static unsigned int speed_to_caps(int speed)
{
        if (speed == 100)
                return FW_PORT_CAP_SPEED_100M;
        if (speed == 1000)
                return FW_PORT_CAP_SPEED_1G;
        if (speed == 10000)
                return FW_PORT_CAP_SPEED_10G;
        if (speed == 40000)
                return FW_PORT_CAP_SPEED_40G;
        return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        unsigned int cap;
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;
        u32 speed = ethtool_cmd_speed(cmd);

        if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
                return -EINVAL;

        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
                /*
                 * PHY offers a single speed.  See if that's what's
                 * being requested.
                 */
                if (cmd->autoneg == AUTONEG_DISABLE &&
                    (lc->supported & speed_to_caps(speed)))
                        return 0;
                return -EINVAL;
        }

        if (cmd->autoneg == AUTONEG_DISABLE) {
                cap = speed_to_caps(speed);

                if (!(lc->supported & cap) ||
                    (speed == 1000) ||
                    (speed == 10000) ||
                    (speed == 40000))
                        return -EINVAL;
                lc->requested_speed = cap;
                lc->advertising = 0;
        } else {
                cap = to_fw_linkcaps(cmd->advertising);
                if (!(lc->supported & cap))
                        return -EINVAL;
                lc->requested_speed = 0;
                lc->advertising = cap | FW_PORT_CAP_ANEG;
        }
        lc->autoneg = cmd->autoneg;

        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}
static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & FW_PORT_CAP_ANEG)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct sge *s = &pi->adapter->sge;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
        e->rx_jumbo_max_pending = 0;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
        e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
        e->rx_jumbo_pending = 0;
        e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < pi->nqsets; ++i) {
                s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
                s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
                s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
        }
        return 0;
}
static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
                               const struct sge_rspq *q)
{
        unsigned int idx = q->intr_params >> 1;

        return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
/**
 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count.  At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rspq_intr_params(struct sge_rspq *q,
                                unsigned int us, unsigned int cnt)
{
        struct adapter *adap = q->adap;

        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ(q->cntxt_id);
                        err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
                                            &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
        return 0;
}
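
/* Sketch of the intr_params encoding used above and decoded by qtimer_val()
 * (assuming QINTR_TIMER_IDX(x) expands to ((x) << 1) and QINTR_CNT_EN to 1,
 * consistent with the ">> 1" in qtimer_val()): a timer index of 3 with the
 * counter enabled gives (3 << 1) | 1 = 7, and 7 >> 1 recovers index 3.
 */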
/**
 * set_rx_intr_params - set a net device's RX interrupt hold-off parameters
 * @dev: the network device
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Set the RX interrupt hold-off parameters for a network device.
 */
static int set_rx_intr_params(struct net_device *dev,
                              unsigned int us, unsigned int cnt)
{
        int i, err;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

        for (i = 0; i < pi->nqsets; i++, q++) {
                err = set_rspq_intr_params(&q->rspq, us, cnt);
                if (err)
                        return err;
        }
        return 0;
}
static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
{
        int i;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

        for (i = 0; i < pi->nqsets; i++, q++)
                q->rspq.adaptive_rx = adaptive_rx;

        return 0;
}

static int get_adaptive_rx_setting(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

        return q->rspq.adaptive_rx;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
        return set_rx_intr_params(dev, c->rx_coalesce_usecs,
                                  c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;
        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

        c->rx_coalesce_usecs = qtimer_val(adap, rq);
        c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
                adap->sge.counter_val[rq->pktcnt_idx] : 0;
        c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
        return 0;
}
2799 * eeprom_ptov - translate a physical EEPROM address to virtual
2800 * @phys_addr: the physical EEPROM address
2801 * @fn: the PCI function number
2802 * @sz: size of function-specific area
2804 * Translate a physical EEPROM address to virtual. The first 1K is
2805 * accessed through virtual addresses starting at 31K, the rest is
2806 * accessed through virtual addresses starting at 0.
2808 * The mapping is as follows:
2809 * [0..1K) -> [31K..32K)
2810 * [1K..1K+A) -> [31K-A..31K)
2811 * [1K+A..ES) -> [0..ES-A-1K)
2813 * where A = @fn * @sz, and ES = EEPROM size.
2815 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2817 fn *= sz; /* A = @fn * @sz, per the mapping comment above */
2818 if (phys_addr < 1024)
2819 return phys_addr + (31 << 10);
2820 if (phys_addr < 1024 + fn)
2821 return 31744 - fn + phys_addr - 1024;
2822 if (phys_addr < EEPROMSIZE)
2823 return phys_addr - 1024 - fn;
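/* Worked example of the mapping (illustrative, with a hypothetical 1KB
 * per-function area): for fn = 2 and sz = 1024, A = 2048, so physical
 * address 0x100 maps to 31K + 0x100 = 0x7D00, physical 1024 (start of
 * the function-private area) maps to 31744 - 2048 + 0 = 29696, and
 * physical 1024 + 2048 maps to virtual 0.
 */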
2828 * The next two routines implement eeprom read/write from physical addresses.
2830 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2832 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2835 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2836 return vaddr < 0 ? vaddr : 0;
2839 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2841 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2844 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2845 return vaddr < 0 ? vaddr : 0;
2848 #define EEPROM_MAGIC 0x38E2F10C
2850 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2854 struct adapter *adapter = netdev2adap(dev);
2856 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2860 e->magic = EEPROM_MAGIC;
2861 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2862 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2865 memcpy(data, buf + e->offset, e->len);
2870 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2875 u32 aligned_offset, aligned_len, *p;
2876 struct adapter *adapter = netdev2adap(dev);
2878 if (eeprom->magic != EEPROM_MAGIC)
2881 aligned_offset = eeprom->offset & ~3;
2882 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2884 if (adapter->fn > 0) {
2885 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2887 if (aligned_offset < start ||
2888 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2892 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2894 * RMW possibly needed for first or last words.
2896 buf = kmalloc(aligned_len, GFP_KERNEL);
2899 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2900 if (!err && aligned_len > 4)
2901 err = eeprom_rd_phys(adapter,
2902 aligned_offset + aligned_len - 4,
2903 (u32 *)&buf[aligned_len - 4]);
2906 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2910 err = t4_seeprom_wp(adapter, false);
2914 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2915 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2916 aligned_offset += 4;
2920 err = t4_seeprom_wp(adapter, true);
2927 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2930 const struct firmware *fw;
2931 struct adapter *adap = netdev2adap(netdev);
2932 unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;
2934 ef->data[sizeof(ef->data) - 1] = '\0';
2935 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2939 /* If the adapter has been fully initialized then we'll go ahead and
2940 * try to get the firmware's cooperation in upgrading to the new
2941 * firmware image; otherwise we'll try to do the entire job from the
2942 * host ... and we always "force" the operation in this path.
2944 if (adap->flags & FULL_INIT_DONE)
2947 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
2948 release_firmware(fw);
2950 dev_info(adap->pdev_dev,
2951 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
2955 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2956 #define BCAST_CRC 0xa0ccc1a6
2958 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2960 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2961 wol->wolopts = netdev2adap(dev)->wol;
2962 memset(&wol->sopass, 0, sizeof(wol->sopass));
2965 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2968 struct port_info *pi = netdev_priv(dev);
2970 if (wol->wolopts & ~WOL_SUPPORTED)
2972 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2973 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2974 if (wol->wolopts & WAKE_BCAST) {
2975 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2978 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2979 ~6ULL, ~0ULL, BCAST_CRC, true);
2981 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2985 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2987 const struct port_info *pi = netdev_priv(dev);
2988 netdev_features_t changed = dev->features ^ features;
2991 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2994 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2996 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2997 if (unlikely(err))
2998 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
3002 static u32 get_rss_table_size(struct net_device *dev)
3004 const struct port_info *pi = netdev_priv(dev);
3006 return pi->rss_size;
3009 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
3011 const struct port_info *pi = netdev_priv(dev);
3012 unsigned int n = pi->rss_size;
3019 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
3022 struct port_info *pi = netdev_priv(dev);
3024 for (i = 0; i < pi->rss_size; i++)
3026 if (pi->adapter->flags & FULL_INIT_DONE)
3027 return write_rss(pi, pi->rss);
3031 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
3034 const struct port_info *pi = netdev_priv(dev);
3036 switch (info->cmd) {
3037 case ETHTOOL_GRXFH: {
3038 unsigned int v = pi->rss_mode;
3041 switch (info->flow_type) {
3043 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3044 info->data = RXH_IP_SRC | RXH_IP_DST |
3045 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3046 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3047 info->data = RXH_IP_SRC | RXH_IP_DST;
3050 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
3051 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3052 info->data = RXH_IP_SRC | RXH_IP_DST |
3053 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3054 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3055 info->data = RXH_IP_SRC | RXH_IP_DST;
3058 case AH_ESP_V4_FLOW:
3060 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3061 info->data = RXH_IP_SRC | RXH_IP_DST;
3064 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3065 info->data = RXH_IP_SRC | RXH_IP_DST |
3066 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3067 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3068 info->data = RXH_IP_SRC | RXH_IP_DST;
3071 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3072 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3073 info->data = RXH_IP_SRC | RXH_IP_DST |
3074 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3075 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3076 info->data = RXH_IP_SRC | RXH_IP_DST;
3079 case AH_ESP_V6_FLOW:
3081 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3082 info->data = RXH_IP_SRC | RXH_IP_DST;
3087 case ETHTOOL_GRXRINGS:
3088 info->data = pi->nqsets;
3094 static const struct ethtool_ops cxgb_ethtool_ops = {
3095 .get_settings = get_settings,
3096 .set_settings = set_settings,
3097 .get_drvinfo = get_drvinfo,
3098 .get_msglevel = get_msglevel,
3099 .set_msglevel = set_msglevel,
3100 .get_ringparam = get_sge_param,
3101 .set_ringparam = set_sge_param,
3102 .get_coalesce = get_coalesce,
3103 .set_coalesce = set_coalesce,
3104 .get_eeprom_len = get_eeprom_len,
3105 .get_eeprom = get_eeprom,
3106 .set_eeprom = set_eeprom,
3107 .get_pauseparam = get_pauseparam,
3108 .set_pauseparam = set_pauseparam,
3109 .get_link = ethtool_op_get_link,
3110 .get_strings = get_strings,
3111 .set_phys_id = identify_port,
3112 .nway_reset = restart_autoneg,
3113 .get_sset_count = get_sset_count,
3114 .get_ethtool_stats = get_stats,
3115 .get_regs_len = get_regs_len,
3116 .get_regs = get_regs,
3119 .get_rxnfc = get_rxnfc,
3120 .get_rxfh_indir_size = get_rss_table_size,
3121 .get_rxfh = get_rss_table,
3122 .set_rxfh = set_rss_table,
3123 .flash_device = set_flash,
3129 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3133 loff_t avail = file_inode(file)->i_size;
3134 unsigned int mem = (uintptr_t)file->private_data & 3;
3135 struct adapter *adap = file->private_data - mem;
3143 if (count > avail - pos)
3144 count = avail - pos;
3146 data = t4_alloc_mem(count);
3150 spin_lock(&adap->win0_lock);
3151 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
3152 spin_unlock(&adap->win0_lock);
3157 ret = copy_to_user(buf, data, count);
3163 *ppos = pos + count;
3167 static const struct file_operations mem_debugfs_fops = {
3168 .owner = THIS_MODULE,
3169 .open = simple_open,
3171 .llseek = default_llseek,
3174 static void add_debugfs_mem(struct adapter *adap, const char *name,
3175 unsigned int idx, unsigned int size_mb)
3179 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
3180 (void *)adap + idx, &mem_debugfs_fops);
3181 if (de && de->d_inode)
3182 de->d_inode->i_size = size_mb << 20;
3185 static int setup_debugfs(struct adapter *adap)
3190 if (IS_ERR_OR_NULL(adap->debugfs_root))
3193 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
3194 if (i & EDRAM0_ENABLE) {
3195 size = t4_read_reg(adap, MA_EDRAM0_BAR);
3196 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
3198 if (i & EDRAM1_ENABLE) {
3199 size = t4_read_reg(adap, MA_EDRAM1_BAR);
3200 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
3202 if (is_t4(adap->params.chip)) {
3203 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3204 if (i & EXT_MEM_ENABLE)
3205 add_debugfs_mem(adap, "mc", MEM_MC,
3206 EXT_MEM_SIZE_GET(size));
3208 if (i & EXT_MEM_ENABLE) {
3209 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3210 add_debugfs_mem(adap, "mc0", MEM_MC0,
3211 EXT_MEM_SIZE_GET(size));
3213 if (i & EXT_MEM1_ENABLE) {
3214 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
3215 add_debugfs_mem(adap, "mc1", MEM_MC1,
3216 EXT_MEM_SIZE_GET(size));
3220 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3226 * upper-layer driver support
3230 * Allocate an active-open TID and set it to the supplied value.
3232 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3236 spin_lock_bh(&t->atid_lock);
3238 union aopen_entry *p = t->afree;
3240 atid = (p - t->atid_tab) + t->atid_base;
3245 spin_unlock_bh(&t->atid_lock);
3248 EXPORT_SYMBOL(cxgb4_alloc_atid);
3251 * Release an active-open TID.
3253 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3255 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3257 spin_lock_bh(&t->atid_lock);
3261 spin_unlock_bh(&t->atid_lock);
3263 EXPORT_SYMBOL(cxgb4_free_atid);
3266 * Allocate a server TID and set it to the supplied value.
3268 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3272 spin_lock_bh(&t->stid_lock);
3273 if (family == PF_INET) {
3274 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3275 if (stid < t->nstids)
3276 __set_bit(stid, t->stid_bmap);
3280 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3285 t->stid_tab[stid].data = data;
3286 stid += t->stid_base;
3287 /* IPv6 requires a max of 520 bits, or 16 cells in the TCAM.
3288 * This is equivalent to 4 TIDs. With CLIP enabled it
3289 * needs 2 TIDs.
3291 if (family == PF_INET)
3294 t->stids_in_use += 4;
3296 spin_unlock_bh(&t->stid_lock);
3299 EXPORT_SYMBOL(cxgb4_alloc_stid);
3301 /* Allocate a server filter TID and set it to the supplied value.
3303 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3307 spin_lock_bh(&t->stid_lock);
3308 if (family == PF_INET) {
3309 stid = find_next_zero_bit(t->stid_bmap,
3310 t->nstids + t->nsftids, t->nstids);
3311 if (stid < (t->nstids + t->nsftids))
3312 __set_bit(stid, t->stid_bmap);
3319 t->stid_tab[stid].data = data;
3321 stid += t->sftid_base;
3324 spin_unlock_bh(&t->stid_lock);
3327 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3329 /* Release a server TID.
3331 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3333 /* Is it a server filter TID? */
3334 if (t->nsftids && (stid >= t->sftid_base)) {
3335 stid -= t->sftid_base;
3338 stid -= t->stid_base;
3341 spin_lock_bh(&t->stid_lock);
3342 if (family == PF_INET)
3343 __clear_bit(stid, t->stid_bmap);
3345 bitmap_release_region(t->stid_bmap, stid, 2);
3346 t->stid_tab[stid].data = NULL;
3347 if (family == PF_INET)
3350 t->stids_in_use -= 4;
3351 spin_unlock_bh(&t->stid_lock);
3353 EXPORT_SYMBOL(cxgb4_free_stid);
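/* Hypothetical caller pairing for the two routines above (a sketch,
 * not driver code):
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, data);
 *	if (stid >= 0) {
 *		... bring up the server on this TID ...
 *		cxgb4_free_stid(&adap->tids, stid, PF_INET);
 *	}
 *
 * A PF_INET6 caller follows the same pattern but consumes a region of
 * four TIDs, matching the TCAM accounting noted in cxgb4_alloc_stid().
 */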
3356 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3358 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3361 struct cpl_tid_release *req;
3363 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3364 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3365 INIT_TP_WR(req, tid);
3366 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3370 * Queue a TID release request and if necessary schedule a work queue to
3373 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3376 void **p = &t->tid_tab[tid];
3377 struct adapter *adap = container_of(t, struct adapter, tids);
3379 spin_lock_bh(&adap->tid_release_lock);
3380 *p = adap->tid_release_head;
3381 /* Low 2 bits encode the Tx channel number */
3382 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3383 if (!adap->tid_release_task_busy) {
3384 adap->tid_release_task_busy = true;
3385 queue_work(adap->workq, &adap->tid_release_task);
3387 spin_unlock_bh(&adap->tid_release_lock);
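/* The release list above relies on pointer tagging: tid_tab entries
 * are pointer-aligned, so the low 2 bits of each link are free to
 * carry the Tx channel. Encode/decode round trip, mirroring the code
 * here and in process_tid_release_list() below:
 *
 *	tagged = (void **)((uintptr_t)p | chan);	// chan in [0, 3]
 *	chan   = (uintptr_t)tagged & 3;
 *	p      = (void *)tagged - chan;
 */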
3391 * Process the list of pending TID release requests.
3393 static void process_tid_release_list(struct work_struct *work)
3395 struct sk_buff *skb;
3396 struct adapter *adap;
3398 adap = container_of(work, struct adapter, tid_release_task);
3400 spin_lock_bh(&adap->tid_release_lock);
3401 while (adap->tid_release_head) {
3402 void **p = adap->tid_release_head;
3403 unsigned int chan = (uintptr_t)p & 3;
3404 p = (void *)p - chan;
3406 adap->tid_release_head = *p;
3408 spin_unlock_bh(&adap->tid_release_lock);
3410 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3412 schedule_timeout_uninterruptible(1);
3414 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3415 t4_ofld_send(adap, skb);
3416 spin_lock_bh(&adap->tid_release_lock);
3418 adap->tid_release_task_busy = false;
3419 spin_unlock_bh(&adap->tid_release_lock);
3423 * Release a TID and inform HW. If we are unable to allocate the release
3424 * message we defer to a work queue.
3426 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3429 struct sk_buff *skb;
3430 struct adapter *adap = container_of(t, struct adapter, tids);
3432 old = t->tid_tab[tid];
3433 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3434 if (likely(skb)) {
3435 t->tid_tab[tid] = NULL;
3436 mk_tid_release(skb, chan, tid);
3437 t4_ofld_send(adap, skb);
3438 } else
3439 cxgb4_queue_tid_release(t, chan, tid);
3440 if (old)
3441 atomic_dec(&t->tids_in_use);
3443 EXPORT_SYMBOL(cxgb4_remove_tid);
3446 * Allocate and initialize the TID tables. Returns 0 on success.
3448 static int tid_init(struct tid_info *t)
3451 unsigned int stid_bmap_size;
3452 unsigned int natids = t->natids;
3453 struct adapter *adap = container_of(t, struct adapter, tids);
3455 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3456 size = t->ntids * sizeof(*t->tid_tab) +
3457 natids * sizeof(*t->atid_tab) +
3458 t->nstids * sizeof(*t->stid_tab) +
3459 t->nsftids * sizeof(*t->stid_tab) +
3460 stid_bmap_size * sizeof(long) +
3461 t->nftids * sizeof(*t->ftid_tab) +
3462 t->nsftids * sizeof(*t->ftid_tab);
3464 t->tid_tab = t4_alloc_mem(size);
3468 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3469 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3470 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3471 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3472 spin_lock_init(&t->stid_lock);
3473 spin_lock_init(&t->atid_lock);
3475 t->stids_in_use = 0;
3477 t->atids_in_use = 0;
3478 atomic_set(&t->tids_in_use, 0);
3480 /* Setup the free list for atid_tab and clear the stid bitmap. */
3483 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3484 t->afree = t->atid_tab;
3486 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3487 /* Reserve stid 0 for T4/T5 adapters */
3488 if (!t->stid_base &&
3489 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3490 __set_bit(0, t->stid_bmap);
3495 int cxgb4_clip_get(const struct net_device *dev,
3496 const struct in6_addr *lip)
3498 struct adapter *adap;
3499 struct fw_clip_cmd c;
3501 adap = netdev2adap(dev);
3502 memset(&c, 0, sizeof(c));
3503 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3504 FW_CMD_REQUEST | FW_CMD_WRITE);
3505 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3506 c.ip_hi = *(__be64 *)(lip->s6_addr);
3507 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3508 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3510 EXPORT_SYMBOL(cxgb4_clip_get);
3512 int cxgb4_clip_release(const struct net_device *dev,
3513 const struct in6_addr *lip)
3515 struct adapter *adap;
3516 struct fw_clip_cmd c;
3518 adap = netdev2adap(dev);
3519 memset(&c, 0, sizeof(c));
3520 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3521 FW_CMD_REQUEST | FW_CMD_READ);
3522 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3523 c.ip_hi = *(__be64 *)(lip->s6_addr);
3524 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3525 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3527 EXPORT_SYMBOL(cxgb4_clip_release);
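/* Hypothetical usage sketch for the CLIP pair above: a ULD that wants
 * to terminate connections on a local IPv6 address installs a CLIP
 * entry first and drops it when the address goes away:
 *
 *	err = cxgb4_clip_get(dev, &sin6->sin6_addr);
 *	...
 *	cxgb4_clip_release(dev, &sin6->sin6_addr);
 *
 * where sin6 is an assumed struct sockaddr_in6 owned by the caller.
 */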
3530 * cxgb4_create_server - create an IP server
3532 * @stid: the server TID
3533 * @sip: local IP address to bind server to
3534 * @sport: the server's TCP port
3535 * @queue: queue to direct messages from this server to
3537 * Create an IP server for the given port and address.
3538 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3540 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3541 __be32 sip, __be16 sport, __be16 vlan,
3545 struct sk_buff *skb;
3546 struct adapter *adap;
3547 struct cpl_pass_open_req *req;
3550 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3554 adap = netdev2adap(dev);
3555 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3557 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3558 req->local_port = sport;
3559 req->peer_port = htons(0);
3560 req->local_ip = sip;
3561 req->peer_ip = htonl(0);
3562 chan = rxq_to_chan(&adap->sge, queue);
3563 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3564 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3565 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3566 ret = t4_mgmt_tx(adap, skb);
3567 return net_xmit_eval(ret);
3569 EXPORT_SYMBOL(cxgb4_create_server);
3571 /* cxgb4_create_server6 - create an IPv6 server
3573 * @stid: the server TID
3574 * @sip: local IPv6 address to bind server to
3575 * @sport: the server's TCP port
3576 * @queue: queue to direct messages from this server to
3578 * Create an IPv6 server for the given port and address.
3579 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3581 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3582 const struct in6_addr *sip, __be16 sport,
3586 struct sk_buff *skb;
3587 struct adapter *adap;
3588 struct cpl_pass_open_req6 *req;
3591 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3595 adap = netdev2adap(dev);
3596 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3598 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3599 req->local_port = sport;
3600 req->peer_port = htons(0);
3601 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3602 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3603 req->peer_ip_hi = cpu_to_be64(0);
3604 req->peer_ip_lo = cpu_to_be64(0);
3605 chan = rxq_to_chan(&adap->sge, queue);
3606 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3607 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3608 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3609 ret = t4_mgmt_tx(adap, skb);
3610 return net_xmit_eval(ret);
3612 EXPORT_SYMBOL(cxgb4_create_server6);
3614 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3615 unsigned int queue, bool ipv6)
3617 struct sk_buff *skb;
3618 struct adapter *adap;
3619 struct cpl_close_listsvr_req *req;
3622 adap = netdev2adap(dev);
3624 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3628 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3630 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3631 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3632 LISTSVR_IPV6(0)) | QUEUENO(queue));
3633 ret = t4_mgmt_tx(adap, skb);
3634 return net_xmit_eval(ret);
3636 EXPORT_SYMBOL(cxgb4_remove_server);
3639 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3640 * @mtus: the HW MTU table
3641 * @mtu: the target MTU
3642 * @idx: index of selected entry in the MTU table
3644 * Returns the index and the value in the HW MTU table that is closest to
3645 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3646 * table, in which case that smallest available value is selected.
3648 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3653 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3659 EXPORT_SYMBOL(cxgb4_best_mtu);
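/* Worked example (illustrative values, not a real per-adapter table):
 * with mtus[] = { 576, 1280, 1492, 9000, ... }, a target mtu of 1400
 * selects 1280 (the largest entry not exceeding it), while mtu = 500,
 * smaller than every entry, selects the smallest value 576.
 */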
3662 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3663 * @mtus: the HW MTU table
3664 * @header_size: Header Size
3665 * @data_size_max: maximum Data Segment Size
3666 * @data_size_align: desired Data Segment Size Alignment (2^N)
3667 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3669 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3670 * MTU Table based solely on a Maximum MTU parameter, we break that
3671 * parameter up into a Header Size and Maximum Data Segment Size, and
3672 * provide a desired Data Segment Size Alignment. If we find an MTU in
3673 * the Hardware MTU Table which will result in a Data Segment Size with
3674 * the requested alignment _and_ that MTU isn't "too far" from the
3675 * closest MTU, then we'll return that rather than the closest MTU.
3677 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3678 unsigned short header_size,
3679 unsigned short data_size_max,
3680 unsigned short data_size_align,
3681 unsigned int *mtu_idxp)
3683 unsigned short max_mtu = header_size + data_size_max;
3684 unsigned short data_size_align_mask = data_size_align - 1;
3685 int mtu_idx, aligned_mtu_idx;
3687 /* Scan the MTU Table till we find an MTU which is larger than our
3688 * Maximum MTU or we reach the end of the table. Along the way,
3689 * record the last MTU found, if any, which will result in a Data
3690 * Segment Length matching the requested alignment.
3692 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3693 unsigned short data_size = mtus[mtu_idx] - header_size;
3695 /* If this MTU minus the Header Size would result in a
3696 * Data Segment Size of the desired alignment, remember it.
3698 if ((data_size & data_size_align_mask) == 0)
3699 aligned_mtu_idx = mtu_idx;
3701 /* If we're not at the end of the Hardware MTU Table and the
3702 * next element is larger than our Maximum MTU, drop out of
3705 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3709 /* If we fell out of the loop because we ran to the end of the table,
3710 * then we just have to use the last [largest] entry.
3712 if (mtu_idx == NMTUS)
3715 /* If we found an MTU which resulted in the requested Data Segment
3716 * Length alignment and that's "not far" from the largest MTU which is
3717 * less than or equal to the maximum MTU, then use that.
3719 if (aligned_mtu_idx >= 0 &&
3720 mtu_idx - aligned_mtu_idx <= 1)
3721 mtu_idx = aligned_mtu_idx;
3723 /* If the caller has passed in an MTU Index pointer, pass the
3724 * MTU Index back. Return the MTU value.
3727 *mtu_idxp = mtu_idx;
3728 return mtus[mtu_idx];
3730 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
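/* Worked example (illustrative values, not a real MTU table): with
 * header_size = 40 and data_size_align = 512, an MTU of 1064 yields a
 * data segment of 1024 (512-aligned) while an MTU of 1500 yields 1460
 * (unaligned). If both are <= max_mtu and 1500 is the closest entry,
 * the routine still returns 1064 provided its index is within one slot
 * of 1500's, per the mtu_idx - aligned_mtu_idx <= 1 test above.
 */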
3733 * cxgb4_port_chan - get the HW channel of a port
3734 * @dev: the net device for the port
3736 * Return the HW Tx channel of the given port.
3738 unsigned int cxgb4_port_chan(const struct net_device *dev)
3740 return netdev2pinfo(dev)->tx_chan;
3742 EXPORT_SYMBOL(cxgb4_port_chan);
3744 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3746 struct adapter *adap = netdev2adap(dev);
3747 u32 v1, v2, lp_count, hp_count;
3749 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3750 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3751 if (is_t4(adap->params.chip)) {
3752 lp_count = G_LP_COUNT(v1);
3753 hp_count = G_HP_COUNT(v1);
3755 lp_count = G_LP_COUNT_T5(v1);
3756 hp_count = G_HP_COUNT_T5(v2);
3758 return lpfifo ? lp_count : hp_count;
3760 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3763 * cxgb4_port_viid - get the VI id of a port
3764 * @dev: the net device for the port
3766 * Return the VI id of the given port.
3768 unsigned int cxgb4_port_viid(const struct net_device *dev)
3770 return netdev2pinfo(dev)->viid;
3772 EXPORT_SYMBOL(cxgb4_port_viid);
3775 * cxgb4_port_idx - get the index of a port
3776 * @dev: the net device for the port
3778 * Return the index of the given port.
3780 unsigned int cxgb4_port_idx(const struct net_device *dev)
3782 return netdev2pinfo(dev)->port_id;
3784 EXPORT_SYMBOL(cxgb4_port_idx);
3786 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3787 struct tp_tcp_stats *v6)
3789 struct adapter *adap = pci_get_drvdata(pdev);
3791 spin_lock(&adap->stats_lock);
3792 t4_tp_get_tcp_stats(adap, v4, v6);
3793 spin_unlock(&adap->stats_lock);
3795 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3797 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3798 const unsigned int *pgsz_order)
3800 struct adapter *adap = netdev2adap(dev);
3802 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3803 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3804 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3805 HPZ3(pgsz_order[3]));
3807 EXPORT_SYMBOL(cxgb4_iscsi_init);
3809 int cxgb4_flush_eq_cache(struct net_device *dev)
3811 struct adapter *adap = netdev2adap(dev);
3814 ret = t4_fwaddrspace_write(adap, adap->mbox,
3815 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3818 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3820 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3822 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3826 spin_lock(&adap->win0_lock);
3827 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3828 sizeof(indices), (__be32 *)&indices,
3830 spin_unlock(&adap->win0_lock);
3832 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3833 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3838 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3841 struct adapter *adap = netdev2adap(dev);
3842 u16 hw_pidx, hw_cidx;
3845 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3849 if (pidx != hw_pidx) {
3852 if (pidx >= hw_pidx)
3853 delta = pidx - hw_pidx;
3855 delta = size - hw_pidx + pidx;
3857 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3858 QID(qid) | PIDX(delta));
3863 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
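/* Example of the wrap-around delta computed above: for a queue of
 * size 1024 with hw_pidx = 1020 and a caller pidx of 4, the producer
 * index has wrapped, so the doorbell increment is
 * size - hw_pidx + pidx = 1024 - 1020 + 4 = 8 descriptors.
 */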
3865 void cxgb4_disable_db_coalescing(struct net_device *dev)
3867 struct adapter *adap;
3869 adap = netdev2adap(dev);
3870 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3873 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3875 void cxgb4_enable_db_coalescing(struct net_device *dev)
3877 struct adapter *adap;
3879 adap = netdev2adap(dev);
3880 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3882 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3884 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3886 struct adapter *adap;
3887 u32 offset, memtype, memaddr;
3888 u32 edc0_size, edc1_size, mc0_size, mc1_size;
3889 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3892 adap = netdev2adap(dev);
3894 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3896 /* Figure out where the offset lands in the Memory Type/Address scheme.
3897 * This code assumes that the memory is laid out starting at offset 0
3898 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3899 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3900 * MC0, and some have both MC0 and MC1.
3902 edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
3903 edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
3904 mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
3906 edc0_end = edc0_size;
3907 edc1_end = edc0_end + edc1_size;
3908 mc0_end = edc1_end + mc0_size;
3910 if (offset < edc0_end) {
3913 } else if (offset < edc1_end) {
3915 memaddr = offset - edc0_end;
3917 if (offset < mc0_end) {
3919 memaddr = offset - edc1_end;
3920 } else if (is_t4(adap->params.chip)) {
3921 /* T4 only has a single memory channel */
3924 mc1_size = EXT_MEM_SIZE_GET(
3926 MA_EXT_MEMORY1_BAR)) << 20;
3927 mc1_end = mc0_end + mc1_size;
3928 if (offset < mc1_end) {
3930 memaddr = offset - mc0_end;
3932 /* offset beyond the end of any memory */
3938 spin_lock(&adap->win0_lock);
3939 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3940 spin_unlock(&adap->win0_lock);
3944 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3948 EXPORT_SYMBOL(cxgb4_read_tpte);
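/* Worked example of the flat offset -> memory channel mapping above
 * (illustrative sizes, not real card geometry): with 256MB in each of
 * EDC0 and EDC1 and 4GB in MC0, an offset of 600MB is past edc1_end
 * (512MB), so it resolves to MC0 at memaddr = 600MB - 512MB = 88MB.
 */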
3950 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3953 struct adapter *adap;
3955 adap = netdev2adap(dev);
3956 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3957 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3959 return ((u64)hi << 32) | (u64)lo;
3961 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3963 static struct pci_driver cxgb4_driver;
3965 static void check_neigh_update(struct neighbour *neigh)
3967 const struct device *parent;
3968 const struct net_device *netdev = neigh->dev;
3970 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3971 netdev = vlan_dev_real_dev(netdev);
3972 parent = netdev->dev.parent;
3973 if (parent && parent->driver == &cxgb4_driver.driver)
3974 t4_l2t_update(dev_get_drvdata(parent), neigh);
3977 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3981 case NETEVENT_NEIGH_UPDATE:
3982 check_neigh_update(data);
3984 case NETEVENT_REDIRECT:
3991 static bool netevent_registered;
3992 static struct notifier_block cxgb4_netevent_nb = {
3993 .notifier_call = netevent_cb
3996 static void drain_db_fifo(struct adapter *adap, int usecs)
3998 u32 v1, v2, lp_count, hp_count;
4001 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
4002 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
4003 if (is_t4(adap->params.chip)) {
4004 lp_count = G_LP_COUNT(v1);
4005 hp_count = G_HP_COUNT(v1);
4007 lp_count = G_LP_COUNT_T5(v1);
4008 hp_count = G_HP_COUNT_T5(v2);
4011 if (lp_count == 0 && hp_count == 0)
4013 set_current_state(TASK_UNINTERRUPTIBLE);
4014 schedule_timeout(usecs_to_jiffies(usecs));
4018 static void disable_txq_db(struct sge_txq *q)
4020 unsigned long flags;
4022 spin_lock_irqsave(&q->db_lock, flags);
4024 spin_unlock_irqrestore(&q->db_lock, flags);
4027 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
4029 spin_lock_irq(&q->db_lock);
4030 if (q->db_pidx_inc) {
4031 /* Make sure that all writes to the TX descriptors
4032 * are committed before we tell HW about them.
4035 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4036 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
4040 spin_unlock_irq(&q->db_lock);
4043 static void disable_dbs(struct adapter *adap)
4047 for_each_ethrxq(&adap->sge, i)
4048 disable_txq_db(&adap->sge.ethtxq[i].q);
4049 for_each_ofldrxq(&adap->sge, i)
4050 disable_txq_db(&adap->sge.ofldtxq[i].q);
4051 for_each_port(adap, i)
4052 disable_txq_db(&adap->sge.ctrlq[i].q);
4055 static void enable_dbs(struct adapter *adap)
4059 for_each_ethrxq(&adap->sge, i)
4060 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
4061 for_each_ofldrxq(&adap->sge, i)
4062 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
4063 for_each_port(adap, i)
4064 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
4067 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
4069 if (adap->uld_handle[CXGB4_ULD_RDMA])
4070 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
4074 static void process_db_full(struct work_struct *work)
4076 struct adapter *adap;
4078 adap = container_of(work, struct adapter, db_full_task);
4080 drain_db_fifo(adap, dbfifo_drain_delay);
4082 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4083 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4084 DBFIFO_HP_INT | DBFIFO_LP_INT,
4085 DBFIFO_HP_INT | DBFIFO_LP_INT);
4088 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
4090 u16 hw_pidx, hw_cidx;
4093 spin_lock_irq(&q->db_lock);
4094 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
4097 if (q->db_pidx != hw_pidx) {
4100 if (q->db_pidx >= hw_pidx)
4101 delta = q->db_pidx - hw_pidx;
4103 delta = q->size - hw_pidx + q->db_pidx;
4105 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4106 QID(q->cntxt_id) | PIDX(delta));
4111 spin_unlock_irq(&q->db_lock);
4113 CH_WARN(adap, "DB drop recovery failed.\n");
4115 static void recover_all_queues(struct adapter *adap)
4119 for_each_ethrxq(&adap->sge, i)
4120 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
4121 for_each_ofldrxq(&adap->sge, i)
4122 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
4123 for_each_port(adap, i)
4124 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
4127 static void process_db_drop(struct work_struct *work)
4129 struct adapter *adap;
4131 adap = container_of(work, struct adapter, db_drop_task);
4133 if (is_t4(adap->params.chip)) {
4134 drain_db_fifo(adap, dbfifo_drain_delay);
4135 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
4136 drain_db_fifo(adap, dbfifo_drain_delay);
4137 recover_all_queues(adap);
4138 drain_db_fifo(adap, dbfifo_drain_delay);
4140 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4142 u32 dropped_db = t4_read_reg(adap, 0x010ac);
4143 u16 qid = (dropped_db >> 15) & 0x1ffff;
4144 u16 pidx_inc = dropped_db & 0x1fff;
4146 unsigned short udb_density;
4147 unsigned long qpshift;
4151 dev_warn(adap->pdev_dev,
4152 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4154 (dropped_db >> 14) & 1,
4155 (dropped_db >> 13) & 1,
4158 drain_db_fifo(adap, 1);
4160 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4161 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4162 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4163 qpshift = PAGE_SHIFT - ilog2(udb_density);
4164 udb = qid << qpshift;
4165 udb &= PAGE_MASK;
4166 page = udb / PAGE_SIZE;
4167 udb += (qid - (page * udb_density)) * 128;
4169 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
4171 /* Re-enable BAR2 WC */
4172 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4175 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
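/* Worked example of the T5 doorbell address math above (illustrative
 * values): with 4KB pages and udb_density = 16, qpshift = 12 - 4 = 8,
 * so qid 70 gives udb = 70 << 8 = 0x4600, masked to the page base
 * 0x4000 (page 4), plus (70 - 4 * 16) * 128 = 0x300, i.e. 0x4300; the
 * +8 in the writel() then selects the doorbell register within that
 * queue's 128-byte doorbell block.
 */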
4178 void t4_db_full(struct adapter *adap)
4180 if (is_t4(adap->params.chip)) {
4182 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4183 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4184 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4185 queue_work(adap->workq, &adap->db_full_task);
4189 void t4_db_dropped(struct adapter *adap)
4191 if (is_t4(adap->params.chip)) {
4193 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4195 queue_work(adap->workq, &adap->db_drop_task);
4198 static void uld_attach(struct adapter *adap, unsigned int uld)
4201 struct cxgb4_lld_info lli;
4204 lli.pdev = adap->pdev;
4206 lli.l2t = adap->l2t;
4207 lli.tids = &adap->tids;
4208 lli.ports = adap->port;
4209 lli.vr = &adap->vres;
4210 lli.mtus = adap->params.mtus;
4211 if (uld == CXGB4_ULD_RDMA) {
4212 lli.rxq_ids = adap->sge.rdma_rxq;
4213 lli.ciq_ids = adap->sge.rdma_ciq;
4214 lli.nrxq = adap->sge.rdmaqs;
4215 lli.nciq = adap->sge.rdmaciqs;
4216 } else if (uld == CXGB4_ULD_ISCSI) {
4217 lli.rxq_ids = adap->sge.ofld_rxq;
4218 lli.nrxq = adap->sge.ofldqsets;
4220 lli.ntxq = adap->sge.ofldqsets;
4221 lli.nchan = adap->params.nports;
4222 lli.nports = adap->params.nports;
4223 lli.wr_cred = adap->params.ofldq_wr_cred;
4224 lli.adapter_type = adap->params.chip;
4225 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4226 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4227 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
4228 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4230 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
4231 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4233 lli.filt_mode = adap->params.tp.vlan_pri_map;
4234 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4235 for (i = 0; i < NCHAN; i++)
4237 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4238 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4239 lli.fw_vers = adap->params.fw_vers;
4240 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4241 lli.sge_ingpadboundary = adap->sge.fl_align;
4242 lli.sge_egrstatuspagesize = adap->sge.stat_len;
4243 lli.sge_pktshift = adap->sge.pktshift;
4244 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4245 lli.max_ordird_qp = adap->params.max_ordird_qp;
4246 lli.max_ird_adapter = adap->params.max_ird_adapter;
4247 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4249 handle = ulds[uld].add(&lli);
4250 if (IS_ERR(handle)) {
4251 dev_warn(adap->pdev_dev,
4252 "could not attach to the %s driver, error %ld\n",
4253 uld_str[uld], PTR_ERR(handle));
4257 adap->uld_handle[uld] = handle;
4259 if (!netevent_registered) {
4260 register_netevent_notifier(&cxgb4_netevent_nb);
4261 netevent_registered = true;
4264 if (adap->flags & FULL_INIT_DONE)
4265 ulds[uld].state_change(handle, CXGB4_STATE_UP);
4268 static void attach_ulds(struct adapter *adap)
4272 spin_lock(&adap_rcu_lock);
4273 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4274 spin_unlock(&adap_rcu_lock);
4276 mutex_lock(&uld_mutex);
4277 list_add_tail(&adap->list_node, &adapter_list);
4278 for (i = 0; i < CXGB4_ULD_MAX; i++)
4280 uld_attach(adap, i);
4281 mutex_unlock(&uld_mutex);
4284 static void detach_ulds(struct adapter *adap)
4288 mutex_lock(&uld_mutex);
4289 list_del(&adap->list_node);
4290 for (i = 0; i < CXGB4_ULD_MAX; i++)
4291 if (adap->uld_handle[i]) {
4292 ulds[i].state_change(adap->uld_handle[i],
4293 CXGB4_STATE_DETACH);
4294 adap->uld_handle[i] = NULL;
4296 if (netevent_registered && list_empty(&adapter_list)) {
4297 unregister_netevent_notifier(&cxgb4_netevent_nb);
4298 netevent_registered = false;
4300 mutex_unlock(&uld_mutex);
4302 spin_lock(&adap_rcu_lock);
4303 list_del_rcu(&adap->rcu_node);
4304 spin_unlock(&adap_rcu_lock);
4307 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4311 mutex_lock(&uld_mutex);
4312 for (i = 0; i < CXGB4_ULD_MAX; i++)
4313 if (adap->uld_handle[i])
4314 ulds[i].state_change(adap->uld_handle[i], new_state);
4315 mutex_unlock(&uld_mutex);
4319 * cxgb4_register_uld - register an upper-layer driver
4320 * @type: the ULD type
4321 * @p: the ULD methods
4323 * Registers an upper-layer driver with this driver and notifies the ULD
4324 * about any presently available devices that support its type. Returns
4325 * %-EBUSY if a ULD of the same type is already registered.
4327 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4330 struct adapter *adap;
4332 if (type >= CXGB4_ULD_MAX)
4334 mutex_lock(&uld_mutex);
4335 if (ulds[type].add) {
4340 list_for_each_entry(adap, &adapter_list, list_node)
4341 uld_attach(adap, type);
4342 out: mutex_unlock(&uld_mutex);
4345 EXPORT_SYMBOL(cxgb4_register_uld);
4348 * cxgb4_unregister_uld - unregister an upper-layer driver
4349 * @type: the ULD type
4351 * Unregisters an existing upper-layer driver.
4353 int cxgb4_unregister_uld(enum cxgb4_uld type)
4355 struct adapter *adap;
4357 if (type >= CXGB4_ULD_MAX)
4359 mutex_lock(&uld_mutex);
4360 list_for_each_entry(adap, &adapter_list, list_node)
4361 adap->uld_handle[type] = NULL;
4362 ulds[type].add = NULL;
4363 mutex_unlock(&uld_mutex);
4366 EXPORT_SYMBOL(cxgb4_unregister_uld);
4368 /* Check if the netdev on which the event occurred belongs to us or not.
4369 * Return success (true) if it belongs, otherwise failure (false).
4370 * Called with rcu_read_lock() held.
4372 #if IS_ENABLED(CONFIG_IPV6)
4373 static bool cxgb4_netdev(const struct net_device *netdev)
4375 struct adapter *adap;
4378 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4379 for (i = 0; i < MAX_NPORTS; i++)
4380 if (adap->port[i] == netdev)
4385 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4386 unsigned long event)
4388 int ret = NOTIFY_DONE;
4391 if (cxgb4_netdev(event_dev)) {
4394 ret = cxgb4_clip_get(event_dev,
4395 (const struct in6_addr *)ifa->addr.s6_addr);
4403 cxgb4_clip_release(event_dev,
4404 (const struct in6_addr *)ifa->addr.s6_addr);
4415 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4416 unsigned long event, void *data)
4418 struct inet6_ifaddr *ifa = data;
4419 struct net_device *event_dev;
4420 int ret = NOTIFY_DONE;
4421 struct bonding *bond = netdev_priv(ifa->idev->dev);
4422 struct list_head *iter;
4423 struct slave *slave;
4424 struct pci_dev *first_pdev = NULL;
4426 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4427 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4428 ret = clip_add(event_dev, ifa, event);
4429 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4430 /* It is possible that two different adapters are bonded in one
4431 * bond. We need to find each such adapter and add the CLIP entry
4432 * to each of them only once.
4434 bond_for_each_slave(bond, slave, iter) {
4436 ret = clip_add(slave->dev, ifa, event);
4437 /* If clip_add is success then only initialize
4438 * first_pdev since it means it is our device
4440 if (ret == NOTIFY_OK)
4441 first_pdev = to_pci_dev(
4442 slave->dev->dev.parent);
4443 } else if (first_pdev !=
4444 to_pci_dev(slave->dev->dev.parent))
4445 ret = clip_add(slave->dev, ifa, event);
4448 ret = clip_add(ifa->idev->dev, ifa, event);
4453 static struct notifier_block cxgb4_inet6addr_notifier = {
4454 .notifier_call = cxgb4_inet6addr_handler
4457 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4458 * a physical device.
4459 * The physical device reference is needed to send the actual CLIP command.
4461 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4463 struct inet6_dev *idev = NULL;
4464 struct inet6_ifaddr *ifa;
4467 idev = __in6_dev_get(root_dev);
4471 read_lock_bh(&idev->lock);
4472 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4473 ret = cxgb4_clip_get(dev,
4474 (const struct in6_addr *)ifa->addr.s6_addr);
4478 read_unlock_bh(&idev->lock);
4483 static int update_root_dev_clip(struct net_device *dev)
4485 struct net_device *root_dev = NULL;
4488 /* First populate the real net device's IPv6 addresses */
4489 ret = update_dev_clip(dev, dev);
4493 /* Parse all bond and vlan devices layered on top of the physical dev */
4494 root_dev = netdev_master_upper_dev_get_rcu(dev);
4496 ret = update_dev_clip(root_dev, dev);
4501 for (i = 0; i < VLAN_N_VID; i++) {
4502 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4506 ret = update_dev_clip(root_dev, dev);
4513 static void update_clip(const struct adapter *adap)
4516 struct net_device *dev;
4521 for (i = 0; i < MAX_NPORTS; i++) {
4522 dev = adap->port[i];
4526 ret = update_root_dev_clip(dev);
4533 #endif /* IS_ENABLED(CONFIG_IPV6) */
4536 * cxgb_up - enable the adapter
4537 * @adap: adapter being enabled
4539 * Called when the first port is enabled, this function performs the
4540 * actions necessary to make an adapter operational, such as completing
4541 * the initialization of HW modules, and enabling interrupts.
4543 * Must be called with the rtnl lock held.
4545 static int cxgb_up(struct adapter *adap)
4549 err = setup_sge_queues(adap);
4552 err = setup_rss(adap);
4556 if (adap->flags & USING_MSIX) {
4557 name_msix_vecs(adap);
4558 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4559 adap->msix_info[0].desc, adap);
4563 err = request_msix_queue_irqs(adap);
4565 free_irq(adap->msix_info[0].vec, adap);
4569 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4570 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4571 adap->port[0]->name, adap);
4577 t4_intr_enable(adap);
4578 adap->flags |= FULL_INIT_DONE;
4579 notify_ulds(adap, CXGB4_STATE_UP);
4580 #if IS_ENABLED(CONFIG_IPV6)
4586 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4588 t4_free_sge_resources(adap);
4592 static void cxgb_down(struct adapter *adapter)
4594 t4_intr_disable(adapter);
4595 cancel_work_sync(&adapter->tid_release_task);
4596 cancel_work_sync(&adapter->db_full_task);
4597 cancel_work_sync(&adapter->db_drop_task);
4598 adapter->tid_release_task_busy = false;
4599 adapter->tid_release_head = NULL;
4601 if (adapter->flags & USING_MSIX) {
4602 free_msix_queue_irqs(adapter);
4603 free_irq(adapter->msix_info[0].vec, adapter);
4605 free_irq(adapter->pdev->irq, adapter);
4606 quiesce_rx(adapter);
4607 t4_sge_stop(adapter);
4608 t4_free_sge_resources(adapter);
4609 adapter->flags &= ~FULL_INIT_DONE;
4613 * net_device operations
4615 static int cxgb_open(struct net_device *dev)
4618 struct port_info *pi = netdev_priv(dev);
4619 struct adapter *adapter = pi->adapter;
4621 netif_carrier_off(dev);
4623 if (!(adapter->flags & FULL_INIT_DONE)) {
4624 err = cxgb_up(adapter);
4629 err = link_start(dev);
4631 netif_tx_start_all_queues(dev);
4635 static int cxgb_close(struct net_device *dev)
4637 struct port_info *pi = netdev_priv(dev);
4638 struct adapter *adapter = pi->adapter;
4640 netif_tx_stop_all_queues(dev);
4641 netif_carrier_off(dev);
4642 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4645 /* Return an error number if the indicated filter isn't writable ...
4647 static int writable_filter(struct filter_entry *f)
4657 /* Delete the filter at the specified index (if valid), checking for all
4658 * the common problems with doing this, like the filter being locked or
4659 * currently pending in another operation, etc.
4661 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4663 struct filter_entry *f;
4666 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4669 f = &adapter->tids.ftid_tab[fidx];
4670 ret = writable_filter(f);
4674 return del_filter_wr(adapter, fidx);
4679 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4680 __be32 sip, __be16 sport, __be16 vlan,
4681 unsigned int queue, unsigned char port, unsigned char mask)
4684 struct filter_entry *f;
4685 struct adapter *adap;
4689 adap = netdev2adap(dev);
4691 /* Adjust stid to correct filter index */
4692 stid -= adap->tids.sftid_base;
4693 stid += adap->tids.nftids;
4695 /* Check to make sure the filter requested is writable ...
4697 f = &adap->tids.ftid_tab[stid];
4698 ret = writable_filter(f);
4702 /* Clear out any old resources being used by the filter before
4703 * we start constructing the new filter.
4706 clear_filter(adap, f);
4708 /* Clear out filter specifications */
4709 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4710 f->fs.val.lport = cpu_to_be16(sport);
4711 f->fs.mask.lport = ~0;
4713 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4714 for (i = 0; i < 4; i++) {
4715 f->fs.val.lip[i] = val[i];
4716 f->fs.mask.lip[i] = ~0;
4718 if (adap->params.tp.vlan_pri_map & F_PORT) {
4719 f->fs.val.iport = port;
4720 f->fs.mask.iport = mask;
4724 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4725 f->fs.val.proto = IPPROTO_TCP;
4726 f->fs.mask.proto = ~0;
4731 /* Mark filter as locked */
4735 ret = set_filter_wr(adap, stid);
4736 if (ret) {
4737 clear_filter(adap, f);
4743 EXPORT_SYMBOL(cxgb4_create_server_filter);
4745 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4746 unsigned int queue, bool ipv6)
4749 struct filter_entry *f;
4750 struct adapter *adap;
4752 adap = netdev2adap(dev);
4754 /* Adjust stid to correct filter index */
4755 stid -= adap->tids.sftid_base;
4756 stid += adap->tids.nftids;
4758 f = &adap->tids.ftid_tab[stid];
4759 /* Unlock the filter */
4762 ret = delete_filter(adap, stid);
4768 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4770 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4771 struct rtnl_link_stats64 *ns)
4773 struct port_stats stats;
4774 struct port_info *p = netdev_priv(dev);
4775 struct adapter *adapter = p->adapter;
4777 /* Block retrieving statistics during EEH error
4778 * recovery. Otherwise, the recovery might fail
4779 * and the PCI device will be removed permanently.
4781 spin_lock(&adapter->stats_lock);
4782 if (!netif_device_present(dev)) {
4783 spin_unlock(&adapter->stats_lock);
4786 t4_get_port_stats(adapter, p->tx_chan, &stats);
4787 spin_unlock(&adapter->stats_lock);
4789 ns->tx_bytes = stats.tx_octets;
4790 ns->tx_packets = stats.tx_frames;
4791 ns->rx_bytes = stats.rx_octets;
4792 ns->rx_packets = stats.rx_frames;
4793 ns->multicast = stats.rx_mcast_frames;
4795 /* detailed rx_errors */
4796 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4798 ns->rx_over_errors = 0;
4799 ns->rx_crc_errors = stats.rx_fcs_err;
4800 ns->rx_frame_errors = stats.rx_symbol_err;
4801 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4802 stats.rx_ovflow2 + stats.rx_ovflow3 +
4803 stats.rx_trunc0 + stats.rx_trunc1 +
4804 stats.rx_trunc2 + stats.rx_trunc3;
4805 ns->rx_missed_errors = 0;
4807 /* detailed tx_errors */
4808 ns->tx_aborted_errors = 0;
4809 ns->tx_carrier_errors = 0;
4810 ns->tx_fifo_errors = 0;
4811 ns->tx_heartbeat_errors = 0;
4812 ns->tx_window_errors = 0;
4814 ns->tx_errors = stats.tx_error_frames;
4815 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4816 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4820 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4823 int ret = 0, prtad, devad;
4824 struct port_info *pi = netdev_priv(dev);
4825 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4829 if (pi->mdio_addr < 0)
4831 data->phy_id = pi->mdio_addr;
4835 if (mdio_phy_id_is_c45(data->phy_id)) {
4836 prtad = mdio_phy_id_prtad(data->phy_id);
4837 devad = mdio_phy_id_devad(data->phy_id);
4838 } else if (data->phy_id < 32) {
4839 prtad = data->phy_id;
4841 data->reg_num &= 0x1f;
4845 mbox = pi->adapter->fn;
4846 if (cmd == SIOCGMIIREG)
4847 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4848 data->reg_num, &data->val_out);
4850 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4851 data->reg_num, data->val_in);
4859 static void cxgb_set_rxmode(struct net_device *dev)
4861 /* unfortunately we can't return errors to the stack */
4862 set_rxmode(dev, -1, false);
4865 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4868 struct port_info *pi = netdev_priv(dev);
4870 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4872 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4879 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4882 struct sockaddr *addr = p;
4883 struct port_info *pi = netdev_priv(dev);
4885 if (!is_valid_ether_addr(addr->sa_data))
4886 return -EADDRNOTAVAIL;
4888 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4889 pi->xact_addr_filt, addr->sa_data, true, true);
4893 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4894 pi->xact_addr_filt = ret;
4898 #ifdef CONFIG_NET_POLL_CONTROLLER
4899 static void cxgb_netpoll(struct net_device *dev)
4901 struct port_info *pi = netdev_priv(dev);
4902 struct adapter *adap = pi->adapter;
4904 if (adap->flags & USING_MSIX) {
4906 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4908 for (i = pi->nqsets; i; i--, rx++)
4909 t4_sge_intr_msix(0, &rx->rspq);
4911 t4_intr_handler(adap)(0, adap);
4915 static const struct net_device_ops cxgb4_netdev_ops = {
4916 .ndo_open = cxgb_open,
4917 .ndo_stop = cxgb_close,
4918 .ndo_start_xmit = t4_eth_xmit,
4919 .ndo_select_queue = cxgb_select_queue,
4920 .ndo_get_stats64 = cxgb_get_stats,
4921 .ndo_set_rx_mode = cxgb_set_rxmode,
4922 .ndo_set_mac_address = cxgb_set_mac_addr,
4923 .ndo_set_features = cxgb_set_features,
4924 .ndo_validate_addr = eth_validate_addr,
4925 .ndo_do_ioctl = cxgb_ioctl,
4926 .ndo_change_mtu = cxgb_change_mtu,
4927 #ifdef CONFIG_NET_POLL_CONTROLLER
4928 .ndo_poll_controller = cxgb_netpoll,
4932 void t4_fatal_err(struct adapter *adap)
4934 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4935 t4_intr_disable(adap);
4936 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4939 /* Return the specified PCI-E Configuration Space register from our Physical
4940 * Function. We try first via a Firmware LDST Command since we prefer to let
4941 * the firmware own all of these registers, but if that fails we go for it
4942 * directly ourselves.
4944 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4946 struct fw_ldst_cmd ldst_cmd;
4950 /* Construct and send the Firmware LDST Command to retrieve the
4951 * specified PCI-E Configuration Space register.
4953 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4954 ldst_cmd.op_to_addrspace =
4955 htonl(FW_CMD_OP(FW_LDST_CMD) |
4958 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
4959 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4960 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
4961 ldst_cmd.u.pcie.ctrl_to_fn =
4962 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
4963 ldst_cmd.u.pcie.r = reg;
4964 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4967 /* If the LDST Command succeeded, extract the returned register
4968 * value. Otherwise read it directly ourselves.
4971 val = ntohl(ldst_cmd.u.pcie.data[0]);
4973 t4_hw_pci_read_cfg4(adap, reg, &val);
4978 static void setup_memwin(struct adapter *adap)
4980 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
4982 if (is_t4(adap->params.chip)) {
4985 /* Truncation intentional: we only read the bottom 32-bits of
4986 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4987 * mechanism to read BAR0 instead of using
4988 * pci_resource_start() because we could be operating from
4989 * within a Virtual Machine which is trapping our accesses to
4990 * our Configuration Space and we need to set up the PCI-E
4991 * Memory Window decoders with the actual addresses which will
4992 * be coming across the PCI-E link.
4994 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4995 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4996 adap->t4_bar0 = bar0;
4998 mem_win0_base = bar0 + MEMWIN0_BASE;
4999 mem_win1_base = bar0 + MEMWIN1_BASE;
5000 mem_win2_base = bar0 + MEMWIN2_BASE;
5001 mem_win2_aperture = MEMWIN2_APERTURE;
5003 /* For T5, only relative offset inside the PCIe BAR is passed */
5004 mem_win0_base = MEMWIN0_BASE;
5005 mem_win1_base = MEMWIN1_BASE;
5006 mem_win2_base = MEMWIN2_BASE_T5;
5007 mem_win2_aperture = MEMWIN2_APERTURE_T5;
5009 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
5010 mem_win0_base | BIR(0) |
5011 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
5012 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
5013 mem_win1_base | BIR(0) |
5014 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
5015 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
5016 mem_win2_base | BIR(0) |
5017 WINDOW(ilog2(mem_win2_aperture) - 10));
5018 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
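/* The WINDOW() field in the writes above encodes each aperture as a
 * power of two in KB units, i.e. WINDOW(ilog2(aperture_bytes) - 10);
 * for example, a 64KB aperture is encoded as ilog2(65536) - 10 = 6.
 */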
5021 static void setup_memwin_rdma(struct adapter *adap)
5023 if (adap->vres.ocq.size) {
5027 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
5028 start &= PCI_BASE_ADDRESS_MEM_MASK;
5029 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
5030 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
5032 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
5033 start | BIR(1) | WINDOW(ilog2(sz_kb)));
5035 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
5036 adap->vres.ocq.start);
5038 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
5042 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
5047 /* get device capabilities */
5048 memset(c, 0, sizeof(*c));
5049 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5050 FW_CMD_REQUEST | FW_CMD_READ);
5051 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
5052 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
5056 /* select capabilities we'll be using */
5057 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5059 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5061 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5062 } else if (vf_acls) {
5063 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
5066 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5067 FW_CMD_REQUEST | FW_CMD_WRITE);
5068 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
5072 ret = t4_config_glbl_rss(adap, adap->fn,
5073 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5074 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5075 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
5079 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
5080 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
5086 /* tweak some settings */
5087 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
5088 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
5089 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
5090 v = t4_read_reg(adap, TP_PIO_DATA);
5091 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
5093 /* first 4 Tx modulation queues point to consecutive Tx channels */
5094 adap->params.tp.tx_modq_map = 0xE4;
5095 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
5096 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
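	/* Decoding the 0xE4 above (illustrative): read as four 2-bit fields
	 * it is 0b11100100, i.e. modulation queue 0 -> channel 0, queue 1 ->
	 * channel 1, queue 2 -> channel 2 and queue 3 -> channel 3.
	 */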
5098 /* associate each Tx modulation queue with consecutive Tx channels */
5100 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5101 &v, 1, A_TP_TX_SCHED_HDR);
5102 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5103 &v, 1, A_TP_TX_SCHED_FIFO);
5104 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5105 &v, 1, A_TP_TX_SCHED_PCMD);
5107 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
5108 if (is_offload(adap)) {
5109 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
5110 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5111 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5112 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5113 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5114 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
5115 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5116 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5117 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5118 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5121 /* get basic stuff going */
5122 return t4_early_init(adap, adap->fn);
5126 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
5128 #define MAX_ATIDS 8192U
5131 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5133 * If the firmware we're dealing with has Configuration File support, then
5134 * we use that to perform all configuration.
5138 * Tweak configuration based on module parameters, etc. Most of these have
5139 * defaults assigned to them by Firmware Configuration Files (if we're using
5140 * them) but need to be explicitly set if we're using hard-coded
5141 * initialization. But even in the case of using Firmware Configuration
5142 * Files, we'd like to expose the ability to change these via module
5143 * parameters so these are essentially common tweaks/settings for
5144 * Configuration Files and hard-coded initialization ...
5146 static int adap_init0_tweaks(struct adapter *adapter)
5149 * Fix up various Host-Dependent Parameters like Page Size, Cache
5150 * Line Size, etc. The firmware default is for a 4KB Page Size and
5151 * 64B Cache Line Size ...
5153 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
5156 * Process module parameters which affect early initialization.
5158 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
5159 dev_err(&adapter->pdev->dev,
5160 "Ignoring illegal rx_dma_offset=%d, using 2\n",
5164 t4_set_reg_field(adapter, SGE_CONTROL,
5166 PKTSHIFT(rx_dma_offset));
5169 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5170 * adds the pseudo header itself.
5172 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
5173 CSUM_HAS_PSEUDO_HDR, 0);
5179 * Attempt to initialize the adapter via a Firmware Configuration File.
5181 static int adap_init0_config(struct adapter *adapter, int reset)
5183 struct fw_caps_config_cmd caps_cmd;
5184 const struct firmware *cf;
5185 unsigned long mtype = 0, maddr = 0;
5186 u32 finiver, finicsum, cfcsum;
5188 int config_issued = 0;
5189 char *fw_config_file, fw_config_file_path[256];
5190 char *config_name = NULL;
5193 * Reset device if necessary.
5196 ret = t4_fw_reset(adapter, adapter->mbox,
5197 PIORSTMODE | PIORST);
5203 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
5204 * then use that. Otherwise, use the configuration file stored
5205 * in the adapter flash ...
5207 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
5209 fw_config_file = FW4_CFNAME;
5212 fw_config_file = FW5_CFNAME;
5215 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
5216 adapter->pdev->device);
5221 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
5223 config_name = "On FLASH";
5224 mtype = FW_MEMTYPE_CF_FLASH;
5225 maddr = t4_flash_cfg_addr(adapter);
5227 u32 params[7], val[7];
5229 sprintf(fw_config_file_path,
5230 "/lib/firmware/%s", fw_config_file);
5231 config_name = fw_config_file_path;
5233 if (cf->size >= FLASH_CFG_MAX_SIZE)
5236 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5237 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5238 ret = t4_query_params(adapter, adapter->mbox,
5239 adapter->fn, 0, 1, params, val);
5242 * For t4_memory_rw() below, addresses and
5243 * sizes have to be in terms of multiples of 4
5244 * bytes. So, if the Configuration File isn't
5245 * a multiple of 4 bytes in length, we'll have
5246 * to write that out separately since we can't
5247 * guarantee that the bytes following the
5248 * residual byte in the buffer returned by
5249 * request_firmware() are zeroed out ...
5251 size_t resid = cf->size & 0x3;
5252 size_t size = cf->size & ~0x3;
5253 __be32 *data = (__be32 *)cf->data;
5255 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
5256 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
5258 spin_lock(&adapter->win0_lock);
5259 ret = t4_memory_rw(adapter, 0, mtype, maddr,
5260 size, data, T4_MEMORY_WRITE);
5261 if (ret == 0 && resid != 0) {
5268 last.word = data[size >> 2];
5269 for (i = resid; i < 4; i++)
5271 ret = t4_memory_rw(adapter, 0, mtype,
5276 spin_unlock(&adapter->win0_lock);
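		/*
		 * Worked example of the residual handling above
		 * (illustrative): for a 1023-byte Configuration File,
		 * size = 1020 and resid = 3; the first 1020 bytes go out
		 * via t4_memory_rw(), the last word is rebuilt from
		 * data[size >> 2] = data[255], byte indices resid..3 of it
		 * are zeroed, and the padded word is written at
		 * maddr + size.
		 */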
5280 release_firmware(cf);
5286 * Issue a Capability Configuration command to the firmware to get it
5287 * to parse the Configuration File. We don't use t4_fw_config_file()
5288 * because we want the ability to modify various features after we've
5289 * processed the configuration file ...
5291 memset(&caps_cmd, 0, sizeof(caps_cmd));
5292 caps_cmd.op_to_write =
5293 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5296 caps_cmd.cfvalid_to_len16 =
5297 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
5298 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
5299 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
5300 FW_LEN16(caps_cmd));
5301 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5304 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5305 * Configuration File in FLASH), our last gasp effort is to use the
5306 * Firmware Configuration File which is embedded in the firmware. A
5307 * very few early versions of the firmware didn't have one embedded
5308 * but we can ignore those.
5310 if (ret == -ENOENT) {
5311 memset(&caps_cmd, 0, sizeof(caps_cmd));
5312 caps_cmd.op_to_write =
5313 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5316 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5317 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5318 sizeof(caps_cmd), &caps_cmd);
5319 config_name = "Firmware Default";
5326 finiver = ntohl(caps_cmd.finiver);
5327 finicsum = ntohl(caps_cmd.finicsum);
5328 cfcsum = ntohl(caps_cmd.cfcsum);
5329 if (finicsum != cfcsum)
5330 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
5331 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
5335 * And now tell the firmware to use the configuration we just loaded.
5337 caps_cmd.op_to_write =
5338 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5341 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5342 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5348 * Tweak configuration based on system architecture, module
5351 ret = adap_init0_tweaks(adapter);
5356 * And finally tell the firmware to initialize itself using the
5357 * parameters from the Configuration File.
5359 ret = t4_fw_initialize(adapter, adapter->mbox);
5364 * Return successfully and note that we're operating with parameters
5365 * not supplied by the driver, rather than from hard-wired
5366 * initialization constants buried in the driver.
5368 adapter->flags |= USING_SOFT_PARAMS;
5369 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5370 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5371 config_name, finiver, cfcsum);
5375 * Something bad happened. Return the error ... (If the "error"
5376 * is that there's no Configuration File on the adapter we don't
5377 * want to issue a warning since this is fairly common.)
5380 if (config_issued && ret != -ENOENT)
5381 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5387 * Attempt to initialize the adapter via hard-coded, driver supplied
5390 static int adap_init0_no_config(struct adapter *adapter, int reset)
5392 struct sge *s = &adapter->sge;
5393 struct fw_caps_config_cmd caps_cmd;
5398 * Reset device if necessary
5401 ret = t4_fw_reset(adapter, adapter->mbox,
5402 PIORSTMODE | PIORST);
5408 * Get device capabilities and select which we'll be using.
5410 memset(&caps_cmd, 0, sizeof(caps_cmd));
5411 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5412 FW_CMD_REQUEST | FW_CMD_READ);
5413 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5414 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5419 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5421 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5423 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5424 } else if (vf_acls) {
5425 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5428 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5429 FW_CMD_REQUEST | FW_CMD_WRITE);
5430 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5436 * Tweak configuration based on system architecture, module
5439 ret = adap_init0_tweaks(adapter);
5444 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5445 * mode which maps each Virtual Interface to its own section of
5446 * the RSS Table and we turn on all map and hash enables ...
5448 adapter->flags |= RSS_TNLALLLOOKUP;
5449 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5450 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5451 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5452 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
5453 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5454 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
5459 * Set up our own fundamental resource provisioning ...
5461 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5462 PFRES_NEQ, PFRES_NETHCTRL,
5463 PFRES_NIQFLINT, PFRES_NIQ,
5464 PFRES_TC, PFRES_NVI,
5465 FW_PFVF_CMD_CMASK_MASK,
5466 pfvfres_pmask(adapter, adapter->fn, 0),
5468 PFRES_R_CAPS, PFRES_WX_CAPS);
5473 * Perform low level SGE initialization. We need to do this before we
5474 * send the firmware the INITIALIZE command because that will cause
5475 * any other PF Drivers which are waiting for the Master
5476 * Initialization to proceed forward.
5478 for (i = 0; i < SGE_NTIMERS - 1; i++)
5479 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5480 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5481 s->counter_val[0] = 1;
5482 for (i = 1; i < SGE_NCOUNTERS; i++)
5483 s->counter_val[i] = min(intr_cnt[i - 1],
5484 THRESHOLD_0_GET(THRESHOLD_0_MASK));
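	/*
	 * Illustrative example (assuming intr_holdoff module parameters of
	 * {5, 10, 20, 50, 100}): the holdoff timers become
	 * {5, 10, 20, 50, 100, 200} microseconds, with the last entry pinned
	 * at MAX_SGE_TIMERVAL, and counter_val[0] is always 1 packet.
	 */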
5485 t4_sge_init(adapter);
5487 #ifdef CONFIG_PCI_IOV
5489 * Provision resource limits for Virtual Functions. We currently
5490 * grant them all the same static resource limits except for the Port
5491 * Access Rights Mask which we're assigning based on the PF. All of
5492 * the static provisioning stuff for both the PF and VF really needs
5493 * to be managed in a persistent manner for each device which the
5494 * firmware controls.
5499 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5500 if (num_vf[pf] <= 0)
5503 /* VF numbering starts at 1! */
5504 for (vf = 1; vf <= num_vf[pf]; vf++) {
5505 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5507 VFRES_NEQ, VFRES_NETHCTRL,
5508 VFRES_NIQFLINT, VFRES_NIQ,
5509 VFRES_TC, VFRES_NVI,
5510 FW_PFVF_CMD_CMASK_MASK,
5514 VFRES_R_CAPS, VFRES_WX_CAPS);
5516 dev_warn(adapter->pdev_dev,
5518 "provision pf/vf=%d/%d; "
5519 "err=%d\n", pf, vf, ret);
5526 * Set up the default filter mode. Later we'll want to implement this
5527 * via a firmware command, etc. ... This needs to be done before the
5528 * firmware initialization command ... If the selected set of fields
5529 * isn't equal to the default value, we'll need to make sure that the
5530 * field selections will fit in the 36-bit budget.
5532 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5535 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5536 switch (tp_vlan_pri_map & (1 << j)) {
5538 /* compressed filter field not enabled */
5558 case ETHERTYPE_MASK:
5564 case MPSHITTYPE_MASK:
5567 case FRAGMENTATION_MASK:
5573 dev_err(adapter->pdev_dev,
5574 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5575 " using %#x\n", tp_vlan_pri_map, bits,
5576 TP_VLAN_PRI_MAP_DEFAULT);
5577 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5580 v = tp_vlan_pri_map;
5581 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5582 &v, 1, TP_VLAN_PRI_MAP);
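	/*
	 * For reference (illustrative, mirroring the bit accounting above):
	 * VLAN and VNIC_ID cost 17 bits each, ETHERTYPE 16, MACMATCH 9, TOS
	 * and PROTOCOL 8 each, PORT and MPSHITTYPE 3 each, and FCOE and
	 * FRAGMENTATION 1 each, so a selection such as VLAN + VNIC_ID + PORT
	 * (37 bits) would exceed the 36-bit budget and be rejected.
	 */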
5585 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5586 * to support any of the compressed filter fields above. Newer
5587 * versions of the firmware do this automatically but it doesn't hurt
5588 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5589 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5590 * since the firmware automatically turns this on and off when we have
5591 * a non-zero number of filters active (since it does have a
5592 * performance impact).
5594 if (tp_vlan_pri_map)
5595 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5596 FIVETUPLELOOKUP_MASK,
5597 FIVETUPLELOOKUP_MASK);
5600 * Tweak some settings.
5602 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5603 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5604 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5605 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5608 * Get basic stuff going by issuing the Firmware Initialize command.
5609 * Note that this _must_ be after all PFVF commands ...
5611 ret = t4_fw_initialize(adapter, adapter->mbox);
5616 * Return successfully!
5618 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5619 "driver parameters\n");
5623 * Something bad happened. Return the error ...
5629 static struct fw_info fw_info_array[] = {
5632 .fs_name = FW4_CFNAME,
5633 .fw_mod_name = FW4_FNAME,
5635 .chip = FW_HDR_CHIP_T4,
5636 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5637 .intfver_nic = FW_INTFVER(T4, NIC),
5638 .intfver_vnic = FW_INTFVER(T4, VNIC),
5639 .intfver_ri = FW_INTFVER(T4, RI),
5640 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5641 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5645 .fs_name = FW5_CFNAME,
5646 .fw_mod_name = FW5_FNAME,
5648 .chip = FW_HDR_CHIP_T5,
5649 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5650 .intfver_nic = FW_INTFVER(T5, NIC),
5651 .intfver_vnic = FW_INTFVER(T5, VNIC),
5652 .intfver_ri = FW_INTFVER(T5, RI),
5653 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5654 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5659 static struct fw_info *find_fw_info(int chip)
5663 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5664 if (fw_info_array[i].chip == chip)
5665 return &fw_info_array[i];
5671 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5673 static int adap_init0(struct adapter *adap)
5677 enum dev_state state;
5678 u32 params[7], val[7];
5679 struct fw_caps_config_cmd caps_cmd;
5683 * Contact FW, advertising Master capability (and potentially forcing
5684 * ourselves as the Master PF if our module parameter force_init is
5687 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5688 force_init ? MASTER_MUST : MASTER_MAY,
5691 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5695 if (ret == adap->mbox)
5696 adap->flags |= MASTER_PF;
5697 if (force_init && state == DEV_STATE_INIT)
5698 state = DEV_STATE_UNINIT;
5701 * If we're the Master PF Driver and the device is uninitialized,
5702 * then let's consider upgrading the firmware ... (We always want
5703 * to check the firmware version number in order to A. get it for
5704 * later reporting and B. warn if the currently loaded firmware
5705 * is excessively mismatched relative to the driver.)
5707 t4_get_fw_version(adap, &adap->params.fw_vers);
5708 t4_get_tp_version(adap, &adap->params.tp_vers);
5709 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5710 struct fw_info *fw_info;
5711 struct fw_hdr *card_fw;
5712 const struct firmware *fw;
5713 const u8 *fw_data = NULL;
5714 unsigned int fw_size = 0;
5716 /* This is the firmware whose headers the driver was compiled
5719 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5720 if (fw_info == NULL) {
5721 dev_err(adap->pdev_dev,
5722 "unable to get firmware info for chip %d.\n",
5723 CHELSIO_CHIP_VERSION(adap->params.chip));
5727 /* allocate memory to read the header of the firmware on the
5730 card_fw = t4_alloc_mem(sizeof(*card_fw));
5732 /* Get FW from /lib/firmware/ */
5733 ret = request_firmware(&fw, fw_info->fw_mod_name,
5736 dev_err(adap->pdev_dev,
5737 "unable to load firmware image %s, error %d\n",
5738 fw_info->fw_mod_name, ret);
5744 /* upgrade FW logic */
5745 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5750 release_firmware(fw);
5751 t4_free_mem(card_fw);
5758 * Grab VPD parameters. This should be done after we establish a
5759 * connection to the firmware since some of the VPD parameters
5760 * (notably the Core Clock frequency) are retrieved via requests to
5761 * the firmware. On the other hand, we need these fairly early on
5762 * so we do this right after getting ahold of the firmware.
5764 ret = get_vpd_params(adap, &adap->params.vpd);
5769 * Find out what ports are available to us. Note that we need to do
5770 * this before calling adap_init0_no_config() since it needs nports
5774 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5775 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5776 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5780 adap->params.nports = hweight32(port_vec);
5781 adap->params.portvec = port_vec;
5784 * If the firmware is initialized already (and we're not forcing a
5785 * master initialization), note that we're living with existing
5786 * adapter parameters. Otherwise, it's time to try initializing the
5789 if (state == DEV_STATE_INIT) {
5790 dev_info(adap->pdev_dev, "Coming up as %s: "\
5791 "Adapter already initialized\n",
5792 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5793 adap->flags |= USING_SOFT_PARAMS;
5795 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5796 "Initializing adapter\n");
5799 * If the firmware doesn't support Configuration
5800 * Files, warn the user and exit.
5803 dev_warn(adap->pdev_dev, "Firmware doesn't support "
5804 "configuration file.\n");
5806 ret = adap_init0_no_config(adap, reset);
5809 * Find out whether we're dealing with a version of
5810 * the firmware which has configuration file support.
5812 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5813 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5814 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5818 * If the firmware doesn't support Configuration
5819 * Files, use the old Driver-based, hard-wired
5820 * initialization. Otherwise, try using the
5821 * Configuration File support and fall back to the
5822 * Driver-based initialization if there's no
5823 * Configuration File found.
5826 ret = adap_init0_no_config(adap, reset);
5829 * The firmware provides us with a memory
5830 * buffer where we can load a Configuration
5831 * File from the host if we want to override
5832 * the Configuration File in flash.
5835 ret = adap_init0_config(adap, reset);
5836 if (ret == -ENOENT) {
5837 dev_info(adap->pdev_dev,
5838 "No Configuration File present "
5839 "on adapter. Using hard-wired "
5840 "configuration parameters.\n");
5841 ret = adap_init0_no_config(adap, reset);
5846 dev_err(adap->pdev_dev,
5847 "could not initialize adapter, error %d\n",
5854 * If we're living with non-hard-coded parameters (either from a
5855 * Firmware Configuration File or values programmed by a different PF
5856 * Driver), give the SGE code a chance to pull in anything that it
5857 * needs ... Note that this must be called after we retrieve our VPD
5858 * parameters in order to know how to convert core ticks to seconds.
5860 if (adap->flags & USING_SOFT_PARAMS) {
5861 ret = t4_sge_init(adap);
5866 if (is_bypass_device(adap->pdev->device))
5867 adap->params.bypass = 1;
5870 * Grab some of our basic fundamental operating parameters.
5872 #define FW_PARAM_DEV(param) \
5873 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5874 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5876 #define FW_PARAM_PFVF(param) \
5877 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5878 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5879 FW_PARAMS_PARAM_Y(0) | \
5880 FW_PARAMS_PARAM_Z(0)
5882 params[0] = FW_PARAM_PFVF(EQ_START);
5883 params[1] = FW_PARAM_PFVF(L2T_START);
5884 params[2] = FW_PARAM_PFVF(L2T_END);
5885 params[3] = FW_PARAM_PFVF(FILTER_START);
5886 params[4] = FW_PARAM_PFVF(FILTER_END);
5887 params[5] = FW_PARAM_PFVF(IQFLINT_START);
5888 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5891 adap->sge.egr_start = val[0];
5892 adap->l2t_start = val[1];
5893 adap->l2t_end = val[2];
5894 adap->tids.ftid_base = val[3];
5895 adap->tids.nftids = val[4] - val[3] + 1;
5896 adap->sge.ingr_start = val[5];
5898 /* query params related to active filter region */
5899 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5900 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5901 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5902 /* If Active filter size is set we enable establishing
5903 * offload connection through firmware work request
5905 if ((val[0] != val[1]) && (ret >= 0)) {
5906 adap->flags |= FW_OFLD_CONN;
5907 adap->tids.aftid_base = val[0];
5908 adap->tids.aftid_end = val[1];
5911 /* If we're running on newer firmware, let it know that we're
5912 * prepared to deal with encapsulated CPL messages. Older
5913 * firmware won't understand this and we'll just get
5914 * unencapsulated messages ...
5916 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5918 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5921 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5922 * capability. Earlier versions of the firmware didn't have the
5923 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5924 * permission to use ULPTX MEMWRITE DSGL.
5926 if (is_t4(adap->params.chip)) {
5927 adap->params.ulptx_memwrite_dsgl = false;
5929 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5930 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5932 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5936 * Get device capabilities so we can determine what resources we need
5939 memset(&caps_cmd, 0, sizeof(caps_cmd));
5940 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5941 FW_CMD_REQUEST | FW_CMD_READ);
5942 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5943 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5948 if (caps_cmd.ofldcaps) {
5949 /* query offload-related parameters */
5950 params[0] = FW_PARAM_DEV(NTID);
5951 params[1] = FW_PARAM_PFVF(SERVER_START);
5952 params[2] = FW_PARAM_PFVF(SERVER_END);
5953 params[3] = FW_PARAM_PFVF(TDDP_START);
5954 params[4] = FW_PARAM_PFVF(TDDP_END);
5955 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5956 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5960 adap->tids.ntids = val[0];
5961 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5962 adap->tids.stid_base = val[1];
5963 adap->tids.nstids = val[2] - val[1] + 1;
5965 * Set up the server filter region. Divide the available filter
5966 * region into two parts: regular filters get 1/3rd and server
5967 * filters get the remaining 2/3rd. This split is only done when
5968 * the workaround path is enabled.
5969 * 1. Regular filters.
5970 * 2. Server filters: special filters used to redirect SYN
5971 * packets to the offload queue.
5973 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5974 adap->tids.sftid_base = adap->tids.ftid_base +
5975 DIV_ROUND_UP(adap->tids.nftids, 3);
5976 adap->tids.nsftids = adap->tids.nftids -
5977 DIV_ROUND_UP(adap->tids.nftids, 3);
5978 adap->tids.nftids = adap->tids.sftid_base -
5979 adap->tids.ftid_base;
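			/*
			 * Worked example (illustrative): with nftids = 492,
			 * DIV_ROUND_UP(492, 3) = 164, so sftid_base moves up
			 * by 164 entries, nsftids = 492 - 164 = 328 server
			 * filters, and the regular filter region shrinks to
			 * the first 164 entries.
			 */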
5981 adap->vres.ddp.start = val[3];
5982 adap->vres.ddp.size = val[4] - val[3] + 1;
5983 adap->params.ofldq_wr_cred = val[5];
5985 adap->params.offload = 1;
5987 if (caps_cmd.rdmacaps) {
5988 params[0] = FW_PARAM_PFVF(STAG_START);
5989 params[1] = FW_PARAM_PFVF(STAG_END);
5990 params[2] = FW_PARAM_PFVF(RQ_START);
5991 params[3] = FW_PARAM_PFVF(RQ_END);
5992 params[4] = FW_PARAM_PFVF(PBL_START);
5993 params[5] = FW_PARAM_PFVF(PBL_END);
5994 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5998 adap->vres.stag.start = val[0];
5999 adap->vres.stag.size = val[1] - val[0] + 1;
6000 adap->vres.rq.start = val[2];
6001 adap->vres.rq.size = val[3] - val[2] + 1;
6002 adap->vres.pbl.start = val[4];
6003 adap->vres.pbl.size = val[5] - val[4] + 1;
6005 params[0] = FW_PARAM_PFVF(SQRQ_START);
6006 params[1] = FW_PARAM_PFVF(SQRQ_END);
6007 params[2] = FW_PARAM_PFVF(CQ_START);
6008 params[3] = FW_PARAM_PFVF(CQ_END);
6009 params[4] = FW_PARAM_PFVF(OCQ_START);
6010 params[5] = FW_PARAM_PFVF(OCQ_END);
6011 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
6015 adap->vres.qp.start = val[0];
6016 adap->vres.qp.size = val[1] - val[0] + 1;
6017 adap->vres.cq.start = val[2];
6018 adap->vres.cq.size = val[3] - val[2] + 1;
6019 adap->vres.ocq.start = val[4];
6020 adap->vres.ocq.size = val[5] - val[4] + 1;
6022 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
6023 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
6024 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
6027 adap->params.max_ordird_qp = 8;
6028 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
6031 adap->params.max_ordird_qp = val[0];
6032 adap->params.max_ird_adapter = val[1];
6034 dev_info(adap->pdev_dev,
6035 "max_ordird_qp %d max_ird_adapter %d\n",
6036 adap->params.max_ordird_qp,
6037 adap->params.max_ird_adapter);
6039 if (caps_cmd.iscsicaps) {
6040 params[0] = FW_PARAM_PFVF(ISCSI_START);
6041 params[1] = FW_PARAM_PFVF(ISCSI_END);
6042 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
6046 adap->vres.iscsi.start = val[0];
6047 adap->vres.iscsi.size = val[1] - val[0] + 1;
6049 #undef FW_PARAM_PFVF
6052 /* The MTU/MSS Table is initialized by now, so load its values. If
6053 * we're initializing the adapter, then we'll make any modifications
6054 * we want to the MTU/MSS Table and also initialize the congestion
6057 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
6058 if (state != DEV_STATE_INIT) {
6061 /* The default MTU Table contains values 1492 and 1500.
6062 * However, for TCP, it's better to have two values which are
6063 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
6064 * This allows us to have a TCP Data Payload which is a
6065 * multiple of 8 regardless of what combination of TCP Options
6066 * are in use (always a multiple of 4 bytes) which is
6067 * important for performance reasons. For instance, if no
6068 * options are in use, then we have a 20-byte IP header and a
6069 * 20-byte TCP header. In this case, a 1500-byte MSS would
6070 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
6071 * which is not a multiple of 8. So using an MSS of 1488 in
6072 * this case results in a TCP Data Payload of 1448 bytes which
6073 * is a multiple of 8. On the other hand, if 12-byte TCP Time
6074 * Stamps have been negotiated, then an MTU of 1500 bytes
6075 * results in a TCP Data Payload of 1448 bytes which, as
6076 * above, is a multiple of 8 bytes ...
6078 for (i = 0; i < NMTUS; i++)
6079 if (adap->params.mtus[i] == 1492) {
6080 adap->params.mtus[i] = 1488;
6084 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6085 adap->params.b_wnd);
6087 t4_init_tp_params(adap);
6088 adap->flags |= FW_OK;
6092 * Something bad happened. If a command timed out or failed with EIO,
6093 * the FW is not operating within its spec or something catastrophic
6094 * happened to the HW/FW; stop issuing commands.
6097 if (ret != -ETIMEDOUT && ret != -EIO)
6098 t4_fw_bye(adap, adap->mbox);
6104 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
6105 pci_channel_state_t state)
6108 struct adapter *adap = pci_get_drvdata(pdev);
6114 adap->flags &= ~FW_OK;
6115 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
6116 spin_lock(&adap->stats_lock);
6117 for_each_port(adap, i) {
6118 struct net_device *dev = adap->port[i];
6120 netif_device_detach(dev);
6121 netif_carrier_off(dev);
6123 spin_unlock(&adap->stats_lock);
6124 if (adap->flags & FULL_INIT_DONE)
6127 if ((adap->flags & DEV_ENABLED)) {
6128 pci_disable_device(pdev);
6129 adap->flags &= ~DEV_ENABLED;
6131 out: return state == pci_channel_io_perm_failure ?
6132 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
6135 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
6138 struct fw_caps_config_cmd c;
6139 struct adapter *adap = pci_get_drvdata(pdev);
6142 pci_restore_state(pdev);
6143 pci_save_state(pdev);
6144 return PCI_ERS_RESULT_RECOVERED;
6147 if (!(adap->flags & DEV_ENABLED)) {
6148 if (pci_enable_device(pdev)) {
6149 dev_err(&pdev->dev, "Cannot reenable PCI "
6150 "device after reset\n");
6151 return PCI_ERS_RESULT_DISCONNECT;
6153 adap->flags |= DEV_ENABLED;
6156 pci_set_master(pdev);
6157 pci_restore_state(pdev);
6158 pci_save_state(pdev);
6159 pci_cleanup_aer_uncorrect_error_status(pdev);
6161 if (t4_wait_dev_ready(adap->regs) < 0)
6162 return PCI_ERS_RESULT_DISCONNECT;
6163 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
6164 return PCI_ERS_RESULT_DISCONNECT;
6165 adap->flags |= FW_OK;
6166 if (adap_init1(adap, &c))
6167 return PCI_ERS_RESULT_DISCONNECT;
6169 for_each_port(adap, i) {
6170 struct port_info *p = adap2pinfo(adap, i);
6172 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
6175 return PCI_ERS_RESULT_DISCONNECT;
6177 p->xact_addr_filt = -1;
6180 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6181 adap->params.b_wnd);
6184 return PCI_ERS_RESULT_DISCONNECT;
6185 return PCI_ERS_RESULT_RECOVERED;
6188 static void eeh_resume(struct pci_dev *pdev)
6191 struct adapter *adap = pci_get_drvdata(pdev);
6197 for_each_port(adap, i) {
6198 struct net_device *dev = adap->port[i];
6200 if (netif_running(dev)) {
6202 cxgb_set_rxmode(dev);
6204 netif_device_attach(dev);
6209 static const struct pci_error_handlers cxgb4_eeh = {
6210 .error_detected = eeh_err_detected,
6211 .slot_reset = eeh_slot_reset,
6212 .resume = eeh_resume,
6215 static inline bool is_x_10g_port(const struct link_config *lc)
6217 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
6218 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
6221 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
6222 unsigned int us, unsigned int cnt,
6223 unsigned int size, unsigned int iqe_size)
6226 set_rspq_intr_params(q, us, cnt);
6227 q->iqe_len = iqe_size;
6232 * Perform default configuration of DMA queues depending on the number and type
6233 * of ports we found and the number of available CPUs. Most settings can be
6234 * modified by the admin prior to actual use.
6236 static void cfg_queues(struct adapter *adap)
6238 struct sge *s = &adap->sge;
6239 int i, n10g = 0, qidx = 0;
6240 #ifndef CONFIG_CHELSIO_T4_DCB
6245 for_each_port(adap, i)
6246 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
6247 #ifdef CONFIG_CHELSIO_T4_DCB
6248 /* For Data Center Bridging support we need to be able to support up
6249 * to 8 Traffic Priorities, each of which will be assigned to its
6250 * own TX Queue in order to prevent Head-Of-Line Blocking.
6252 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
6253 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
6254 MAX_ETH_QSETS, adap->params.nports * 8);
6258 for_each_port(adap, i) {
6259 struct port_info *pi = adap2pinfo(adap, i);
6261 pi->first_qset = qidx;
6265 #else /* !CONFIG_CHELSIO_T4_DCB */
6267 * We default to 1 queue per non-10G port and up to # of cores queues
6271 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
6272 if (q10g > netif_get_num_default_rss_queues())
6273 q10g = netif_get_num_default_rss_queues();
6275 for_each_port(adap, i) {
6276 struct port_info *pi = adap2pinfo(adap, i);
6278 pi->first_qset = qidx;
6279 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
6282 #endif /* !CONFIG_CHELSIO_T4_DCB */
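	/*
	 * Worked example (illustrative): a 2-port 10G adapter with
	 * MAX_ETH_QSETS = 32 gives q10g = (32 - 0) / 2 = 16, which is then
	 * capped by netif_get_num_default_rss_queues() (typically 8), so
	 * each 10G port gets 8 queue sets and qidx ends up at 16.
	 */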
6285 s->max_ethqsets = qidx; /* MSI-X may lower it later */
6287 if (is_offload(adap)) {
6289 * For offload we use 1 queue/channel if all ports are up to 1G,
6290 * otherwise we divide all available queues amongst the channels
6291 * capped by the number of available cores.
6294 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
6296 s->ofldqsets = roundup(i, adap->params.nports);
6298 s->ofldqsets = adap->params.nports;
6299 /* For RDMA one Rx queue per channel suffices */
6300 s->rdmaqs = adap->params.nports;
6301 s->rdmaciqs = adap->params.nports;
6304 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
6305 struct sge_eth_rxq *r = &s->ethrxq[i];
6307 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
6311 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
6312 s->ethtxq[i].q.size = 1024;
6314 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
6315 s->ctrlq[i].q.size = 512;
6317 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
6318 s->ofldtxq[i].q.size = 1024;
6320 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
6321 struct sge_ofld_rxq *r = &s->ofldrxq[i];
6323 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
6324 r->rspq.uld = CXGB4_ULD_ISCSI;
6328 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
6329 struct sge_ofld_rxq *r = &s->rdmarxq[i];
6331 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
6332 r->rspq.uld = CXGB4_ULD_RDMA;
6336 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
6337 if (ciq_size > SGE_MAX_IQ_SIZE) {
6338 CH_WARN(adap, "CIQ size too small for available IQs\n");
6339 ciq_size = SGE_MAX_IQ_SIZE;
6342 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
6343 struct sge_ofld_rxq *r = &s->rdmaciq[i];
6345 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
6346 r->rspq.uld = CXGB4_ULD_RDMA;
6349 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
6350 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
6354 * Reduce the number of Ethernet queues across all ports to at most n.
6355 * n provides at least one queue per port.
6357 static void reduce_ethqs(struct adapter *adap, int n)
6360 struct port_info *pi;
6362 while (n < adap->sge.ethqsets)
6363 for_each_port(adap, i) {
6364 pi = adap2pinfo(adap, i);
6365 if (pi->nqsets > 1) {
6367 adap->sge.ethqsets--;
6368 if (adap->sge.ethqsets <= n)
6374 for_each_port(adap, i) {
6375 pi = adap2pinfo(adap, i);
6381 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
6382 #define EXTRA_VECS 2
6384 static int enable_msix(struct adapter *adap)
6388 struct sge *s = &adap->sge;
6389 unsigned int nchan = adap->params.nports;
6390 struct msix_entry entries[MAX_INGQ + 1];
6392 for (i = 0; i < ARRAY_SIZE(entries); ++i)
6393 entries[i].entry = i;
6395 want = s->max_ethqsets + EXTRA_VECS;
6396 if (is_offload(adap)) {
6397 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
6398 /* need nchan for each possible ULD */
6399 ofld_need = 3 * nchan;
6401 #ifdef CONFIG_CHELSIO_T4_DCB
6402 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
6405 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
6407 need = adap->params.nports + EXTRA_VECS + ofld_need;
6409 want = pci_enable_msix_range(adap->pdev, entries, need, want);
6414 * Distribute available vectors to the various queue groups.
6415 * Every group gets its minimum requirement and NIC gets top
6416 * priority for leftovers.
6418 i = want - EXTRA_VECS - ofld_need;
6419 if (i < s->max_ethqsets) {
6420 s->max_ethqsets = i;
6421 if (i < s->ethqsets)
6422 reduce_ethqs(adap, i);
6424 if (is_offload(adap)) {
6425 i = want - EXTRA_VECS - s->max_ethqsets;
6426 i -= ofld_need - nchan;
6427 s->ofldqsets = (i / nchan) * nchan; /* round down */
6429 for (i = 0; i < want; ++i)
6430 adap->msix_info[i].vec = entries[i].vector;
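	/*
	 * Illustrative accounting: on a 2-port adapter without offload or
	 * DCB and max_ethqsets = 16, want = 16 + EXTRA_VECS = 18 and
	 * need = 2 + EXTRA_VECS = 4, so pci_enable_msix_range() may grant
	 * anywhere from 4 to 18 vectors; any shortfall against "want" is
	 * absorbed by shrinking the Ethernet queue sets via reduce_ethqs().
	 */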
6437 static int init_rss(struct adapter *adap)
6441 for_each_port(adap, i) {
6442 struct port_info *pi = adap2pinfo(adap, i);
6444 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6447 for (j = 0; j < pi->rss_size; j++)
6448 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
6453 static void print_port_info(const struct net_device *dev)
6457 const char *spd = "";
6458 const struct port_info *pi = netdev_priv(dev);
6459 const struct adapter *adap = pi->adapter;
6461 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
6463 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
6465 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
6468 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6469 bufp += sprintf(bufp, "100/");
6470 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6471 bufp += sprintf(bufp, "1000/");
6472 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6473 bufp += sprintf(bufp, "10G/");
6474 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6475 bufp += sprintf(bufp, "40G/");
6478 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6480 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
6481 adap->params.vpd.id,
6482 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
6483 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6484 (adap->flags & USING_MSIX) ? " MSI-X" :
6485 (adap->flags & USING_MSI) ? " MSI" : "");
6486 netdev_info(dev, "S/N: %s, P/N: %s\n",
6487 adap->params.vpd.sn, adap->params.vpd.pn);
6490 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
6492 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
6496 * Free the following resources:
6497 * - memory used for tables
6500 * - resources FW is holding for us
6502 static void free_some_resources(struct adapter *adapter)
6506 t4_free_mem(adapter->l2t);
6507 t4_free_mem(adapter->tids.tid_tab);
6508 disable_msi(adapter);
6510 for_each_port(adapter, i)
6511 if (adapter->port[i]) {
6512 kfree(adap2pinfo(adapter, i)->rss);
6513 free_netdev(adapter->port[i]);
6515 if (adapter->flags & FW_OK)
6516 t4_fw_bye(adapter, adapter->fn);
6519 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
6520 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6521 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6522 #define SEGMENT_SIZE 128
6524 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6526 int func, i, err, s_qpp, qpp, num_seg;
6527 struct port_info *pi;
6528 bool highdma = false;
6529 struct adapter *adapter = NULL;
6532 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6534 err = pci_request_regions(pdev, KBUILD_MODNAME);
6536 /* Just info, some other driver may have claimed the device. */
6537 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6541 err = pci_enable_device(pdev);
6543 dev_err(&pdev->dev, "cannot enable PCI device\n");
6544 goto out_release_regions;
6547 regs = pci_ioremap_bar(pdev, 0);
6549 dev_err(&pdev->dev, "cannot map device registers\n");
6551 goto out_disable_device;
6554 err = t4_wait_dev_ready(regs);
6556 goto out_unmap_bar0;
6558 /* We control everything through one PF */
6559 func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
6560 if (func != ent->driver_data) {
6562 pci_disable_device(pdev);
6563 pci_save_state(pdev); /* to restore SR-IOV later */
6567 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6569 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6571 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6572 "coherent allocations\n");
6573 goto out_unmap_bar0;
6576 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6578 dev_err(&pdev->dev, "no usable DMA configuration\n");
6579 goto out_unmap_bar0;
6583 pci_enable_pcie_error_reporting(pdev);
6584 enable_pcie_relaxed_ordering(pdev);
6585 pci_set_master(pdev);
6586 pci_save_state(pdev);
6588 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6591 goto out_unmap_bar0;
6594 adapter->workq = create_singlethread_workqueue("cxgb4");
6595 if (!adapter->workq) {
6597 goto out_free_adapter;
6600 /* PCI device has been enabled */
6601 adapter->flags |= DEV_ENABLED;
6603 adapter->regs = regs;
6604 adapter->pdev = pdev;
6605 adapter->pdev_dev = &pdev->dev;
6606 adapter->mbox = func;
6608 adapter->msg_enable = dflt_msg_enable;
6609 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6611 spin_lock_init(&adapter->stats_lock);
6612 spin_lock_init(&adapter->tid_release_lock);
6614 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6615 INIT_WORK(&adapter->db_full_task, process_db_full);
6616 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6618 err = t4_prep_adapter(adapter);
6620 goto out_free_adapter;
6623 if (!is_t4(adapter->params.chip)) {
6624 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6625 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6626 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6627 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6629 /* Each segment size is 128B. Write coalescing is enabled only
6630 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
6631 * queue is less than the number of segments that fit in a page.
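		/* For instance (illustrative): with a 4KB PAGE_SIZE,
		 * num_seg = 4096 / 128 = 32, so at most 32 egress queues may
		 * share a page before write coalescing has to be abandoned.
		 */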
6634 if (qpp > num_seg) {
6636 "Incorrect number of egress queues per page\n");
6638 goto out_free_adapter;
6640 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6641 pci_resource_len(pdev, 2));
6642 if (!adapter->bar2) {
6643 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6645 goto out_free_adapter;
6649 setup_memwin(adapter);
6650 err = adap_init0(adapter);
6651 setup_memwin_rdma(adapter);
6655 for_each_port(adapter, i) {
6656 struct net_device *netdev;
6658 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6665 SET_NETDEV_DEV(netdev, &pdev->dev);
6667 adapter->port[i] = netdev;
6668 pi = netdev_priv(netdev);
6669 pi->adapter = adapter;
6670 pi->xact_addr_filt = -1;
6672 netdev->irq = pdev->irq;
6674 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6675 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6676 NETIF_F_RXCSUM | NETIF_F_RXHASH |
6677 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6679 netdev->hw_features |= NETIF_F_HIGHDMA;
6680 netdev->features |= netdev->hw_features;
6681 netdev->vlan_features = netdev->features & VLAN_FEAT;
6683 netdev->priv_flags |= IFF_UNICAST_FLT;
6685 netdev->netdev_ops = &cxgb4_netdev_ops;
6686 #ifdef CONFIG_CHELSIO_T4_DCB
6687 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6688 cxgb4_dcb_state_init(netdev);
6690 netdev->ethtool_ops = &cxgb_ethtool_ops;
6693 pci_set_drvdata(pdev, adapter);
6695 if (adapter->flags & FW_OK) {
6696 err = t4_port_init(adapter, func, func, 0);
6702 * Configure queues and allocate tables now, they can be needed as
6703 * soon as the first register_netdev completes.
6705 cfg_queues(adapter);
6707 adapter->l2t = t4_init_l2t();
6708 if (!adapter->l2t) {
6709 /* We tolerate a lack of L2T, giving up some functionality */
6710 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6711 adapter->params.offload = 0;
6714 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6715 dev_warn(&pdev->dev, "could not allocate TID table, "
6717 adapter->params.offload = 0;
6720 /* See what interrupts we'll be using */
6721 if (msi > 1 && enable_msix(adapter) == 0)
6722 adapter->flags |= USING_MSIX;
6723 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6724 adapter->flags |= USING_MSI;
6726 err = init_rss(adapter);
6731 * The card is now ready to go. If any errors occur during device
6732 * registration we do not fail the whole card but rather proceed only
6733 * with the ports we manage to register successfully. However we must
6734 * register at least one net device.
6736 for_each_port(adapter, i) {
6737 pi = adap2pinfo(adapter, i);
6738 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6739 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6741 err = register_netdev(adapter->port[i]);
6744 adapter->chan_map[pi->tx_chan] = i;
6745 print_port_info(adapter->port[i]);
6748 dev_err(&pdev->dev, "could not register any net devices\n");
6752 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6756 if (cxgb4_debugfs_root) {
6757 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6758 cxgb4_debugfs_root);
6759 setup_debugfs(adapter);
6762 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6763 pdev->needs_freset = 1;
6765 if (is_offload(adapter))
6766 attach_ulds(adapter);
6769 #ifdef CONFIG_PCI_IOV
6770 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6771 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6772 dev_info(&pdev->dev,
6773 "instantiated %u virtual functions\n",
6779 free_some_resources(adapter);
6781 if (!is_t4(adapter->params.chip))
6782 iounmap(adapter->bar2);
6785 destroy_workqueue(adapter->workq);
6791 pci_disable_pcie_error_reporting(pdev);
6792 pci_disable_device(pdev);
6793 out_release_regions:
6794 pci_release_regions(pdev);
6798 static void remove_one(struct pci_dev *pdev)
6800 struct adapter *adapter = pci_get_drvdata(pdev);
6802 #ifdef CONFIG_PCI_IOV
6803 pci_disable_sriov(pdev);
6810 /* Tear down per-adapter Work Queue first since it can contain
6811 * references to our adapter data structure.
6813 destroy_workqueue(adapter->workq);
6815 if (is_offload(adapter))
6816 detach_ulds(adapter);
6818 for_each_port(adapter, i)
6819 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6820 unregister_netdev(adapter->port[i]);
6822 debugfs_remove_recursive(adapter->debugfs_root);
6824 /* If we allocated filters, free up state associated with any
6827 if (adapter->tids.ftid_tab) {
6828 struct filter_entry *f = &adapter->tids.ftid_tab[0];
6829 for (i = 0; i < (adapter->tids.nftids +
6830 adapter->tids.nsftids); i++, f++)
6832 clear_filter(adapter, f);
6835 if (adapter->flags & FULL_INIT_DONE)
6838 free_some_resources(adapter);
6839 iounmap(adapter->regs);
6840 if (!is_t4(adapter->params.chip))
6841 iounmap(adapter->bar2);
6842 pci_disable_pcie_error_reporting(pdev);
6843 if ((adapter->flags & DEV_ENABLED)) {
6844 pci_disable_device(pdev);
6845 adapter->flags &= ~DEV_ENABLED;
6847 pci_release_regions(pdev);
6851 pci_release_regions(pdev);
6854 static struct pci_driver cxgb4_driver = {
6855 .name = KBUILD_MODNAME,
6856 .id_table = cxgb4_pci_tbl,
6858 .remove = remove_one,
6859 .shutdown = remove_one,
6860 .err_handler = &cxgb4_eeh,
6863 static int __init cxgb4_init_module(void)
6867 /* Debugfs support is optional, just warn if this fails */
6868 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6869 if (!cxgb4_debugfs_root)
6870 pr_warn("could not create debugfs entry, continuing\n");
6872 ret = pci_register_driver(&cxgb4_driver);
6874 debugfs_remove(cxgb4_debugfs_root);
6876 #if IS_ENABLED(CONFIG_IPV6)
6877 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6883 static void __exit cxgb4_cleanup_module(void)
6885 #if IS_ENABLED(CONFIG_IPV6)
6886 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6888 pci_unregister_driver(&cxgb4_driver);
6889 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6892 module_init(cxgb4_init_module);
6893 module_exit(cxgb4_cleanup_module);