/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>

#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us. Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * free up resources.
 */
#define MAX_SGE_TIMERVAL 200U
/*
 * Physical Function provisioning constants.
 */
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,
#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants. We need two extra Ingress
 * Queues with Interrupt capability to serve as the VF's Firmware
 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
 * neither will have Free Lists associated with them. For each
 * Ethernet/Control Egress Queue and for each Free List, we need an
 * Egress Context.
 */
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
/*
 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
 * static and likely not to be useful in the long run. We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
	unsigned int portn, portvec;

	/*
	 * Give PF's access to all of the ports.
	 */
	return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF. We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;

	/*
	 * Isolate the lowest set bit in the port vector. If we're at
	 * the port number that we want, return that as the pmask.
	 * Otherwise mask that bit out of the port vector and
	 * decrement our port number ...
	 */
	unsigned int pmask = portvec ^ (portvec & (portvec-1));
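	/* Illustrative aside: the expression above is the classic
	 * lowest-set-bit isolation, equivalent to "portvec & -portvec".
	 * For example, with portvec = 0x6 (ports 1 and 2 active), the
	 * first pass computes pmask = 0x2; masking that bit out leaves
	 * 0x4, so the next pass yields pmask = 0x4.
	 */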
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES = 128,
/* Host shadow copy of ingress filter entry. This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command. The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;		/* filter allocated and valid */
	u32 locked:1;		/* filter is administratively locked */
	u32 pending:1;		/* filter action is pending firmware reply */
	u32 smtidx:8;		/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	/* The filter itself. Most of this is a straight copy of information
	 * provided by the extended ioctl(). Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
static const struct pci_device_id cxgb4_pci_tbl[] = {
	CH_DEVICE(0xa000, 0),	/* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x400d, -1),
	CH_DEVICE(0x400e, -1),
	CH_DEVICE(0x4080, -1),
	CH_DEVICE(0x4081, -1),
	CH_DEVICE(0x4082, -1),
	CH_DEVICE(0x4083, -1),
	CH_DEVICE(0x4084, -1),
	CH_DEVICE(0x4085, -1),
	CH_DEVICE(0x4086, -1),
	CH_DEVICE(0x4087, -1),
	CH_DEVICE(0x4088, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	CH_DEVICE(0x4480, 4),
	CH_DEVICE(0x4481, 4),
	CH_DEVICE(0x4482, 4),
	CH_DEVICE(0x4483, 4),
	CH_DEVICE(0x4484, 4),
	CH_DEVICE(0x4485, 4),
	CH_DEVICE(0x4486, 4),
	CH_DEVICE(0x4487, 4),
	CH_DEVICE(0x4488, 4),
	CH_DEVICE(0x5001, 4),
	CH_DEVICE(0x5002, 4),
	CH_DEVICE(0x5003, 4),
	CH_DEVICE(0x5004, 4),
	CH_DEVICE(0x5005, 4),
	CH_DEVICE(0x5006, 4),
	CH_DEVICE(0x5007, 4),
	CH_DEVICE(0x5008, 4),
	CH_DEVICE(0x5009, 4),
	CH_DEVICE(0x500A, 4),
	CH_DEVICE(0x500B, 4),
	CH_DEVICE(0x500C, 4),
	CH_DEVICE(0x500D, 4),
	CH_DEVICE(0x500E, 4),
	CH_DEVICE(0x500F, 4),
	CH_DEVICE(0x5010, 4),
	CH_DEVICE(0x5011, 4),
	CH_DEVICE(0x5012, 4),
	CH_DEVICE(0x5013, 4),
	CH_DEVICE(0x5014, 4),
	CH_DEVICE(0x5015, 4),
	CH_DEVICE(0x5080, 4),
	CH_DEVICE(0x5081, 4),
	CH_DEVICE(0x5082, 4),
	CH_DEVICE(0x5083, 4),
	CH_DEVICE(0x5084, 4),
	CH_DEVICE(0x5085, 4),
	CH_DEVICE(0x5086, 4),
	CH_DEVICE(0x5087, 4),
	CH_DEVICE(0x5088, 4),
	CH_DEVICE(0x5401, 4),
	CH_DEVICE(0x5402, 4),
	CH_DEVICE(0x5403, 4),
	CH_DEVICE(0x5404, 4),
	CH_DEVICE(0x5405, 4),
	CH_DEVICE(0x5406, 4),
	CH_DEVICE(0x5407, 4),
	CH_DEVICE(0x5408, 4),
	CH_DEVICE(0x5409, 4),
	CH_DEVICE(0x540A, 4),
	CH_DEVICE(0x540B, 4),
	CH_DEVICE(0x540C, 4),
	CH_DEVICE(0x540D, 4),
	CH_DEVICE(0x540E, 4),
	CH_DEVICE(0x540F, 4),
	CH_DEVICE(0x5410, 4),
	CH_DEVICE(0x5411, 4),
	CH_DEVICE(0x5412, 4),
	CH_DEVICE(0x5413, 4),
	CH_DEVICE(0x5414, 4),
	CH_DEVICE(0x5415, 4),
	CH_DEVICE(0x5480, 4),
	CH_DEVICE(0x5481, 4),
	CH_DEVICE(0x5482, 4),
	CH_DEVICE(0x5483, 4),
	CH_DEVICE(0x5484, 4),
	CH_DEVICE(0x5485, 4),
	CH_DEVICE(0x5486, 4),
	CH_DEVICE(0x5487, 4),
	CH_DEVICE(0x5488, 4),
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason. If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Queue interrupt hold-off timer values. Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries. This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary. And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA. However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
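/* Worked example of the alignment above (standard Ethernet framing assumed):
 * a 14-byte Ethernet header placed at a 2-byte offset ends at 2 + 14 = 16,
 * so the IP header starts on a 4-byte boundary (16 % 4 == 0). With a 0-byte
 * offset it would start at byte 14, which is not 4-byte aligned.
 */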
#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or the
 * driver's cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
/*
 * The filter TCAM has a fixed portion and a variable portion. The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports. The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter. Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
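/* Illustrative arithmetic: the default tuple above fits the budget, since
 * 1 (frag) + 3 (MPS match type) + 8 (IP protocol) + 17 (inner VLAN) +
 * 3 (port) + 1 (FCoE) = 33 bits <= the 36 bits available.
 */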
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
			txq->dcb_prio = value;
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
#ifdef CONFIG_CHELSIO_T4_DCB
			cxgb4_dcb_state_init(dev);
			dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
/**
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here; the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
			pi->xact_addr_filt = ret;
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);

int cxgb4_dcb_enabled(const struct net_device *dev)
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
EXPORT_SYMBOL(cxgb4_dcb_enabled);
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
	int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Clear a filter and release any of its resources that we own. This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
	/* If the new or old filter has loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule. The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * the filter.
	 */
	memset(f, 0, sizeof(*f));
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			/* Something went wrong. Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
			clear_filter(adap, f);
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;		/* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		opcode = ((const struct rss_header *)rsp)->opcode;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_GET(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
				? CXGB4_DCB_INPUT_FW_DISABLED
				: CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD. All processing is done by
 * the ULD; we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
	else if (gl == CXGB4_MSG_AN)
static void disable_msi(struct adapter *adapter)
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	t4_slow_intr_handler(adap);

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",

	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
static int request_msix_queue_irqs(struct adapter *adap)
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);

	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
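	/* Unwind in reverse: on a request_irq() failure, each queue class is
	 * walked backwards while msi_index is decremented in lockstep, so
	 * exactly the vectors that were successfully requested -- and no
	 * others -- are freed, finishing with the firmware event queue
	 * vector.
	 */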
static void free_msix_queue_irqs(struct adapter *adap)
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
/**
 * write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
/**
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;	/* vector 0 is for non-queue interrupts */
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
		msi_idx = -((int)s->intrq.abs_id + 1);

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
freeout:	t4_free_sge_resources(adap);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
			memset(&q->stats, 0, sizeof(q->stats));
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       q->fl.size ? &q->fl : NULL,
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;

	for_each_rdmaciq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmaciq[i];

		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, q->fl.size ? &q->fl : NULL,
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_ciq[i] = q->rspq.abs_id;

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL :
				MPS_T5_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
	if (is_vmalloc_addr(addr))
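	/* A minimal sketch of the alloc/free pairing, assuming the elided
	 * branches match the comment above and the is_vmalloc_addr() test:
	 *
	 *	p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	 *	if (!p)
	 *		p = vzalloc(size);	(virtually-contiguous fallback)
	 *	...
	 *	if (is_vmalloc_addr(addr))
	 *		vfree(addr);
	 *	else
	 *		kfree(addr);
	 */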
/* Send a Work Request to write the filter at a specified index. We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					 f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code. We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));

	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);

	memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	t4_mgmt_tx(adapter, skb);
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				"TX Packet without VLAN Tag on DCB Link\n");
		txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif /* CONFIG_CHELSIO_T4_DCB */

	txq = (skb_rx_queue_recorded(skb)
		? skb_get_rx_queue(skb)
		: smp_processor_id());

	while (unlikely(txq >= dev->real_num_tx_queues))
		txq -= dev->real_num_tx_queues;

	return fallback(dev, skb) % dev->real_num_tx_queues;
static inline int is_offload(const struct adapter *adap)
	return adap->params.offload;

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
	return netdev2adap(dev)->msg_enable;

static void set_msglevel(struct net_device *dev, u32 val)
	netdev2adap(dev)->msg_enable = val;

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",
	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",
	"WriteCoalSuccess   ",
static int get_sset_count(struct net_device *dev, int sset)
		return ARRAY_SIZE(stats_strings);

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->params.chip))
		return T4_REGMAP_SIZE;
	return T5_REGMAP_SIZE;

static int get_eeprom_len(struct net_device *dev)

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));

/*
 * port stats maintained per queue of the port. They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->params.chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
		memset(data, 0, 2 * sizeof(u64));
/*
 * Return a version number to identify the type of adapter. The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
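/* Worked example of the packing above (values illustrative): a chip version
 * of 4 with release 2 and register dump version 1 encodes as
 * 4 | (2 << 10) | (1 << 16) = 0x4 + 0x800 + 0x10000 = 0x10804.
 */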
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
	static const unsigned int t4_reg_ranges[] = {
	static const unsigned int t5_reg_ranges[] = {
	struct adapter *ap = netdev2adap(dev);
	static const unsigned int *reg_ranges;
	int arr_size = 0, buf_size = 0;

	if (is_t4(ap->params.chip)) {
		reg_ranges = &t4_reg_ranges[0];
		arr_size = ARRAY_SIZE(t4_reg_ranges);
		buf_size = T4_REGMAP_SIZE;
		reg_ranges = &t5_reg_ranges[0];
		arr_size = ARRAY_SIZE(t5_reg_ranges);
		buf_size = T5_REGMAP_SIZE;

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, buf_size);
	for (i = 0; i < arr_size; i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
static int restart_autoneg(struct net_device *dev)
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);

static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
	else if (state == ETHTOOL_ID_INACTIVE)

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
		v |= SUPPORTED_FIBRE;
	else if (type == FW_PORT_TYPE_BP40_BA)
		v |= SUPPORTED_40000baseSR4_Full;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;

static unsigned int to_fw_linkcaps(unsigned int caps)
	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	if (caps & ADVERTISED_40000baseSR4_Full)
		v |= FW_PORT_CAP_SPEED_40G;
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP ||
		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
		 p->port_type == FW_PORT_TYPE_QSFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
			cmd->port = PORT_FIBRE;
		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_OTHER;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;	/* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
static unsigned int speed_to_caps(int speed)
		return FW_PORT_CAP_SPEED_100M;
		return FW_PORT_CAP_SPEED_1G;
		return FW_PORT_CAP_SPEED_10G;
		return FW_PORT_CAP_SPEED_40G;

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL)	/* only full-duplex supported */

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed. See if that's what's
		 * requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap) ||
		lc->requested_speed = cap;
		lc->advertising = 0;
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
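/* Userspace example (illustrative; "eth0" is a placeholder interface name):
 * the two handlers above back the standard ethtool pause controls, e.g.
 *
 *	ethtool -a eth0			# query via get_pauseparam()
 *	ethtool -A eth0 rx on tx off	# update via set_pauseparam()
 */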
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)

	if (adapter->flags & FULL_INIT_DONE)

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
static int closest_timer(const struct sge *s, int time)
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < min_delta) {

static int closest_thres(const struct sge *s, int thres)
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < min_delta) {

/*
 * Return a queue's interrupt hold-off time in us. 0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
/**
 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count. At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rspq_intr_params(struct sge_rspq *q,
				unsigned int us, unsigned int cnt)
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)

	new_idx = closest_thres(&adap->sge, cnt);
	if (q->desc && q->pktcnt_idx != new_idx) {
		/* the queue has already been created, update it */
		v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
		    FW_PARAMS_PARAM_YZ(q->cntxt_id);
		err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
		q->pktcnt_idx = new_idx;

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
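	/* Encoding note (inferred from qtimer_val() above, not a separate
	 * hardware fact): q->intr_params packs the packet-counter enable
	 * flag in bit 0 (QINTR_CNT_EN) and the timer index in the bits
	 * above it, which is why qtimer_val() recovers the index with
	 * "intr_params >> 1".
	 */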
/**
 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
 * @dev: the network device
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Set the RX interrupt hold-off parameters for a network device.
 */
static int set_rx_intr_params(struct net_device *dev,
			      unsigned int us, unsigned int cnt)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

	for (i = 0; i < pi->nqsets; i++, q++) {
		err = set_rspq_intr_params(&q->rspq, us, cnt);
2760 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2763 struct port_info *pi = netdev_priv(dev);
2764 struct adapter *adap = pi->adapter;
2765 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2767 for (i = 0; i < pi->nqsets; i++, q++)
2768 q->rspq.adaptive_rx = adaptive_rx;
2773 static int get_adaptive_rx_setting(struct net_device *dev)
2775 struct port_info *pi = netdev_priv(dev);
2776 struct adapter *adap = pi->adapter;
2777 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2779 return q->rspq.adaptive_rx;
2782 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2784 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
2785 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2786 c->rx_max_coalesced_frames);
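
/* Usage sketch (assumed command line): "ethtool -C ethX adaptive-rx on
 * rx-usecs 50 rx-frames 16" reaches set_coalesce() via the ethtool core
 * with use_adaptive_rx_coalesce = 1, rx_coalesce_usecs = 50 and
 * rx_max_coalesced_frames = 16; the values are then snapped to the nearest
 * entries of sge.timer_val[] and sge.counter_val[].
 */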
2789 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2791 const struct port_info *pi = netdev_priv(dev);
2792 const struct adapter *adap = pi->adapter;
2793 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2795 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2796 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2797 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2798 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
2803 * eeprom_ptov - translate a physical EEPROM address to virtual
2804 * @phys_addr: the physical EEPROM address
2805 * @fn: the PCI function number
2806 * @sz: size of function-specific area
2808 * Translate a physical EEPROM address to virtual. The first 1K is
2809 * accessed through virtual addresses starting at 31K, the rest is
2810 * accessed through virtual addresses starting at 0.
2812 * The mapping is as follows:
2813 * [0..1K) -> [31K..32K)
2814 * [1K..1K+A) -> [31K-A..31K)
2815 * [1K+A..ES) -> [0..ES-A-1K)
2817 * where A = @fn * @sz, and ES = EEPROM size.
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
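
/* Worked example (numbers assumed, not taken from any particular card):
 * with @fn = 2 and @sz = 1K, A = 2K.  Physical 0x200 is in the first 1K and
 * maps to 31K + 0x200 = 0x7E00; physical 0x800 is inside the function's
 * area [1K, 1K + A) and maps to 31K - 2K + (0x800 - 0x400) = 0x7800;
 * physical 0x1000 is past the function's area and maps to
 * 0x1000 - 1K - 2K = 0x400.
 */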
2832 * The next two routines implement eeprom read/write from physical addresses.
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}
2852 #define EEPROM_MAGIC 0x38E2F10C
2854 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2858 struct adapter *adapter = netdev2adap(dev);
2860 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2864 e->magic = EEPROM_MAGIC;
2865 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2866 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2869 memcpy(data, buf + e->offset, e->len);
2874 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2879 u32 aligned_offset, aligned_len, *p;
2880 struct adapter *adapter = netdev2adap(dev);
2882 if (eeprom->magic != EEPROM_MAGIC)
2885 aligned_offset = eeprom->offset & ~3;
2886 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
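
	/* Worked example (numbers assumed): offset = 5, len = 6 gives
	 * aligned_offset = 4 and aligned_len = (6 + 1 + 3) & ~3 = 8, i.e.
	 * the two 32-bit words covering bytes [4, 12), which contain the
	 * requested bytes [5, 11).
	 */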
2888 if (adapter->fn > 0) {
2889 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2891 if (aligned_offset < start ||
2892 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2896 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2898 * RMW possibly needed for first or last words.
2900 buf = kmalloc(aligned_len, GFP_KERNEL);
2903 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2904 if (!err && aligned_len > 4)
2905 err = eeprom_rd_phys(adapter,
2906 aligned_offset + aligned_len - 4,
2907 (u32 *)&buf[aligned_len - 4]);
2910 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2914 err = t4_seeprom_wp(adapter, false);
2918 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2919 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2920 aligned_offset += 4;
2924 err = t4_seeprom_wp(adapter, true);
2931 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2934 const struct firmware *fw;
2935 struct adapter *adap = netdev2adap(netdev);
2936 unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;
2938 ef->data[sizeof(ef->data) - 1] = '\0';
2939 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2943 /* If the adapter has been fully initialized then we'll go ahead and
2944 * try to get the firmware's cooperation in upgrading to the new
2945 * firmware image otherwise we'll try to do the entire job from the
2946 * host ... and we always "force" the operation in this path.
	if (adap->flags & FULL_INIT_DONE)
		mbox = adap->mbox;

	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
2952 release_firmware(fw);
	dev_info(adap->pdev_dev,
		 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
2959 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2960 #define BCAST_CRC 0xa0ccc1a6
2962 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2964 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2965 wol->wolopts = netdev2adap(dev)->wol;
2966 memset(&wol->sopass, 0, sizeof(wol->sopass));
2969 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2972 struct port_info *pi = netdev_priv(dev);
2974 if (wol->wolopts & ~WOL_SUPPORTED)
2976 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2977 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2978 if (wol->wolopts & WAKE_BCAST) {
2979 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2982 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2983 ~6ULL, ~0ULL, BCAST_CRC, true);
2985 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2989 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2991 const struct port_info *pi = netdev_priv(dev);
2992 netdev_features_t changed = dev->features ^ features;
	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
3006 static u32 get_rss_table_size(struct net_device *dev)
3008 const struct port_info *pi = netdev_priv(dev);
3010 return pi->rss_size;
3013 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
3015 const struct port_info *pi = netdev_priv(dev);
3016 unsigned int n = pi->rss_size;
3023 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
3026 struct port_info *pi = netdev_priv(dev);
3028 for (i = 0; i < pi->rss_size; i++)
3030 if (pi->adapter->flags & FULL_INIT_DONE)
3031 return write_rss(pi, pi->rss);
3035 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
3038 const struct port_info *pi = netdev_priv(dev);
3040 switch (info->cmd) {
3041 case ETHTOOL_GRXFH: {
3042 unsigned int v = pi->rss_mode;
3045 switch (info->flow_type) {
3047 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3048 info->data = RXH_IP_SRC | RXH_IP_DST |
3049 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3050 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3051 info->data = RXH_IP_SRC | RXH_IP_DST;
3054 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
3055 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3056 info->data = RXH_IP_SRC | RXH_IP_DST |
3057 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3058 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3059 info->data = RXH_IP_SRC | RXH_IP_DST;
3062 case AH_ESP_V4_FLOW:
3064 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3065 info->data = RXH_IP_SRC | RXH_IP_DST;
3068 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3069 info->data = RXH_IP_SRC | RXH_IP_DST |
3070 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3071 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3072 info->data = RXH_IP_SRC | RXH_IP_DST;
3075 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3076 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3077 info->data = RXH_IP_SRC | RXH_IP_DST |
3078 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3079 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3080 info->data = RXH_IP_SRC | RXH_IP_DST;
3083 case AH_ESP_V6_FLOW:
3085 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3086 info->data = RXH_IP_SRC | RXH_IP_DST;
3091 case ETHTOOL_GRXRINGS:
3092 info->data = pi->nqsets;
3098 static const struct ethtool_ops cxgb_ethtool_ops = {
3099 .get_settings = get_settings,
3100 .set_settings = set_settings,
3101 .get_drvinfo = get_drvinfo,
3102 .get_msglevel = get_msglevel,
3103 .set_msglevel = set_msglevel,
3104 .get_ringparam = get_sge_param,
3105 .set_ringparam = set_sge_param,
3106 .get_coalesce = get_coalesce,
3107 .set_coalesce = set_coalesce,
3108 .get_eeprom_len = get_eeprom_len,
3109 .get_eeprom = get_eeprom,
3110 .set_eeprom = set_eeprom,
3111 .get_pauseparam = get_pauseparam,
3112 .set_pauseparam = set_pauseparam,
3113 .get_link = ethtool_op_get_link,
3114 .get_strings = get_strings,
3115 .set_phys_id = identify_port,
3116 .nway_reset = restart_autoneg,
3117 .get_sset_count = get_sset_count,
3118 .get_ethtool_stats = get_stats,
3119 .get_regs_len = get_regs_len,
3120 .get_regs = get_regs,
3123 .get_rxnfc = get_rxnfc,
3124 .get_rxfh_indir_size = get_rss_table_size,
3125 .get_rxfh = get_rss_table,
3126 .set_rxfh = set_rss_table,
3127 .flash_device = set_flash,
3130 static int setup_debugfs(struct adapter *adap)
3132 if (IS_ERR_OR_NULL(adap->debugfs_root))
3135 #ifdef CONFIG_DEBUG_FS
3136 t4_setup_debugfs(adap);
3142 * upper-layer driver support
3146 * Allocate an active-open TID and set it to the supplied value.
3148 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3152 spin_lock_bh(&t->atid_lock);
3154 union aopen_entry *p = t->afree;
3156 atid = (p - t->atid_tab) + t->atid_base;
3161 spin_unlock_bh(&t->atid_lock);
3164 EXPORT_SYMBOL(cxgb4_alloc_atid);
3167 * Release an active-open TID.
3169 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3171 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3173 spin_lock_bh(&t->atid_lock);
3177 spin_unlock_bh(&t->atid_lock);
3179 EXPORT_SYMBOL(cxgb4_free_atid);
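
/* Usage sketch (illustrative only, not part of the driver): an upper-layer
 * driver typically brackets an active-open request with the two calls
 * above; "t" and "ctx" are assumed to come from the ULD:
 *
 *	atid = cxgb4_alloc_atid(t, ctx);
 *	if (atid < 0)
 *		return atid;
 *	... send the active-open request tagged with atid ...
 *	cxgb4_free_atid(t, atid);
 */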
3182 * Allocate a server TID and set it to the supplied value.
3184 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3188 spin_lock_bh(&t->stid_lock);
3189 if (family == PF_INET) {
3190 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3191 if (stid < t->nstids)
3192 __set_bit(stid, t->stid_bmap);
3196 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3201 t->stid_tab[stid].data = data;
3202 stid += t->stid_base;
	/* IPv6 requires max of 520 bits or 16 cells in TCAM
	 * This is equivalent to 4 TIDs. With CLIP enabled it
	 * needs 2 TIDs.
	 */
	if (family == PF_INET)
		t->stids_in_use++;
	else
		t->stids_in_use += 4;
3212 spin_unlock_bh(&t->stid_lock);
3215 EXPORT_SYMBOL(cxgb4_alloc_stid);
3217 /* Allocate a server filter TID and set it to the supplied value.
3219 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3223 spin_lock_bh(&t->stid_lock);
3224 if (family == PF_INET) {
3225 stid = find_next_zero_bit(t->stid_bmap,
3226 t->nstids + t->nsftids, t->nstids);
3227 if (stid < (t->nstids + t->nsftids))
3228 __set_bit(stid, t->stid_bmap);
3235 t->stid_tab[stid].data = data;
3237 stid += t->sftid_base;
3240 spin_unlock_bh(&t->stid_lock);
3243 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3245 /* Release a server TID.
3247 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3249 /* Is it a server filter TID? */
3250 if (t->nsftids && (stid >= t->sftid_base)) {
3251 stid -= t->sftid_base;
3254 stid -= t->stid_base;
3257 spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	if (family == PF_INET)
		t->stids_in_use--;
	else
		t->stids_in_use -= 4;
3267 spin_unlock_bh(&t->stid_lock);
3269 EXPORT_SYMBOL(cxgb4_free_stid);
3272 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3274 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3277 struct cpl_tid_release *req;
3279 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3280 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3281 INIT_TP_WR(req, tid);
3282 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
3289 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3292 void **p = &t->tid_tab[tid];
3293 struct adapter *adap = container_of(t, struct adapter, tids);
3295 spin_lock_bh(&adap->tid_release_lock);
3296 *p = adap->tid_release_head;
3297 /* Low 2 bits encode the Tx channel number */
3298 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3299 if (!adap->tid_release_task_busy) {
3300 adap->tid_release_task_busy = true;
3301 queue_work(adap->workq, &adap->tid_release_task);
3303 spin_unlock_bh(&adap->tid_release_lock);
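
/* Illustrative note: tid_tab entries are pointer-aligned, so the low two
 * bits of their addresses are always zero and can carry the Tx channel
 * (0-3).  For example, queueing a release on channel 2 for entry p stores
 * (void **)((uintptr_t)p | 2); process_tid_release_list() below recovers
 * the channel with "& 3" and the entry with "p - chan".
 */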
3307 * Process the list of pending TID release requests.
3309 static void process_tid_release_list(struct work_struct *work)
3311 struct sk_buff *skb;
3312 struct adapter *adap;
3314 adap = container_of(work, struct adapter, tid_release_task);
3316 spin_lock_bh(&adap->tid_release_lock);
3317 while (adap->tid_release_head) {
3318 void **p = adap->tid_release_head;
3319 unsigned int chan = (uintptr_t)p & 3;
3320 p = (void *)p - chan;
3322 adap->tid_release_head = *p;
3324 spin_unlock_bh(&adap->tid_release_lock);
3326 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3328 schedule_timeout_uninterruptible(1);
3330 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3331 t4_ofld_send(adap, skb);
3332 spin_lock_bh(&adap->tid_release_lock);
3334 adap->tid_release_task_busy = false;
3335 spin_unlock_bh(&adap->tid_release_lock);
3339 * Release a TID and inform HW. If we are unable to allocate the release
3340 * message we defer to a work queue.
3342 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3345 struct sk_buff *skb;
3346 struct adapter *adap = container_of(t, struct adapter, tids);
3348 old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
3359 EXPORT_SYMBOL(cxgb4_remove_tid);
3362 * Allocate and initialize the TID tables. Returns 0 on success.
3364 static int tid_init(struct tid_info *t)
3367 unsigned int stid_bmap_size;
3368 unsigned int natids = t->natids;
3369 struct adapter *adap = container_of(t, struct adapter, tids);
3371 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3372 size = t->ntids * sizeof(*t->tid_tab) +
3373 natids * sizeof(*t->atid_tab) +
3374 t->nstids * sizeof(*t->stid_tab) +
3375 t->nsftids * sizeof(*t->stid_tab) +
3376 stid_bmap_size * sizeof(long) +
3377 t->nftids * sizeof(*t->ftid_tab) +
3378 t->nsftids * sizeof(*t->ftid_tab);
3380 t->tid_tab = t4_alloc_mem(size);
3384 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3385 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3386 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3387 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3388 spin_lock_init(&t->stid_lock);
3389 spin_lock_init(&t->atid_lock);
3391 t->stids_in_use = 0;
3393 t->atids_in_use = 0;
3394 atomic_set(&t->tids_in_use, 0);
3396 /* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
3402 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3403 /* Reserve stid 0 for T4/T5 adapters */
3404 if (!t->stid_base &&
3405 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3406 __set_bit(0, t->stid_bmap);
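
/* The single allocation made by tid_init() is carved up as follows (counts
 * are elements, not bytes):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] | ftid_tab[nftids + nsftids]
 */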
3411 int cxgb4_clip_get(const struct net_device *dev,
3412 const struct in6_addr *lip)
3414 struct adapter *adap;
3415 struct fw_clip_cmd c;
3417 adap = netdev2adap(dev);
3418 memset(&c, 0, sizeof(c));
3419 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3420 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3421 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3422 c.ip_hi = *(__be64 *)(lip->s6_addr);
3423 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3424 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3426 EXPORT_SYMBOL(cxgb4_clip_get);
3428 int cxgb4_clip_release(const struct net_device *dev,
3429 const struct in6_addr *lip)
3431 struct adapter *adap;
3432 struct fw_clip_cmd c;
3434 adap = netdev2adap(dev);
3435 memset(&c, 0, sizeof(c));
3436 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3437 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3438 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3439 c.ip_hi = *(__be64 *)(lip->s6_addr);
3440 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3441 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3443 EXPORT_SYMBOL(cxgb4_clip_release);
3446 * cxgb4_create_server - create an IP server
3448 * @stid: the server TID
3449 * @sip: local IP address to bind server to
3450 * @sport: the server's TCP port
3451 * @queue: queue to direct messages from this server to
3453 * Create an IP server for the given port and address.
3454 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3456 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3457 __be32 sip, __be16 sport, __be16 vlan,
3461 struct sk_buff *skb;
3462 struct adapter *adap;
3463 struct cpl_pass_open_req *req;
3466 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3470 adap = netdev2adap(dev);
3471 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3473 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3474 req->local_port = sport;
3475 req->peer_port = htons(0);
3476 req->local_ip = sip;
3477 req->peer_ip = htonl(0);
3478 chan = rxq_to_chan(&adap->sge, queue);
3479 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3480 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3481 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3482 ret = t4_mgmt_tx(adap, skb);
3483 return net_xmit_eval(ret);
3485 EXPORT_SYMBOL(cxgb4_create_server);
3487 /* cxgb4_create_server6 - create an IPv6 server
3489 * @stid: the server TID
3490 * @sip: local IPv6 address to bind server to
3491 * @sport: the server's TCP port
3492 * @queue: queue to direct messages from this server to
3494 * Create an IPv6 server for the given port and address.
3495 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3497 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3498 const struct in6_addr *sip, __be16 sport,
3502 struct sk_buff *skb;
3503 struct adapter *adap;
3504 struct cpl_pass_open_req6 *req;
3507 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3511 adap = netdev2adap(dev);
3512 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3514 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3515 req->local_port = sport;
3516 req->peer_port = htons(0);
3517 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3518 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3519 req->peer_ip_hi = cpu_to_be64(0);
3520 req->peer_ip_lo = cpu_to_be64(0);
3521 chan = rxq_to_chan(&adap->sge, queue);
3522 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3523 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3524 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3525 ret = t4_mgmt_tx(adap, skb);
3526 return net_xmit_eval(ret);
3528 EXPORT_SYMBOL(cxgb4_create_server6);
3530 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3531 unsigned int queue, bool ipv6)
3533 struct sk_buff *skb;
3534 struct adapter *adap;
3535 struct cpl_close_listsvr_req *req;
3538 adap = netdev2adap(dev);
3540 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3544 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3546 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3547 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3548 LISTSVR_IPV6(0)) | QUEUENO(queue));
3549 ret = t4_mgmt_tx(adap, skb);
3550 return net_xmit_eval(ret);
3552 EXPORT_SYMBOL(cxgb4_remove_server);
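
/* Usage sketch (illustrative only): an offload driver listening on TCP
 * port 80 would pair the server calls with stid management; "adap", "ctx",
 * "sip" and "rxq" are assumed to be supplied by the ULD:
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, ctx);
 *	if (stid >= 0)
 *		ret = cxgb4_create_server(dev, stid, sip, htons(80), 0, rxq);
 *	...
 *	cxgb4_remove_server(dev, stid, rxq, false);
 *	cxgb4_free_stid(&adap->tids, stid, PF_INET);
 */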
3555 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3556 * @mtus: the HW MTU table
3557 * @mtu: the target MTU
3558 * @idx: index of selected entry in the MTU table
3560 * Returns the index and the value in the HW MTU table that is closest to
3561 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3562 * table, in which case that smallest available value is selected.
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
3575 EXPORT_SYMBOL(cxgb4_best_mtu);
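
/* Worked example (table contents assumed): with an MTU table containing
 * ..., 1488, 1500, 2002, ..., cxgb4_best_mtu(mtus, 1499, &idx) returns 1488
 * while cxgb4_best_mtu(mtus, 1500, &idx) returns 1500; an @mtu below the
 * smallest table entry returns that smallest entry.
 */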
3578 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3579 * @mtus: the HW MTU table
3580 * @header_size: Header Size
3581 * @data_size_max: maximum Data Segment Size
3582 * @data_size_align: desired Data Segment Size Alignment (2^N)
3583 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3585 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3586 * MTU Table based solely on a Maximum MTU parameter, we break that
3587 * parameter up into a Header Size and Maximum Data Segment Size, and
3588 * provide a desired Data Segment Size Alignment. If we find an MTU in
3589 * the Hardware MTU Table which will result in a Data Segment Size with
3590 * the requested alignment _and_ that MTU isn't "too far" from the
3591 * closest MTU, then we'll return that rather than the closest MTU.
3593 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3594 unsigned short header_size,
3595 unsigned short data_size_max,
3596 unsigned short data_size_align,
3597 unsigned int *mtu_idxp)
3599 unsigned short max_mtu = header_size + data_size_max;
3600 unsigned short data_size_align_mask = data_size_align - 1;
3601 int mtu_idx, aligned_mtu_idx;
3603 /* Scan the MTU Table till we find an MTU which is larger than our
3604 * Maximum MTU or we reach the end of the table. Along the way,
3605 * record the last MTU found, if any, which will result in a Data
3606 * Segment Length matching the requested alignment.
3608 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3609 unsigned short data_size = mtus[mtu_idx] - header_size;
3611 /* If this MTU minus the Header Size would result in a
3612 * Data Segment Size of the desired alignment, remember it.
3614 if ((data_size & data_size_align_mask) == 0)
3615 aligned_mtu_idx = mtu_idx;
3617 /* If we're not at the end of the Hardware MTU Table and the
3618 * next element is larger than our Maximum MTU, drop out of
3621 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3625 /* If we fell out of the loop because we ran to the end of the table,
3626 * then we just have to use the last [largest] entry.
3628 if (mtu_idx == NMTUS)
3631 /* If we found an MTU which resulted in the requested Data Segment
3632 * Length alignment and that's "not far" from the largest MTU which is
3633 * less than or equal to the maximum MTU, then use that.
3635 if (aligned_mtu_idx >= 0 &&
3636 mtu_idx - aligned_mtu_idx <= 1)
3637 mtu_idx = aligned_mtu_idx;
3639 /* If the caller has passed in an MTU Index pointer, pass the
3640 * MTU Index back. Return the MTU value.
3643 *mtu_idxp = mtu_idx;
3644 return mtus[mtu_idx];
3646 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
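
/* Worked example (numbers assumed): with table entries ..., 1488, 1500,
 * 2002, ..., header_size = 52, data_size_max = 1460 and
 * data_size_align = 8, max_mtu = 1512.  An MTU of 1488 gives a 1436-byte
 * data segment (not 8-byte aligned) while 1500 gives 1448 (aligned), so
 * 1500 is returned even though both fit under max_mtu.
 */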
3649 * cxgb4_port_chan - get the HW channel of a port
3650 * @dev: the net device for the port
3652 * Return the HW Tx channel of the given port.
3654 unsigned int cxgb4_port_chan(const struct net_device *dev)
3656 return netdev2pinfo(dev)->tx_chan;
3658 EXPORT_SYMBOL(cxgb4_port_chan);
3660 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3662 struct adapter *adap = netdev2adap(dev);
3663 u32 v1, v2, lp_count, hp_count;
3665 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3666 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3667 if (is_t4(adap->params.chip)) {
3668 lp_count = G_LP_COUNT(v1);
3669 hp_count = G_HP_COUNT(v1);
3671 lp_count = G_LP_COUNT_T5(v1);
3672 hp_count = G_HP_COUNT_T5(v2);
3674 return lpfifo ? lp_count : hp_count;
3676 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3679 * cxgb4_port_viid - get the VI id of a port
3680 * @dev: the net device for the port
3682 * Return the VI id of the given port.
3684 unsigned int cxgb4_port_viid(const struct net_device *dev)
3686 return netdev2pinfo(dev)->viid;
3688 EXPORT_SYMBOL(cxgb4_port_viid);
3691 * cxgb4_port_idx - get the index of a port
3692 * @dev: the net device for the port
3694 * Return the index of the given port.
3696 unsigned int cxgb4_port_idx(const struct net_device *dev)
3698 return netdev2pinfo(dev)->port_id;
3700 EXPORT_SYMBOL(cxgb4_port_idx);
3702 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3703 struct tp_tcp_stats *v6)
3705 struct adapter *adap = pci_get_drvdata(pdev);
3707 spin_lock(&adap->stats_lock);
3708 t4_tp_get_tcp_stats(adap, v4, v6);
3709 spin_unlock(&adap->stats_lock);
3711 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3713 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3714 const unsigned int *pgsz_order)
3716 struct adapter *adap = netdev2adap(dev);
3718 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3719 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3720 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3721 HPZ3(pgsz_order[3]));
3723 EXPORT_SYMBOL(cxgb4_iscsi_init);
3725 int cxgb4_flush_eq_cache(struct net_device *dev)
3727 struct adapter *adap = netdev2adap(dev);
3730 ret = t4_fwaddrspace_write(adap, adap->mbox,
3731 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3734 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3736 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3738 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3742 spin_lock(&adap->win0_lock);
3743 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3744 sizeof(indices), (__be32 *)&indices,
3746 spin_unlock(&adap->win0_lock);
3748 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3749 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
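
/* Illustrative note: the 64-bit word fetched by read_eq_indices() is the
 * queue's doorbell context entry; judging from the shifts and masks above,
 * the consumer index lives in bits [40:25] and the producer index in
 * bits [24:9].
 */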
3754 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3757 struct adapter *adap = netdev2adap(dev);
3758 u16 hw_pidx, hw_cidx;
3761 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3765 if (pidx != hw_pidx) {
3768 if (pidx >= hw_pidx)
3769 delta = pidx - hw_pidx;
3771 delta = size - hw_pidx + pidx;
3773 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3774 QID(qid) | PIDX(delta));
3779 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3781 void cxgb4_disable_db_coalescing(struct net_device *dev)
3783 struct adapter *adap;
3785 adap = netdev2adap(dev);
3786 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3789 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3791 void cxgb4_enable_db_coalescing(struct net_device *dev)
3793 struct adapter *adap;
3795 adap = netdev2adap(dev);
3796 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3798 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3800 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3802 struct adapter *adap;
3803 u32 offset, memtype, memaddr;
3804 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
3805 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3808 adap = netdev2adap(dev);
3810 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3812 /* Figure out where the offset lands in the Memory Type/Address scheme.
3813 * This code assumes that the memory is laid out starting at offset 0
3814 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3815 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3816 * MC0, and some have both MC0 and MC1.
3818 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
3819 edc0_size = EDRAM0_SIZE_G(size) << 20;
3820 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
3821 edc1_size = EDRAM1_SIZE_G(size) << 20;
3822 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
3823 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
3825 edc0_end = edc0_size;
3826 edc1_end = edc0_end + edc1_size;
3827 mc0_end = edc1_end + mc0_size;
3829 if (offset < edc0_end) {
3832 } else if (offset < edc1_end) {
3834 memaddr = offset - edc0_end;
3836 if (offset < mc0_end) {
3838 memaddr = offset - edc1_end;
3839 } else if (is_t4(adap->params.chip)) {
3840 /* T4 only has a single memory channel */
3843 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
3844 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
3845 mc1_end = mc0_end + mc1_size;
3846 if (offset < mc1_end) {
3848 memaddr = offset - mc0_end;
3850 /* offset beyond the end of any memory */
3856 spin_lock(&adap->win0_lock);
3857 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3858 spin_unlock(&adap->win0_lock);
3862 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3866 EXPORT_SYMBOL(cxgb4_read_tpte);
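
/* Illustrative decode (memory sizes assumed): with a 256MB EDC0, a 256MB
 * EDC1 and a 4GB MC0, a TPTE offset of 320MB falls in the EDC1 range
 * [256MB, 512MB), so cxgb4_read_tpte() would read 32 bytes from MEM_EDC1
 * at memaddr = 320MB - 256MB = 64MB.
 */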
3868 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3871 struct adapter *adap;
3873 adap = netdev2adap(dev);
3874 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3875 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3877 return ((u64)hi << 32) | (u64)lo;
3879 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3881 static struct pci_driver cxgb4_driver;
3883 static void check_neigh_update(struct neighbour *neigh)
3885 const struct device *parent;
3886 const struct net_device *netdev = neigh->dev;
3888 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3889 netdev = vlan_dev_real_dev(netdev);
3890 parent = netdev->dev.parent;
3891 if (parent && parent->driver == &cxgb4_driver.driver)
3892 t4_l2t_update(dev_get_drvdata(parent), neigh);
3895 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3899 case NETEVENT_NEIGH_UPDATE:
3900 check_neigh_update(data);
3902 case NETEVENT_REDIRECT:
3909 static bool netevent_registered;
3910 static struct notifier_block cxgb4_netevent_nb = {
3911 .notifier_call = netevent_cb
3914 static void drain_db_fifo(struct adapter *adap, int usecs)
3916 u32 v1, v2, lp_count, hp_count;
3919 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3920 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3921 if (is_t4(adap->params.chip)) {
3922 lp_count = G_LP_COUNT(v1);
3923 hp_count = G_HP_COUNT(v1);
3925 lp_count = G_LP_COUNT_T5(v1);
3926 hp_count = G_HP_COUNT_T5(v2);
3929 if (lp_count == 0 && hp_count == 0)
3931 set_current_state(TASK_UNINTERRUPTIBLE);
3932 schedule_timeout(usecs_to_jiffies(usecs));
3936 static void disable_txq_db(struct sge_txq *q)
3938 unsigned long flags;
	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
3945 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
3961 static void disable_dbs(struct adapter *adap)
3965 for_each_ethrxq(&adap->sge, i)
3966 disable_txq_db(&adap->sge.ethtxq[i].q);
3967 for_each_ofldrxq(&adap->sge, i)
3968 disable_txq_db(&adap->sge.ofldtxq[i].q);
3969 for_each_port(adap, i)
3970 disable_txq_db(&adap->sge.ctrlq[i].q);
3973 static void enable_dbs(struct adapter *adap)
3977 for_each_ethrxq(&adap->sge, i)
3978 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3979 for_each_ofldrxq(&adap->sge, i)
3980 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3981 for_each_port(adap, i)
3982 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3985 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3987 if (adap->uld_handle[CXGB4_ULD_RDMA])
3988 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3992 static void process_db_full(struct work_struct *work)
3994 struct adapter *adap;
3996 adap = container_of(work, struct adapter, db_full_task);
3998 drain_db_fifo(adap, dbfifo_drain_delay);
4000 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4001 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4002 DBFIFO_HP_INT | DBFIFO_LP_INT,
4003 DBFIFO_HP_INT | DBFIFO_LP_INT);
4006 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
4008 u16 hw_pidx, hw_cidx;
4011 spin_lock_irq(&q->db_lock);
4012 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
4015 if (q->db_pidx != hw_pidx) {
4018 if (q->db_pidx >= hw_pidx)
4019 delta = q->db_pidx - hw_pidx;
4021 delta = q->size - hw_pidx + q->db_pidx;
4023 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4024 QID(q->cntxt_id) | PIDX(delta));
4029 spin_unlock_irq(&q->db_lock);
4031 CH_WARN(adap, "DB drop recovery failed.\n");
4033 static void recover_all_queues(struct adapter *adap)
4037 for_each_ethrxq(&adap->sge, i)
4038 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
4039 for_each_ofldrxq(&adap->sge, i)
4040 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
4041 for_each_port(adap, i)
4042 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
4045 static void process_db_drop(struct work_struct *work)
4047 struct adapter *adap;
4049 adap = container_of(work, struct adapter, db_drop_task);
4051 if (is_t4(adap->params.chip)) {
4052 drain_db_fifo(adap, dbfifo_drain_delay);
4053 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
4054 drain_db_fifo(adap, dbfifo_drain_delay);
4055 recover_all_queues(adap);
4056 drain_db_fifo(adap, dbfifo_drain_delay);
4058 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4060 u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u32 qid = (dropped_db >> 15) & 0x1ffff; /* 17-bit QID field */
4062 u16 pidx_inc = dropped_db & 0x1fff;
4064 unsigned short udb_density;
4065 unsigned long qpshift;
4069 dev_warn(adap->pdev_dev,
4070 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4072 (dropped_db >> 14) & 1,
4073 (dropped_db >> 13) & 1,
4076 drain_db_fifo(adap, 1);
4078 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4079 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4080 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4081 qpshift = PAGE_SHIFT - ilog2(udb_density);
		udb = qid << qpshift;
		udb &= PAGE_MASK;
		page = udb / PAGE_SIZE;
		udb += (qid - (page * udb_density)) * 128;
4087 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
4089 /* Re-enable BAR2 WC */
4090 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4093 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
4096 void t4_db_full(struct adapter *adap)
4098 if (is_t4(adap->params.chip)) {
4100 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4101 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4102 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4103 queue_work(adap->workq, &adap->db_full_task);
4107 void t4_db_dropped(struct adapter *adap)
4109 if (is_t4(adap->params.chip)) {
4111 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4113 queue_work(adap->workq, &adap->db_drop_task);
4116 static void uld_attach(struct adapter *adap, unsigned int uld)
4119 struct cxgb4_lld_info lli;
4122 lli.pdev = adap->pdev;
4124 lli.l2t = adap->l2t;
4125 lli.tids = &adap->tids;
4126 lli.ports = adap->port;
4127 lli.vr = &adap->vres;
4128 lli.mtus = adap->params.mtus;
4129 if (uld == CXGB4_ULD_RDMA) {
4130 lli.rxq_ids = adap->sge.rdma_rxq;
4131 lli.ciq_ids = adap->sge.rdma_ciq;
4132 lli.nrxq = adap->sge.rdmaqs;
4133 lli.nciq = adap->sge.rdmaciqs;
4134 } else if (uld == CXGB4_ULD_ISCSI) {
4135 lli.rxq_ids = adap->sge.ofld_rxq;
4136 lli.nrxq = adap->sge.ofldqsets;
4138 lli.ntxq = adap->sge.ofldqsets;
4139 lli.nchan = adap->params.nports;
4140 lli.nports = adap->params.nports;
4141 lli.wr_cred = adap->params.ofldq_wr_cred;
4142 lli.adapter_type = adap->params.chip;
4143 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4144 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4145 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
4146 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4148 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
4149 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4151 lli.filt_mode = adap->params.tp.vlan_pri_map;
4152 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
4155 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4156 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4157 lli.fw_vers = adap->params.fw_vers;
4158 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4159 lli.sge_ingpadboundary = adap->sge.fl_align;
4160 lli.sge_egrstatuspagesize = adap->sge.stat_len;
4161 lli.sge_pktshift = adap->sge.pktshift;
4162 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4163 lli.max_ordird_qp = adap->params.max_ordird_qp;
4164 lli.max_ird_adapter = adap->params.max_ird_adapter;
4165 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4167 handle = ulds[uld].add(&lli);
4168 if (IS_ERR(handle)) {
4169 dev_warn(adap->pdev_dev,
4170 "could not attach to the %s driver, error %ld\n",
4171 uld_str[uld], PTR_ERR(handle));
4175 adap->uld_handle[uld] = handle;
4177 if (!netevent_registered) {
4178 register_netevent_notifier(&cxgb4_netevent_nb);
4179 netevent_registered = true;
4182 if (adap->flags & FULL_INIT_DONE)
4183 ulds[uld].state_change(handle, CXGB4_STATE_UP);
4186 static void attach_ulds(struct adapter *adap)
4190 spin_lock(&adap_rcu_lock);
4191 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4192 spin_unlock(&adap_rcu_lock);
4194 mutex_lock(&uld_mutex);
4195 list_add_tail(&adap->list_node, &adapter_list);
4196 for (i = 0; i < CXGB4_ULD_MAX; i++)
4198 uld_attach(adap, i);
4199 mutex_unlock(&uld_mutex);
4202 static void detach_ulds(struct adapter *adap)
4206 mutex_lock(&uld_mutex);
4207 list_del(&adap->list_node);
4208 for (i = 0; i < CXGB4_ULD_MAX; i++)
4209 if (adap->uld_handle[i]) {
4210 ulds[i].state_change(adap->uld_handle[i],
4211 CXGB4_STATE_DETACH);
4212 adap->uld_handle[i] = NULL;
4214 if (netevent_registered && list_empty(&adapter_list)) {
4215 unregister_netevent_notifier(&cxgb4_netevent_nb);
4216 netevent_registered = false;
4218 mutex_unlock(&uld_mutex);
4220 spin_lock(&adap_rcu_lock);
4221 list_del_rcu(&adap->rcu_node);
4222 spin_unlock(&adap_rcu_lock);
4225 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4229 mutex_lock(&uld_mutex);
4230 for (i = 0; i < CXGB4_ULD_MAX; i++)
4231 if (adap->uld_handle[i])
4232 ulds[i].state_change(adap->uld_handle[i], new_state);
4233 mutex_unlock(&uld_mutex);
4237 * cxgb4_register_uld - register an upper-layer driver
4238 * @type: the ULD type
4239 * @p: the ULD methods
4241 * Registers an upper-layer driver with this driver and notifies the ULD
4242 * about any presently available devices that support its type. Returns
4243 * %-EBUSY if a ULD of the same type is already registered.
4245 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4248 struct adapter *adap;
4250 if (type >= CXGB4_ULD_MAX)
4252 mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
4259 uld_attach(adap, type);
4260 out: mutex_unlock(&uld_mutex);
4263 EXPORT_SYMBOL(cxgb4_register_uld);
4266 * cxgb4_unregister_uld - unregister an upper-layer driver
4267 * @type: the ULD type
4269 * Unregisters an existing upper-layer driver.
4271 int cxgb4_unregister_uld(enum cxgb4_uld type)
4273 struct adapter *adap;
4275 if (type >= CXGB4_ULD_MAX)
4277 mutex_lock(&uld_mutex);
4278 list_for_each_entry(adap, &adapter_list, list_node)
4279 adap->uld_handle[type] = NULL;
4280 ulds[type].add = NULL;
4281 mutex_unlock(&uld_mutex);
4284 EXPORT_SYMBOL(cxgb4_unregister_uld);
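
/* Registration sketch (illustrative only): a hypothetical ULD would fill
 * in a struct cxgb4_uld_info and register it; the callback names below are
 * assumptions, only the register/unregister calls are real:
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.name = "my_uld",
 *		.add = my_uld_add,
 *		.rx_handler = my_uld_rx_handler,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	ret = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 */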
/* Check whether the netdev on which an event has occurred belongs to us;
 * return true if it does, false otherwise.
4288 * Called with rcu_read_lock() held.
4290 #if IS_ENABLED(CONFIG_IPV6)
4291 static bool cxgb4_netdev(const struct net_device *netdev)
4293 struct adapter *adap;
4296 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4297 for (i = 0; i < MAX_NPORTS; i++)
4298 if (adap->port[i] == netdev)
4303 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4304 unsigned long event)
4306 int ret = NOTIFY_DONE;
4309 if (cxgb4_netdev(event_dev)) {
4312 ret = cxgb4_clip_get(event_dev, &ifa->addr);
4320 cxgb4_clip_release(event_dev, &ifa->addr);
4331 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4332 unsigned long event, void *data)
4334 struct inet6_ifaddr *ifa = data;
4335 struct net_device *event_dev;
4336 int ret = NOTIFY_DONE;
4337 struct bonding *bond = netdev_priv(ifa->idev->dev);
4338 struct list_head *iter;
4339 struct slave *slave;
4340 struct pci_dev *first_pdev = NULL;
4342 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4343 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4344 ret = clip_add(event_dev, ifa, event);
4345 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4346 /* It is possible that two different adapters are bonded in one
4347 * bond. We need to find such different adapters and add clip
4348 * in all of them only once.
4350 bond_for_each_slave(bond, slave, iter) {
4352 ret = clip_add(slave->dev, ifa, event);
4353 /* If clip_add is success then only initialize
4354 * first_pdev since it means it is our device
4356 if (ret == NOTIFY_OK)
4357 first_pdev = to_pci_dev(
4358 slave->dev->dev.parent);
4359 } else if (first_pdev !=
4360 to_pci_dev(slave->dev->dev.parent))
4361 ret = clip_add(slave->dev, ifa, event);
4364 ret = clip_add(ifa->idev->dev, ifa, event);
4369 static struct notifier_block cxgb4_inet6addr_notifier = {
4370 .notifier_call = cxgb4_inet6addr_handler
4373 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4374 * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
4377 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4379 struct inet6_dev *idev = NULL;
4380 struct inet6_ifaddr *ifa;
4383 idev = __in6_dev_get(root_dev);
4387 read_lock_bh(&idev->lock);
4388 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4389 ret = cxgb4_clip_get(dev, &ifa->addr);
4393 read_unlock_bh(&idev->lock);
4398 static int update_root_dev_clip(struct net_device *dev)
4400 struct net_device *root_dev = NULL;
4403 /* First populate the real net device's IPv6 addresses */
4404 ret = update_dev_clip(dev, dev);
4408 /* Parse all bond and vlan devices layered on top of the physical dev */
4409 root_dev = netdev_master_upper_dev_get_rcu(dev);
4411 ret = update_dev_clip(root_dev, dev);
4416 for (i = 0; i < VLAN_N_VID; i++) {
4417 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4421 ret = update_dev_clip(root_dev, dev);
4428 static void update_clip(const struct adapter *adap)
4431 struct net_device *dev;
4436 for (i = 0; i < MAX_NPORTS; i++) {
4437 dev = adap->port[i];
4441 ret = update_root_dev_clip(dev);
4448 #endif /* IS_ENABLED(CONFIG_IPV6) */
4451 * cxgb_up - enable the adapter
4452 * @adap: adapter being enabled
4454 * Called when the first port is enabled, this function performs the
4455 * actions necessary to make an adapter operational, such as completing
4456 * the initialization of HW modules, and enabling interrupts.
4458 * Must be called with the rtnl lock held.
4460 static int cxgb_up(struct adapter *adap)
4464 err = setup_sge_queues(adap);
4467 err = setup_rss(adap);
4471 if (adap->flags & USING_MSIX) {
4472 name_msix_vecs(adap);
4473 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4474 adap->msix_info[0].desc, adap);
4478 err = request_msix_queue_irqs(adap);
4480 free_irq(adap->msix_info[0].vec, adap);
4484 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4485 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4486 adap->port[0]->name, adap);
4492 t4_intr_enable(adap);
4493 adap->flags |= FULL_INIT_DONE;
4494 notify_ulds(adap, CXGB4_STATE_UP);
4495 #if IS_ENABLED(CONFIG_IPV6)
4501 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4503 t4_free_sge_resources(adap);
4507 static void cxgb_down(struct adapter *adapter)
4509 t4_intr_disable(adapter);
4510 cancel_work_sync(&adapter->tid_release_task);
4511 cancel_work_sync(&adapter->db_full_task);
4512 cancel_work_sync(&adapter->db_drop_task);
4513 adapter->tid_release_task_busy = false;
4514 adapter->tid_release_head = NULL;
4516 if (adapter->flags & USING_MSIX) {
4517 free_msix_queue_irqs(adapter);
4518 free_irq(adapter->msix_info[0].vec, adapter);
4520 free_irq(adapter->pdev->irq, adapter);
4521 quiesce_rx(adapter);
4522 t4_sge_stop(adapter);
4523 t4_free_sge_resources(adapter);
4524 adapter->flags &= ~FULL_INIT_DONE;
4528 * net_device operations
4530 static int cxgb_open(struct net_device *dev)
4533 struct port_info *pi = netdev_priv(dev);
4534 struct adapter *adapter = pi->adapter;
4536 netif_carrier_off(dev);
4538 if (!(adapter->flags & FULL_INIT_DONE)) {
4539 err = cxgb_up(adapter);
4544 err = link_start(dev);
4546 netif_tx_start_all_queues(dev);
4550 static int cxgb_close(struct net_device *dev)
4552 struct port_info *pi = netdev_priv(dev);
4553 struct adapter *adapter = pi->adapter;
4555 netif_tx_stop_all_queues(dev);
4556 netif_carrier_off(dev);
4557 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4560 /* Return an error number if the indicated filter isn't writable ...
4562 static int writable_filter(struct filter_entry *f)
/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this like the filter being locked,
 * currently pending in another operation, etc.
4576 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4578 struct filter_entry *f;
4581 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4584 f = &adapter->tids.ftid_tab[fidx];
4585 ret = writable_filter(f);
4589 return del_filter_wr(adapter, fidx);
4594 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4595 __be32 sip, __be16 sport, __be16 vlan,
4596 unsigned int queue, unsigned char port, unsigned char mask)
4599 struct filter_entry *f;
4600 struct adapter *adap;
4604 adap = netdev2adap(dev);
4606 /* Adjust stid to correct filter index */
4607 stid -= adap->tids.sftid_base;
4608 stid += adap->tids.nftids;
4610 /* Check to make sure the filter requested is writable ...
4612 f = &adap->tids.ftid_tab[stid];
4613 ret = writable_filter(f);
4617 /* Clear out any old resources being used by the filter before
4618 * we start constructing the new filter.
4621 clear_filter(adap, f);
4623 /* Clear out filter specifications */
4624 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4625 f->fs.val.lport = cpu_to_be16(sport);
4626 f->fs.mask.lport = ~0;
4628 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4629 for (i = 0; i < 4; i++) {
4630 f->fs.val.lip[i] = val[i];
4631 f->fs.mask.lip[i] = ~0;
4633 if (adap->params.tp.vlan_pri_map & F_PORT) {
4634 f->fs.val.iport = port;
4635 f->fs.mask.iport = mask;
4639 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4640 f->fs.val.proto = IPPROTO_TCP;
4641 f->fs.mask.proto = ~0;
4646 /* Mark filter as locked */
4650 ret = set_filter_wr(adap, stid);
4652 clear_filter(adap, f);
4658 EXPORT_SYMBOL(cxgb4_create_server_filter);
4660 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4661 unsigned int queue, bool ipv6)
4664 struct filter_entry *f;
4665 struct adapter *adap;
4667 adap = netdev2adap(dev);
4669 /* Adjust stid to correct filter index */
4670 stid -= adap->tids.sftid_base;
4671 stid += adap->tids.nftids;
4673 f = &adap->tids.ftid_tab[stid];
4674 /* Unlock the filter */
4677 ret = delete_filter(adap, stid);
4683 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4685 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4686 struct rtnl_link_stats64 *ns)
4688 struct port_stats stats;
4689 struct port_info *p = netdev_priv(dev);
4690 struct adapter *adapter = p->adapter;
4692 /* Block retrieving statistics during EEH error
4693 * recovery. Otherwise, the recovery might fail
4694 * and the PCI device will be removed permanently
4696 spin_lock(&adapter->stats_lock);
4697 if (!netif_device_present(dev)) {
4698 spin_unlock(&adapter->stats_lock);
4701 t4_get_port_stats(adapter, p->tx_chan, &stats);
4702 spin_unlock(&adapter->stats_lock);
4704 ns->tx_bytes = stats.tx_octets;
4705 ns->tx_packets = stats.tx_frames;
4706 ns->rx_bytes = stats.rx_octets;
4707 ns->rx_packets = stats.rx_frames;
4708 ns->multicast = stats.rx_mcast_frames;
4710 /* detailed rx_errors */
4711 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4713 ns->rx_over_errors = 0;
4714 ns->rx_crc_errors = stats.rx_fcs_err;
4715 ns->rx_frame_errors = stats.rx_symbol_err;
4716 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4717 stats.rx_ovflow2 + stats.rx_ovflow3 +
4718 stats.rx_trunc0 + stats.rx_trunc1 +
4719 stats.rx_trunc2 + stats.rx_trunc3;
4720 ns->rx_missed_errors = 0;
4722 /* detailed tx_errors */
4723 ns->tx_aborted_errors = 0;
4724 ns->tx_carrier_errors = 0;
4725 ns->tx_fifo_errors = 0;
4726 ns->tx_heartbeat_errors = 0;
4727 ns->tx_window_errors = 0;
4729 ns->tx_errors = stats.tx_error_frames;
4730 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4731 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4735 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4738 int ret = 0, prtad, devad;
4739 struct port_info *pi = netdev_priv(dev);
4740 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4744 if (pi->mdio_addr < 0)
4746 data->phy_id = pi->mdio_addr;
4750 if (mdio_phy_id_is_c45(data->phy_id)) {
4751 prtad = mdio_phy_id_prtad(data->phy_id);
4752 devad = mdio_phy_id_devad(data->phy_id);
4753 } else if (data->phy_id < 32) {
4754 prtad = data->phy_id;
4756 data->reg_num &= 0x1f;
4760 mbox = pi->adapter->fn;
4761 if (cmd == SIOCGMIIREG)
4762 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4763 data->reg_num, &data->val_out);
4765 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4766 data->reg_num, data->val_in);
4774 static void cxgb_set_rxmode(struct net_device *dev)
4776 /* unfortunately we can't return errors to the stack */
4777 set_rxmode(dev, -1, false);
4780 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4783 struct port_info *pi = netdev_priv(dev);
4785 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4787 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4794 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4797 struct sockaddr *addr = p;
4798 struct port_info *pi = netdev_priv(dev);
4800 if (!is_valid_ether_addr(addr->sa_data))
4801 return -EADDRNOTAVAIL;
4803 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4804 pi->xact_addr_filt, addr->sa_data, true, true);
4808 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4809 pi->xact_addr_filt = ret;
4813 #ifdef CONFIG_NET_POLL_CONTROLLER
4814 static void cxgb_netpoll(struct net_device *dev)
4816 struct port_info *pi = netdev_priv(dev);
4817 struct adapter *adap = pi->adapter;
4819 if (adap->flags & USING_MSIX) {
4821 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4823 for (i = pi->nqsets; i; i--, rx++)
4824 t4_sge_intr_msix(0, &rx->rspq);
4826 t4_intr_handler(adap)(0, adap);
4830 static const struct net_device_ops cxgb4_netdev_ops = {
4831 .ndo_open = cxgb_open,
4832 .ndo_stop = cxgb_close,
4833 .ndo_start_xmit = t4_eth_xmit,
4834 .ndo_select_queue = cxgb_select_queue,
4835 .ndo_get_stats64 = cxgb_get_stats,
4836 .ndo_set_rx_mode = cxgb_set_rxmode,
4837 .ndo_set_mac_address = cxgb_set_mac_addr,
4838 .ndo_set_features = cxgb_set_features,
4839 .ndo_validate_addr = eth_validate_addr,
4840 .ndo_do_ioctl = cxgb_ioctl,
4841 .ndo_change_mtu = cxgb_change_mtu,
4842 #ifdef CONFIG_NET_POLL_CONTROLLER
4843 .ndo_poll_controller = cxgb_netpoll,
4847 void t4_fatal_err(struct adapter *adap)
4849 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4850 t4_intr_disable(adap);
4851 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4854 /* Return the specified PCI-E Configuration Space register from our Physical
4855 * Function. We try first via a Firmware LDST Command since we prefer to let
4856 * the firmware own all of these registers, but if that fails we go for it
4857 * directly ourselves.
4859 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4861 struct fw_ldst_cmd ldst_cmd;
4865 /* Construct and send the Firmware LDST Command to retrieve the
4866 * specified PCI-E Configuration Space register.
4868 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4869 ldst_cmd.op_to_addrspace =
4870 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
4873 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
4874 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4875 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
4876 ldst_cmd.u.pcie.ctrl_to_fn =
4877 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
4878 ldst_cmd.u.pcie.r = reg;
4879 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
	/* If the LDST Command succeeded, extract the returned register
	 * value.  Otherwise read it directly ourselves.
4886 val = ntohl(ldst_cmd.u.pcie.data[0]);
4888 t4_hw_pci_read_cfg4(adap, reg, &val);
4893 static void setup_memwin(struct adapter *adap)
4895 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
4897 if (is_t4(adap->params.chip)) {
4900 /* Truncation intentional: we only read the bottom 32-bits of
4901 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4902 * mechanism to read BAR0 instead of using
4903 * pci_resource_start() because we could be operating from
4904 * within a Virtual Machine which is trapping our accesses to
4905 * our Configuration Space and we need to set up the PCI-E
4906 * Memory Window decoders with the actual addresses which will
4907 * be coming across the PCI-E link.
4909 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4910 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4911 adap->t4_bar0 = bar0;
4913 mem_win0_base = bar0 + MEMWIN0_BASE;
4914 mem_win1_base = bar0 + MEMWIN1_BASE;
4915 mem_win2_base = bar0 + MEMWIN2_BASE;
4916 mem_win2_aperture = MEMWIN2_APERTURE;
4918 /* For T5, only relative offset inside the PCIe BAR is passed */
4919 mem_win0_base = MEMWIN0_BASE;
4920 mem_win1_base = MEMWIN1_BASE;
4921 mem_win2_base = MEMWIN2_BASE_T5;
4922 mem_win2_aperture = MEMWIN2_APERTURE_T5;
4924 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4925 mem_win0_base | BIR(0) |
4926 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4927 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4928 mem_win1_base | BIR(0) |
4929 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4930 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4931 mem_win2_base | BIR(0) |
4932 WINDOW(ilog2(mem_win2_aperture) - 10));
4933 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
4936 static void setup_memwin_rdma(struct adapter *adap)
4938 if (adap->vres.ocq.size) {
4942 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
4943 start &= PCI_BASE_ADDRESS_MEM_MASK;
4944 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4945 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4947 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4948 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4950 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4951 adap->vres.ocq.start);
4953 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4957 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4962 /* get device capabilities */
4963 memset(c, 0, sizeof(*c));
4964 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4965 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4966 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4967 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4971 /* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
4978 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4981 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4982 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4983 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4987 ret = t4_config_glbl_rss(adap, adap->fn,
4988 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4989 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4990 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4994 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4995 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
5001 /* tweak some settings */
5002 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
5003 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
5004 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
5005 v = t4_read_reg(adap, TP_PIO_DATA);
5006 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
	/* first 4 Tx modulation queues point to consecutive Tx channels:
	 * 0xE4 is 0b11100100, i.e. 2-bit fields mapping queue N to channel N
	 */
	adap->params.tp.tx_modq_map = 0xE4;
5010 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
5011 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_HDR);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_FIFO);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_PCMD);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration.
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL,
			 PKTSHIFT_MASK,
			 PKTSHIFT(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
			       CSUM_HAS_PSEUDO_HDR, 0);

	return 0;
}
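/*
 * A note on why rx_dma_offset defaults to 2: with a 14-byte Ethernet
 * header, DMAing the frame at a 2-byte offset lands the IP header on a
 * 4-byte boundary (14 + 2 = 16), the usual NET_IP_ALIGN trick.  The code
 * above accepts only 0 and 2 since PKTSHIFT is a small hardware pad
 * count, not an arbitrary byte offset.
 */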
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->fn, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
				maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}
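		/*
		 * Worked example of the residual handling above, with
		 * assumed numbers purely for illustration: a 6177-byte
		 * Configuration File gives size = 6176 and resid = 1, so
		 * the first 6176 bytes go out in one t4_memory_rw() and
		 * the final byte is copied into a zero-padded 4-byte word
		 * written with a second transfer at maddr + 6176.
		 */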

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully and note that we're operating with parameters
	 * supplied by the Configuration File rather than hard-wired
	 * initialization constants buried in the driver.
	 */
	adapter->flags |= USING_SOFT_PARAMS;
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
/*
 * Attempt to initialize the adapter via hard-coded, driver supplied
 * parameters ...
 */
static int adap_init0_no_config(struct adapter *adapter, int reset)
{
	struct sge *s = &adapter->sge;
	struct fw_caps_config_cmd caps_cmd;
	u32 v;
	int i, ret;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * Get device capabilities and select which we'll be using.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
		ret = -EINVAL;
		goto bye;
	}
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * Select RSS Global Mode we want to use.  We use "Basic Virtual"
	 * mode which maps each Virtual Interface to its own section of
	 * the RSS Table and we turn on all map and hash enables ...
	 */
	adapter->flags |= RSS_TNLALLLOOKUP;
	ret = t4_config_glbl_rss(adapter, adapter->mbox,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
				 ((adapter->flags & RSS_TNLALLLOOKUP) ?
				  FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
	if (ret < 0)
		goto bye;

	/*
	 * Set up our own fundamental resource provisioning ...
	 */
	ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
			  PFRES_NEQ, PFRES_NETHCTRL,
			  PFRES_NIQFLINT, PFRES_NIQ,
			  PFRES_TC, PFRES_NVI,
			  FW_PFVF_CMD_CMASK_MASK,
			  pfvfres_pmask(adapter, adapter->fn, 0),
			  PFRES_NEXACTF,
			  PFRES_R_CAPS, PFRES_WX_CAPS);
	if (ret < 0)
		goto bye;

	/*
	 * Perform low level SGE initialization.  We need to do this before we
	 * send the firmware the INITIALIZE command because that will cause
	 * any other PF Drivers which are waiting for the Master
	 * Initialization to proceed forward.
	 */
	for (i = 0; i < SGE_NTIMERS - 1; i++)
		s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	s->counter_val[0] = 1;
	for (i = 1; i < SGE_NCOUNTERS; i++)
		s->counter_val[i] = min(intr_cnt[i - 1],
					THRESHOLD_0_GET(THRESHOLD_0_MASK));
	t4_sge_init(adapter);

#ifdef CONFIG_PCI_IOV
	/*
	 * Provision resource limits for Virtual Functions.  We currently
	 * grant them all the same static resource limits except for the Port
	 * Access Rights Mask which we're assigning based on the PF.  All of
	 * the static provisioning stuff for both the PF and VF really needs
	 * to be managed in a persistent manner for each device which the
	 * firmware controls.
	 */
	{
		int pf, vf;

		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
			if (num_vf[pf] <= 0)
				continue;

			/* VF numbering starts at 1! */
			for (vf = 1; vf <= num_vf[pf]; vf++) {
				ret = t4_cfg_pfvf(adapter, adapter->mbox,
						  pf, vf,
						  VFRES_NEQ, VFRES_NETHCTRL,
						  VFRES_NIQFLINT, VFRES_NIQ,
						  VFRES_TC, VFRES_NVI,
						  FW_PFVF_CMD_CMASK_MASK,
						  pfvfres_pmask(adapter, pf, vf),
						  VFRES_NEXACTF,
						  VFRES_R_CAPS, VFRES_WX_CAPS);
				if (ret < 0)
					dev_warn(adapter->pdev_dev,
						 "failed to provision pf/vf=%d/%d; err=%d\n",
						 pf, vf, ret);
			}
		}
	}
#endif

	/*
	 * Set up the default filter mode.  Later we'll want to implement this
	 * via a firmware command, etc. ...  This needs to be done before the
	 * firmware initialization command ...  If the selected set of fields
	 * isn't equal to the default value, we'll need to make sure that the
	 * field selections will fit in the 36-bit budget.
	 */
	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
		int j, bits = 0;

		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
			switch (tp_vlan_pri_map & (1 << j)) {
			case 0:
				/* compressed filter field not enabled */
				break;
			case FCOE_MASK:
				bits +=  1;
				break;
			case PORT_MASK:
				bits +=  3;
				break;
			case VNIC_ID_MASK:
				bits += 17;
				break;
			case VLAN_MASK:
				bits += 17;
				break;
			case TOS_MASK:
				bits +=  8;
				break;
			case PROTOCOL_MASK:
				bits +=  8;
				break;
			case ETHERTYPE_MASK:
				bits += 16;
				break;
			case MACMATCH_MASK:
				bits +=  9;
				break;
			case MPSHITTYPE_MASK:
				bits +=  3;
				break;
			case FRAGMENTATION_MASK:
				bits +=  1;
				break;
			}

		if (bits > 36) {
			dev_err(adapter->pdev_dev,
				"tp_vlan_pri_map=%#x needs %d bits > 36; using %#x\n",
				tp_vlan_pri_map, bits,
				TP_VLAN_PRI_MAP_DEFAULT);
			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
		}
	}
	v = tp_vlan_pri_map;
	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, TP_VLAN_PRI_MAP);
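	/*
	 * Budget arithmetic for the check above, as an illustration using
	 * the per-field widths in the switch: selecting, say, VLAN (17) +
	 * ETHERTYPE (16) + PORT (3) consumes exactly the 36 available bits,
	 * while VLAN (17) + VNIC_ID (17) + PORT (3) = 37 would overflow and
	 * be forced back to TP_VLAN_PRI_MAP_DEFAULT.
	 */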

	/*
	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
	 * order to support any of the compressed filter fields above.
	 * Newer versions of the firmware do this automatically but it
	 * doesn't hurt to set it here.  Meanwhile, we do _not_ need to set
	 * Lookup Every Packet in TP_INGRESS_CONFIG to support matching
	 * non-TCP packets since the firmware automatically turns this on
	 * and off when we have a non-zero number of filters active (since
	 * it does have a performance impact).
	 */
	if (tp_vlan_pri_map)
		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
				 FIVETUPLELOOKUP_MASK,
				 FIVETUPLELOOKUP_MASK);

	/*
	 * Tweak some settings.
	 */
	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));

	/*
	 * Get basic stuff going by issuing the Firmware Initialize command.
	 * Note that this _must_ be after all PFVF commands ...
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully!
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using built-in driver parameters\n");
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/*
	 * Contact FW, advertising Master capability (and potentially forcing
	 * ourselves as the Master PF if our module parameter force_init is
	 * set).
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
			  force_init ? MASTER_MUST : MASTER_MAY,
			  &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
	if (force_init && state == DEV_STATE_INIT)
		state = DEV_STATE_UNINIT;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}

	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters.  Otherwise, it's time to try initializing the
	 * adapter ...
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
		adap->flags |= USING_SOFT_PARAMS;
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: Initializing adapter\n");

		/*
		 * If the firmware doesn't support Configuration
		 * Files, warn the user and exit.
		 */
		if (ret < 0)
			dev_warn(adap->pdev_dev, "Firmware doesn't support configuration file.\n");
		if (force_old_init)
			ret = adap_init0_no_config(adap, reset);
		else {
			/*
			 * Find out whether we're dealing with a version of
			 * the firmware which has configuration file support.
			 */
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
					      params, val);

			/*
			 * If the firmware doesn't support Configuration
			 * Files, use the old Driver-based, hard-wired
			 * initialization.  Otherwise, try using the
			 * Configuration File support and fall back to the
			 * Driver-based initialization if there's no
			 * Configuration File found.
			 */
			if (ret < 0)
				ret = adap_init0_no_config(adap, reset);
			else {
				/*
				 * The firmware provides us with a memory
				 * buffer where we can load a Configuration
				 * File from the host if we want to override
				 * the Configuration File in flash.
				 */
				ret = adap_init0_config(adap, reset);
				if (ret == -ENOENT) {
					dev_info(adap->pdev_dev,
						 "No Configuration File present on adapter.  Using hard-wired configuration parameters.\n");
					ret = adap_init0_no_config(adap,
								   reset);
				}
			}
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"could not initialize adapter, error %d\n",
				-ret);
			goto bye;
		}
	}

	/*
	 * If we're living with non-hard-coded parameters (either from a
	 * Firmware Configuration File or values programmed by a different PF
	 * Driver), give the SGE code a chance to pull in anything that it
	 * needs ...  Note that this must be called after we retrieve our VPD
	 * parameters in order to know how to convert core ticks to seconds.
	 */
	if (adap->flags & USING_SOFT_PARAMS) {
		ret = t4_sge_init(adap);
		if (ret < 0)
			goto bye;
	}

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	FW_PARAMS_PARAM_Y(0) | \
	FW_PARAMS_PARAM_Z(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If the Active filter size is set we enable establishing
	 * offload connections through firmware work requests.
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Set up the server filter region by dividing the available
		 * filter region into two parts: regular filters get 1/3rd of
		 * the space and server filters get the remaining 2/3rds.
		 * This is only enabled if the workaround path is enabled.
		 * 1. Regular filters.
		 * 2. Server filters: special filters which are used to
		 *    redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
				adap->tids.ftid_base;
		}
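		/*
		 * Split arithmetic, with assumed numbers purely for
		 * illustration: with nftids = 96, regular filters keep
		 * DIV_ROUND_UP(96, 3) = 32 IDs starting at ftid_base and
		 * the remaining 64 IDs (the 2/3rds) become server filters
		 * starting at sftid_base = ftid_base + 32.
		 */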
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/* The MTU/MSS Table is initialized by now, so load its values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO,
	 * the firmware is either not operating within its spec or something
	 * catastrophic happened to the HW/FW, so stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};
static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();
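	/*
	 * Worked example with illustrative numbers (assuming
	 * MAX_ETH_QSETS = 32): a 4-port adapter with two 10G ports
	 * (n10g = 2) computes q10g = (32 - 2) / 2 = 15, which is then
	 * capped by netif_get_num_default_rss_queues() (8 on many
	 * systems), leaving each 10G port with 8 queue sets and each
	 * 1G port with 1.
	 */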
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		s->rdmaciqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 3 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;  /* round down */
	}
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}
static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}
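/*
 * For reference, ethtool_rxfh_indir_default(j, n) is simply j % n, so the
 * loop above spreads RSS indirection table entries round-robin across the
 * port's nqsets Rx queues; e.g. nqsets = 4 yields the repeating pattern
 * 0, 1, 2, 3, 0, 1, 2, 3, ...
 */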
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B in size.  Write coalescing is enabled
		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
		 * for the queue is less than the number of segments that can
		 * be accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
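		/*
		 * Example of the check above: with a 4KB PAGE_SIZE and
		 * 128-byte segments, num_seg = 4096 / 128 = 32, so write
		 * coalescing stays usable only while the chip is
		 * configured for at most 32 egress queues per page.
		 */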
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		netdev->ethtool_ops = &cxgb_ethtool_ops;
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif

	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);