2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following conditions are met:
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following disclaimer.
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 #include <linux/bitops.h>
41 #include <linux/cache.h>
42 #include <linux/interrupt.h>
43 #include <linux/list.h>
44 #include <linux/netdevice.h>
45 #include <linux/pci.h>
46 #include <linux/spinlock.h>
47 #include <linux/timer.h>
48 #include <linux/vmalloc.h>
50 #include "cxgb4_uld.h"
52 #define T4FW_VERSION_MAJOR 0x01
53 #define T4FW_VERSION_MINOR 0x0C
54 #define T4FW_VERSION_MICRO 0x19
55 #define T4FW_VERSION_BUILD 0x00
57 #define T5FW_VERSION_MAJOR 0x01
58 #define T5FW_VERSION_MINOR 0x0C
59 #define T5FW_VERSION_MICRO 0x19
60 #define T5FW_VERSION_BUILD 0x00
62 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
65 MAX_NPORTS = 4, /* max # of ports */
66 SERNUM_LEN = 24, /* Serial # length */
67 EC_LEN = 16, /* E/C length */
68 ID_LEN = 16, /* ID length */
69 PN_LEN = 16, /* Part Number length */
81 MEMWIN0_APERTURE = 2048,
82 MEMWIN0_BASE = 0x1b800,
83 MEMWIN1_APERTURE = 32768,
84 MEMWIN1_BASE = 0x28000,
85 MEMWIN1_BASE_T5 = 0x52000,
86 MEMWIN2_APERTURE = 65536,
87 MEMWIN2_BASE = 0x30000,
88 MEMWIN2_APERTURE_T5 = 131072,
89 MEMWIN2_BASE_T5 = 0x60000,
107 PAUSE_AUTONEG = 1 << 2
111 u64 tx_octets; /* total # of octets in good frames */
112 u64 tx_frames; /* all good frames */
113 u64 tx_bcast_frames; /* all broadcast frames */
114 u64 tx_mcast_frames; /* all multicast frames */
115 u64 tx_ucast_frames; /* all unicast frames */
116 u64 tx_error_frames; /* all error frames */
118 u64 tx_frames_64; /* # of Tx frames in a particular range */
119 u64 tx_frames_65_127;
120 u64 tx_frames_128_255;
121 u64 tx_frames_256_511;
122 u64 tx_frames_512_1023;
123 u64 tx_frames_1024_1518;
124 u64 tx_frames_1519_max;
126 u64 tx_drop; /* # of dropped Tx frames */
127 u64 tx_pause; /* # of transmitted pause frames */
128 u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
129 u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
130 u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
131 u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
132 u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
133 u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
134 u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
135 u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
137 u64 rx_octets; /* total # of octets in good frames */
138 u64 rx_frames; /* all good frames */
139 u64 rx_bcast_frames; /* all broadcast frames */
140 u64 rx_mcast_frames; /* all multicast frames */
141 u64 rx_ucast_frames; /* all unicast frames */
142 u64 rx_too_long; /* # of frames exceeding MTU */
143 u64 rx_jabber; /* # of jabber frames */
144 u64 rx_fcs_err; /* # of received frames with bad FCS */
145 u64 rx_len_err; /* # of received frames with length error */
146 u64 rx_symbol_err; /* symbol errors */
147 u64 rx_runt; /* # of short frames */
149 u64 rx_frames_64; /* # of Rx frames in a particular range */
150 u64 rx_frames_65_127;
151 u64 rx_frames_128_255;
152 u64 rx_frames_256_511;
153 u64 rx_frames_512_1023;
154 u64 rx_frames_1024_1518;
155 u64 rx_frames_1519_max;
157 u64 rx_pause; /* # of received pause frames */
158 u64 rx_ppp0; /* # of received PPP prio 0 frames */
159 u64 rx_ppp1; /* # of received PPP prio 1 frames */
160 u64 rx_ppp2; /* # of received PPP prio 2 frames */
161 u64 rx_ppp3; /* # of received PPP prio 3 frames */
162 u64 rx_ppp4; /* # of received PPP prio 4 frames */
163 u64 rx_ppp5; /* # of received PPP prio 5 frames */
164 u64 rx_ppp6; /* # of received PPP prio 6 frames */
165 u64 rx_ppp7; /* # of received PPP prio 7 frames */
167 u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
168 u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
169 u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
170 u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
171 u64 rx_trunc0; /* buffer-group 0 truncated packets */
172 u64 rx_trunc1; /* buffer-group 1 truncated packets */
173 u64 rx_trunc2; /* buffer-group 2 truncated packets */
174 u64 rx_trunc3; /* buffer-group 3 truncated packets */
177 struct lb_port_stats {
190 u64 frames_1024_1518;
205 struct tp_tcp_stats {
212 struct tp_err_stats {
217 u32 ofldChanDrops[4];
219 u32 ofldVlanDrops[4];
226 u32 hps; /* host page size for our PF/VF */
227 u32 eq_qpp; /* egress queues/page for our PF/VF */
228 u32 iq_qpp; /* ingress queues/page for our PF/VF */
232 unsigned int ntxchan; /* # of Tx channels */
233 unsigned int tre; /* log2 of core clocks per TP tick */
234 unsigned short tx_modq_map; /* TX modulation scheduler queue to channel map */
237 uint32_t dack_re; /* DACK timer resolution */
238 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
240 u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
241 u32 ingress_config; /* cached TP_INGRESS_CONFIG */
243 /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
244 * subset of the set of fields which may be present in the Compressed
245 * Filter Tuple portion of filters and TCP TCB connections. The
246 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
247 * Since a variable number of fields may or may not be present, their
248 * shifted field positions within the Compressed Filter Tuple may
249 * vary, or not even be present if the field isn't selected in
250 * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
251 * places we store their offsets here, or a -1 if the field isn't present.
263 u8 sn[SERNUM_LEN + 1];
273 #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
274 #define CHELSIO_CHIP_FPGA 0x100
275 #define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
276 #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
278 #define CHELSIO_T4 0x4
279 #define CHELSIO_T5 0x5
282 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
283 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
284 T4_FIRST_REV = T4_A1,
287 T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
288 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
289 T5_FIRST_REV = T5_A0,
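/* Worked example of the encoding above: T5_A1 is
 * CHELSIO_CHIP_CODE(CHELSIO_T5, 1) = (0x5 << 4) | 0x1 = 0x51, so
 * CHELSIO_CHIP_VERSION(0x51) == 0x5 and CHELSIO_CHIP_RELEASE(0x51) == 0x1;
 * the is_t4()/is_t5() helpers further down key off the version nibble only.
 */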
293 struct devlog_params {
294 u32 memtype; /* which memory (EDC0, EDC1, MC) */
295 u32 start; /* start of log in firmware memory */
296 u32 size; /* size of log */
299 struct adapter_params {
300 struct sge_params sge;
302 struct vpd_params vpd;
303 struct pci_params pci;
304 struct devlog_params devlog;
305 enum pcie_memwin drv_memwin;
307 unsigned int cim_la_size;
309 unsigned int sf_size; /* serial flash size in bytes */
310 unsigned int sf_nsec; /* # of flash sectors */
311 unsigned int sf_fw_start; /* start of FW image in flash */
313 unsigned int fw_vers; /* firmware version */
314 unsigned int tp_vers; /* TP microcode version */
317 unsigned short mtus[NMTUS];
318 unsigned short a_wnd[NCCTRL_WIN];
319 unsigned short b_wnd[NCCTRL_WIN];
321 unsigned char nports; /* # of ethernet ports */
322 unsigned char portvec;
323 enum chip_type chip; /* chip code */
324 unsigned char offload;
326 unsigned char bypass;
328 unsigned int ofldq_wr_cred;
329 bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
331 unsigned int max_ordird_qp; /* Max read depth per RDMA QP */
332 unsigned int max_ird_adapter; /* Max read depth per adapter */
335 #include "t4fw_api.h"
337 #define FW_VERSION(chip) ( \
338 FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
339 FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
340 FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
341 FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
342 #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
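/* Worked example: with the T4 values defined near the top of this file,
 * FW_VERSION(T4) expands to FW_HDR_FW_VER_MAJOR_G(0x01) |
 * FW_HDR_FW_VER_MINOR_G(0x0C) | FW_HDR_FW_VER_MICRO_G(0x19) |
 * FW_HDR_FW_VER_BUILD_G(0x00), i.e. the packed encoding of firmware
 * version 1.12.25.0.
 */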
348 struct fw_hdr fw_hdr;
352 struct trace_params {
353 u32 data[TRACE_LEN / 4];
354 u32 mask[TRACE_LEN / 4];
355 unsigned short snap_len;
356 unsigned short min_len;
357 unsigned char skip_ofst;
358 unsigned char skip_len;
359 unsigned char invert;
364 unsigned short supported; /* link capabilities */
365 unsigned short advertising; /* advertised capabilities */
366 unsigned short requested_speed; /* speed user has requested */
367 unsigned short speed; /* actual link speed */
368 unsigned char requested_fc; /* flow control user has requested */
369 unsigned char fc; /* actual link flow control */
370 unsigned char autoneg; /* autonegotiating? */
371 unsigned char link_ok; /* link up? */
374 #define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
377 MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
378 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
379 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
380 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
381 MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */
382 MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
386 INGQ_EXTRAS = 2, /* firmware event queue and */
387 /* forwarded interrupts */
388 MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
389 + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
390 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
391 + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
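/* With the sizes above (and NCHAN == 4), MAX_EGRQ works out to
 * 32*2 + 16*2 + 4 + 4 + 4 = 108 and MAX_INGQ to
 * 32 + 16 + 4 + 4 + 4 + 2 = 62; these bound the egr_map[] and
 * ingr_map[] lookup tables in struct sge below.
 */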
397 #include "cxgb4_dcb.h"
400 struct adapter *adapter;
402 s16 xact_addr_filt; /* index of exact MAC address filter */
403 u16 rss_size; /* size of VI's RSS table slice */
405 enum fw_port_type port_type;
409 u8 lport; /* associated offload logical port */
410 u8 nqsets; /* # of qsets */
411 u8 first_qset; /* index of first qset */
413 struct link_config link_cfg;
415 #ifdef CONFIG_CHELSIO_T4_DCB
416 struct port_dcb_info dcb; /* Data Center Bridging support */
423 enum { /* adapter flags */
424 FULL_INIT_DONE = (1 << 0),
425 DEV_ENABLED = (1 << 1),
426 USING_MSI = (1 << 2),
427 USING_MSIX = (1 << 3),
429 RSS_TNLALLLOOKUP = (1 << 5),
430 USING_SOFT_PARAMS = (1 << 6),
431 MASTER_PF = (1 << 7),
432 FW_OFLD_CONN = (1 << 9),
437 struct sge_fl { /* SGE free-buffer queue state */
438 unsigned int avail; /* # of available Rx buffers */
439 unsigned int pend_cred; /* new buffers since last FL DB ring */
440 unsigned int cidx; /* consumer index */
441 unsigned int pidx; /* producer index */
442 unsigned long alloc_failed; /* # of times buffer allocation failed */
443 unsigned long large_alloc_failed; /* # of times large-page buffer allocation failed */
444 unsigned long starving; /* # of times the free list was found starving */
446 unsigned int cntxt_id; /* SGE context id for the free list */
447 unsigned int size; /* capacity of free list */
448 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
449 __be64 *desc; /* address of HW Rx descriptor ring */
450 dma_addr_t addr; /* bus address of HW ring start */
451 void __iomem *bar2_addr; /* address of BAR2 Queue registers */
452 unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
455 /* A packet gather list */
457 struct page_frag frags[MAX_SKB_FRAGS];
458 void *va; /* virtual address of first byte */
459 unsigned int nfrags; /* # of fragments */
460 unsigned int tot_len; /* total length of fragments */
463 typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
464 const struct pkt_gl *gl);
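/* Minimal sketch (not part of the driver) of a handler matching
 * rspq_handler_t: "rsp" points at the response descriptor just taken off
 * the queue and "gl" is non-NULL only when packet data was delivered as a
 * gather list rather than inside the response itself.
 */
static inline int example_rspq_handler(struct sge_rspq *q, const __be64 *rsp,
				       const struct pkt_gl *gl)
{
	if (!gl)		/* no packet payload with this response */
		return 0;
	/* A real handler, e.g. t4_ethrx_handler(), would build an skb from
	 * gl->va / gl->frags here and hand it up the stack.
	 */
	return 0;
}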
466 struct sge_rspq { /* state for an SGE response queue */
467 struct napi_struct napi;
468 const __be64 *cur_desc; /* current descriptor in queue */
469 unsigned int cidx; /* consumer index */
470 u8 gen; /* current generation bit */
471 u8 intr_params; /* interrupt holdoff parameters */
472 u8 next_intr_params; /* holdoff params for next interrupt */
474 u8 pktcnt_idx; /* interrupt packet threshold */
475 u8 uld; /* ULD handling this queue */
476 u8 idx; /* queue index within its group */
477 int offset; /* offset into current Rx buffer */
478 u16 cntxt_id; /* SGE context id for the response q */
479 u16 abs_id; /* absolute SGE id for the response q */
480 __be64 *desc; /* address of HW response ring */
481 dma_addr_t phys_addr; /* physical address of the ring */
482 void __iomem *bar2_addr; /* address of BAR2 Queue registers */
483 unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
484 unsigned int iqe_len; /* entry size */
485 unsigned int size; /* capacity of response queue */
486 struct adapter *adap;
487 struct net_device *netdev; /* associated net device */
488 rspq_handler_t handler;
491 struct sge_eth_stats { /* Ethernet queue statistics */
492 unsigned long pkts; /* # of ethernet packets */
493 unsigned long lro_pkts; /* # of LRO super packets */
494 unsigned long lro_merged; /* # of wire packets merged by LRO */
495 unsigned long rx_cso; /* # of Rx checksum offloads */
496 unsigned long vlan_ex; /* # of Rx VLAN extractions */
497 unsigned long rx_drops; /* # of packets dropped due to no mem */
500 struct sge_eth_rxq { /* SW Ethernet Rx queue */
501 struct sge_rspq rspq;
503 struct sge_eth_stats stats;
504 } ____cacheline_aligned_in_smp;
506 struct sge_ofld_stats { /* offload queue statistics */
507 unsigned long pkts; /* # of packets */
508 unsigned long imm; /* # of immediate-data packets */
509 unsigned long an; /* # of asynchronous notifications */
510 unsigned long nomem; /* # of responses deferred due to no mem */
513 struct sge_ofld_rxq { /* SW offload Rx queue */
514 struct sge_rspq rspq;
516 struct sge_ofld_stats stats;
517 } ____cacheline_aligned_in_smp;
526 unsigned int in_use; /* # of in-use Tx descriptors */
527 unsigned int size; /* # of descriptors */
528 unsigned int cidx; /* SW consumer index */
529 unsigned int pidx; /* producer index */
530 unsigned long stops; /* # of times q has been stopped */
531 unsigned long restarts; /* # of queue restarts */
532 unsigned int cntxt_id; /* SGE context id for the Tx q */
533 struct tx_desc *desc; /* address of HW Tx descriptor ring */
534 struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
535 struct sge_qstat *stat; /* queue status entry */
536 dma_addr_t phys_addr; /* physical address of the ring */
539 unsigned short db_pidx;
540 unsigned short db_pidx_inc;
541 void __iomem *bar2_addr; /* address of BAR2 Queue registers */
542 unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
545 struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
547 struct netdev_queue *txq; /* associated netdev TX queue */
548 #ifdef CONFIG_CHELSIO_T4_DCB
549 u8 dcb_prio; /* DCB Priority bound to queue */
551 unsigned long tso; /* # of TSO requests */
552 unsigned long tx_cso; /* # of Tx checksum offloads */
553 unsigned long vlan_ins; /* # of Tx VLAN insertions */
554 unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
555 } ____cacheline_aligned_in_smp;
557 struct sge_ofld_txq { /* state for an SGE offload Tx queue */
559 struct adapter *adap;
560 struct sk_buff_head sendq; /* list of backpressured packets */
561 struct tasklet_struct qresume_tsk; /* restarts the queue */
562 u8 full; /* the Tx ring is full */
563 unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
564 } ____cacheline_aligned_in_smp;
566 struct sge_ctrl_txq { /* state for an SGE control Tx queue */
568 struct adapter *adap;
569 struct sk_buff_head sendq; /* list of backpressured packets */
570 struct tasklet_struct qresume_tsk; /* restarts the queue */
571 u8 full; /* the Tx ring is full */
572 } ____cacheline_aligned_in_smp;
575 struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
576 struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
577 struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
579 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
580 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
581 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
582 struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
583 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
585 struct sge_rspq intrq ____cacheline_aligned_in_smp;
586 spinlock_t intrq_lock;
588 u16 max_ethqsets; /* # of available Ethernet queue sets */
589 u16 ethqsets; /* # of active Ethernet queue sets */
590 u16 ethtxq_rover; /* Tx queue to clean up next */
591 u16 ofldqsets; /* # of active offload queue sets */
592 u16 rdmaqs; /* # of available RDMA Rx queues */
593 u16 rdmaciqs; /* # of available RDMA concentrator IQs */
594 u16 ofld_rxq[MAX_OFLD_QSETS];
597 u16 timer_val[SGE_NTIMERS];
598 u8 counter_val[SGE_NCOUNTERS];
599 u32 fl_pg_order; /* large page allocation size */
600 u32 stat_len; /* length of status page at ring end */
601 u32 pktshift; /* padding between CPL & packet data */
602 u32 fl_align; /* response queue message alignment */
603 u32 fl_starve_thres; /* Free List starvation threshold */
605 /* State variables for detecting an SGE Ingress DMA hang */
606 unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
607 unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
608 unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
609 unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
611 unsigned int egr_start; /* first SGE egress queue context ID */
612 unsigned int ingr_start; /* first SGE ingress queue context ID */
613 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
614 struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
615 DECLARE_BITMAP(starving_fl, MAX_EGRQ);
616 DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
617 struct timer_list rx_timer; /* refills starving FLs */
618 struct timer_list tx_timer; /* checks Tx queues */
621 #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
622 #define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
623 #define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
624 #define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
628 #ifdef CONFIG_PCI_IOV
630 /* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial
631 * Configuration initialization for T5 only has SR-IOV functionality enabled
632 * on PF0-3 in order to simplify everything.
634 #define NUM_OF_PF_WITH_SRIOV 4
642 struct pci_dev *pdev;
643 struct device *pdev_dev;
651 struct adapter_params params;
652 struct cxgb4_virt_res vres;
659 char desc[IFNAMSIZ + 10];
660 } msix_info[MAX_INGQ + 1];
664 struct net_device *port[MAX_NPORTS];
665 u8 chan_map[NCHAN]; /* channel -> port map */
668 unsigned int l2t_start;
669 unsigned int l2t_end;
670 struct l2t_data *l2t;
671 void *uld_handle[CXGB4_ULD_MAX];
672 struct list_head list_node;
673 struct list_head rcu_node;
675 struct tid_info tids;
676 void **tid_release_head;
677 spinlock_t tid_release_lock;
678 struct workqueue_struct *workq;
679 struct work_struct tid_release_task;
680 struct work_struct db_full_task;
681 struct work_struct db_drop_task;
682 bool tid_release_task_busy;
684 struct dentry *debugfs_root;
686 spinlock_t stats_lock;
687 spinlock_t win0_lock ____cacheline_aligned_in_smp;
690 /* Defined bit width of user definable filter tuples
692 #define ETHTYPE_BITWIDTH 16
693 #define FRAG_BITWIDTH 1
694 #define MACIDX_BITWIDTH 9
695 #define FCOE_BITWIDTH 1
696 #define IPORT_BITWIDTH 3
697 #define MATCHTYPE_BITWIDTH 3
698 #define PROTO_BITWIDTH 8
699 #define TOS_BITWIDTH 8
700 #define PF_BITWIDTH 8
701 #define VF_BITWIDTH 8
702 #define IVLAN_BITWIDTH 16
703 #define OVLAN_BITWIDTH 16
705 /* Filter matching rules. These consist of a set of ingress packet field
706 * (value, mask) tuples. The associated ingress packet field matches the
707 * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
708 * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
709 * matches an ingress packet when all of the individual field
710 * matching rules are true.
712 * Partial field masks are always valid; however, while their meaning is
713 * easy to understand for some fields (e.g. an IP address to match a
714 * subnet), for others constructing a sensible partial mask is less
715 * intuitive (e.g. the MPS match type) ...
717 * Most of the following data structures are modeled on T4 capabilities.
718 * Drivers for earlier chips use the subsets which make sense for those chips.
719 * We really need to come up with a hardware-independent mechanism to
720 * represent hardware filter capabilities ...
722 struct ch_filter_tuple {
723 /* Compressed header matching field rules. The TP_VLAN_PRI_MAP
724 * register selects which of these fields will participate in the
725 * filter match rules -- up to a maximum of 36 bits. Because
726 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
729 uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
730 uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
731 uint32_t ivlan_vld:1; /* inner VLAN valid */
732 uint32_t ovlan_vld:1; /* outer VLAN valid */
733 uint32_t pfvf_vld:1; /* PF/VF valid */
734 uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
735 uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
736 uint32_t iport:IPORT_BITWIDTH; /* ingress port */
737 uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
738 uint32_t proto:PROTO_BITWIDTH; /* protocol type */
739 uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
740 uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
741 uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
742 uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
743 uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
745 /* Uncompressed header matching field rules. These are always
746 * available for field rules.
748 uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
749 uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
750 uint16_t lport; /* local port */
751 uint16_t fport; /* foreign port */
754 /* A filter ioctl command.
756 struct ch_filter_specification {
757 /* Administrative fields for filter.
759 uint32_t hitcnts:1; /* count filter hits in TCB */
760 uint32_t prio:1; /* filter has priority over active/server */
762 /* Fundamental filter typing. This is the one element of filter
763 * matching that doesn't exist as a (value, mask) tuple.
765 uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
767 /* Packet dispatch information. Ingress packets which match the
768 * filter rules will be dropped, passed to the host or switched back
769 * out as egress packets.
771 uint32_t action:2; /* drop, pass, switch */
773 uint32_t rpttid:1; /* report TID in RSS hash field */
775 uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
776 uint32_t iq:10; /* ingress queue */
778 uint32_t maskhash:1; /* dirsteer=0: store RSS hash in TCB */
779 uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
780 /* 1 => TCB contains IQ ID */
782 /* Switch proxy/rewrite fields. An ingress packet which matches a
783 * filter with "switch" set will be looped back out as an egress
784 * packet -- potentially with some Ethernet header rewriting.
786 uint32_t eport:2; /* egress port to switch packet out */
787 uint32_t newdmac:1; /* rewrite destination MAC address */
788 uint32_t newsmac:1; /* rewrite source MAC address */
789 uint32_t newvlan:2; /* rewrite VLAN Tag */
790 uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
791 uint8_t smac[ETH_ALEN]; /* new source MAC address */
792 uint16_t vlan; /* VLAN Tag to insert */
794 /* Filter rule value/mask pairs.
796 struct ch_filter_tuple val;
797 struct ch_filter_tuple mask;
801 FILTER_PASS = 0, /* default */
807 VLAN_NOCHANGE = 0, /* default */
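/* Illustrative sketch of filling in a filter specification: pass IPv4 TCP
 * traffic with local port 80 to the host, steer it to ingress queue 0 and
 * count hits in the TCB.  Note that "proto" is a compressed-tuple field
 * and only participates if the protocol field is selected in
 * TP_VLAN_PRI_MAP, whereas "lport" is always available.
 */
static inline void example_fill_filter(struct ch_filter_specification *fs)
{
	memset(fs, 0, sizeof(*fs));
	fs->type = 0;			/* IPv4 */
	fs->action = FILTER_PASS;	/* pass to the host */
	fs->hitcnts = 1;		/* count filter hits in the TCB */
	fs->dirsteer = 1;		/* steer to a specific ingress queue */
	fs->iq = 0;
	fs->val.proto = 6;		/* TCP */
	fs->mask.proto = 0xff;		/* match all 8 protocol bits */
	fs->val.lport = 80;
	fs->mask.lport = 0xffff;	/* match the full local port */
}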
813 static inline int is_t5(enum chip_type chip)
815 return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
818 static inline int is_t4(enum chip_type chip)
820 return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
823 static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
825 return readl(adap->regs + reg_addr);
828 static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
830 writel(val, adap->regs + reg_addr);
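/* Fallback 64-bit MMIO accessors for platforms that do not provide native
 * readq()/writeq(): the 64-bit register is accessed as two 32-bit halves
 * (low word first), so the access is not atomic with respect to the
 * hardware.
 */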
834 static inline u64 readq(const volatile void __iomem *addr)
836 return readl(addr) + ((u64)readl(addr + 4) << 32);
839 static inline void writeq(u64 val, volatile void __iomem *addr)
841 writel(val, addr);
842 writel(val >> 32, addr + 4);
846 static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
848 return readq(adap->regs + reg_addr);
851 static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
853 writeq(val, adap->regs + reg_addr);
857 * netdev2pinfo - return the port_info structure associated with a net_device
860 * Return the struct port_info associated with a net_device
862 static inline struct port_info *netdev2pinfo(const struct net_device *dev)
864 return netdev_priv(dev);
868 * adap2pinfo - return the port_info of a port
869 * @adap: the adapter
870 * @idx: the port index
872 * Return the port_info structure for the port of the given index.
874 static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
876 return netdev_priv(adap->port[idx]);
880 * netdev2adap - return the adapter structure associated with a net_device
883 * Return the struct adapter associated with a net_device
885 static inline struct adapter *netdev2adap(const struct net_device *dev)
887 return netdev2pinfo(dev)->adapter;
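/* Hypothetical usage sketch: an ndo_* handler recovering the per-port and
 * per-adapter state behind its net_device argument via the helpers above.
 */
static inline void example_note_link(struct net_device *dev)
{
	const struct port_info *pi = netdev2pinfo(dev);

	CH_WARN(netdev2adap(dev), "lport %u link is %s\n", pi->lport,
		pi->link_cfg.link_ok ? "up" : "down");
}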
890 void t4_os_portmod_changed(const struct adapter *adap, int port_id);
891 void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
893 void *t4_alloc_mem(size_t size);
895 void t4_free_sge_resources(struct adapter *adap);
896 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
897 irq_handler_t t4_intr_handler(struct adapter *adap);
898 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
899 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
900 const struct pkt_gl *gl);
901 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
902 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
903 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
904 struct net_device *dev, int intr_idx,
905 struct sge_fl *fl, rspq_handler_t hnd);
906 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
907 struct net_device *dev, struct netdev_queue *netdevq,
909 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
910 struct net_device *dev, unsigned int iqid,
911 unsigned int cmplqid);
912 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
913 struct net_device *dev, unsigned int iqid);
914 irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
915 int t4_sge_init(struct adapter *adap);
916 void t4_sge_start(struct adapter *adap);
917 void t4_sge_stop(struct adapter *adap);
918 extern int dbfifo_int_thresh;
920 #define for_each_port(adapter, iter) \
921 for (iter = 0; iter < (adapter)->params.nports; ++iter)
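/* Sketch of iterating over all ports of an adapter with for_each_port():
 * count how many currently report link up.
 */
static inline int example_links_up(struct adapter *adap)
{
	int i, n = 0;

	for_each_port(adap, i)
		if (adap2pinfo(adap, i)->link_cfg.link_ok)
			n++;
	return n;
}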
923 static inline int is_bypass(struct adapter *adap)
925 return adap->params.bypass;
928 static inline int is_bypass_device(int device)
930 /* this should be set based upon device capabilities */
940 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
942 return adap->params.vpd.cclk / 1000;
945 static inline unsigned int us_to_core_ticks(const struct adapter *adap,
948 return (us * adap->params.vpd.cclk) / 1000;
951 static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
954 /* add Core Clock / 2 to round ticks to nearest uS */
955 return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
956 adapter->params.vpd.cclk);
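/* Worked example, assuming the VPD core clock is reported in kHz as the
 * conversions above imply: with cclk == 250000 (a 250 MHz core clock),
 * core_ticks_per_usec() == 250, us_to_core_ticks(adap, 4) == 1000 and
 * core_ticks_to_us(adapter, 1000) == (1000 * 1000 + 125000) / 250000 == 4.
 */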
959 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
962 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
963 void *rpl, bool sleep_ok);
965 static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
968 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
971 static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
974 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
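/* Minimal sketch (illustrative only) of driving the mailbox helpers above:
 * build a firmware command from t4fw_api.h and hand it to t4_wr_mbox(),
 * which sleeps until the firmware replies.  struct fw_reset_cmd and the
 * FW_CMD_* and FW_RESET_CMD macros are assumed to match t4fw_api.h;
 * t4_fw_reset() declared later in this header is the driver's real
 * implementation of this operation.
 */
static inline int example_issue_reset(struct adapter *adap, unsigned int mbox,
				      int reset)
{
	struct fw_reset_cmd c;		/* layout assumed from t4fw_api.h */

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}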
977 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
978 unsigned int data_reg, const u32 *vals,
979 unsigned int nregs, unsigned int start_idx);
980 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
981 unsigned int data_reg, u32 *vals, unsigned int nregs,
982 unsigned int start_idx);
983 void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);
987 void t4_intr_enable(struct adapter *adapter);
988 void t4_intr_disable(struct adapter *adapter);
989 int t4_slow_intr_handler(struct adapter *adapter);
991 int t4_wait_dev_ready(void __iomem *regs);
992 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
993 struct link_config *lc);
994 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
996 #define T4_MEMORY_WRITE 0
997 #define T4_MEMORY_READ 1
998 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
999 __be32 *buf, int dir);
1000 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
1001 u32 len, __be32 *buf)
1003 return t4_memory_rw(adap, 0, mtype, addr, len, buf, T4_MEMORY_WRITE);
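/* Illustrative counterpart to t4_memory_write() above: read adapter memory
 * (EDC0 here; MEM_EDC0 is assumed to come from the shared hardware
 * definitions) through PCI-E memory window 0.  Data is handled as
 * big-endian 32-bit words, matching the t4_memory_rw() prototype.
 */
static inline int example_read_edc0(struct adapter *adap, u32 addr, u32 len,
				    __be32 *buf)
{
	return t4_memory_rw(adap, 0, MEM_EDC0 /* assumed name */, addr, len,
			    buf, T4_MEMORY_READ);
}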
1006 int t4_seeprom_wp(struct adapter *adapter, bool enable);
1007 int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
1008 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
1009 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
1010 const u8 *fw_data, unsigned int size, int force);
1011 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
1012 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
1013 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
1014 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1015 const u8 *fw_data, unsigned int fw_size,
1016 struct fw_hdr *card_fw, enum dev_state state, int *reset);
1017 int t4_prep_adapter(struct adapter *adapter);
1019 enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
1020 int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
1022 enum t4_bar2_qtype qtype,
1024 unsigned int *pbar2_qid);
1026 int t4_init_sge_params(struct adapter *adapter);
1027 int t4_init_tp_params(struct adapter *adap);
1028 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
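/* Illustrative sketch of using the variable Compressed Filter Tuple offsets
 * discussed with struct tp_params earlier in this file:
 * t4_filter_field_shift() returns the bit position of a TP_VLAN_PRI_MAP
 * field within the tuple, or a negative value when that field is not
 * selected.  "PROTOCOL_F" stands for the protocol-field selector bit from
 * the TP register definitions and is only an assumed name here.
 */
static inline u64 example_proto_tuple_bits(const struct adapter *adap, u8 proto)
{
	int shift = t4_filter_field_shift(adap, PROTOCOL_F); /* assumed name */

	if (shift < 0)		/* protocol is not part of the tuple */
		return 0;
	return (u64)proto << shift;
}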
1029 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
1030 void t4_fatal_err(struct adapter *adapter);
1031 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1032 int start, int n, const u16 *rspq, unsigned int nrspq);
1033 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1034 unsigned int flags);
1035 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
1037 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
1039 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1040 unsigned int *valp);
1041 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1042 const unsigned int *valp);
1043 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
1044 const char *t4_get_port_type_description(enum fw_port_type port_type);
1045 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
1046 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
1047 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1048 unsigned int mask, unsigned int val);
1049 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1050 struct tp_tcp_stats *v6);
1051 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1052 const unsigned short *alpha, const unsigned short *beta);
1054 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
1056 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1058 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
1059 u64 mask0, u64 mask1, unsigned int crc, bool enable);
1061 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
1062 enum dev_master master, enum dev_state *state);
1063 int t4_fw_bye(struct adapter *adap, unsigned int mbox);
1064 int t4_early_init(struct adapter *adap, unsigned int mbox);
1065 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
1066 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
1067 unsigned int cache_line_size);
1068 int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
1069 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1070 unsigned int vf, unsigned int nparams, const u32 *params,
1072 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1073 unsigned int vf, unsigned int nparams, const u32 *params,
1075 int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
1076 unsigned int pf, unsigned int vf,
1077 unsigned int nparams, const u32 *params,
1079 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
1080 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
1081 unsigned int rxqi, unsigned int rxq, unsigned int tc,
1082 unsigned int vi, unsigned int cmask, unsigned int pmask,
1083 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
1084 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
1085 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
1086 unsigned int *rss_size);
1087 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
1088 int mtu, int promisc, int all_multi, int bcast, int vlanex,
1090 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
1091 unsigned int viid, bool free, unsigned int naddr,
1092 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
1093 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
1094 int idx, const u8 *addr, bool persist, bool add_smt);
1095 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
1096 bool ucast, u64 vec, bool sleep_ok);
1097 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
1098 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
1099 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
1100 bool rx_en, bool tx_en);
1101 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
1102 unsigned int nblinks);
1103 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
1104 unsigned int mmd, unsigned int reg, u16 *valp);
1105 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
1106 unsigned int mmd, unsigned int reg, u16 val);
1107 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1108 unsigned int vf, unsigned int iqtype, unsigned int iqid,
1109 unsigned int fl0id, unsigned int fl1id);
1110 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1111 unsigned int vf, unsigned int eqid);
1112 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1113 unsigned int vf, unsigned int eqid);
1114 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1115 unsigned int vf, unsigned int eqid);
1116 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
1117 void t4_db_full(struct adapter *adapter);
1118 void t4_db_dropped(struct adapter *adapter);
1119 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
1121 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
1122 void t4_free_mem(void *addr);
1123 #endif /* __CXGB4_H__ */