/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.52.1-5"
#define DRV_MODULE_RELDATE	"2009/11/09"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
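
/* A note on the two accessors above (editorial, not from the original
 * source): they tunnel GRC register reads/writes through the PCI config
 * space address/data register pair, so they work before (or without)
 * BAR-mapped access being available; restoring PCICFG_VENDOR_ID_OFFSET
 * afterwards parks the config window on a harmless offset. */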
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
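
/* For reference (editorial note): the REG_WR_DMAE()/REG_RD_DMAE()
 * wrappers used throughout this file boil down to staging data in the
 * slowpath write-back buffer and calling the two primitives above,
 * roughly:
 *
 *	memcpy(bnx2x_sp(bp, wb_data[0]), valp, len32 * 4);
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
 *			 offset, len32);
 *
 * ("roughly" because the exact macro plumbing lives in bnx2x.h). */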
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x)  *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
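
/* Editorial note: bp->intr_sem works as a software interrupt gate.
 * Every ISR and the slowpath task bail out while the counter is
 * non-zero, so once the synchronize_irq()/flush_workqueue() calls
 * above return, no driver code touches the device until the counter
 * is dropped back to zero. */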
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
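
/* Editorial note on the accounting above: nbd comes from the start BD
 * and counts the packet's descriptors; the parse BD and, for TSO, the
 * split-header BD are stepped over without an unmap because they carry
 * no DMA mapping - only the start BD and the data BDs do. */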
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
	return 0;
}
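
/* Editorial note: this stop/wake logic pairs with the producer side in
 * the transmit path, which stops the queue when bnx2x_tx_avail() falls
 * below MAX_SKB_FRAGS + 3 and re-checks after its own smp_mb(). Between
 * the two barriers at least one side always observes the other's
 * update, so the queue cannot stall forever. */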
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
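
/* Editorial note: the sge_mask bitmap drives SGE producer recycling.
 * Bits are cleared as the firmware reports pages consumed; once an
 * entire 64-bit mask element drains to zero, bnx2x_update_sge_prod()
 * advances the producer by that many entries and resets the element
 * back to all ones, handing the pages back to the hardware. */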
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
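
/* Editorial note: setting gso_size above matters beyond the local
 * stack - a TPA aggregate delivered into a bridge/router path must be
 * re-segmented on transmit, and without a valid gso_size such an skb
 * could not be split back into wire-sized frames; hence the
 * "forwarding support" comment. */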
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
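
/* Editorial note: bnx2x_rx_int() is the NAPI poll workhorse. It stops
 * either when the completion queue is drained or when the budget is
 * hit, and the NAPI caller re-arms interrupts only in the former case,
 * per the standard NAPI contract. */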
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
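
/* Editorial note on the status word decoded above: bit 0 signals a
 * slowpath event on the default status block, while bit (sb_id + 1) -
 * i.e. 0x2 << sb_id - signals the fastpath status block of the
 * corresponding queue, which is why the handler peels the bits off one
 * source at a time and warns about any leftovers. */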
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
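
/* Editorial note: the two helpers above implement a simple hardware
 * semaphore. Each resource owns one bit in a per-function
 * DRIVER_CONTROL register; writing the "+ 4" set-register attempts the
 * claim and a read back confirms ownership, so the PCI functions can
 * arbitrate over shared resources without any shared kernel lock. */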
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		printk("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
2254 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2256 u32 r_param = bp->link_vars.line_speed / 8;
2257 u32 fair_periodic_timeout_usec;
2260 memset(&(bp->cmng.rs_vars), 0,
2261 sizeof(struct rate_shaping_vars_per_port));
2262 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2264 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2267 /* this is the threshold below which no timer arming will occur;
2268 the 1.25 coefficient makes the threshold a little bigger
2269 than the real time, to compensate for timer inaccuracy */
2270 bp->cmng.rs_vars.rs_threshold =
2271 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2273 /* resolution of fairness timer */
2274 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2276 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2278 /* this is the threshold below which we won't arm the timer anymore */
2279 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2281 /* we multiply by 1e3/8 to get bytes/msec;
2282 the credits must not exceed
2283 t_fair*FAIR_MEM (the algorithm's resolution) */
2284 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285 /* since each tick is 4 usec */
2286 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
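/*
 * Worked example (illustrative, assuming a 10G link): line_speed = 10000
 * Mbps gives r_param = 10000/8 = 1250 bytes/usec, so rs_threshold =
 * (100 * 1250 * 5) / 4 = 156250 bytes, and t_fair = T_FAIR_COEF / 10000
 * corresponds to the 1000 usec fairness period mentioned above.
 */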
2289 /* Calculates the sum of vn_min_rates.
2290 It's needed for further normalizing of the min_rates.
2291 Returns:
2292 sum of vn_min_rates.
2293 or
2294 0 - if all the min_rates are 0.
2295 In the latter case the fairness algorithm should be deactivated.
2296 If not all min_rates are zero then those that are zeroes will be set to 1.
2297 */
2298 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2301 int port = BP_PORT(bp);
2304 bp->vn_weight_sum = 0;
2305 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306 int func = 2*vn + port;
2307 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2311 /* Skip hidden vns */
2312 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2315 /* If min rate is zero - set it to 1 */
2317 vn_min_rate = DEF_MIN_RATE;
2321 bp->vn_weight_sum += vn_min_rate;
2324 /* ... only if all min rates are zeros - disable fairness */
2326 bp->cmng.flags.cmng_enables &=
2327 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2329 " fairness will be disabled\n");
2331 bp->cmng.flags.cmng_enables |=
2332 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2335 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2337 struct rate_shaping_vars_per_vn m_rs_vn;
2338 struct fairness_vars_per_vn m_fair_vn;
2339 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340 u16 vn_min_rate, vn_max_rate;
2343 /* If function is hidden - set min and max to zeroes */
2344 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2349 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2351 /* If min rate is zero - set it to 1 */
2353 vn_min_rate = DEF_MIN_RATE;
2354 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2358 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2359 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2361 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2364 /* global vn counter - maximal Mbps for this vn */
2365 m_rs_vn.vn_counter.rate = vn_max_rate;
2367 /* quota - number of bytes transmitted in this period */
2368 m_rs_vn.vn_counter.quota =
2369 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2371 if (bp->vn_weight_sum) {
2372 /* credit for each period of the fairness algorithm:
2373 number of bytes in T_FAIR (the vns share the port rate).
2374 vn_weight_sum should not be larger than 10000, thus
2375 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376 than zero */
2377 m_fair_vn.vn_credit_delta =
2378 max((u32)(vn_min_rate * (T_FAIR_COEF /
2379 (8 * bp->vn_weight_sum))),
2380 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2381 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382 m_fair_vn.vn_credit_delta);
2385 /* Store it to internal memory */
2386 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389 ((u32 *)(&m_rs_vn))[i]);
2391 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394 ((u32 *)(&m_fair_vn))[i]);
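/*
 * Illustrative numbers (assumed config, not from the source): with two
 * active vns at vn_min_rate = 100 Mbps each, vn_weight_sum = 200 and each
 * vn receives vn_credit_delta = max(100 * T_FAIR_COEF / (8 * 200),
 * 2 * fair_threshold) bytes of credit per fairness period - an equal
 * split of the port's fair share, floored at twice the arming threshold.
 */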
2398 /* This function is called upon link interrupt */
2399 static void bnx2x_link_attn(struct bnx2x *bp)
2401 /* Make sure that we are synced with the current statistics */
2402 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2404 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2406 if (bp->link_vars.link_up) {
2408 /* dropless flow control */
2409 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2410 int port = BP_PORT(bp);
2411 u32 pause_enabled = 0;
2413 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2416 REG_WR(bp, BAR_USTRORM_INTMEM +
2417 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2421 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422 struct host_port_stats *pstats;
2424 pstats = bnx2x_sp(bp, port_stats);
2425 /* reset old bmac stats */
2426 memset(&(pstats->mac_stx[0]), 0,
2427 sizeof(struct mac_stx));
2429 if (bp->state == BNX2X_STATE_OPEN)
2430 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2433 /* indicate link status */
2434 bnx2x_link_report(bp);
2437 int port = BP_PORT(bp);
2441 /* Set the attention towards other drivers on the same port */
2442 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443 if (vn == BP_E1HVN(bp))
2446 func = ((vn << 1) | port);
2447 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2451 if (bp->link_vars.link_up) {
2454 /* Init rate shaping and fairness contexts */
2455 bnx2x_init_port_minmax(bp);
2457 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2458 bnx2x_init_vn_minmax(bp, 2*vn + port);
2460 /* Store it to internal memory */
2462 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465 ((u32 *)(&bp->cmng))[i]);
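/*
 * Note (illustrative): bp->cmng is mirrored into XSTORM internal memory
 * dword by dword, since REG_WR issues 32-bit writes; the per-vn
 * rate-shaping and fairness blocks above are copied the same way.
 */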
2470 static void bnx2x__link_status_update(struct bnx2x *bp)
2472 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2475 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2477 if (bp->link_vars.link_up)
2478 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2480 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2482 bnx2x_calc_vn_weight_sum(bp);
2484 /* indicate link status */
2485 bnx2x_link_report(bp);
2488 static void bnx2x_pmf_update(struct bnx2x *bp)
2490 int port = BP_PORT(bp);
2494 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2496 /* enable nig attention */
2497 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2501 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2509 * General service functions
2512 /* send the MCP a request, block until there is a reply */
2513 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2515 int func = BP_FUNC(bp);
2516 u32 seq = ++bp->fw_seq;
2519 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2521 mutex_lock(&bp->fw_mb_mutex);
2522 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2526 /* let the FW do its magic ... */
2529 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2531 /* Give the FW up to 5 seconds (500*10ms) */
2532 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2534 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535 cnt*delay, rc, seq);
2537 /* is this a reply to our command? */
2538 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539 rc &= FW_MSG_CODE_MASK;
2542 BNX2X_ERR("FW failed to respond!\n");
2546 mutex_unlock(&bp->fw_mb_mutex);
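/*
 * Usage sketch (hypothetical caller): the sequence number embedded in the
 * low bits lets the driver match the MCP's reply to its request; a masked
 * reply of 0 means the MCP never echoed our seq, e.g.
 *
 *	if (bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK) == 0)
 *		BNX2X_ERR("MCP did not respond to DCC completion\n");
 */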
2551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2552 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2553 static void bnx2x_set_rx_mode(struct net_device *dev);
2555 static void bnx2x_e1h_disable(struct bnx2x *bp)
2557 int port = BP_PORT(bp);
2559 netif_tx_disable(bp->dev);
2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2563 netif_carrier_off(bp->dev);
2566 static void bnx2x_e1h_enable(struct bnx2x *bp)
2568 int port = BP_PORT(bp);
2570 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2572 /* Only the Tx queue should be re-enabled */
2573 netif_tx_wake_all_queues(bp->dev);
2576 * Should not call netif_carrier_on since it will be called if the link
2577 * is up when checking for link state
2581 static void bnx2x_update_min_max(struct bnx2x *bp)
2583 int port = BP_PORT(bp);
2586 /* Init rate shaping and fairness contexts */
2587 bnx2x_init_port_minmax(bp);
2589 bnx2x_calc_vn_weight_sum(bp);
2591 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592 bnx2x_init_vn_minmax(bp, 2*vn + port);
2597 /* Set the attention towards other drivers on the same port */
2598 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599 if (vn == BP_E1HVN(bp))
2602 func = ((vn << 1) | port);
2603 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2607 /* Store it to internal memory */
2608 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609 REG_WR(bp, BAR_XSTRORM_INTMEM +
2610 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611 ((u32 *)(&bp->cmng))[i]);
2615 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2617 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2619 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2622 * This is the only place besides the function initialization
2623 * where the bp->flags can change so it is done without any
2626 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2627 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2628 bp->flags |= MF_FUNC_DIS;
2630 bnx2x_e1h_disable(bp);
2632 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2633 bp->flags &= ~MF_FUNC_DIS;
2635 bnx2x_e1h_enable(bp);
2637 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2639 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2641 bnx2x_update_min_max(bp);
2642 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2645 /* Report results to MCP */
2647 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2649 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
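/*
 * Note (illustrative): dcc_event is treated as a bitmask of outstanding
 * requests - each handled bit is cleared above, so any bit still set at
 * this point is unsupported and makes the driver report
 * DRV_MSG_CODE_DCC_FAILURE instead of DRV_MSG_CODE_DCC_OK.
 */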
2652 /* must be called under the spq lock */
2653 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654 {
2655 struct eth_spe *next_spe = bp->spq_prod_bd;
2657 if (bp->spq_prod_bd == bp->spq_last_bd) {
2658 bp->spq_prod_bd = bp->spq;
2659 bp->spq_prod_idx = 0;
2660 DP(NETIF_MSG_TIMER, "end of spq\n");
2661 } else {
2662 bp->spq_prod_bd++;
2663 bp->spq_prod_idx++;
2664 }
2666 return next_spe;
2667 }
2668 /* must be called under the spq lock */
2669 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2671 int func = BP_FUNC(bp);
2673 /* Make sure that BD data is updated before writing the producer */
2676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2681 /* the slow path queue is odd since completions arrive on the fastpath ring */
2682 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2683 u32 data_hi, u32 data_lo, int common)
2685 struct eth_spe *spe;
2687 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2689 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2690 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2691 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2693 #ifdef BNX2X_STOP_ON_ERROR
2694 if (unlikely(bp->panic))
2698 spin_lock_bh(&bp->spq_lock);
2700 if (!bp->spq_left) {
2701 BNX2X_ERR("BUG! SPQ ring full!\n");
2702 spin_unlock_bh(&bp->spq_lock);
2707 spe = bnx2x_sp_get_next(bp);
2709 /* CID needs the port number to be encoded in it */
2710 spe->hdr.conn_and_cmd_data =
2711 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2713 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2716 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2718 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2719 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2723 bnx2x_sp_prod_update(bp);
2724 spin_unlock_bh(&bp->spq_lock);
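/*
 * Usage sketch (illustrative; 'mapping' is a hypothetical DMA address):
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      U64_HI(mapping), U64_LO(mapping), 0);
 *
 * The completion for such an element arrives on the fastpath ring, as the
 * comment above bnx2x_sp_post notes.
 */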
2728 /* acquire split MCP access lock register */
2729 static int bnx2x_acquire_alr(struct bnx2x *bp)
2736 for (j = 0; j < i*10; j++) {
2738 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740 if (val & (1L << 31))
2745 if (!(val & (1L << 31))) {
2746 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2753 /* release split MCP access lock register */
2754 static void bnx2x_release_alr(struct bnx2x *bp)
2758 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2761 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2763 struct host_def_status_block *def_sb = bp->def_status_blk;
2766 barrier(); /* status block is written to by the chip */
2767 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2771 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2775 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2779 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2783 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
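/*
 * Note (reconstruction, hedged): in the full function each updated index
 * also sets a bit in the returned mask, so bnx2x_sp_task() can tell which
 * storm status blocks actually changed before acking them.
 */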
2791 * slow path service functions
2794 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2796 int port = BP_PORT(bp);
2797 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798 COMMAND_REG_ATTN_BITS_SET);
2799 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2801 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802 NIG_REG_MASK_INTERRUPT_PORT0;
2806 if (bp->attn_state & asserted)
2807 BNX2X_ERR("IGU ERROR\n");
2809 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810 aeu_mask = REG_RD(bp, aeu_addr);
2812 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2813 aeu_mask, asserted);
2814 aeu_mask &= ~(asserted & 0xff);
2815 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2817 REG_WR(bp, aeu_addr, aeu_mask);
2818 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2820 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2821 bp->attn_state |= asserted;
2822 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2824 if (asserted & ATTN_HARD_WIRED_MASK) {
2825 if (asserted & ATTN_NIG_FOR_FUNC) {
2827 bnx2x_acquire_phy_lock(bp);
2829 /* save nig interrupt mask */
2830 nig_mask = REG_RD(bp, nig_int_mask_addr);
2831 REG_WR(bp, nig_int_mask_addr, 0);
2833 bnx2x_link_attn(bp);
2835 /* handle unicore attn? */
2837 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2840 if (asserted & GPIO_2_FUNC)
2841 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2843 if (asserted & GPIO_3_FUNC)
2844 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2846 if (asserted & GPIO_4_FUNC)
2847 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2850 if (asserted & ATTN_GENERAL_ATTN_1) {
2851 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2854 if (asserted & ATTN_GENERAL_ATTN_2) {
2855 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2858 if (asserted & ATTN_GENERAL_ATTN_3) {
2859 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2863 if (asserted & ATTN_GENERAL_ATTN_4) {
2864 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2867 if (asserted & ATTN_GENERAL_ATTN_5) {
2868 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2871 if (asserted & ATTN_GENERAL_ATTN_6) {
2872 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2877 } /* if hardwired */
2879 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2881 REG_WR(bp, hc_addr, asserted);
2883 /* now set back the mask */
2884 if (asserted & ATTN_NIG_FOR_FUNC) {
2885 REG_WR(bp, nig_int_mask_addr, nig_mask);
2886 bnx2x_release_phy_lock(bp);
2890 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2892 int port = BP_PORT(bp);
2894 /* mark the failure */
2895 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898 bp->link_params.ext_phy_config);
2900 /* log the failure */
2901 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2902 " the driver to shut down the card to prevent permanent"
2903 " damage. Please contact Dell Support for assistance\n",
2907 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2909 int port = BP_PORT(bp);
2911 u32 val, swap_val, swap_override;
2913 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2914 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2916 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2918 val = REG_RD(bp, reg_offset);
2919 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2920 REG_WR(bp, reg_offset, val);
2922 BNX2X_ERR("SPIO5 hw attention\n");
2924 /* Fan failure attention */
2925 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2926 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2927 /* Low power mode is controlled by GPIO 2 */
2928 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2929 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2930 /* The PHY reset is controlled by GPIO 1 */
2931 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2935 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2936 /* The PHY reset is controlled by GPIO 1 */
2937 /* fake the port number to cancel the swap done in
2938 set_gpio() */
2939 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2940 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2941 port = (swap_val && swap_override) ^ 1;
2942 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2943 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2949 bnx2x_fan_failure(bp);
2952 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2953 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2954 bnx2x_acquire_phy_lock(bp);
2955 bnx2x_handle_module_detect_int(&bp->link_params);
2956 bnx2x_release_phy_lock(bp);
2959 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2961 val = REG_RD(bp, reg_offset);
2962 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2963 REG_WR(bp, reg_offset, val);
2965 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2966 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2971 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2975 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2977 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2978 BNX2X_ERR("DB hw attention 0x%x\n", val);
2979 /* DORQ discard attention */
2981 BNX2X_ERR("FATAL error from DORQ\n");
2984 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2986 int port = BP_PORT(bp);
2989 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2990 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2992 val = REG_RD(bp, reg_offset);
2993 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2994 REG_WR(bp, reg_offset, val);
2996 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2997 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3002 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3006 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3008 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3009 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3010 /* CFC error attention */
3012 BNX2X_ERR("FATAL error from CFC\n");
3015 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3017 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3018 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3019 /* RQ_USDMDP_FIFO_OVERFLOW */
3021 BNX2X_ERR("FATAL error from PXP\n");
3024 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3026 int port = BP_PORT(bp);
3029 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3030 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3032 val = REG_RD(bp, reg_offset);
3033 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3034 REG_WR(bp, reg_offset, val);
3036 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3037 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3042 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3046 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3048 if (attn & BNX2X_PMF_LINK_ASSERT) {
3049 int func = BP_FUNC(bp);
3051 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3052 bp->mf_config = SHMEM_RD(bp,
3053 mf_cfg.func_mf_config[func].config);
3054 val = SHMEM_RD(bp, func_mb[func].drv_status);
3055 if (val & DRV_STATUS_DCC_EVENT_MASK)
3057 (val & DRV_STATUS_DCC_EVENT_MASK));
3058 bnx2x__link_status_update(bp);
3059 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3060 bnx2x_pmf_update(bp);
3062 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3064 BNX2X_ERR("MC assert!\n");
3065 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3067 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3068 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3071 } else if (attn & BNX2X_MCP_ASSERT) {
3073 BNX2X_ERR("MCP assert!\n");
3074 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3078 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3081 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3082 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3083 if (attn & BNX2X_GRC_TIMEOUT) {
3084 val = CHIP_IS_E1H(bp) ?
3085 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3086 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3088 if (attn & BNX2X_GRC_RSV) {
3089 val = CHIP_IS_E1H(bp) ?
3090 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3091 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3093 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3097 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3099 struct attn_route attn;
3100 struct attn_route group_mask;
3101 int port = BP_PORT(bp);
3107 /* need to take HW lock because MCP or other port might also
3108 try to handle this event */
3109 bnx2x_acquire_alr(bp);
3111 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3112 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3113 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3114 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3115 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3116 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3118 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3119 if (deasserted & (1 << index)) {
3120 group_mask = bp->attn_group[index];
3122 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3123 index, group_mask.sig[0], group_mask.sig[1],
3124 group_mask.sig[2], group_mask.sig[3]);
3126 bnx2x_attn_int_deasserted3(bp,
3127 attn.sig[3] & group_mask.sig[3]);
3128 bnx2x_attn_int_deasserted1(bp,
3129 attn.sig[1] & group_mask.sig[1]);
3130 bnx2x_attn_int_deasserted2(bp,
3131 attn.sig[2] & group_mask.sig[2]);
3132 bnx2x_attn_int_deasserted0(bp,
3133 attn.sig[0] & group_mask.sig[0]);
3135 if ((attn.sig[0] & group_mask.sig[0] &
3136 HW_PRTY_ASSERT_SET_0) ||
3137 (attn.sig[1] & group_mask.sig[1] &
3138 HW_PRTY_ASSERT_SET_1) ||
3139 (attn.sig[2] & group_mask.sig[2] &
3140 HW_PRTY_ASSERT_SET_2))
3141 BNX2X_ERR("FATAL HW block parity attention\n");
3145 bnx2x_release_alr(bp);
3147 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3150 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3152 REG_WR(bp, reg_addr, val);
3154 if (~bp->attn_state & deasserted)
3155 BNX2X_ERR("IGU ERROR\n");
3157 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3158 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3160 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3161 aeu_mask = REG_RD(bp, reg_addr);
3163 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3164 aeu_mask, deasserted);
3165 aeu_mask |= (deasserted & 0xff);
3166 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3168 REG_WR(bp, reg_addr, aeu_mask);
3169 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3171 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3172 bp->attn_state &= ~deasserted;
3173 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3176 static void bnx2x_attn_int(struct bnx2x *bp)
3178 /* read local copy of bits */
3179 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3181 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3183 u32 attn_state = bp->attn_state;
3185 /* look for changed bits */
3186 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3187 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3190 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3191 attn_bits, attn_ack, asserted, deasserted);
3193 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3194 BNX2X_ERR("BAD attention state\n");
3196 /* handle bits that were raised */
3198 bnx2x_attn_int_asserted(bp, asserted);
3201 bnx2x_attn_int_deasserted(bp, deasserted);
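/*
 * Worked example (illustrative): with attn_bits = 0x5, attn_ack = 0x1 and
 * attn_state = 0x1, asserted = 0x5 & ~0x1 & ~0x1 = 0x4 (bit 2 newly
 * raised) and deasserted = ~0x5 & 0x1 & 0x1 = 0 (nothing cleared yet).
 */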
3204 static void bnx2x_sp_task(struct work_struct *work)
3206 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3210 /* Return here if interrupt is disabled */
3211 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3212 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3216 status = bnx2x_update_dsb_idx(bp);
3217 /* if (status == 0) */
3218 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3220 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3226 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3228 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3230 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3232 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3234 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3239 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3241 struct net_device *dev = dev_instance;
3242 struct bnx2x *bp = netdev_priv(dev);
3244 /* Return here if interrupt is disabled */
3245 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3246 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3250 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3252 #ifdef BNX2X_STOP_ON_ERROR
3253 if (unlikely(bp->panic))
3259 struct cnic_ops *c_ops;
3262 c_ops = rcu_dereference(bp->cnic_ops);
3264 c_ops->cnic_handler(bp->cnic_data, NULL);
3268 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3273 /* end of slow path */
3277 /****************************************************************************
3279 ****************************************************************************/
3281 /* sum[hi:lo] += add[hi:lo] */
3282 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3283 do { \
3284 s_lo += a_lo; \
3285 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3286 } while (0)
3288 /* difference = minuend - subtrahend */
3289 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3291 if (m_lo < s_lo) { \
3293 d_hi = m_hi - s_hi; \
3295 /* we can 'loan' 1 */ \
3297 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3299 /* m_hi <= s_hi */ \
3304 /* m_lo >= s_lo */ \
3305 if (m_hi < s_hi) { \
3309 /* m_hi >= s_hi */ \
3310 d_hi = m_hi - s_hi; \
3311 d_lo = m_lo - s_lo; \
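/*
 * Worked example (illustrative): ADD_64 carries into the high word on
 * 32-bit overflow - with s_hi = 0, s_lo = 0xffffffff,
 * ADD_64(s_hi, 0, s_lo, 1) leaves s_hi = 1, s_lo = 0. DIFF_64 does the
 * matching borrow and clamps the result to 0 when the subtrahend is the
 * larger quantity.
 */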
3316 #define UPDATE_STAT64(s, t) \
3317 do { \
3318 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3319 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3320 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3321 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3322 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3323 pstats->mac_stx[1].t##_lo, diff.lo); \
3324 } while (0)
3326 #define UPDATE_STAT64_NIG(s, t) \
3327 do { \
3328 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3329 diff.lo, new->s##_lo, old->s##_lo); \
3330 ADD_64(estats->t##_hi, diff.hi, \
3331 estats->t##_lo, diff.lo); \
3332 } while (0)
3334 /* sum[hi:lo] += add */
3335 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3336 do { \
3337 s_lo += a; \
3338 s_hi += (s_lo < a) ? 1 : 0; \
3339 } while (0)
3341 #define UPDATE_EXTEND_STAT(s) \
3342 do { \
3343 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3344 pstats->mac_stx[1].s##_lo, \
3345 new->s); \
3346 } while (0)
3348 #define UPDATE_EXTEND_TSTAT(s, t) \
3349 do { \
3350 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3351 old_tclient->s = tclient->s; \
3352 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3353 } while (0)
3355 #define UPDATE_EXTEND_USTAT(s, t) \
3356 do { \
3357 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3358 old_uclient->s = uclient->s; \
3359 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3360 } while (0)
3362 #define UPDATE_EXTEND_XSTAT(s, t) \
3363 do { \
3364 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3365 old_xclient->s = xclient->s; \
3366 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3367 } while (0)
3369 /* minuend -= subtrahend */
3370 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3371 do { \
3372 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3373 } while (0)
3375 /* minuend[hi:lo] -= subtrahend */
3376 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3377 do { \
3378 SUB_64(m_hi, 0, m_lo, s); \
3379 } while (0)
3381 #define SUB_EXTEND_USTAT(s, t) \
3382 do { \
3383 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3384 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3385 } while (0)
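/*
 * Illustrative expansion (names taken from the per-queue loop below):
 * UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received)
 * computes the 32-bit delta of the tstorm counter since the last snapshot,
 * stores the new snapshot in old_tclient and folds the delta into the
 * 64-bit qstats counter via ADD_EXTEND_64.
 */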
3388 * General service functions
3391 static inline long bnx2x_hilo(u32 *hiref)
3392 {
3393 u32 lo = *(hiref + 1);
3394 #if (BITS_PER_LONG == 64)
3395 u32 hi = *hiref;
3397 return HILO_U64(hi, lo);
3398 #else
3399 return lo;
3400 #endif
3401 }
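/*
 * Usage note (illustrative): callers pass the address of the _hi word and
 * the _lo word is assumed to follow it in memory, e.g.
 *
 *	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
 */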
3404 * Init service functions
3407 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3409 if (!bp->stats_pending) {
3410 struct eth_query_ramrod_data ramrod_data = {0};
3413 ramrod_data.drv_counter = bp->stats_counter++;
3414 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3415 for_each_queue(bp, i)
3416 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3418 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3419 ((u32 *)&ramrod_data)[1],
3420 ((u32 *)&ramrod_data)[0], 0);
3422 /* stats ramrod has its own slot on the spq */
3424 bp->stats_pending = 1;
3429 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3431 struct dmae_command *dmae = &bp->stats_dmae;
3432 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3434 *stats_comp = DMAE_COMP_VAL;
3435 if (CHIP_REV_IS_SLOW(bp))
3439 if (bp->executer_idx) {
3440 int loader_idx = PMF_DMAE_C(bp);
3442 memset(dmae, 0, sizeof(struct dmae_command));
3444 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3445 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3446 DMAE_CMD_DST_RESET |
3448 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3450 DMAE_CMD_ENDIANITY_DW_SWAP |
3452 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3454 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3455 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3456 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3457 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3458 sizeof(struct dmae_command) *
3459 (loader_idx + 1)) >> 2;
3460 dmae->dst_addr_hi = 0;
3461 dmae->len = sizeof(struct dmae_command) >> 2;
3464 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3465 dmae->comp_addr_hi = 0;
3469 bnx2x_post_dmae(bp, dmae, loader_idx);
3471 } else if (bp->func_stx) {
3473 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3477 static int bnx2x_stats_comp(struct bnx2x *bp)
3478 {
3479 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480 int cnt = 10;
3482 might_sleep();
3483 while (*stats_comp != DMAE_COMP_VAL) {
3484 if (!cnt) {
3485 BNX2X_ERR("timeout waiting for stats finished\n");
3486 break;
3487 }
3488 cnt--;
3489 msleep(1);
3490 }
3491 return 1;
3492 }
3495 * Statistics service functions
3498 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3500 struct dmae_command *dmae;
3502 int loader_idx = PMF_DMAE_C(bp);
3503 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3506 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3507 BNX2X_ERR("BUG!\n");
3511 bp->executer_idx = 0;
3513 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3515 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3517 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3519 DMAE_CMD_ENDIANITY_DW_SWAP |
3521 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3522 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3524 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3525 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3526 dmae->src_addr_lo = bp->port.port_stx >> 2;
3527 dmae->src_addr_hi = 0;
3528 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3529 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3530 dmae->len = DMAE_LEN32_RD_MAX;
3531 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3532 dmae->comp_addr_hi = 0;
3535 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3536 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3537 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3538 dmae->src_addr_hi = 0;
3539 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3540 DMAE_LEN32_RD_MAX * 4);
3541 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3542 DMAE_LEN32_RD_MAX * 4);
3543 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3544 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3545 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3546 dmae->comp_val = DMAE_COMP_VAL;
3549 bnx2x_hw_stats_post(bp);
3550 bnx2x_stats_comp(bp);
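/*
 * Note (illustrative): the port stats area is wider than one DMAE read
 * (DMAE_LEN32_RD_MAX dwords), hence the two chained commands above; the
 * first command's completion is written to a DMAE 'go' register to trigger
 * the next one, and the last completes to host memory so
 * bnx2x_stats_comp() can poll stats_comp for DMAE_COMP_VAL.
 */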
3553 static void bnx2x_port_stats_init(struct bnx2x *bp)
3555 struct dmae_command *dmae;
3556 int port = BP_PORT(bp);
3557 int vn = BP_E1HVN(bp);
3559 int loader_idx = PMF_DMAE_C(bp);
3561 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3564 if (!bp->link_vars.link_up || !bp->port.pmf) {
3565 BNX2X_ERR("BUG!\n");
3569 bp->executer_idx = 0;
3572 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3573 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3574 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3576 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3578 DMAE_CMD_ENDIANITY_DW_SWAP |
3580 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3581 (vn << DMAE_CMD_E1HVN_SHIFT));
3583 if (bp->port.port_stx) {
3585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586 dmae->opcode = opcode;
3587 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3588 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3589 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3590 dmae->dst_addr_hi = 0;
3591 dmae->len = sizeof(struct host_port_stats) >> 2;
3592 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593 dmae->comp_addr_hi = 0;
3599 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3600 dmae->opcode = opcode;
3601 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3602 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3603 dmae->dst_addr_lo = bp->func_stx >> 2;
3604 dmae->dst_addr_hi = 0;
3605 dmae->len = sizeof(struct host_func_stats) >> 2;
3606 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3607 dmae->comp_addr_hi = 0;
3612 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3613 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3614 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3616 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3618 DMAE_CMD_ENDIANITY_DW_SWAP |
3620 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3621 (vn << DMAE_CMD_E1HVN_SHIFT));
3623 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3625 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3626 NIG_REG_INGRESS_BMAC0_MEM);
3628 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3629 BIGMAC_REGISTER_TX_STAT_GTBYT */
3630 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631 dmae->opcode = opcode;
3632 dmae->src_addr_lo = (mac_addr +
3633 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3634 dmae->src_addr_hi = 0;
3635 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3636 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3637 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3638 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3639 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640 dmae->comp_addr_hi = 0;
3643 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3644 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3645 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3646 dmae->opcode = opcode;
3647 dmae->src_addr_lo = (mac_addr +
3648 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3649 dmae->src_addr_hi = 0;
3650 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3651 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3652 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3653 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3654 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3655 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3656 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657 dmae->comp_addr_hi = 0;
3660 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3662 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3664 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3665 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666 dmae->opcode = opcode;
3667 dmae->src_addr_lo = (mac_addr +
3668 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3669 dmae->src_addr_hi = 0;
3670 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3671 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3672 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3673 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674 dmae->comp_addr_hi = 0;
3677 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3678 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3679 dmae->opcode = opcode;
3680 dmae->src_addr_lo = (mac_addr +
3681 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3682 dmae->src_addr_hi = 0;
3683 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3684 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3685 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3686 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3688 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3689 dmae->comp_addr_hi = 0;
3692 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3693 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3694 dmae->opcode = opcode;
3695 dmae->src_addr_lo = (mac_addr +
3696 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3697 dmae->src_addr_hi = 0;
3698 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3699 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3700 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3701 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3702 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3703 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704 dmae->comp_addr_hi = 0;
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3712 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3715 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3716 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3717 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718 dmae->comp_addr_hi = 0;
3721 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722 dmae->opcode = opcode;
3723 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3724 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3725 dmae->src_addr_hi = 0;
3726 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3727 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3729 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3730 dmae->len = (2*sizeof(u32)) >> 2;
3731 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3732 dmae->comp_addr_hi = 0;
3735 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3736 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3737 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3738 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3740 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3742 DMAE_CMD_ENDIANITY_DW_SWAP |
3744 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3745 (vn << DMAE_CMD_E1HVN_SHIFT));
3746 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3747 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3748 dmae->src_addr_hi = 0;
3749 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3753 dmae->len = (2*sizeof(u32)) >> 2;
3754 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3755 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3756 dmae->comp_val = DMAE_COMP_VAL;
3761 static void bnx2x_func_stats_init(struct bnx2x *bp)
3763 struct dmae_command *dmae = &bp->stats_dmae;
3764 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3767 if (!bp->func_stx) {
3768 BNX2X_ERR("BUG!\n");
3772 bp->executer_idx = 0;
3773 memset(dmae, 0, sizeof(struct dmae_command));
3775 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3776 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3777 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3779 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3781 DMAE_CMD_ENDIANITY_DW_SWAP |
3783 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3784 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3785 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3786 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3787 dmae->dst_addr_lo = bp->func_stx >> 2;
3788 dmae->dst_addr_hi = 0;
3789 dmae->len = sizeof(struct host_func_stats) >> 2;
3790 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3791 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3792 dmae->comp_val = DMAE_COMP_VAL;
3797 static void bnx2x_stats_start(struct bnx2x *bp)
3800 bnx2x_port_stats_init(bp);
3802 else if (bp->func_stx)
3803 bnx2x_func_stats_init(bp);
3805 bnx2x_hw_stats_post(bp);
3806 bnx2x_storm_stats_post(bp);
3809 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3811 bnx2x_stats_comp(bp);
3812 bnx2x_stats_pmf_update(bp);
3813 bnx2x_stats_start(bp);
3816 static void bnx2x_stats_restart(struct bnx2x *bp)
3818 bnx2x_stats_comp(bp);
3819 bnx2x_stats_start(bp);
3822 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3824 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3825 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3826 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3832 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3833 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3834 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3835 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3836 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3837 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3838 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3839 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3840 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3841 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3842 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3843 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3844 UPDATE_STAT64(tx_stat_gt127,
3845 tx_stat_etherstatspkts65octetsto127octets);
3846 UPDATE_STAT64(tx_stat_gt255,
3847 tx_stat_etherstatspkts128octetsto255octets);
3848 UPDATE_STAT64(tx_stat_gt511,
3849 tx_stat_etherstatspkts256octetsto511octets);
3850 UPDATE_STAT64(tx_stat_gt1023,
3851 tx_stat_etherstatspkts512octetsto1023octets);
3852 UPDATE_STAT64(tx_stat_gt1518,
3853 tx_stat_etherstatspkts1024octetsto1522octets);
3854 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3855 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3856 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3857 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3858 UPDATE_STAT64(tx_stat_gterr,
3859 tx_stat_dot3statsinternalmactransmiterrors);
3860 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3862 estats->pause_frames_received_hi =
3863 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3864 estats->pause_frames_received_lo =
3865 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3867 estats->pause_frames_sent_hi =
3868 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3869 estats->pause_frames_sent_lo =
3870 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3873 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3875 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3876 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3877 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3879 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3880 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3881 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3882 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3883 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3884 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3885 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3886 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3887 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3888 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3889 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3890 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3891 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3892 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3893 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3894 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3895 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3896 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3897 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3898 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3899 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3900 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3901 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3902 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3903 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3904 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3905 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3906 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3907 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3908 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3909 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3911 estats->pause_frames_received_hi =
3912 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3913 estats->pause_frames_received_lo =
3914 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3915 ADD_64(estats->pause_frames_received_hi,
3916 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3917 estats->pause_frames_received_lo,
3918 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3920 estats->pause_frames_sent_hi =
3921 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3922 estats->pause_frames_sent_lo =
3923 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3924 ADD_64(estats->pause_frames_sent_hi,
3925 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3926 estats->pause_frames_sent_lo,
3927 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3930 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3932 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3933 struct nig_stats *old = &(bp->port.old_nig_stats);
3934 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3935 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3942 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3943 bnx2x_bmac_stats_update(bp);
3945 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3946 bnx2x_emac_stats_update(bp);
3948 else { /* unreached */
3949 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3953 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3954 new->brb_discard - old->brb_discard);
3955 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3956 new->brb_truncate - old->brb_truncate);
3958 UPDATE_STAT64_NIG(egress_mac_pkt0,
3959 etherstatspkts1024octetsto1522octets);
3960 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3962 memcpy(old, new, sizeof(struct nig_stats));
3964 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3965 sizeof(struct mac_stx));
3966 estats->brb_drop_hi = pstats->brb_drop_hi;
3967 estats->brb_drop_lo = pstats->brb_drop_lo;
3969 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3971 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3972 if (nig_timer_max != estats->nig_timer_max) {
3973 estats->nig_timer_max = nig_timer_max;
3974 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3980 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3982 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3983 struct tstorm_per_port_stats *tport =
3984 &stats->tstorm_common.port_statistics;
3985 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3986 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3989 memcpy(&(fstats->total_bytes_received_hi),
3990 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3991 sizeof(struct host_func_stats) - 2*sizeof(u32));
3992 estats->error_bytes_received_hi = 0;
3993 estats->error_bytes_received_lo = 0;
3994 estats->etherstatsoverrsizepkts_hi = 0;
3995 estats->etherstatsoverrsizepkts_lo = 0;
3996 estats->no_buff_discard_hi = 0;
3997 estats->no_buff_discard_lo = 0;
3999 for_each_queue(bp, i) {
4000 struct bnx2x_fastpath *fp = &bp->fp[i];
4001 int cl_id = fp->cl_id;
4002 struct tstorm_per_client_stats *tclient =
4003 &stats->tstorm_common.client_statistics[cl_id];
4004 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4005 struct ustorm_per_client_stats *uclient =
4006 &stats->ustorm_common.client_statistics[cl_id];
4007 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4008 struct xstorm_per_client_stats *xclient =
4009 &stats->xstorm_common.client_statistics[cl_id];
4010 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4011 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4014 /* are storm stats valid? */
4015 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4016 bp->stats_counter) {
4017 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4018 " xstorm counter (%d) != stats_counter (%d)\n",
4019 i, xclient->stats_counter, bp->stats_counter);
4022 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4023 bp->stats_counter) {
4024 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4025 " tstorm counter (%d) != stats_counter (%d)\n",
4026 i, tclient->stats_counter, bp->stats_counter);
4029 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4030 bp->stats_counter) {
4031 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4032 " ustorm counter (%d) != stats_counter (%d)\n",
4033 i, uclient->stats_counter, bp->stats_counter);
4037 qstats->total_bytes_received_hi =
4038 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4039 qstats->total_bytes_received_lo =
4040 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4042 ADD_64(qstats->total_bytes_received_hi,
4043 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4044 qstats->total_bytes_received_lo,
4045 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4047 ADD_64(qstats->total_bytes_received_hi,
4048 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4049 qstats->total_bytes_received_lo,
4050 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4052 qstats->valid_bytes_received_hi =
4053 qstats->total_bytes_received_hi;
4054 qstats->valid_bytes_received_lo =
4055 qstats->total_bytes_received_lo;
4057 qstats->error_bytes_received_hi =
4058 le32_to_cpu(tclient->rcv_error_bytes.hi);
4059 qstats->error_bytes_received_lo =
4060 le32_to_cpu(tclient->rcv_error_bytes.lo);
4062 ADD_64(qstats->total_bytes_received_hi,
4063 qstats->error_bytes_received_hi,
4064 qstats->total_bytes_received_lo,
4065 qstats->error_bytes_received_lo);
4067 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4068 total_unicast_packets_received);
4069 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4070 total_multicast_packets_received);
4071 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4072 total_broadcast_packets_received);
4073 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4074 etherstatsoverrsizepkts);
4075 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4077 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4078 total_unicast_packets_received);
4079 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4080 total_multicast_packets_received);
4081 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4082 total_broadcast_packets_received);
4083 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4084 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4085 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4087 qstats->total_bytes_transmitted_hi =
4088 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4089 qstats->total_bytes_transmitted_lo =
4090 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4092 ADD_64(qstats->total_bytes_transmitted_hi,
4093 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4094 qstats->total_bytes_transmitted_lo,
4095 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4097 ADD_64(qstats->total_bytes_transmitted_hi,
4098 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4099 qstats->total_bytes_transmitted_lo,
4100 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4102 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4103 total_unicast_packets_transmitted);
4104 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4105 total_multicast_packets_transmitted);
4106 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4107 total_broadcast_packets_transmitted);
4109 old_tclient->checksum_discard = tclient->checksum_discard;
4110 old_tclient->ttl0_discard = tclient->ttl0_discard;
4112 ADD_64(fstats->total_bytes_received_hi,
4113 qstats->total_bytes_received_hi,
4114 fstats->total_bytes_received_lo,
4115 qstats->total_bytes_received_lo);
4116 ADD_64(fstats->total_bytes_transmitted_hi,
4117 qstats->total_bytes_transmitted_hi,
4118 fstats->total_bytes_transmitted_lo,
4119 qstats->total_bytes_transmitted_lo);
4120 ADD_64(fstats->total_unicast_packets_received_hi,
4121 qstats->total_unicast_packets_received_hi,
4122 fstats->total_unicast_packets_received_lo,
4123 qstats->total_unicast_packets_received_lo);
4124 ADD_64(fstats->total_multicast_packets_received_hi,
4125 qstats->total_multicast_packets_received_hi,
4126 fstats->total_multicast_packets_received_lo,
4127 qstats->total_multicast_packets_received_lo);
4128 ADD_64(fstats->total_broadcast_packets_received_hi,
4129 qstats->total_broadcast_packets_received_hi,
4130 fstats->total_broadcast_packets_received_lo,
4131 qstats->total_broadcast_packets_received_lo);
4132 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4133 qstats->total_unicast_packets_transmitted_hi,
4134 fstats->total_unicast_packets_transmitted_lo,
4135 qstats->total_unicast_packets_transmitted_lo);
4136 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4137 qstats->total_multicast_packets_transmitted_hi,
4138 fstats->total_multicast_packets_transmitted_lo,
4139 qstats->total_multicast_packets_transmitted_lo);
4140 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4141 qstats->total_broadcast_packets_transmitted_hi,
4142 fstats->total_broadcast_packets_transmitted_lo,
4143 qstats->total_broadcast_packets_transmitted_lo);
4144 ADD_64(fstats->valid_bytes_received_hi,
4145 qstats->valid_bytes_received_hi,
4146 fstats->valid_bytes_received_lo,
4147 qstats->valid_bytes_received_lo);
4149 ADD_64(estats->error_bytes_received_hi,
4150 qstats->error_bytes_received_hi,
4151 estats->error_bytes_received_lo,
4152 qstats->error_bytes_received_lo);
4153 ADD_64(estats->etherstatsoverrsizepkts_hi,
4154 qstats->etherstatsoverrsizepkts_hi,
4155 estats->etherstatsoverrsizepkts_lo,
4156 qstats->etherstatsoverrsizepkts_lo);
4157 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4158 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4161 ADD_64(fstats->total_bytes_received_hi,
4162 estats->rx_stat_ifhcinbadoctets_hi,
4163 fstats->total_bytes_received_lo,
4164 estats->rx_stat_ifhcinbadoctets_lo);
4166 memcpy(estats, &(fstats->total_bytes_received_hi),
4167 sizeof(struct host_func_stats) - 2*sizeof(u32));
4169 ADD_64(estats->etherstatsoverrsizepkts_hi,
4170 estats->rx_stat_dot3statsframestoolong_hi,
4171 estats->etherstatsoverrsizepkts_lo,
4172 estats->rx_stat_dot3statsframestoolong_lo);
4173 ADD_64(estats->error_bytes_received_hi,
4174 estats->rx_stat_ifhcinbadoctets_hi,
4175 estats->error_bytes_received_lo,
4176 estats->rx_stat_ifhcinbadoctets_lo);
4179 estats->mac_filter_discard =
4180 le32_to_cpu(tport->mac_filter_discard);
4181 estats->xxoverflow_discard =
4182 le32_to_cpu(tport->xxoverflow_discard);
4183 estats->brb_truncate_discard =
4184 le32_to_cpu(tport->brb_truncate_discard);
4185 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4188 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4190 bp->stats_pending = 0;
4195 static void bnx2x_net_stats_update(struct bnx2x *bp)
4197 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4198 struct net_device_stats *nstats = &bp->dev->stats;
4201 nstats->rx_packets =
4202 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4203 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4204 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4206 nstats->tx_packets =
4207 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4208 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4209 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4211 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4213 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4215 nstats->rx_dropped = estats->mac_discard;
4216 for_each_queue(bp, i)
4217 nstats->rx_dropped +=
4218 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4220 nstats->tx_dropped = 0;
4223 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4225 nstats->collisions =
4226 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4228 nstats->rx_length_errors =
4229 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4230 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4231 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4232 bnx2x_hilo(&estats->brb_truncate_hi);
4233 nstats->rx_crc_errors =
4234 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4235 nstats->rx_frame_errors =
4236 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4237 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4238 nstats->rx_missed_errors = estats->xxoverflow_discard;
4240 nstats->rx_errors = nstats->rx_length_errors +
4241 nstats->rx_over_errors +
4242 nstats->rx_crc_errors +
4243 nstats->rx_frame_errors +
4244 nstats->rx_fifo_errors +
4245 nstats->rx_missed_errors;
4247 nstats->tx_aborted_errors =
4248 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4249 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4250 nstats->tx_carrier_errors =
4251 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4252 nstats->tx_fifo_errors = 0;
4253 nstats->tx_heartbeat_errors = 0;
4254 nstats->tx_window_errors = 0;
4256 nstats->tx_errors = nstats->tx_aborted_errors +
4257 nstats->tx_carrier_errors +
4258 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4261 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4263 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4266 estats->driver_xoff = 0;
4267 estats->rx_err_discard_pkt = 0;
4268 estats->rx_skb_alloc_failed = 0;
4269 estats->hw_csum_err = 0;
4270 for_each_queue(bp, i) {
4271 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4273 estats->driver_xoff += qstats->driver_xoff;
4274 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4275 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4276 estats->hw_csum_err += qstats->hw_csum_err;
4280 static void bnx2x_stats_update(struct bnx2x *bp)
4282 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4284 if (*stats_comp != DMAE_COMP_VAL)
4288 bnx2x_hw_stats_update(bp);
4290 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4291 BNX2X_ERR("storm stats were not updated for 3 times\n");
4296 bnx2x_net_stats_update(bp);
4297 bnx2x_drv_stats_update(bp);
4299 if (bp->msglevel & NETIF_MSG_TIMER) {
4300 struct bnx2x_fastpath *fp0_rx = bp->fp;
4301 struct bnx2x_fastpath *fp0_tx = bp->fp;
4302 struct tstorm_per_client_stats *old_tclient =
4303 &bp->fp->old_tclient;
4304 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4305 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4306 struct net_device_stats *nstats = &bp->dev->stats;
4309 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4310 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4312 bnx2x_tx_avail(fp0_tx),
4313 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4314 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4316 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4317 fp0_rx->rx_comp_cons),
4318 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4319 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4320 "brb truncate %u\n",
4321 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4322 qstats->driver_xoff,
4323 estats->brb_drop_lo, estats->brb_truncate_lo);
4324 printk(KERN_DEBUG "tstats: checksum_discard %u "
4325 "packets_too_big_discard %lu no_buff_discard %lu "
4326 "mac_discard %u mac_filter_discard %u "
4327 "xxovrflow_discard %u brb_truncate_discard %u "
4328 "ttl0_discard %u\n",
4329 le32_to_cpu(old_tclient->checksum_discard),
4330 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4331 bnx2x_hilo(&qstats->no_buff_discard_hi),
4332 estats->mac_discard, estats->mac_filter_discard,
4333 estats->xxoverflow_discard, estats->brb_truncate_discard,
4334 le32_to_cpu(old_tclient->ttl0_discard));
4336 for_each_queue(bp, i) {
4337 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4338 bnx2x_fp(bp, i, tx_pkt),
4339 bnx2x_fp(bp, i, rx_pkt),
4340 bnx2x_fp(bp, i, rx_calls));
4344 bnx2x_hw_stats_post(bp);
4345 bnx2x_storm_stats_post(bp);
4348 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4350 struct dmae_command *dmae;
4352 int loader_idx = PMF_DMAE_C(bp);
4353 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4355 bp->executer_idx = 0;
4357 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4359 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4361 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4363 DMAE_CMD_ENDIANITY_DW_SWAP |
4365 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4366 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4368 if (bp->port.port_stx) {
4370 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4372 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4374 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4376 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4377 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4378 dmae->dst_addr_hi = 0;
4379 dmae->len = sizeof(struct host_port_stats) >> 2;
4381 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4382 dmae->comp_addr_hi = 0;
4385 dmae->comp_addr_lo =
4386 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4387 dmae->comp_addr_hi =
4388 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4389 dmae->comp_val = DMAE_COMP_VAL;
4397 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4398 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4399 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4400 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4401 dmae->dst_addr_lo = bp->func_stx >> 2;
4402 dmae->dst_addr_hi = 0;
4403 dmae->len = sizeof(struct host_func_stats) >> 2;
4404 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4405 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406 dmae->comp_val = DMAE_COMP_VAL;
4412 static void bnx2x_stats_stop(struct bnx2x *bp)
4416 bnx2x_stats_comp(bp);
4419 update = (bnx2x_hw_stats_update(bp) == 0);
4421 update |= (bnx2x_storm_stats_update(bp) == 0);
4424 bnx2x_net_stats_update(bp);
4427 bnx2x_port_stats_stop(bp);
4429 bnx2x_hw_stats_post(bp);
4430 bnx2x_stats_comp(bp);
4434 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4438 static const struct {
4439 void (*action)(struct bnx2x *bp);
4440 enum bnx2x_stats_state next_state;
4441 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4444 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4445 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4446 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4447 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4450 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4451 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4452 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4453 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
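/* The table above is a small state machine: the current state
 * (STATS_STATE_DISABLED/STATS_STATE_ENABLED) and the incoming event
 * (PMF, LINK_UP, UPDATE, STOP) select an action callback plus the next
 * state; bnx2x_stats_handle() below simply indexes it and transitions.
 */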
4457 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4459 enum bnx2x_stats_state state = bp->stats_state;
4461 bnx2x_stats_stm[state][event].action(bp);
4462 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4464 /* Make sure the state has been "changed" */
4467 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4468 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4469 state, event, bp->stats_state);
4472 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4474 struct dmae_command *dmae;
4475 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4478 if (!bp->port.pmf || !bp->port.port_stx) {
4479 BNX2X_ERR("BUG!\n");
4483 bp->executer_idx = 0;
4485 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4486 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4487 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4488 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4490 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4492 DMAE_CMD_ENDIANITY_DW_SWAP |
4494 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4495 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4496 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4497 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4498 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4499 dmae->dst_addr_hi = 0;
4500 dmae->len = sizeof(struct host_port_stats) >> 2;
4501 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4502 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4503 dmae->comp_val = DMAE_COMP_VAL;
4506 bnx2x_hw_stats_post(bp);
4507 bnx2x_stats_comp(bp);
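/* Note on the DMAE pattern used throughout this file: each dmae_command
 * queued at bnx2x_sp(bp, dmae[bp->executer_idx++]) describes one copy
 * between host (PCI) memory and chip (GRC) memory. bnx2x_hw_stats_post()
 * kicks the engine, and bnx2x_stats_comp() polls the stats_comp word until
 * the hardware writes DMAE_COMP_VAL there, signalling completion.
 */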
4510 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4512 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4513 int port = BP_PORT(bp);
4518 if (!bp->port.pmf || !bp->func_stx) {
4519 BNX2X_ERR("BUG!\n");
4523 /* save our func_stx */
4524 func_stx = bp->func_stx;
4526 for (vn = VN_0; vn < vn_max; vn++) {
4529 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4530 bnx2x_func_stats_init(bp);
4531 bnx2x_hw_stats_post(bp);
4532 bnx2x_stats_comp(bp);
4535 /* restore our func_stx */
4536 bp->func_stx = func_stx;
4539 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4541 struct dmae_command *dmae = &bp->stats_dmae;
4542 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4545 if (!bp->func_stx) {
4546 BNX2X_ERR("BUG!\n");
4550 bp->executer_idx = 0;
4551 memset(dmae, 0, sizeof(struct dmae_command));
4553 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4554 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4555 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4557 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4559 DMAE_CMD_ENDIANITY_DW_SWAP |
4561 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4562 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4563 dmae->src_addr_lo = bp->func_stx >> 2;
4564 dmae->src_addr_hi = 0;
4565 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4566 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4567 dmae->len = sizeof(struct host_func_stats) >> 2;
4568 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4569 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4570 dmae->comp_val = DMAE_COMP_VAL;
4573 bnx2x_hw_stats_post(bp);
4574 bnx2x_stats_comp(bp);
4577 static void bnx2x_stats_init(struct bnx2x *bp)
4579 int port = BP_PORT(bp);
4580 int func = BP_FUNC(bp);
4583 bp->stats_pending = 0;
4584 bp->executer_idx = 0;
4585 bp->stats_counter = 0;
4587 /* port and func stats for management */
4588 if (!BP_NOMCP(bp)) {
4589 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4590 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4593 bp->port.port_stx = 0;
4596 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4597 bp->port.port_stx, bp->func_stx);
4600 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4601 bp->port.old_nig_stats.brb_discard =
4602 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4603 bp->port.old_nig_stats.brb_truncate =
4604 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4605 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4606 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4607 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4608 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4610 /* function stats */
4611 for_each_queue(bp, i) {
4612 struct bnx2x_fastpath *fp = &bp->fp[i];
4614 memset(&fp->old_tclient, 0,
4615 sizeof(struct tstorm_per_client_stats));
4616 memset(&fp->old_uclient, 0,
4617 sizeof(struct ustorm_per_client_stats));
4618 memset(&fp->old_xclient, 0,
4619 sizeof(struct xstorm_per_client_stats));
4620 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4623 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4624 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4626 bp->stats_state = STATS_STATE_DISABLED;
4629 if (bp->port.port_stx)
4630 bnx2x_port_stats_base_init(bp);
4633 bnx2x_func_stats_base_init(bp);
4635 } else if (bp->func_stx)
4636 bnx2x_func_stats_base_update(bp);
4639 static void bnx2x_timer(unsigned long data)
4641 struct bnx2x *bp = (struct bnx2x *) data;
4643 if (!netif_running(bp->dev))
4646 if (atomic_read(&bp->intr_sem) != 0)
4650 struct bnx2x_fastpath *fp = &bp->fp[0];
4654 rc = bnx2x_rx_int(fp, 1000);
4657 if (!BP_NOMCP(bp)) {
4658 int func = BP_FUNC(bp);
4662 ++bp->fw_drv_pulse_wr_seq;
4663 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4664 /* TBD - add SYSTEM_TIME */
4665 drv_pulse = bp->fw_drv_pulse_wr_seq;
4666 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4668 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4669 MCP_PULSE_SEQ_MASK);
4670 /* The delta between driver pulse and mcp response
4671 * should be 1 (before mcp response) or 0 (after mcp response)
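 * e.g. if drv_pulse is 0x0012, a healthy MCP reports mcp_pulse as
 * either 0x0012 or 0x0011 (one behind, response not yet written)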
4673 if ((drv_pulse != mcp_pulse) &&
4674 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4675 /* someone lost a heartbeat... */
4676 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4677 drv_pulse, mcp_pulse);
4681 if (bp->state == BNX2X_STATE_OPEN)
4682 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4685 mod_timer(&bp->timer, jiffies + bp->current_interval);
4688 /* end of Statistics */
4693 * nic init service functions
4696 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4698 int port = BP_PORT(bp);
4701 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4703 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4704 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4705 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4706 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4709 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4710 dma_addr_t mapping, int sb_id)
4712 int port = BP_PORT(bp);
4713 int func = BP_FUNC(bp);
4718 section = ((u64)mapping) + offsetof(struct host_status_block,
4720 sb->u_status_block.status_block_id = sb_id;
4722 REG_WR(bp, BAR_CSTRORM_INTMEM +
4723 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4724 REG_WR(bp, BAR_CSTRORM_INTMEM +
4725 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4727 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4728 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4730 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4731 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4732 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4735 section = ((u64)mapping) + offsetof(struct host_status_block,
4737 sb->c_status_block.status_block_id = sb_id;
4739 REG_WR(bp, BAR_CSTRORM_INTMEM +
4740 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4741 REG_WR(bp, BAR_CSTRORM_INTMEM +
4742 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4744 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4745 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4747 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4748 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4749 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4751 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
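/* Summary of the status block setup above: the host SB address is written
 * into CSTORM internal memory for both the USTORM and CSTORM halves, every
 * host-coalescing index starts out disabled (the REG_WR16 loops write 1 to
 * the per-index disable flags), and the final bnx2x_ack_sb() enables
 * interrupt generation for this SB in the IGU.
 */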
4754 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4756 int func = BP_FUNC(bp);
4758 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4759 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4760 sizeof(struct tstorm_def_status_block)/4);
4761 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4762 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4763 sizeof(struct cstorm_def_status_block_u)/4);
4764 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4765 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4766 sizeof(struct cstorm_def_status_block_c)/4);
4767 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4768 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4769 sizeof(struct xstorm_def_status_block)/4);
4772 static void bnx2x_init_def_sb(struct bnx2x *bp,
4773 struct host_def_status_block *def_sb,
4774 dma_addr_t mapping, int sb_id)
4776 int port = BP_PORT(bp);
4777 int func = BP_FUNC(bp);
4778 int index, val, reg_offset;
4782 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4783 atten_status_block);
4784 def_sb->atten_status_block.status_block_id = sb_id;
4788 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4789 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4791 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4792 bp->attn_group[index].sig[0] = REG_RD(bp,
4793 reg_offset + 0x10*index);
4794 bp->attn_group[index].sig[1] = REG_RD(bp,
4795 reg_offset + 0x4 + 0x10*index);
4796 bp->attn_group[index].sig[2] = REG_RD(bp,
4797 reg_offset + 0x8 + 0x10*index);
4798 bp->attn_group[index].sig[3] = REG_RD(bp,
4799 reg_offset + 0xc + 0x10*index);
4802 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4803 HC_REG_ATTN_MSG0_ADDR_L);
4805 REG_WR(bp, reg_offset, U64_LO(section));
4806 REG_WR(bp, reg_offset + 4, U64_HI(section));
4808 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4810 val = REG_RD(bp, reg_offset);
4812 REG_WR(bp, reg_offset, val);
4815 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816 u_def_status_block);
4817 def_sb->u_def_status_block.status_block_id = sb_id;
4819 REG_WR(bp, BAR_CSTRORM_INTMEM +
4820 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4821 REG_WR(bp, BAR_CSTRORM_INTMEM +
4822 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4824 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4825 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4827 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4828 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4829 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4832 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833 c_def_status_block);
4834 def_sb->c_def_status_block.status_block_id = sb_id;
4836 REG_WR(bp, BAR_CSTRORM_INTMEM +
4837 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4838 REG_WR(bp, BAR_CSTRORM_INTMEM +
4839 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4841 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4842 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4844 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4845 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4846 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4849 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850 t_def_status_block);
4851 def_sb->t_def_status_block.status_block_id = sb_id;
4853 REG_WR(bp, BAR_TSTRORM_INTMEM +
4854 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4855 REG_WR(bp, BAR_TSTRORM_INTMEM +
4856 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4858 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4859 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4861 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4862 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4863 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4866 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867 x_def_status_block);
4868 def_sb->x_def_status_block.status_block_id = sb_id;
4870 REG_WR(bp, BAR_XSTRORM_INTMEM +
4871 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4872 REG_WR(bp, BAR_XSTRORM_INTMEM +
4873 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4875 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4876 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4878 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4879 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4880 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4882 bp->stats_pending = 0;
4883 bp->set_mac_pending = 0;
4885 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4888 static void bnx2x_update_coalesce(struct bnx2x *bp)
4890 int port = BP_PORT(bp);
4893 for_each_queue(bp, i) {
4894 int sb_id = bp->fp[i].sb_id;
4896 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4897 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4898 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4899 U_SB_ETH_RX_CQ_INDEX),
4900 bp->rx_ticks/(4 * BNX2X_BTR));
4901 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4902 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4903 U_SB_ETH_RX_CQ_INDEX),
4904 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4906 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4907 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4908 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4909 C_SB_ETH_TX_CQ_INDEX),
4910 bp->tx_ticks/(4 * BNX2X_BTR));
4911 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4912 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4913 C_SB_ETH_TX_CQ_INDEX),
4914 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
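/* Coalescing note (assumption from the expressions above): the HC timeout
 * registers are programmed in units of 4*BNX2X_BTR ticks, so with e.g.
 * rx_ticks == 50 and BNX2X_BTR == 4 (illustrative values) the RX CQ index
 * gets a timeout of 3; an interval below one unit yields 0, in which case
 * the matching disable flag is set, turning coalescing off for that index.
 */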
4918 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4919 struct bnx2x_fastpath *fp, int last)
4923 for (i = 0; i < last; i++) {
4924 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4925 struct sk_buff *skb = rx_buf->skb;
4928 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4932 if (fp->tpa_state[i] == BNX2X_TPA_START)
4933 pci_unmap_single(bp->pdev,
4934 pci_unmap_addr(rx_buf, mapping),
4935 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4942 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4944 int func = BP_FUNC(bp);
4945 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4946 ETH_MAX_AGGREGATION_QUEUES_E1H;
4947 u16 ring_prod, cqe_ring_prod;
4950 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4952 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4954 if (bp->flags & TPA_ENABLE_FLAG) {
4956 for_each_queue(bp, j) {
4957 struct bnx2x_fastpath *fp = &bp->fp[j];
4959 for (i = 0; i < max_agg_queues; i++) {
4960 fp->tpa_pool[i].skb =
4961 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4962 if (!fp->tpa_pool[i].skb) {
4963 BNX2X_ERR("Failed to allocate TPA "
4964 "skb pool for queue[%d] - "
4965 "disabling TPA on this "
4967 bnx2x_free_tpa_pool(bp, fp, i);
4968 fp->disable_tpa = 1;
4971 pci_unmap_addr_set((struct sw_rx_bd *)
4972 &bp->fp->tpa_pool[i],
4974 fp->tpa_state[i] = BNX2X_TPA_STOP;
4979 for_each_queue(bp, j) {
4980 struct bnx2x_fastpath *fp = &bp->fp[j];
4983 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4984 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4986 /* "next page" elements initialization */
4988 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4989 struct eth_rx_sge *sge;
4991 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4993 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4994 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4996 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4997 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5000 bnx2x_init_sge_ring_bit_mask(fp);
5003 for (i = 1; i <= NUM_RX_RINGS; i++) {
5004 struct eth_rx_bd *rx_bd;
5006 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5008 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5009 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5011 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5012 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5016 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5017 struct eth_rx_cqe_next_page *nextpg;
5019 nextpg = (struct eth_rx_cqe_next_page *)
5020 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5022 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5023 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5025 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5026 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5029 /* Allocate SGEs and initialize the ring elements */
5030 for (i = 0, ring_prod = 0;
5031 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5033 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5034 BNX2X_ERR("was only able to allocate "
5036 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5037 /* Cleanup already allocated elements */
5038 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5039 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5040 fp->disable_tpa = 1;
5044 ring_prod = NEXT_SGE_IDX(ring_prod);
5046 fp->rx_sge_prod = ring_prod;
5048 /* Allocate BDs and initialize BD ring */
5049 fp->rx_comp_cons = 0;
5050 cqe_ring_prod = ring_prod = 0;
5051 for (i = 0; i < bp->rx_ring_size; i++) {
5052 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5053 BNX2X_ERR("was only able to allocate "
5054 "%d rx skbs on queue[%d]\n", i, j);
5055 fp->eth_q_stats.rx_skb_alloc_failed++;
5058 ring_prod = NEXT_RX_IDX(ring_prod);
5059 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5060 WARN_ON(ring_prod <= i);
5063 fp->rx_bd_prod = ring_prod;
5064 /* must not have more available CQEs than BDs */
5065 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5067 fp->rx_pkt = fp->rx_calls = 0;
5070 * this will generate an interrupt (to the TSTORM)
5071 * and must only be done after the chip is initialized
5073 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5078 REG_WR(bp, BAR_USTRORM_INTMEM +
5079 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5080 U64_LO(fp->rx_comp_mapping));
5081 REG_WR(bp, BAR_USTRORM_INTMEM +
5082 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5083 U64_HI(fp->rx_comp_mapping));
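/* Rx ring layout recap: each queue owns three rings - the BD ring of
 * buffer descriptors, the SGE ring used only for TPA aggregation, and the
 * RCQ completion ring. The loops above chain the pages of each ring by
 * initializing the reserved entries at the end of every page (the last
 * two BD/SGE slots, the last RCQ slot) to point at the next page.
 */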
5087 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5091 for_each_queue(bp, j) {
5092 struct bnx2x_fastpath *fp = &bp->fp[j];
5094 for (i = 1; i <= NUM_TX_RINGS; i++) {
5095 struct eth_tx_next_bd *tx_next_bd =
5096 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5098 tx_next_bd->addr_hi =
5099 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5100 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5101 tx_next_bd->addr_lo =
5102 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5103 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5106 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5107 fp->tx_db.data.zero_fill1 = 0;
5108 fp->tx_db.data.prod = 0;
5110 fp->tx_pkt_prod = 0;
5111 fp->tx_pkt_cons = 0;
5114 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5119 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5121 int func = BP_FUNC(bp);
5123 spin_lock_init(&bp->spq_lock);
5125 bp->spq_left = MAX_SPQ_PENDING;
5126 bp->spq_prod_idx = 0;
5127 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5128 bp->spq_prod_bd = bp->spq;
5129 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5131 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5132 U64_LO(bp->spq_mapping));
5134 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5135 U64_HI(bp->spq_mapping));
5137 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5141 static void bnx2x_init_context(struct bnx2x *bp)
5146 for_each_queue(bp, i) {
5147 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5148 struct bnx2x_fastpath *fp = &bp->fp[i];
5149 u8 cl_id = fp->cl_id;
5151 context->ustorm_st_context.common.sb_index_numbers =
5152 BNX2X_RX_SB_INDEX_NUM;
5153 context->ustorm_st_context.common.clientId = cl_id;
5154 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5155 context->ustorm_st_context.common.flags =
5156 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5157 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5158 context->ustorm_st_context.common.statistics_counter_id =
5160 context->ustorm_st_context.common.mc_alignment_log_size =
5161 BNX2X_RX_ALIGN_SHIFT;
5162 context->ustorm_st_context.common.bd_buff_size =
5164 context->ustorm_st_context.common.bd_page_base_hi =
5165 U64_HI(fp->rx_desc_mapping);
5166 context->ustorm_st_context.common.bd_page_base_lo =
5167 U64_LO(fp->rx_desc_mapping);
5168 if (!fp->disable_tpa) {
5169 context->ustorm_st_context.common.flags |=
5170 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5171 context->ustorm_st_context.common.sge_buff_size =
5172 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5174 context->ustorm_st_context.common.sge_page_base_hi =
5175 U64_HI(fp->rx_sge_mapping);
5176 context->ustorm_st_context.common.sge_page_base_lo =
5177 U64_LO(fp->rx_sge_mapping);
5179 context->ustorm_st_context.common.max_sges_for_packet =
5180 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5181 context->ustorm_st_context.common.max_sges_for_packet =
5182 ((context->ustorm_st_context.common.
5183 max_sges_for_packet + PAGES_PER_SGE - 1) &
5184 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5187 context->ustorm_ag_context.cdu_usage =
5188 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5189 CDU_REGION_NUMBER_UCM_AG,
5190 ETH_CONNECTION_TYPE);
5192 context->xstorm_ag_context.cdu_reserved =
5193 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5194 CDU_REGION_NUMBER_XCM_AG,
5195 ETH_CONNECTION_TYPE);
5199 for_each_queue(bp, i) {
5200 struct bnx2x_fastpath *fp = &bp->fp[i];
5201 struct eth_context *context =
5202 bnx2x_sp(bp, context[i].eth);
5204 context->cstorm_st_context.sb_index_number =
5205 C_SB_ETH_TX_CQ_INDEX;
5206 context->cstorm_st_context.status_block_id = fp->sb_id;
5208 context->xstorm_st_context.tx_bd_page_base_hi =
5209 U64_HI(fp->tx_desc_mapping);
5210 context->xstorm_st_context.tx_bd_page_base_lo =
5211 U64_LO(fp->tx_desc_mapping);
5212 context->xstorm_st_context.statistics_data = (fp->cl_id |
5213 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5217 static void bnx2x_init_ind_table(struct bnx2x *bp)
5219 int func = BP_FUNC(bp);
5222 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5226 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5227 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5228 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5229 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5230 bp->fp->cl_id + (i % bp->num_queues));
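/* The indirection table spreads RSS hash results over the active queues
 * round-robin: with e.g. 4 queues the entries repeat cl_id+0, cl_id+1,
 * cl_id+2, cl_id+3 across all TSTORM_INDIRECTION_TABLE_SIZE slots.
 */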
5233 static void bnx2x_set_client_config(struct bnx2x *bp)
5235 struct tstorm_eth_client_config tstorm_client = {0};
5236 int port = BP_PORT(bp);
5239 tstorm_client.mtu = bp->dev->mtu;
5240 tstorm_client.config_flags =
5241 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5242 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5244 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5245 tstorm_client.config_flags |=
5246 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5247 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5251 for_each_queue(bp, i) {
5252 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5254 REG_WR(bp, BAR_TSTRORM_INTMEM +
5255 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5256 ((u32 *)&tstorm_client)[0]);
5257 REG_WR(bp, BAR_TSTRORM_INTMEM +
5258 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5259 ((u32 *)&tstorm_client)[1]);
5262 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5263 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5266 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5268 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5269 int mode = bp->rx_mode;
5270 int mask = bp->rx_mode_cl_mask;
5271 int func = BP_FUNC(bp);
5272 int port = BP_PORT(bp);
5274 /* All but management unicast packets should pass to the host as well */
5276 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5277 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5278 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5279 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5281 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5284 case BNX2X_RX_MODE_NONE: /* no Rx */
5285 tstorm_mac_filter.ucast_drop_all = mask;
5286 tstorm_mac_filter.mcast_drop_all = mask;
5287 tstorm_mac_filter.bcast_drop_all = mask;
5290 case BNX2X_RX_MODE_NORMAL:
5291 tstorm_mac_filter.bcast_accept_all = mask;
5294 case BNX2X_RX_MODE_ALLMULTI:
5295 tstorm_mac_filter.mcast_accept_all = mask;
5296 tstorm_mac_filter.bcast_accept_all = mask;
5299 case BNX2X_RX_MODE_PROMISC:
5300 tstorm_mac_filter.ucast_accept_all = mask;
5301 tstorm_mac_filter.mcast_accept_all = mask;
5302 tstorm_mac_filter.bcast_accept_all = mask;
5303 /* pass management unicast packets as well */
5304 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5308 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5313 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5316 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5317 REG_WR(bp, BAR_TSTRORM_INTMEM +
5318 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5319 ((u32 *)&tstorm_mac_filter)[i]);
5321 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5322 ((u32 *)&tstorm_mac_filter)[i]); */
5325 if (mode != BNX2X_RX_MODE_NONE)
5326 bnx2x_set_client_config(bp);
5329 static void bnx2x_init_internal_common(struct bnx2x *bp)
5333 /* Zero this manually as its initialization is
5334 currently missing in the initTool */
5335 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5336 REG_WR(bp, BAR_USTRORM_INTMEM +
5337 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5340 static void bnx2x_init_internal_port(struct bnx2x *bp)
5342 int port = BP_PORT(bp);
5345 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5347 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5348 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5349 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5352 static void bnx2x_init_internal_func(struct bnx2x *bp)
5354 struct tstorm_eth_function_common_config tstorm_config = {0};
5355 struct stats_indication_flags stats_flags = {0};
5356 int port = BP_PORT(bp);
5357 int func = BP_FUNC(bp);
5363 tstorm_config.config_flags = MULTI_FLAGS(bp);
5364 tstorm_config.rss_result_mask = MULTI_MASK;
5367 /* Enable TPA if needed */
5368 if (bp->flags & TPA_ENABLE_FLAG)
5369 tstorm_config.config_flags |=
5370 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5373 tstorm_config.config_flags |=
5374 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5376 tstorm_config.leading_client_id = BP_L_ID(bp);
5378 REG_WR(bp, BAR_TSTRORM_INTMEM +
5379 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5380 (*(u32 *)&tstorm_config));
5382 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5383 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5384 bnx2x_set_storm_rx_mode(bp);
5386 for_each_queue(bp, i) {
5387 u8 cl_id = bp->fp[i].cl_id;
5389 /* reset xstorm per client statistics */
5390 offset = BAR_XSTRORM_INTMEM +
5391 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5393 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5394 REG_WR(bp, offset + j*4, 0);
5396 /* reset tstorm per client statistics */
5397 offset = BAR_TSTRORM_INTMEM +
5398 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5400 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5401 REG_WR(bp, offset + j*4, 0);
5403 /* reset ustorm per client statistics */
5404 offset = BAR_USTRORM_INTMEM +
5405 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5407 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5408 REG_WR(bp, offset + j*4, 0);
5411 /* Init statistics related context */
5412 stats_flags.collect_eth = 1;
5414 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5415 ((u32 *)&stats_flags)[0]);
5416 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5417 ((u32 *)&stats_flags)[1]);
5419 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5420 ((u32 *)&stats_flags)[0]);
5421 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5422 ((u32 *)&stats_flags)[1]);
5424 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5425 ((u32 *)&stats_flags)[0]);
5426 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5427 ((u32 *)&stats_flags)[1]);
5429 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5430 ((u32 *)&stats_flags)[0]);
5431 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5432 ((u32 *)&stats_flags)[1]);
5434 REG_WR(bp, BAR_XSTRORM_INTMEM +
5435 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5436 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5437 REG_WR(bp, BAR_XSTRORM_INTMEM +
5438 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5439 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5441 REG_WR(bp, BAR_TSTRORM_INTMEM +
5442 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5443 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5444 REG_WR(bp, BAR_TSTRORM_INTMEM +
5445 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5446 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5448 REG_WR(bp, BAR_USTRORM_INTMEM +
5449 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5450 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5451 REG_WR(bp, BAR_USTRORM_INTMEM +
5452 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5453 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5455 if (CHIP_IS_E1H(bp)) {
5456 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5458 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5460 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5462 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5465 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5469 /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5471 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5472 SGE_PAGE_SIZE * PAGES_PER_SGE),
5474 for_each_queue(bp, i) {
5475 struct bnx2x_fastpath *fp = &bp->fp[i];
5477 REG_WR(bp, BAR_USTRORM_INTMEM +
5478 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5479 U64_LO(fp->rx_comp_mapping));
5480 REG_WR(bp, BAR_USTRORM_INTMEM +
5481 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5482 U64_HI(fp->rx_comp_mapping));
5485 REG_WR(bp, BAR_USTRORM_INTMEM +
5486 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5487 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5488 REG_WR(bp, BAR_USTRORM_INTMEM +
5489 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5490 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5492 REG_WR16(bp, BAR_USTRORM_INTMEM +
5493 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5497 /* dropless flow control */
5498 if (CHIP_IS_E1H(bp)) {
5499 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5501 rx_pause.bd_thr_low = 250;
5502 rx_pause.cqe_thr_low = 250;
5504 rx_pause.sge_thr_low = 0;
5505 rx_pause.bd_thr_high = 350;
5506 rx_pause.cqe_thr_high = 350;
5507 rx_pause.sge_thr_high = 0;
5509 for_each_queue(bp, i) {
5510 struct bnx2x_fastpath *fp = &bp->fp[i];
5512 if (!fp->disable_tpa) {
5513 rx_pause.sge_thr_low = 150;
5514 rx_pause.sge_thr_high = 250;
5518 offset = BAR_USTRORM_INTMEM +
5519 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5522 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5524 REG_WR(bp, offset + j*4,
5525 ((u32 *)&rx_pause)[j]);
5529 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5531 /* Init rate shaping and fairness contexts */
5535 /* During init there is no active link;
5536 until the link is up, set the link rate to 10Gbps */
5537 bp->link_vars.line_speed = SPEED_10000;
5538 bnx2x_init_port_minmax(bp);
5542 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5543 bnx2x_calc_vn_weight_sum(bp);
5545 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5546 bnx2x_init_vn_minmax(bp, 2*vn + port);
5548 /* Enable rate shaping and fairness */
5549 bp->cmng.flags.cmng_enables |=
5550 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5553 /* rate shaping and fairness are disabled */
5555 "single function mode minmax will be disabled\n");
5559 /* Store it to internal memory */
5561 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5562 REG_WR(bp, BAR_XSTRORM_INTMEM +
5563 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5564 ((u32 *)(&bp->cmng))[i]);
5567 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5569 switch (load_code) {
5570 case FW_MSG_CODE_DRV_LOAD_COMMON:
5571 bnx2x_init_internal_common(bp);
5574 case FW_MSG_CODE_DRV_LOAD_PORT:
5575 bnx2x_init_internal_port(bp);
5578 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5579 bnx2x_init_internal_func(bp);
5583 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5588 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5592 for_each_queue(bp, i) {
5593 struct bnx2x_fastpath *fp = &bp->fp[i];
5596 fp->state = BNX2X_FP_STATE_CLOSED;
5598 fp->cl_id = BP_L_ID(bp) + i;
5600 fp->sb_id = fp->cl_id + 1;
5602 fp->sb_id = fp->cl_id;
5605 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5606 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5607 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5609 bnx2x_update_fpsb_idx(fp);
5612 /* ensure status block indices were read */
5616 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5618 bnx2x_update_dsb_idx(bp);
5619 bnx2x_update_coalesce(bp);
5620 bnx2x_init_rx_rings(bp);
5621 bnx2x_init_tx_ring(bp);
5622 bnx2x_init_sp_ring(bp);
5623 bnx2x_init_context(bp);
5624 bnx2x_init_internal(bp, load_code);
5625 bnx2x_init_ind_table(bp);
5626 bnx2x_stats_init(bp);
5628 /* At this point, we are ready for interrupts */
5629 atomic_set(&bp->intr_sem, 0);
5631 /* flush all before enabling interrupts */
5635 bnx2x_int_enable(bp);
5637 /* Check for SPIO5 */
5638 bnx2x_attn_int_deasserted0(bp,
5639 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5640 AEU_INPUTS_ATTN_BITS_SPIO5);
5643 /* end of nic init */
5646 * gzip service functions
5649 static int bnx2x_gunzip_init(struct bnx2x *bp)
5651 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5652 &bp->gunzip_mapping);
5653 if (bp->gunzip_buf == NULL)
5656 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5657 if (bp->strm == NULL)
5660 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5662 if (bp->strm->workspace == NULL)
5672 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673 bp->gunzip_mapping);
5674 bp->gunzip_buf = NULL;
5677 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5678 " un-compression\n", bp->dev->name);
5682 static void bnx2x_gunzip_end(struct bnx2x *bp)
5684 kfree(bp->strm->workspace);
5689 if (bp->gunzip_buf) {
5690 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5691 bp->gunzip_mapping);
5692 bp->gunzip_buf = NULL;
5696 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5700 /* check gzip header */
5701 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5702 BNX2X_ERR("Bad gzip header\n");
5710 if (zbuf[3] & FNAME)
5711 while ((zbuf[n++] != 0) && (n < len));
5713 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5714 bp->strm->avail_in = len - n;
5715 bp->strm->next_out = bp->gunzip_buf;
5716 bp->strm->avail_out = FW_BUF_SIZE;
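	/* The gzip header was validated and skipped by hand above, so the
	 * stream handed to zlib is raw deflate data; the negative window
	 * bits value (-MAX_WBITS) tells zlib_inflateInit2() to expect
	 * exactly that - no gzip/zlib wrapper.
	 */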
5718 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5722 rc = zlib_inflate(bp->strm, Z_FINISH);
5723 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5724 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5725 bp->dev->name, bp->strm->msg);
5727 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5728 if (bp->gunzip_outlen & 0x3)
5729 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5730 " gunzip_outlen (%d) not aligned\n",
5731 bp->dev->name, bp->gunzip_outlen);
5732 bp->gunzip_outlen >>= 2;
5734 zlib_inflateEnd(bp->strm);
5736 if (rc == Z_STREAM_END)
5742 /* nic load/unload */
5745 * General service functions
5748 /* send a NIG loopback debug packet */
5749 static void bnx2x_lb_pckt(struct bnx2x *bp)
5753 /* Ethernet source and destination addresses */
5754 wb_write[0] = 0x55555555;
5755 wb_write[1] = 0x55555555;
5756 wb_write[2] = 0x20; /* SOP */
5757 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5759 /* NON-IP protocol */
5760 wb_write[0] = 0x09000000;
5761 wb_write[1] = 0x55555555;
5762 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5763 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
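/* The two 3-dword writes above push one minimal 16-byte debug frame into
 * the NIG loopback interface: dwords 0-1 carry packet data (here a dummy
 * 0x55... pattern) and the third dword carries control flags - 0x20 marks
 * start-of-packet, 0x10 end-of-packet, matching the inline comments.
 */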
5766 /* Some of the internal memories
5767 * are not directly readable from the driver;
5768 * to test them we send debug packets
5770 static int bnx2x_int_mem_test(struct bnx2x *bp)
5776 if (CHIP_REV_IS_FPGA(bp))
5778 else if (CHIP_REV_IS_EMUL(bp))
5783 DP(NETIF_MSG_HW, "start part1\n");
5785 /* Disable inputs of parser neighbor blocks */
5786 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5787 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5788 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5789 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5791 /* Write 0 to parser credits for CFC search request */
5792 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5794 /* send Ethernet packet */
5797 /* TODO: should the NIG statistics be reset here? */
5798 /* Wait until NIG register shows 1 packet of size 0x10 */
5799 count = 1000 * factor;
5802 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5803 val = *bnx2x_sp(bp, wb_data[0]);
5811 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5815 /* Wait until PRS register shows 1 packet */
5816 count = 1000 * factor;
5818 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5826 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5830 /* Reset and init BRB, PRS */
5831 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5833 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5835 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5836 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5838 DP(NETIF_MSG_HW, "part2\n");
5840 /* Disable inputs of parser neighbor blocks */
5841 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5842 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5843 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5844 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5846 /* Write 0 to parser credits for CFC search request */
5847 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5849 /* send 10 Ethernet packets */
5850 for (i = 0; i < 10; i++)
5853 /* Wait until NIG register shows 10 + 1
5854 packets of size 11*0x10 = 0xb0 */
5855 count = 1000 * factor;
5858 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5859 val = *bnx2x_sp(bp, wb_data[0]);
5867 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5871 /* Wait until PRS register shows 2 packets */
5872 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5874 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5876 /* Write 1 to parser credits for CFC search request */
5877 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5879 /* Wait until PRS register shows 3 packets */
5880 msleep(10 * factor);
5881 /* Wait until NIG register shows 1 packet of size 0x10 */
5882 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5884 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5886 /* clear NIG EOP FIFO */
5887 for (i = 0; i < 11; i++)
5888 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5889 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5891 BNX2X_ERR("clear of NIG failed\n");
5895 /* Reset and init BRB, PRS, NIG */
5896 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5898 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5900 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5901 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5904 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5907 /* Enable inputs of parser neighbor blocks */
5908 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5909 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5910 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5911 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5913 DP(NETIF_MSG_HW, "done\n");
5918 static void enable_blocks_attention(struct bnx2x *bp)
5920 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5921 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5922 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5923 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5924 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5925 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5926 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5927 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5928 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5929 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5930 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5931 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5932 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5933 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5934 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5935 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5936 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5937 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5938 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5939 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5940 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5941 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5942 if (CHIP_REV_IS_FPGA(bp))
5943 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5945 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5946 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5947 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5948 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5949 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5950 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5951 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5952 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5953 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5954 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5958 static void bnx2x_reset_common(struct bnx2x *bp)
5961 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5963 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5966 static void bnx2x_init_pxp(struct bnx2x *bp)
5969 int r_order, w_order;
5971 pci_read_config_word(bp->pdev,
5972 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5973 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5974 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5976 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5978 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5982 bnx2x_init_pxp_arb(bp, r_order, w_order);
5985 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5991 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5992 SHARED_HW_CFG_FAN_FAILURE_MASK;
5994 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5998 * The fan failure mechanism is usually related to the PHY type since
5999 * the power consumption of the board is affected by the PHY. Currently,
6000 * a fan is required for most designs using the SFX7101, BCM8727 and BCM8481.
6002 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6003 for (port = PORT_0; port < PORT_MAX; port++) {
6005 SHMEM_RD(bp, dev_info.port_hw_config[port].
6006 external_phy_config) &
6007 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6010 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6012 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6014 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6017 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6019 if (is_required == 0)
6022 /* Fan failure is indicated by SPIO 5 */
6023 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6024 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6026 /* set to active low mode */
6027 val = REG_RD(bp, MISC_REG_SPIO_INT);
6028 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6029 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6030 REG_WR(bp, MISC_REG_SPIO_INT, val);
6032 /* enable interrupt to signal the IGU */
6033 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6034 val |= (1 << MISC_REGISTERS_SPIO_5);
6035 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6038 static int bnx2x_init_common(struct bnx2x *bp)
6045 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6047 bnx2x_reset_common(bp);
6048 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6049 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6051 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6052 if (CHIP_IS_E1H(bp))
6053 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6055 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6057 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6059 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6060 if (CHIP_IS_E1(bp)) {
6061 /* enable HW interrupt from PXP on USDM overflow
6062 bit 16 on INT_MASK_0 */
6063 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6066 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6070 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6071 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6072 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6073 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6074 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6075 /* make sure this value is 0 */
6076 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6078 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6079 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6080 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6081 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6082 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6085 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6087 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6088 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6089 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6092 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6093 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6095 /* let the HW do its magic ... */
6097 /* finish PXP init */
6098 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6100 BNX2X_ERR("PXP2 CFG failed\n");
6103 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6105 BNX2X_ERR("PXP2 RD_INIT failed\n");
6109 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6110 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6112 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6114 /* clean the DMAE memory */
6116 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6118 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6119 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6120 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6121 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6123 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6124 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6125 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6126 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6128 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6133 for (i = 0; i < 64; i++) {
6134 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6135 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6137 if (CHIP_IS_E1H(bp)) {
6138 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6139 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6144 /* soft reset pulse */
6145 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6146 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6149 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6152 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6153 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6154 if (!CHIP_REV_IS_SLOW(bp)) {
6155 /* enable hw interrupt from doorbell Q */
6156 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6159 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6160 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6161 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6164 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6166 if (CHIP_IS_E1H(bp))
6167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6169 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6170 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6171 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6172 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6174 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6175 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6176 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6177 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6179 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6180 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6181 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6182 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6185 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6187 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6190 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6191 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6192 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6194 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6195 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6196 REG_WR(bp, i, 0xc0cac01a);
6197 /* TODO: replace with something meaningful */
6199 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6201 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6202 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6203 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6204 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6205 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6206 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6207 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6208 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6209 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6210 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6212 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6214 if (sizeof(union cdu_context) != 1024)
6215 /* we currently assume that a context is 1024 bytes */
6216 printk(KERN_ALERT PFX "please adjust the size of"
6217 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6219 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6220 val = (4 << 24) + (0 << 12) + 1024;
6221 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6223 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6224 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6225 /* enable context validation interrupt from CFC */
6226 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6228 /* set the thresholds to prevent CFC/CDU race */
6229 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6231 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6232 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6234 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6235 /* Reset PCIE errors for debug */
6236 REG_WR(bp, 0x2814, 0xffffffff);
6237 REG_WR(bp, 0x3820, 0xffffffff);
6239 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6240 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6241 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6242 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6244 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6245 if (CHIP_IS_E1H(bp)) {
6246 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6247 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6250 if (CHIP_REV_IS_SLOW(bp))
6253 /* finish CFC init */
6254 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6256 BNX2X_ERR("CFC LL_INIT failed\n");
6259 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6261 BNX2X_ERR("CFC AC_INIT failed\n");
6264 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6266 BNX2X_ERR("CFC CAM_INIT failed\n");
6269 REG_WR(bp, CFC_REG_DEBUG0, 0);
6271 /* read NIG statistic
6272 to see if this is the first time up since power-up */
6273 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6274 val = *bnx2x_sp(bp, wb_data[0]);
6276 /* do internal memory self test */
6277 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6278 BNX2X_ERR("internal mem self test failed\n");
6282 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6283 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6284 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6285 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6286 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6287 bp->port.need_hw_lock = 1;
6288 break;
6290 default:
6291 break;
6292 }
6294 bnx2x_setup_fan_failure_detection(bp);
6296 /* clear PXP2 attentions */
6297 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6299 enable_blocks_attention(bp);
6301 if (!BP_NOMCP(bp)) {
6302 bnx2x_acquire_phy_lock(bp);
6303 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6304 bnx2x_release_phy_lock(bp);
6305 } else
6306 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
6308 return 0;
6309 }
6311 static int bnx2x_init_port(struct bnx2x *bp)
6312 {
6313 int port = BP_PORT(bp);
6314 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6315 u32 low, high;
6316 u32 val;
6318 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6320 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6322 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6323 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6325 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6326 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6327 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6328 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6330 bnx2x_init_block(bp, QM_BLOCK, init_stage);
6331 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
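/* Editor's note -- illustrative, not part of the original source: the QM
 * connection-number register appears to be programmed in units of 16 CIDs
 * minus one, so 1024/16 - 1 = 63 encodes 1024 connections per port (the
 * same 1024-connection sizing used for the timers and T2 tables later in
 * this file).
 */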
6333 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6334 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6335 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6337 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6339 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6340 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6341 /* no pause for emulation and FPGA */
6342 low = 0;
6343 high = 513;
6344 } else {
6345 if (IS_E1HMF(bp))
6346 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6347 else if (bp->dev->mtu > 4096) {
6348 if (bp->flags & ONE_PORT_FLAG)
6349 low = 160;
6350 else {
6351 val = bp->dev->mtu;
6352 /* (24*1024 + val*4)/256 */
6353 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6354 }
6355 } else
6356 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6357 high = low + 56; /* 14*1024/256 */
6358 }
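/* Editor's note -- worked example, not part of the original source: the
 * BRB thresholds are in 256-byte blocks. For a single-function port with
 * MTU 9000: low = 96 + 9000/64 + 1 = 237 blocks, matching the documented
 * formula (24*1024 + 9000*4)/256 ~= 237, and high = 237 + 56 = 293
 * blocks (56 blocks = 14*1024/256 of extra headroom).
 */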
6359 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6360 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6363 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6365 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6366 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6367 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6368 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6370 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6371 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6372 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6373 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6375 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6376 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6378 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6380 /* configure PBF to work without PAUSE mtu 9000 */
6381 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6383 /* update threshold */
6384 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6385 /* update init credit */
6386 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
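/* Editor's note -- illustrative arithmetic, not part of the original
 * source: PBF credits are in 16-byte units, so the threshold 9040/16 =
 * 565 units spans one worst-case ~9k frame; the init credit 565 + 553 -
 * 22 = 1096 units adds what is presumably pipeline headroom (the 553 and
 * 22 constants are not explained in the source).
 */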
6388 /* probe changes */
6389 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6390 msleep(5);
6391 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6393 #ifdef BCM_CNIC
6394 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6395 #endif
6396 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6397 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6399 if (CHIP_IS_E1(bp)) {
6400 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6401 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6402 }
6403 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6405 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6406 /* init aeu_mask_attn_func_0/1:
6407 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6408 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6409 * bits 4-7 are used for "per vn group attention" */
6410 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6411 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6413 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6414 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6415 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6416 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6417 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6419 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6421 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6423 if (CHIP_IS_E1H(bp)) {
6424 /* 0x2 disable e1hov, 0x1 enable */
6425 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6426 (IS_E1HMF(bp) ? 0x1 : 0x2));
6428 {
6429 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6430 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6431 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6432 }
6433 }
6435 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6436 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6438 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6440 {
6441 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6443 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6444 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6446 /* The GPIO should be swapped if the swap register is
6447 set and active */
6448 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6449 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6451 /* Select function upon port-swap configuration */
6452 if (port == 0) {
6453 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6454 aeu_gpio_mask = (swap_val && swap_override) ?
6455 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6456 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6457 } else {
6458 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6459 aeu_gpio_mask = (swap_val && swap_override) ?
6460 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6461 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6462 }
6463 val = REG_RD(bp, offset);
6464 /* add GPIO3 to group */
6465 val |= aeu_gpio_mask;
6466 REG_WR(bp, offset, val);
6467 }
6468 break;
6470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6472 /* add SPIO 5 to group 0 */
6473 {
6474 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6475 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6476 val = REG_RD(bp, reg_addr);
6477 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6478 REG_WR(bp, reg_addr, val);
6479 }
6480 break;
6482 default:
6483 break;
6484 }
6486 bnx2x__link_reset(bp);
6488 return 0;
6489 }
6491 #define ILT_PER_FUNC (768/2)
6492 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6493 /* the physical address is shifted right 12 bits and a 1=valid
6494 bit is added as the 53rd bit;
6495 then, since this is a wide register(TM),
6496 we split it into two 32-bit writes
6497 */
6498 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6499 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6500 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6501 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
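/* Editor's note -- worked example, not part of the original source: for
 * a DMA address x = 0x0012_3456_7000, ONCHIP_ADDR1(x) = x >> 12 =
 * 0x01234567 (address bits 12..43) and ONCHIP_ADDR2(x) = (1 << 20) |
 * (x >> 44) = 0x00100000 (the valid bit plus address bits 44..63), so
 * one ILT entry is written as two 32-bit halves via bnx2x_wb_wr().
 */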
6503 #ifdef BCM_CNIC
6504 #define CNIC_ILT_LINES 127
6505 #define CNIC_CTX_PER_ILT 16
6506 #else
6507 #define CNIC_ILT_LINES 0
6508 #endif
6510 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6511 {
6512 int reg;
6514 if (CHIP_IS_E1H(bp))
6515 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6516 else
6517 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6519 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6520 }
6522 static int bnx2x_init_func(struct bnx2x *bp)
6523 {
6524 int port = BP_PORT(bp);
6525 int func = BP_FUNC(bp);
6526 u32 addr, val;
6527 int i;
6529 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6531 /* set MSI reconfigure capability */
6532 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6533 val = REG_RD(bp, addr);
6534 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6535 REG_WR(bp, addr, val);
6537 i = FUNC_ILT_BASE(func);
6539 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6540 if (CHIP_IS_E1H(bp)) {
6541 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6542 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6543 } else /* E1 */
6544 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6545 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6547 #ifdef BCM_CNIC
6548 i += 1 + CNIC_ILT_LINES;
6549 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6550 if (CHIP_IS_E1(bp))
6551 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6552 else {
6553 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6554 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6555 }
6557 i++;
6558 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6559 if (CHIP_IS_E1(bp))
6560 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6561 else {
6562 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6563 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6564 }
6566 i++;
6567 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6568 if (CHIP_IS_E1(bp))
6569 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6570 else {
6571 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6572 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6573 }
6575 /* tell the searcher where the T2 table is */
6576 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6578 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6579 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6581 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6582 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6583 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6585 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6586 #endif
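/* Editor's note -- illustrative, not part of the original source:
 * programming 10 hash bits gives the searcher a 2^10 = 1024-bucket hash
 * table, consistent with the 1024-connection T2 sizing above (an
 * inference from the constants, not a documented fact).
 */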
6588 if (CHIP_IS_E1H(bp)) {
6589 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6590 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6591 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6592 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6593 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6594 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6595 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6596 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6597 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6599 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6600 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6601 }
6603 /* HC init per function */
6604 if (CHIP_IS_E1H(bp)) {
6605 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6607 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6608 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6609 }
6610 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6612 /* Reset PCIE errors for debug */
6613 REG_WR(bp, 0x2114, 0xffffffff);
6614 REG_WR(bp, 0x2120, 0xffffffff);
6616 return 0;
6617 }
6619 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6620 {
6621 int rc = 0;
6623 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6624 BP_FUNC(bp), load_code);
6626 bp->dmae_ready = 0;
6627 mutex_init(&bp->dmae_mutex);
6628 rc = bnx2x_gunzip_init(bp);
6629 if (rc)
6630 return rc;
6632 switch (load_code) {
6633 case FW_MSG_CODE_DRV_LOAD_COMMON:
6634 rc = bnx2x_init_common(bp);
6635 if (rc)
6636 goto init_hw_err;
6637 /* no break */
6639 case FW_MSG_CODE_DRV_LOAD_PORT:
6640 bp->dmae_ready = 1;
6641 rc = bnx2x_init_port(bp);
6642 if (rc)
6643 goto init_hw_err;
6644 /* no break */
6646 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6647 bp->dmae_ready = 1;
6648 rc = bnx2x_init_func(bp);
6649 if (rc)
6650 goto init_hw_err;
6651 break;
6653 default:
6654 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6655 break;
6656 }
6658 if (!BP_NOMCP(bp)) {
6659 int func = BP_FUNC(bp);
6661 bp->fw_drv_pulse_wr_seq =
6662 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6663 DRV_PULSE_SEQ_MASK);
6664 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6667 /* this needs to be done before gunzip end */
6668 bnx2x_zero_def_sb(bp);
6669 for_each_queue(bp, i)
6670 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6671 #ifdef BCM_CNIC
6672 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6673 #endif
6675 init_hw_err:
6676 bnx2x_gunzip_end(bp);
6678 return rc;
6679 }
6681 static void bnx2x_free_mem(struct bnx2x *bp)
6682 {
6684 #define BNX2X_PCI_FREE(x, y, size) \
6685 do { \
6686 if (x) { \
6687 pci_free_consistent(bp->pdev, size, x, y); \
6688 x = NULL; \
6689 y = 0; \
6690 } \
6691 } while (0)
6693 #define BNX2X_FREE(x) \
6694 do { \
6695 if (x) { \
6696 vfree(x); \
6697 x = NULL; \
6698 } \
6699 } while (0)
6701 int i;
6703 /* fastpath */
6704 /* Common */
6705 for_each_queue(bp, i) {
6708 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6709 bnx2x_fp(bp, i, status_blk_mapping),
6710 sizeof(struct host_status_block));
6711 }
6713 for_each_queue(bp, i) {
6715 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6716 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6717 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6718 bnx2x_fp(bp, i, rx_desc_mapping),
6719 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6721 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6722 bnx2x_fp(bp, i, rx_comp_mapping),
6723 sizeof(struct eth_fast_path_rx_cqe) *
6724 NUM_RCQ_BD);
6726 /* SGE ring */
6727 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6728 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6729 bnx2x_fp(bp, i, rx_sge_mapping),
6730 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6731 }
6733 for_each_queue(bp, i) {
6735 /* fastpath tx rings: tx_buf tx_desc */
6736 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6737 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6738 bnx2x_fp(bp, i, tx_desc_mapping),
6739 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6740 }
6741 /* end of fastpath */
6743 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6744 sizeof(struct host_def_status_block));
6746 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6747 sizeof(struct bnx2x_slowpath));
6749 #ifdef BCM_CNIC
6750 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6751 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6752 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6753 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6754 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6755 sizeof(struct host_status_block));
6756 #endif
6757 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6759 #undef BNX2X_PCI_FREE
6760 #undef BNX2X_FREE
6761 }
6763 static int bnx2x_alloc_mem(struct bnx2x *bp)
6764 {
6766 #define BNX2X_PCI_ALLOC(x, y, size) \
6767 do { \
6768 x = pci_alloc_consistent(bp->pdev, size, y); \
6769 if (x == NULL) \
6770 goto alloc_mem_err; \
6771 memset(x, 0, size); \
6772 } while (0)
6774 #define BNX2X_ALLOC(x, size) \
6775 do { \
6776 x = vmalloc(size); \
6777 if (x == NULL) \
6778 goto alloc_mem_err; \
6779 memset(x, 0, size); \
6780 } while (0)
6782 int i;
6784 /* fastpath */
6785 /* Common */
6786 for_each_queue(bp, i) {
6787 bnx2x_fp(bp, i, bp) = bp;
6790 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6791 &bnx2x_fp(bp, i, status_blk_mapping),
6792 sizeof(struct host_status_block));
6793 }
6795 for_each_queue(bp, i) {
6797 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6798 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6799 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6800 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6801 &bnx2x_fp(bp, i, rx_desc_mapping),
6802 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6804 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6805 &bnx2x_fp(bp, i, rx_comp_mapping),
6806 sizeof(struct eth_fast_path_rx_cqe) *
6807 NUM_RCQ_BD);
6809 /* SGE ring */
6810 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6811 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6812 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6813 &bnx2x_fp(bp, i, rx_sge_mapping),
6814 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6815 }
6817 for_each_queue(bp, i) {
6819 /* fastpath tx rings: tx_buf tx_desc */
6820 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6821 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6822 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6823 &bnx2x_fp(bp, i, tx_desc_mapping),
6824 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6825 }
6826 /* end of fastpath */
6828 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6829 sizeof(struct host_def_status_block));
6831 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6832 sizeof(struct bnx2x_slowpath));
6834 #ifdef BCM_CNIC
6835 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6837 /* allocate searcher T2 table
6838 we allocate 1/4 of alloc num for T2
6839 (which is not entered into the ILT) */
6840 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6842 /* Initialize T2 (for 1024 connections) */
6843 for (i = 0; i < 16*1024; i += 64)
6844 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
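/* Editor's note -- clarifying the loop above, not part of the original
 * source: each 64-byte T2 entry stores at offset 56 the physical address
 * of the next entry, so the loop links the 16KB table into a singly
 * linked free list for the searcher; the final entry is left pointing
 * one slot past the end of the table.
 */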
6846 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6847 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6849 /* QM queues (128*MAX_CONN) */
6850 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6852 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6853 sizeof(struct host_status_block));
6854 #endif
6856 /* Slow path ring */
6857 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6859 return 0;
6861 alloc_mem_err:
6862 bnx2x_free_mem(bp);
6863 return -ENOMEM;
6865 #undef BNX2X_PCI_ALLOC
6866 #undef BNX2X_ALLOC
6867 }
6869 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6870 {
6871 int i;
6873 for_each_queue(bp, i) {
6874 struct bnx2x_fastpath *fp = &bp->fp[i];
6876 u16 bd_cons = fp->tx_bd_cons;
6877 u16 sw_prod = fp->tx_pkt_prod;
6878 u16 sw_cons = fp->tx_pkt_cons;
6880 while (sw_cons != sw_prod) {
6881 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6882 sw_cons++;
6883 }
6884 }
6885 }
6887 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6888 {
6889 int i, j;
6891 for_each_queue(bp, j) {
6892 struct bnx2x_fastpath *fp = &bp->fp[j];
6894 for (i = 0; i < NUM_RX_BD; i++) {
6895 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6896 struct sk_buff *skb = rx_buf->skb;
6898 if (skb == NULL)
6899 continue;
6901 pci_unmap_single(bp->pdev,
6902 pci_unmap_addr(rx_buf, mapping),
6903 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6905 rx_buf->skb = NULL;
6906 dev_kfree_skb(skb);
6907 }
6908 if (!fp->disable_tpa)
6909 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6910 ETH_MAX_AGGREGATION_QUEUES_E1 :
6911 ETH_MAX_AGGREGATION_QUEUES_E1H);
6912 }
6913 }
6915 static void bnx2x_free_skbs(struct bnx2x *bp)
6916 {
6917 bnx2x_free_tx_skbs(bp);
6918 bnx2x_free_rx_skbs(bp);
6919 }
6921 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6922 {
6923 int i, offset = 1;
6925 free_irq(bp->msix_table[0].vector, bp->dev);
6926 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6927 bp->msix_table[0].vector);
6929 #ifdef BCM_CNIC
6930 offset++;
6931 #endif
6932 for_each_queue(bp, i) {
6933 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6934 "state %x\n", i, bp->msix_table[i + offset].vector,
6935 bnx2x_fp(bp, i, state));
6937 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6938 }
6939 }
6941 static void bnx2x_free_irq(struct bnx2x *bp)
6942 {
6943 if (bp->flags & USING_MSIX_FLAG) {
6944 bnx2x_free_msix_irqs(bp);
6945 pci_disable_msix(bp->pdev);
6946 bp->flags &= ~USING_MSIX_FLAG;
6948 } else if (bp->flags & USING_MSI_FLAG) {
6949 free_irq(bp->pdev->irq, bp->dev);
6950 pci_disable_msi(bp->pdev);
6951 bp->flags &= ~USING_MSI_FLAG;
6953 } else
6954 free_irq(bp->pdev->irq, bp->dev);
6955 }
6957 static int bnx2x_enable_msix(struct bnx2x *bp)
6958 {
6959 int i, rc, offset = 1;
6960 int igu_vec = 0;
6962 bp->msix_table[0].entry = igu_vec;
6963 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6965 #ifdef BCM_CNIC
6966 igu_vec = BP_L_ID(bp) + offset;
6967 bp->msix_table[1].entry = igu_vec;
6968 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6969 offset++;
6970 #endif
6971 for_each_queue(bp, i) {
6972 igu_vec = BP_L_ID(bp) + offset + i;
6973 bp->msix_table[i + offset].entry = igu_vec;
6974 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6975 "(fastpath #%u)\n", i + offset, igu_vec, i);
6978 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6979 BNX2X_NUM_QUEUES(bp) + offset);
6981 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6985 bp->flags |= USING_MSIX_FLAG;
6990 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6991 {
6992 int i, rc, offset = 1;
6994 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6995 bp->dev->name, bp->dev);
6997 BNX2X_ERR("request sp irq failed\n");
7004 for_each_queue(bp, i) {
7005 struct bnx2x_fastpath *fp = &bp->fp[i];
7006 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7007 bp->dev->name, i);
7009 rc = request_irq(bp->msix_table[i + offset].vector,
7010 bnx2x_msix_fp_int, 0, fp->name, fp);
7012 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7013 bnx2x_free_msix_irqs(bp);
7017 fp->state = BNX2X_FP_STATE_IRQ;
7020 i = BNX2X_NUM_QUEUES(bp);
7021 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7022 " ... fp[%d] %d\n",
7023 bp->dev->name, bp->msix_table[0].vector,
7024 0, bp->msix_table[offset].vector,
7025 i - 1, bp->msix_table[offset + i - 1].vector);
7027 return 0;
7028 }
7030 static int bnx2x_enable_msi(struct bnx2x *bp)
7031 {
7032 int rc;
7034 rc = pci_enable_msi(bp->pdev);
7035 if (rc) {
7036 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7037 return -1;
7038 }
7039 bp->flags |= USING_MSI_FLAG;
7041 return 0;
7042 }
7044 static int bnx2x_req_irq(struct bnx2x *bp)
7045 {
7046 unsigned long flags;
7047 int rc;
7049 if (bp->flags & USING_MSI_FLAG)
7050 flags = 0;
7051 else
7052 flags = IRQF_SHARED;
7054 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7055 bp->dev->name, bp->dev);
7056 if (!rc)
7057 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7059 return rc;
7060 }
7062 static void bnx2x_napi_enable(struct bnx2x *bp)
7063 {
7064 int i;
7066 for_each_queue(bp, i)
7067 napi_enable(&bnx2x_fp(bp, i, napi));
7068 }
7070 static void bnx2x_napi_disable(struct bnx2x *bp)
7071 {
7072 int i;
7074 for_each_queue(bp, i)
7075 napi_disable(&bnx2x_fp(bp, i, napi));
7076 }
7078 static void bnx2x_netif_start(struct bnx2x *bp)
7079 {
7080 int intr_sem;
7082 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7083 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7085 if (intr_sem) {
7086 if (netif_running(bp->dev)) {
7087 bnx2x_napi_enable(bp);
7088 bnx2x_int_enable(bp);
7089 if (bp->state == BNX2X_STATE_OPEN)
7090 netif_tx_wake_all_queues(bp->dev);
7091 }
7092 }
7093 }
7095 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7096 {
7097 bnx2x_int_disable_sync(bp, disable_hw);
7098 bnx2x_napi_disable(bp);
7099 netif_tx_disable(bp->dev);
7100 }
7102 /*
7103 * Init service functions
7104 */
7106 /**
7107 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7108 *
7109 * @param bp driver descriptor
7110 * @param set set or clear an entry (1 or 0)
7111 * @param mac pointer to a buffer containing a MAC
7112 * @param cl_bit_vec bit vector of clients to register a MAC for
7113 * @param cam_offset offset in a CAM to use
7114 * @param with_bcast set broadcast MAC as well
7115 */
7116 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7117 u32 cl_bit_vec, u8 cam_offset,
7118 u8 with_bcast)
7119 {
7120 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7121 int port = BP_PORT(bp);
7123 /* CAM allocation
7124 * unicasts 0-31:port0 32-63:port1
7125 * multicast 64-127:port0 128-191:port1
7126 */
7127 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7128 config->hdr.offset = cam_offset;
7129 config->hdr.client_id = 0xff;
7130 config->hdr.reserved1 = 0;
7132 /* primary MAC */
7133 config->config_table[0].cam_entry.msb_mac_addr =
7134 swab16(*(u16 *)&mac[0]);
7135 config->config_table[0].cam_entry.middle_mac_addr =
7136 swab16(*(u16 *)&mac[2]);
7137 config->config_table[0].cam_entry.lsb_mac_addr =
7138 swab16(*(u16 *)&mac[4]);
7139 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7140 if (set)
7141 config->config_table[0].target_table_entry.flags = 0;
7142 else
7143 CAM_INVALIDATE(config->config_table[0]);
7144 config->config_table[0].target_table_entry.clients_bit_vector =
7145 cpu_to_le32(cl_bit_vec);
7146 config->config_table[0].target_table_entry.vlan_id = 0;
7148 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7149 (set ? "setting" : "clearing"),
7150 config->config_table[0].cam_entry.msb_mac_addr,
7151 config->config_table[0].cam_entry.middle_mac_addr,
7152 config->config_table[0].cam_entry.lsb_mac_addr);
7154 /* broadcast */
7155 if (with_bcast) {
7156 config->config_table[1].cam_entry.msb_mac_addr =
7157 cpu_to_le16(0xffff);
7158 config->config_table[1].cam_entry.middle_mac_addr =
7159 cpu_to_le16(0xffff);
7160 config->config_table[1].cam_entry.lsb_mac_addr =
7161 cpu_to_le16(0xffff);
7162 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7163 if (set)
7164 config->config_table[1].target_table_entry.flags =
7165 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7166 else
7167 CAM_INVALIDATE(config->config_table[1]);
7168 config->config_table[1].target_table_entry.clients_bit_vector =
7169 cpu_to_le32(cl_bit_vec);
7170 config->config_table[1].target_table_entry.vlan_id = 0;
7171 }
7173 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7174 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7175 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7176 }
7178 /**
7179 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7180 *
7181 * @param bp driver descriptor
7182 * @param set set or clear an entry (1 or 0)
7183 * @param mac pointer to a buffer containing a MAC
7184 * @param cl_bit_vec bit vector of clients to register a MAC for
7185 * @param cam_offset offset in a CAM to use
7186 */
7187 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7188 u32 cl_bit_vec, u8 cam_offset)
7189 {
7190 struct mac_configuration_cmd_e1h *config =
7191 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7193 config->hdr.length = 1;
7194 config->hdr.offset = cam_offset;
7195 config->hdr.client_id = 0xff;
7196 config->hdr.reserved1 = 0;
7198 /* primary MAC */
7199 config->config_table[0].msb_mac_addr =
7200 swab16(*(u16 *)&mac[0]);
7201 config->config_table[0].middle_mac_addr =
7202 swab16(*(u16 *)&mac[2]);
7203 config->config_table[0].lsb_mac_addr =
7204 swab16(*(u16 *)&mac[4]);
7205 config->config_table[0].clients_bit_vector =
7206 cpu_to_le32(cl_bit_vec);
7207 config->config_table[0].vlan_id = 0;
7208 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7209 if (set)
7210 config->config_table[0].flags = BP_PORT(bp);
7211 else
7212 config->config_table[0].flags =
7213 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7215 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7216 (set ? "setting" : "clearing"),
7217 config->config_table[0].msb_mac_addr,
7218 config->config_table[0].middle_mac_addr,
7219 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7221 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7222 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7223 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7224 }
7226 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7227 int *state_p, int poll)
7228 {
7229 /* can take a while if any port is running */
7230 int cnt = 5000;
7232 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7233 poll ? "polling" : "waiting", state, idx);
7235 might_sleep();
7236 while (cnt--) {
7237 if (poll) {
7238 bnx2x_rx_int(bp->fp, 10);
7239 /* if index is different from 0
7240 * the reply for some commands will
7241 * be on the non default queue
7242 */
7243 if (idx)
7244 bnx2x_rx_int(&bp->fp[idx], 10);
7245 }
7247 mb(); /* state is changed by bnx2x_sp_event() */
7248 if (*state_p == state) {
7249 #ifdef BNX2X_STOP_ON_ERROR
7250 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7251 #endif
7252 return 0;
7253 }
7255 msleep(1);
7257 if (bp->panic)
7258 return -EIO;
7259 }
7261 /* timeout! */
7262 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7263 poll ? "polling" : "waiting", state, idx);
7264 #ifdef BNX2X_STOP_ON_ERROR
7265 bnx2x_panic();
7266 #endif
7268 return -EBUSY;
7269 }
7271 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7272 {
7273 bp->set_mac_pending++;
7274 smp_wmb();
7276 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7277 (1 << bp->fp->cl_id), BP_FUNC(bp));
7279 /* Wait for a completion */
7280 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7281 }
7283 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7284 {
7285 bp->set_mac_pending++;
7286 smp_wmb();
7288 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7289 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7290 1);
7292 /* Wait for a completion */
7293 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7294 }
7296 #ifdef BCM_CNIC
7297 /**
7298 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7299 * MAC(s). This function will wait until the ramrod completion
7300 * returns.
7301 *
7302 * @param bp driver handle
7303 * @param set set or clear the CAM entry
7304 *
7305 * @return 0 if success, -ENODEV if ramrod doesn't return.
7306 */
7307 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7308 {
7309 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7311 bp->set_mac_pending++;
7312 smp_wmb();
7314 /* Send a SET_MAC ramrod */
7315 if (CHIP_IS_E1(bp))
7316 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7317 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7318 1);
7319 else
7320 /* CAM allocation for E1H
7321 * unicasts: by func number
7322 * multicast: 20+FUNC*20, 20 each
7323 */
7324 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7325 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
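/* Editor's note -- illustrative, not part of the original source: with
 * E1H_FUNC_MAX = 8, function 2 places its iSCSI MAC at CAM offset
 * 8 + 2 = 10, i.e. just past the eight per-function unicast entries
 * described in the comment above.
 */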
7327 /* Wait for a completion when setting */
7328 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7330 return 0;
7331 }
7332 #endif
7334 static int bnx2x_setup_leading(struct bnx2x *bp)
7335 {
7336 int rc;
7338 /* reset IGU state */
7339 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7341 /* SETUP ramrod */
7342 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7344 /* Wait for completion */
7345 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7347 return rc;
7348 }
7350 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7351 {
7352 struct bnx2x_fastpath *fp = &bp->fp[index];
7354 /* reset IGU state */
7355 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7357 /* SETUP ramrod */
7358 fp->state = BNX2X_FP_STATE_OPENING;
7359 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7360 fp->cl_id, 0);
7362 /* Wait for completion */
7363 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7364 &(fp->state), 0);
7365 }
7367 static int bnx2x_poll(struct napi_struct *napi, int budget);
7369 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7370 {
7372 switch (bp->multi_mode) {
7373 case ETH_RSS_MODE_DISABLED:
7374 bp->num_queues = 1;
7375 break;
7377 case ETH_RSS_MODE_REGULAR:
7378 if (num_queues)
7379 bp->num_queues = min_t(u32, num_queues,
7380 BNX2X_MAX_QUEUES(bp));
7381 else
7382 bp->num_queues = min_t(u32, num_online_cpus(),
7383 BNX2X_MAX_QUEUES(bp));
7384 break;
7387 default:
7388 bp->num_queues = 1;
7389 break;
7390 }
7391 }
7393 static int bnx2x_set_num_queues(struct bnx2x *bp)
7394 {
7395 int rc = 0;
7397 switch (int_mode) {
7398 case INT_MODE_INTx:
7399 case INT_MODE_MSI:
7400 bp->num_queues = 1;
7401 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7402 break;
7404 default:
7406 /* Set number of queues according to bp->multi_mode value */
7407 bnx2x_set_num_queues_msix(bp);
7409 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7410 bp->num_queues);
7412 /* if we can't use MSI-X we only need one fp,
7413 * so try to enable MSI-X with the requested number of fp's
7414 * and fallback to MSI or legacy INTx with one fp
7415 */
7416 rc = bnx2x_enable_msix(bp);
7417 if (rc)
7418 /* failed to enable MSI-X */
7419 bp->num_queues = 1;
7420 break;
7421 }
7422 bp->dev->real_num_tx_queues = bp->num_queues;
7424 return rc;
7425 }
7426 #ifdef BCM_CNIC
7427 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7428 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7429 #endif
7431 /* must be called with rtnl_lock */
7432 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7433 {
7434 u32 load_code;
7435 int i, rc;
7437 #ifdef BNX2X_STOP_ON_ERROR
7438 if (unlikely(bp->panic))
7439 return -EPERM;
7440 #endif
7442 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7444 rc = bnx2x_set_num_queues(bp);
7446 if (bnx2x_alloc_mem(bp))
7447 return -ENOMEM;
7449 for_each_queue(bp, i)
7450 bnx2x_fp(bp, i, disable_tpa) =
7451 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7453 for_each_queue(bp, i)
7454 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7455 bnx2x_poll, 128);
7457 bnx2x_napi_enable(bp);
7459 if (bp->flags & USING_MSIX_FLAG) {
7460 rc = bnx2x_req_msix_irqs(bp);
7461 if (rc) {
7462 pci_disable_msix(bp->pdev);
7463 goto load_error1;
7464 }
7465 } else {
7466 /* Fall to INTx if failed to enable MSI-X due to lack of
7467 memory (in bnx2x_set_num_queues()) */
7468 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7469 bnx2x_enable_msi(bp);
7470 bnx2x_ack_int(bp);
7471 rc = bnx2x_req_irq(bp);
7472 if (rc) {
7473 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7474 if (bp->flags & USING_MSI_FLAG)
7475 pci_disable_msi(bp->pdev);
7476 goto load_error1;
7477 }
7478 if (bp->flags & USING_MSI_FLAG) {
7479 bp->dev->irq = bp->pdev->irq;
7480 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7481 bp->dev->name, bp->pdev->irq);
7482 }
7483 }
7485 /* Send LOAD_REQUEST command to MCP
7486 Returns the type of LOAD command:
7487 if it is the first port to be initialized
7488 common blocks should be initialized, otherwise - not
7489 */
7490 if (!BP_NOMCP(bp)) {
7491 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7493 BNX2X_ERR("MCP response failure, aborting\n");
7497 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7498 rc = -EBUSY; /* other port in diagnostic mode */
7503 int port = BP_PORT(bp);
7505 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7506 load_count[0], load_count[1], load_count[2]);
7507 load_count[0]++;
7508 load_count[1 + port]++;
7509 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7510 load_count[0], load_count[1], load_count[2]);
7511 if (load_count[0] == 1)
7512 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7513 else if (load_count[1 + port] == 1)
7514 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7516 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7517 }
7519 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7520 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7521 bp->port.pmf = 1;
7522 else
7523 bp->port.pmf = 0;
7524 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7526 /* Initialize HW */
7527 rc = bnx2x_init_hw(bp, load_code);
7528 if (rc) {
7529 BNX2X_ERR("HW init failed, aborting\n");
7530 goto load_error2;
7531 }
7533 /* Setup NIC internals and enable interrupts */
7534 bnx2x_nic_init(bp, load_code);
7536 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7537 (bp->common.shmem2_base))
7538 SHMEM2_WR(bp, dcc_support,
7539 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7540 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7542 /* Send LOAD_DONE command to MCP */
7543 if (!BP_NOMCP(bp)) {
7544 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7546 BNX2X_ERR("MCP response failure, aborting\n");
7552 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7554 rc = bnx2x_setup_leading(bp);
7556 BNX2X_ERR("Setup leading failed!\n");
7557 #ifndef BNX2X_STOP_ON_ERROR
7565 if (CHIP_IS_E1H(bp))
7566 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7567 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7568 bp->flags |= MF_FUNC_DIS;
7569 }
7571 if (bp->state == BNX2X_STATE_OPEN) {
7572 #ifdef BCM_CNIC
7573 /* Enable Timer scan */
7574 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7575 #endif
7576 for_each_nondefault_queue(bp, i) {
7577 rc = bnx2x_setup_multi(bp, i);
7578 if (rc)
7579 #ifdef BCM_CNIC
7580 goto load_error4;
7581 #else
7582 goto load_error3;
7583 #endif
7584 }
7586 if (CHIP_IS_E1(bp))
7587 bnx2x_set_eth_mac_addr_e1(bp, 1);
7588 else
7589 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7590 #ifdef BCM_CNIC
7591 /* Set iSCSI L2 MAC */
7592 mutex_lock(&bp->cnic_mutex);
7593 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7594 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7595 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7596 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7597 CNIC_SB_ID(bp));
7598 }
7599 mutex_unlock(&bp->cnic_mutex);
7600 #endif
7601 }
7603 if (bp->port.pmf)
7604 bnx2x_initial_phy_init(bp, load_mode);
7606 /* Start fast path */
7607 switch (load_mode) {
7608 case LOAD_NORMAL:
7609 if (bp->state == BNX2X_STATE_OPEN) {
7610 /* Tx queue should be only reenabled */
7611 netif_tx_wake_all_queues(bp->dev);
7612 }
7613 /* Initialize the receive filter. */
7614 bnx2x_set_rx_mode(bp->dev);
7615 break;
7617 case LOAD_OPEN:
7618 netif_tx_start_all_queues(bp->dev);
7619 if (bp->state != BNX2X_STATE_OPEN)
7620 netif_tx_disable(bp->dev);
7621 /* Initialize the receive filter. */
7622 bnx2x_set_rx_mode(bp->dev);
7623 break;
7625 case LOAD_DIAG:
7626 /* Initialize the receive filter. */
7627 bnx2x_set_rx_mode(bp->dev);
7628 bp->state = BNX2X_STATE_DIAG;
7629 break;
7631 default:
7632 break;
7633 }
7635 if (!bp->port.pmf)
7636 bnx2x__link_status_update(bp);
7638 /* start the timer */
7639 mod_timer(&bp->timer, jiffies + bp->current_interval);
7641 #ifdef BCM_CNIC
7642 bnx2x_setup_cnic_irq_info(bp);
7643 if (bp->state == BNX2X_STATE_OPEN)
7644 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7645 #endif
7647 return 0;
7649 #ifdef BCM_CNIC
7650 load_error4:
7651 /* Disable Timer scan */
7652 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7653 #endif
7654 load_error3:
7655 bnx2x_int_disable_sync(bp, 1);
7656 if (!BP_NOMCP(bp)) {
7657 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7658 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7659 }
7660 bp->port.pmf = 0;
7661 /* Free SKBs, SGEs, TPA pool and driver internals */
7662 bnx2x_free_skbs(bp);
7663 for_each_queue(bp, i)
7664 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7665 load_error2:
7666 /* Release IRQs */
7667 bnx2x_free_irq(bp);
7668 load_error1:
7669 bnx2x_napi_disable(bp);
7670 for_each_queue(bp, i)
7671 netif_napi_del(&bnx2x_fp(bp, i, napi));
7672 bnx2x_free_mem(bp);
7674 return rc;
7675 }
7677 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7678 {
7679 struct bnx2x_fastpath *fp = &bp->fp[index];
7680 int rc;
7682 /* halt the connection */
7683 fp->state = BNX2X_FP_STATE_HALTING;
7684 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7686 /* Wait for completion */
7687 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7688 &(fp->state), 1);
7689 if (rc) /* timeout */
7690 return rc;
7692 /* delete cfc entry */
7693 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7695 /* Wait for completion */
7696 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7697 &(fp->state), 1);
7699 return rc;
7700 }
7701 static int bnx2x_stop_leading(struct bnx2x *bp)
7702 {
7703 __le16 dsb_sp_prod_idx;
7704 /* if the other port is handling traffic,
7705 this can take a lot of time */
7706 int cnt = 500;
7707 int rc;
7709 might_sleep();
7711 /* Send HALT ramrod */
7712 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7713 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7715 /* Wait for completion */
7716 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7717 &(bp->fp[0].state), 1);
7718 if (rc) /* timeout */
7719 return rc;
7721 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7723 /* Send PORT_DELETE ramrod */
7724 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7726 /* Wait for completion to arrive on default status block
7727 we are going to reset the chip anyway
7728 so there is not much to do if this times out
7729 */
7730 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7731 if (!cnt) {
7732 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7733 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7734 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7735 #ifdef BNX2X_STOP_ON_ERROR
7736 bnx2x_panic();
7737 #endif
7738 rc = -EBUSY;
7739 break;
7740 }
7741 cnt--;
7742 msleep(1);
7743 rmb(); /* Refresh the dsb_sp_prod */
7744 }
7745 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7746 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7748 return rc;
7749 }
7751 static void bnx2x_reset_func(struct bnx2x *bp)
7752 {
7753 int port = BP_PORT(bp);
7754 int func = BP_FUNC(bp);
7755 int base, i;
7757 /* Configure IGU */
7758 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7759 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7762 /* Disable Timer scan */
7763 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7765 * Wait for at least 10ms and up to 2 second for the timers scan to
7768 for (i = 0; i < 200; i++) {
7770 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7775 base = FUNC_ILT_BASE(func);
7776 for (i = base; i < base + ILT_PER_FUNC; i++)
7777 bnx2x_ilt_wr(bp, i, 0);
7778 }
7780 static void bnx2x_reset_port(struct bnx2x *bp)
7781 {
7782 int port = BP_PORT(bp);
7783 u32 val;
7785 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7787 /* Do not rcv packets to BRB */
7788 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7789 /* Do not direct rcv packets that are not for MCP to the BRB */
7790 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7791 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7793 /* Configure AEU */
7794 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7796 msleep(100);
7797 /* Check for BRB port occupancy */
7798 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7799 if (val)
7800 DP(NETIF_MSG_IFDOWN,
7801 "BRB1 is not empty %d blocks are occupied\n", val);
7803 /* TODO: Close Doorbell port? */
7804 }
7806 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7807 {
7808 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7809 BP_FUNC(bp), reset_code);
7811 switch (reset_code) {
7812 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7813 bnx2x_reset_port(bp);
7814 bnx2x_reset_func(bp);
7815 bnx2x_reset_common(bp);
7816 break;
7818 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7819 bnx2x_reset_port(bp);
7820 bnx2x_reset_func(bp);
7821 break;
7823 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7824 bnx2x_reset_func(bp);
7825 break;
7827 default:
7828 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7829 break;
7830 }
7831 }
7833 /* must be called with rtnl_lock */
7834 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7835 {
7836 int port = BP_PORT(bp);
7837 u32 reset_code = 0;
7838 int i, cnt, rc;
7840 #ifdef BCM_CNIC
7841 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7842 #endif
7843 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7845 /* Set "drop all" */
7846 bp->rx_mode = BNX2X_RX_MODE_NONE;
7847 bnx2x_set_storm_rx_mode(bp);
7849 /* Disable HW interrupts, NAPI and Tx */
7850 bnx2x_netif_stop(bp, 1);
7852 del_timer_sync(&bp->timer);
7853 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7854 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7855 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7857 /* Release IRQs */
7858 bnx2x_free_irq(bp);
7860 /* Wait until tx fastpath tasks complete */
7861 for_each_queue(bp, i) {
7862 struct bnx2x_fastpath *fp = &bp->fp[i];
7864 cnt = 1000;
7865 while (bnx2x_has_tx_work_unload(fp)) {
7867 bnx2x_tx_int(fp);
7868 if (!cnt) {
7869 BNX2X_ERR("timeout waiting for queue[%d]\n",
7870 i);
7871 #ifdef BNX2X_STOP_ON_ERROR
7872 bnx2x_panic();
7873 return -EBUSY;
7874 #else
7875 break;
7876 #endif
7877 }
7878 cnt--;
7879 msleep(1);
7880 }
7881 }
7882 /* Give HW time to discard old tx messages */
7883 msleep(1);
7885 if (CHIP_IS_E1(bp)) {
7886 struct mac_configuration_cmd *config =
7887 bnx2x_sp(bp, mcast_config);
7889 bnx2x_set_eth_mac_addr_e1(bp, 0);
7891 for (i = 0; i < config->hdr.length; i++)
7892 CAM_INVALIDATE(config->config_table[i]);
7894 config->hdr.length = i;
7895 if (CHIP_REV_IS_SLOW(bp))
7896 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7898 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7899 config->hdr.client_id = bp->fp->cl_id;
7900 config->hdr.reserved1 = 0;
7902 bp->set_mac_pending++;
7905 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7906 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7907 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7908 } else { /* E1H */
7910 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7912 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7914 for (i = 0; i < MC_HASH_SIZE; i++)
7915 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7917 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7918 }
7919 #ifdef BCM_CNIC
7920 /* Clear iSCSI L2 MAC */
7921 mutex_lock(&bp->cnic_mutex);
7922 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7923 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7924 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7925 }
7926 mutex_unlock(&bp->cnic_mutex);
7927 #endif
7929 if (unload_mode == UNLOAD_NORMAL)
7930 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7932 else if (bp->flags & NO_WOL_FLAG)
7933 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7935 else if (bp->wol) {
7936 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7937 u8 *mac_addr = bp->dev->dev_addr;
7938 u32 val;
7939 /* The mac address is written to entries 1-4 to
7940 preserve entry 0 which is used by the PMF */
7941 u8 entry = (BP_E1HVN(bp) + 1)*8;
7943 val = (mac_addr[0] << 8) | mac_addr[1];
7944 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7946 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7947 (mac_addr[4] << 8) | mac_addr[5];
7948 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
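/* Editor's note -- clarifying the offset above, not part of the original
 * source: each EMAC MAC-match entry is 8 bytes wide, so
 * entry = (vn + 1) * 8 is the byte offset of CAM entry vn + 1; VNs 0-3
 * therefore land in entries 1-4, preserving entry 0 for the PMF exactly
 * as the comment says.
 */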
7950 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7952 } else
7953 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7955 /* Close multi and leading connections
7956 Completions for ramrods are collected in a synchronous way */
7957 for_each_nondefault_queue(bp, i)
7958 if (bnx2x_stop_multi(bp, i))
7959 goto unload_error;
7961 rc = bnx2x_stop_leading(bp);
7962 if (rc) {
7963 BNX2X_ERR("Stop leading failed!\n");
7964 #ifdef BNX2X_STOP_ON_ERROR
7965 return -EBUSY;
7966 #else
7967 goto unload_error;
7968 #endif
7969 }
7971 unload_error:
7972 if (!BP_NOMCP(bp))
7973 reset_code = bnx2x_fw_command(bp, reset_code);
7975 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7976 load_count[0], load_count[1], load_count[2]);
7977 load_count[0]--;
7978 load_count[1 + port]--;
7979 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7980 load_count[0], load_count[1], load_count[2]);
7981 if (load_count[0] == 0)
7982 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7983 else if (load_count[1 + port] == 0)
7984 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7986 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7987 }
7989 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7990 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7991 bnx2x__link_reset(bp);
7993 /* Reset the chip */
7994 bnx2x_reset_chip(bp, reset_code);
7996 /* Report UNLOAD_DONE to MCP */
7997 if (!BP_NOMCP(bp))
7998 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8000 bp->port.pmf = 0;
8002 /* Free SKBs, SGEs, TPA pool and driver internals */
8003 bnx2x_free_skbs(bp);
8004 for_each_queue(bp, i)
8005 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8006 for_each_queue(bp, i)
8007 netif_napi_del(&bnx2x_fp(bp, i, napi));
8008 bnx2x_free_mem(bp);
8010 bp->state = BNX2X_STATE_CLOSED;
8012 netif_carrier_off(bp->dev);
8014 return 0;
8015 }
8017 static void bnx2x_reset_task(struct work_struct *work)
8018 {
8019 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8021 #ifdef BNX2X_STOP_ON_ERROR
8022 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8023 " so reset not done to allow debug dump,\n"
8024 " you will need to reboot when done\n");
8030 if (!netif_running(bp->dev))
8031 goto reset_task_exit;
8033 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8034 bnx2x_nic_load(bp, LOAD_NORMAL);
8036 reset_task_exit:
8037 rtnl_unlock();
8038 }
8040 /* end of nic load/unload */
8044 /*
8045 * Init service functions
8046 */
8048 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8049 {
8050 switch (func) {
8051 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8052 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8053 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8054 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8055 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8056 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8057 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8058 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8060 BNX2X_ERR("Unsupported function index: %d\n", func);
8065 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8066 {
8067 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8069 /* Flush all outstanding writes */
8070 mmiowb();
8072 /* Pretend to be function 0 */
8073 REG_WR(bp, reg, 0);
8074 /* Flush the GRC transaction (in the chip) */
8075 new_val = REG_RD(bp, reg);
8076 if (new_val != 0) {
8077 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8078 new_val);
8079 BUG();
8080 }
8082 /* From now we are in the "like-E1" mode */
8083 bnx2x_int_disable(bp);
8085 /* Flush all outstanding writes */
8086 mmiowb();
8088 /* Restore the original function settings */
8089 REG_WR(bp, reg, orig_func);
8090 new_val = REG_RD(bp, reg);
8091 if (new_val != orig_func) {
8092 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8093 orig_func, new_val);
8094 BUG();
8095 }
8096 }
8098 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8099 {
8100 if (CHIP_IS_E1H(bp))
8101 bnx2x_undi_int_disable_e1h(bp, func);
8102 else
8103 bnx2x_int_disable(bp);
8104 }
8106 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8107 {
8108 u32 val;
8110 /* Check if there is any driver already loaded */
8111 val = REG_RD(bp, MISC_REG_UNPREPARED);
8112 if (val == 0x1) {
8113 /* Check if it is the UNDI driver
8114 * UNDI driver initializes CID offset for normal bell to 0x7
8115 */
8116 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8117 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8118 if (val == 0x7) {
8119 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8120 /* save our func */
8121 int func = BP_FUNC(bp);
8122 u32 swap_en;
8123 u32 swap_val;
8125 /* clear the UNDI indication */
8126 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8128 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8130 /* try unload UNDI on port 0 */
8131 bp->func = 0;
8132 bp->fw_seq =
8133 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8134 DRV_MSG_SEQ_NUMBER_MASK);
8135 reset_code = bnx2x_fw_command(bp, reset_code);
8137 /* if UNDI is loaded on the other port */
8138 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8140 /* send "DONE" for previous unload */
8141 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8143 /* unload UNDI on port 1 */
8144 bp->func = 1;
8145 bp->fw_seq =
8146 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8147 DRV_MSG_SEQ_NUMBER_MASK);
8148 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8150 bnx2x_fw_command(bp, reset_code);
8151 }
8153 /* now it's safe to release the lock */
8154 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8156 bnx2x_undi_int_disable(bp, func);
8158 /* close input traffic and wait for it */
8159 /* Do not rcv packets to BRB */
8160 REG_WR(bp,
8161 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8162 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8163 /* Do not direct rcv packets that are not for MCP to
8164 * the BRB */
8165 REG_WR(bp,
8166 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8167 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8168 /* clear AEU */
8169 REG_WR(bp,
8170 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8171 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8172 msleep(10);
8174 /* save NIG port swap info */
8175 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8176 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8177 /* reset device */
8178 REG_WR(bp,
8179 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8180 0xd3ffffff);
8181 REG_WR(bp,
8182 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8183 0x1403);
8184 /* take the NIG out of reset and restore swap values */
8185 REG_WR(bp,
8186 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8187 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8188 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8189 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8191 /* send unload done to the MCP */
8192 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8194 /* restore our func and fw_seq */
8195 bp->func = func;
8196 bp->fw_seq =
8197 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8198 DRV_MSG_SEQ_NUMBER_MASK);
8200 } else
8201 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8202 }
8203 }
8205 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8206 {
8207 u32 val, val2, val3, val4, id;
8208 u16 pmc;
8210 /* Get the chip revision id and number. */
8211 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8212 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8213 id = ((val & 0xffff) << 16);
8214 val = REG_RD(bp, MISC_REG_CHIP_REV);
8215 id |= ((val & 0xf) << 12);
8216 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8217 id |= ((val & 0xff) << 4);
8218 val = REG_RD(bp, MISC_REG_BOND_ID);
8219 id |= (val & 0xf);
8220 bp->common.chip_id = id;
8221 bp->link_params.chip_id = bp->common.chip_id;
8222 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8224 val = (REG_RD(bp, 0x2874) & 0x55);
8225 if ((bp->common.chip_id & 0x1) ||
8226 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8227 bp->flags |= ONE_PORT_FLAG;
8228 BNX2X_DEV_INFO("single port device\n");
8231 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8232 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8233 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8234 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8235 bp->common.flash_size, bp->common.flash_size);
8237 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8238 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8239 bp->link_params.shmem_base = bp->common.shmem_base;
8240 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8241 bp->common.shmem_base, bp->common.shmem2_base);
8243 if (!bp->common.shmem_base ||
8244 (bp->common.shmem_base < 0xA0000) ||
8245 (bp->common.shmem_base >= 0xC0000)) {
8246 BNX2X_DEV_INFO("MCP not active\n");
8247 bp->flags |= NO_MCP_FLAG;
8248 return;
8249 }
8251 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8252 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8253 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8254 BNX2X_ERR("BAD MCP validity signature\n");
8256 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8257 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8259 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8260 SHARED_HW_CFG_LED_MODE_MASK) >>
8261 SHARED_HW_CFG_LED_MODE_SHIFT);
8263 bp->link_params.feature_config_flags = 0;
8264 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8265 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8266 bp->link_params.feature_config_flags |=
8267 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8268 else
8269 bp->link_params.feature_config_flags &=
8270 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8272 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8273 bp->common.bc_ver = val;
8274 BNX2X_DEV_INFO("bc_ver %X\n", val);
8275 if (val < BNX2X_BC_VER) {
8276 /* for now only warn
8277 * later we might need to enforce this */
8278 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8279 " please upgrade BC\n", BNX2X_BC_VER, val);
8281 bp->link_params.feature_config_flags |=
8282 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8283 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8285 if (BP_E1HVN(bp) == 0) {
8286 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8287 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8288 } else
8289 /* no WOL capability for E1HVN != 0 */
8290 bp->flags |= NO_WOL_FLAG;
8292 BNX2X_DEV_INFO("%sWoL capable\n",
8293 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8295 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8296 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8297 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8298 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8300 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8301 val, val2, val3, val4);
8302 }
8304 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8305 u32 switch_cfg)
8306 {
8307 int port = BP_PORT(bp);
8308 u32 ext_phy_type;
8310 switch (switch_cfg) {
8312 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8315 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8316 switch (ext_phy_type) {
8317 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8318 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8321 bp->port.supported |= (SUPPORTED_10baseT_Half |
8322 SUPPORTED_10baseT_Full |
8323 SUPPORTED_100baseT_Half |
8324 SUPPORTED_100baseT_Full |
8325 SUPPORTED_1000baseT_Full |
8326 SUPPORTED_2500baseX_Full |
8331 SUPPORTED_Asym_Pause);
8334 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8335 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8338 bp->port.supported |= (SUPPORTED_10baseT_Half |
8339 SUPPORTED_10baseT_Full |
8340 SUPPORTED_100baseT_Half |
8341 SUPPORTED_100baseT_Full |
8342 SUPPORTED_1000baseT_Full |
8347 SUPPORTED_Asym_Pause);
8351 BNX2X_ERR("NVRAM config error. "
8352 "BAD SerDes ext_phy_config 0x%x\n",
8353 bp->link_params.ext_phy_config);
8357 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8359 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8362 case SWITCH_CFG_10G:
8363 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8365 ext_phy_type =
8366 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8367 switch (ext_phy_type) {
8368 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8369 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8372 bp->port.supported |= (SUPPORTED_10baseT_Half |
8373 SUPPORTED_10baseT_Full |
8374 SUPPORTED_100baseT_Half |
8375 SUPPORTED_100baseT_Full |
8376 SUPPORTED_1000baseT_Full |
8377 SUPPORTED_2500baseX_Full |
8378 SUPPORTED_10000baseT_Full |
8383 SUPPORTED_Asym_Pause);
8386 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8387 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8390 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8391 SUPPORTED_1000baseT_Full |
8395 SUPPORTED_Asym_Pause);
8398 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8399 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8402 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8403 SUPPORTED_2500baseX_Full |
8404 SUPPORTED_1000baseT_Full |
8408 SUPPORTED_Asym_Pause);
8411 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8412 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8415 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8418 SUPPORTED_Asym_Pause);
8421 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8422 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8425 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8426 SUPPORTED_1000baseT_Full |
8429 SUPPORTED_Asym_Pause);
8432 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8433 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8436 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8437 SUPPORTED_1000baseT_Full |
8441 SUPPORTED_Asym_Pause);
8444 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8445 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8448 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8449 SUPPORTED_1000baseT_Full |
8453 SUPPORTED_Asym_Pause);
8456 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8457 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8460 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8464 SUPPORTED_Asym_Pause);
8467 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8468 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8471 bp->port.supported |= (SUPPORTED_10baseT_Half |
8472 SUPPORTED_10baseT_Full |
8473 SUPPORTED_100baseT_Half |
8474 SUPPORTED_100baseT_Full |
8475 SUPPORTED_1000baseT_Full |
8476 SUPPORTED_10000baseT_Full |
8480 SUPPORTED_Asym_Pause);
8483 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8484 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8485 bp->link_params.ext_phy_config);
8486 break;
8488 default:
8489 BNX2X_ERR("NVRAM config error. "
8490 "BAD XGXS ext_phy_config 0x%x\n",
8491 bp->link_params.ext_phy_config);
8492 return;
8493 }
8495 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8497 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8502 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8503 bp->port.link_config);
8506 bp->link_params.phy_addr = bp->port.phy_addr;
8508 /* mask what we support according to speed_cap_mask */
8509 if (!(bp->link_params.speed_cap_mask &
8510 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8511 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8513 if (!(bp->link_params.speed_cap_mask &
8514 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8515 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8517 if (!(bp->link_params.speed_cap_mask &
8518 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8519 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8521 if (!(bp->link_params.speed_cap_mask &
8522 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8523 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8525 if (!(bp->link_params.speed_cap_mask &
8526 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8527 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8528 SUPPORTED_1000baseT_Full);
8530 if (!(bp->link_params.speed_cap_mask &
8531 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8532 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8534 if (!(bp->link_params.speed_cap_mask &
8535 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8536 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8538 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8541 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8543 bp->link_params.req_duplex = DUPLEX_FULL;
8545 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8546 case PORT_FEATURE_LINK_SPEED_AUTO:
8547 if (bp->port.supported & SUPPORTED_Autoneg) {
8548 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8549 bp->port.advertising = bp->port.supported;
8550 } else {
8551 u32 ext_phy_type =
8552 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8554 if ((ext_phy_type ==
8555 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8556 (ext_phy_type ==
8557 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8558 /* force 10G, no AN */
8559 bp->link_params.req_line_speed = SPEED_10000;
8560 bp->port.advertising =
8561 (ADVERTISED_10000baseT_Full |
8562 ADVERTISED_FIBRE);
8563 break;
8564 }
8565 BNX2X_ERR("NVRAM config error. "
8566 "Invalid link_config 0x%x"
8567 " Autoneg not supported\n",
8568 bp->port.link_config);
8569 return;
8570 }
8571 break;
8573 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8574 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8575 bp->link_params.req_line_speed = SPEED_10;
8576 bp->port.advertising = (ADVERTISED_10baseT_Full |
8577 ADVERTISED_TP);
8578 } else {
8579 BNX2X_ERR("NVRAM config error. "
8580 "Invalid link_config 0x%x"
8581 " speed_cap_mask 0x%x\n",
8582 bp->port.link_config,
8583 bp->link_params.speed_cap_mask);
8584 return;
8585 }
8586 break;
8588 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8589 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8590 bp->link_params.req_line_speed = SPEED_10;
8591 bp->link_params.req_duplex = DUPLEX_HALF;
8592 bp->port.advertising = (ADVERTISED_10baseT_Half |
8593 ADVERTISED_TP);
8594 } else {
8595 BNX2X_ERR("NVRAM config error. "
8596 "Invalid link_config 0x%x"
8597 " speed_cap_mask 0x%x\n",
8598 bp->port.link_config,
8599 bp->link_params.speed_cap_mask);
8600 return;
8601 }
8602 break;
8604 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8605 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8606 bp->link_params.req_line_speed = SPEED_100;
8607 bp->port.advertising = (ADVERTISED_100baseT_Full |
8608 ADVERTISED_TP);
8609 } else {
8610 BNX2X_ERR("NVRAM config error. "
8611 "Invalid link_config 0x%x"
8612 " speed_cap_mask 0x%x\n",
8613 bp->port.link_config,
8614 bp->link_params.speed_cap_mask);
8615 return;
8616 }
8617 break;
8619 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8620 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8621 bp->link_params.req_line_speed = SPEED_100;
8622 bp->link_params.req_duplex = DUPLEX_HALF;
8623 bp->port.advertising = (ADVERTISED_100baseT_Half |
8624 ADVERTISED_TP);
8625 } else {
8626 BNX2X_ERR("NVRAM config error. "
8627 "Invalid link_config 0x%x"
8628 " speed_cap_mask 0x%x\n",
8629 bp->port.link_config,
8630 bp->link_params.speed_cap_mask);
8631 return;
8632 }
8633 break;
8635 case PORT_FEATURE_LINK_SPEED_1G:
8636 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8637 bp->link_params.req_line_speed = SPEED_1000;
8638 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8639 ADVERTISED_TP);
8640 } else {
8641 BNX2X_ERR("NVRAM config error. "
8642 "Invalid link_config 0x%x"
8643 " speed_cap_mask 0x%x\n",
8644 bp->port.link_config,
8645 bp->link_params.speed_cap_mask);
8646 return;
8647 }
8648 break;
8650 case PORT_FEATURE_LINK_SPEED_2_5G:
8651 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8652 bp->link_params.req_line_speed = SPEED_2500;
8653 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8654 ADVERTISED_TP);
8655 } else {
8656 BNX2X_ERR("NVRAM config error. "
8657 "Invalid link_config 0x%x"
8658 " speed_cap_mask 0x%x\n",
8659 bp->port.link_config,
8660 bp->link_params.speed_cap_mask);
8661 return;
8662 }
8663 break;
8665 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8666 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8667 case PORT_FEATURE_LINK_SPEED_10G_KR:
8668 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8669 bp->link_params.req_line_speed = SPEED_10000;
8670 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8671 ADVERTISED_FIBRE);
8672 } else {
8673 BNX2X_ERR("NVRAM config error. "
8674 "Invalid link_config 0x%x"
8675 " speed_cap_mask 0x%x\n",
8676 bp->port.link_config,
8677 bp->link_params.speed_cap_mask);
8678 return;
8679 }
8680 break;
8683 BNX2X_ERR("NVRAM config error. "
8684 "BAD link speed link_config 0x%x\n",
8685 bp->port.link_config);
8686 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8687 bp->port.advertising = bp->port.supported;
8691 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8692 PORT_FEATURE_FLOW_CONTROL_MASK);
8693 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8694 !(bp->port.supported & SUPPORTED_Autoneg))
8695 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8697 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8698 " advertising 0x%x\n",
8699 bp->link_params.req_line_speed,
8700 bp->link_params.req_duplex,
8701 bp->link_params.req_flow_ctrl, bp->port.advertising);
8704 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8706 mac_hi = cpu_to_be16(mac_hi);
8707 mac_lo = cpu_to_be32(mac_lo);
8708 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8709 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
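/* A minimal user-space sketch of the packing bnx2x_set_mac_buf() performs:
 * the upper two MAC bytes arrive in the low 16 bits of one shmem word and
 * the lower four in another, so converting each to big-endian and
 * concatenating yields the address in wire order.  htons()/htonl() stand in
 * for the kernel's cpu_to_be16()/cpu_to_be32(); the values are illustrative. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static void set_mac_buf(uint8_t *mac_buf, uint32_t mac_lo, uint16_t mac_hi)
{
	mac_hi = htons(mac_hi);
	mac_lo = htonl(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}

int main(void)
{
	uint8_t mac[6];

	set_mac_buf(mac, 0x33445566, 0x0011);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",	/* 00:11:33:44:55:66 */
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}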
8712 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8714 int port = BP_PORT(bp);
8720 bp->link_params.bp = bp;
8721 bp->link_params.port = port;
8723 bp->link_params.lane_config =
8724 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8725 bp->link_params.ext_phy_config =
8727 dev_info.port_hw_config[port].external_phy_config);
8728 /* BCM8727_NOC => BCM8727, no over-current */
8729 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8730 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8731 bp->link_params.ext_phy_config &=
8732 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8733 bp->link_params.ext_phy_config |=
8734 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8735 bp->link_params.feature_config_flags |=
8736 FEATURE_CONFIG_BCM8727_NOC;
8739 bp->link_params.speed_cap_mask =
8741 dev_info.port_hw_config[port].speed_capability_mask);
8743 bp->port.link_config =
8744 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8746 /* Get the XGXS rx and tx config for all 4 lanes */
8747 for (i = 0; i < 2; i++) {
8749 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8750 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8751 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8754 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8755 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8756 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8759 /* If the device is capable of WoL, set the default state according to the HW */
8762 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8763 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8764 (config & PORT_FEATURE_WOL_ENABLED));
8766 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8767 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8768 bp->link_params.lane_config,
8769 bp->link_params.ext_phy_config,
8770 bp->link_params.speed_cap_mask, bp->port.link_config);
8772 bp->link_params.switch_cfg |= (bp->port.link_config &
8773 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8774 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8776 bnx2x_link_settings_requested(bp);
8779 /* If connected directly, work with the internal PHY; otherwise work
8780 * with the external PHY */
8782 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8783 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8784 bp->mdio.prtad = bp->link_params.phy_addr;
8786 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8787 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8789 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8791 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8792 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8793 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8794 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8795 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8798 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8799 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8800 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8804 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8806 int func = BP_FUNC(bp);
8810 bnx2x_get_common_hwinfo(bp);
8814 if (CHIP_IS_E1H(bp)) {
8816 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8818 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8819 FUNC_MF_CFG_E1HOV_TAG_MASK);
8820 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8822 BNX2X_DEV_INFO("%s function mode\n",
8823 IS_E1HMF(bp) ? "multi" : "single");
8826 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8828 FUNC_MF_CFG_E1HOV_TAG_MASK);
8829 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8831 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8833 func, bp->e1hov, bp->e1hov);
8835 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8836 " aborting\n", func);
8841 BNX2X_ERR("!!! VN %d in single function mode,"
8842 " aborting\n", BP_E1HVN(bp));
8848 if (!BP_NOMCP(bp)) {
8849 bnx2x_get_port_hwinfo(bp);
8851 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8852 DRV_MSG_SEQ_NUMBER_MASK);
8853 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8857 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8858 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8859 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8860 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8861 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8862 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8863 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8864 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8865 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8866 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8867 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8869 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8877 /* only supposed to happen on emulation/FPGA */
8878 BNX2X_ERR("warning random MAC workaround active\n");
8879 random_ether_addr(bp->dev->dev_addr);
8880 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8886 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8888 int func = BP_FUNC(bp);
8892 /* Disable interrupt handling until HW is initialized */
8893 atomic_set(&bp->intr_sem, 1);
8894 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8896 mutex_init(&bp->port.phy_mutex);
8897 mutex_init(&bp->fw_mb_mutex);
8899 mutex_init(&bp->cnic_mutex);
8902 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8903 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8905 rc = bnx2x_get_hwinfo(bp);
8907 /* need to reset the chip if UNDI was active */
8909 bnx2x_undi_unload(bp);
8911 if (CHIP_REV_IS_FPGA(bp))
8912 printk(KERN_ERR PFX "FPGA detected\n");
8914 if (BP_NOMCP(bp) && (func == 0))
8916 "MCP disabled, must load devices in order!\n");
8918 /* Set multi queue mode */
8919 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8920 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8922 "Multi disabled since int_mode requested is not MSI-X\n");
8923 multi_mode = ETH_RSS_MODE_DISABLED;
8925 bp->multi_mode = multi_mode;
8930 bp->flags &= ~TPA_ENABLE_FLAG;
8931 bp->dev->features &= ~NETIF_F_LRO;
8933 bp->flags |= TPA_ENABLE_FLAG;
8934 bp->dev->features |= NETIF_F_LRO;
8938 bp->dropless_fc = 0;
8940 bp->dropless_fc = dropless_fc;
8944 bp->tx_ring_size = MAX_TX_AVAIL;
8945 bp->rx_ring_size = MAX_RX_AVAIL;
8949 /* make sure the defaults are rounded to the hardware coalescing granularity */
8950 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8951 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8953 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8954 bp->current_interval = (poll ? poll : timer_interval);
8956 init_timer(&bp->timer);
8957 bp->timer.expires = jiffies + bp->current_interval;
8958 bp->timer.data = (unsigned long) bp;
8959 bp->timer.function = bnx2x_timer;
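/* The tx_ticks/rx_ticks defaults above use the round-down-to-multiple idiom
 * (integer divide, then multiply) so the values land on the hardware tick
 * granularity of 4*BNX2X_BTR microseconds.  A sketch; BNX2X_BTR == 4 is an
 * assumption made here for illustration only. */
#include <stdio.h>

#define BNX2X_BTR 4

static unsigned round_to_btr(unsigned us)
{
	return (us / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
}

int main(void)
{
	printf("%u %u\n", round_to_btr(50), round_to_btr(25));
	/* with a granularity of 16 us this prints "48 16" */
	return 0;
}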
8965 /* ethtool service functions */
8968 /* All ethtool functions called with rtnl_lock */
8970 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8972 struct bnx2x *bp = netdev_priv(dev);
8974 cmd->supported = bp->port.supported;
8975 cmd->advertising = bp->port.advertising;
8977 if ((bp->state == BNX2X_STATE_OPEN) &&
8978 !(bp->flags & MF_FUNC_DIS) &&
8979 (bp->link_vars.link_up)) {
8980 cmd->speed = bp->link_vars.line_speed;
8981 cmd->duplex = bp->link_vars.duplex;
8986 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8987 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8988 if (vn_max_rate < cmd->speed)
8989 cmd->speed = vn_max_rate;
8996 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8998 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9000 switch (ext_phy_type) {
9001 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9002 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9004 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9005 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9006 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9007 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9008 cmd->port = PORT_FIBRE;
9011 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9012 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9013 cmd->port = PORT_TP;
9016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9017 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9018 bp->link_params.ext_phy_config);
9022 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9023 bp->link_params.ext_phy_config);
9027 cmd->port = PORT_TP;
9029 cmd->phy_address = bp->mdio.prtad;
9030 cmd->transceiver = XCVR_INTERNAL;
9032 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9033 cmd->autoneg = AUTONEG_ENABLE;
9035 cmd->autoneg = AUTONEG_DISABLE;
9040 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9041 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9042 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9043 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9044 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9045 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9046 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9051 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9053 struct bnx2x *bp = netdev_priv(dev);
9059 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9060 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9061 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9062 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9063 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9064 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9065 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9067 if (cmd->autoneg == AUTONEG_ENABLE) {
9068 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9069 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9073 /* advertise the requested speed and duplex if supported */
9074 cmd->advertising &= bp->port.supported;
9076 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9077 bp->link_params.req_duplex = DUPLEX_FULL;
9078 bp->port.advertising |= (ADVERTISED_Autoneg |
9081 } else { /* forced speed */
9082 /* advertise the requested speed and duplex if supported */
9083 switch (cmd->speed) {
9085 if (cmd->duplex == DUPLEX_FULL) {
9086 if (!(bp->port.supported &
9087 SUPPORTED_10baseT_Full)) {
9089 "10M full not supported\n");
9093 advertising = (ADVERTISED_10baseT_Full |
9096 if (!(bp->port.supported &
9097 SUPPORTED_10baseT_Half)) {
9099 "10M half not supported\n");
9103 advertising = (ADVERTISED_10baseT_Half |
9109 if (cmd->duplex == DUPLEX_FULL) {
9110 if (!(bp->port.supported &
9111 SUPPORTED_100baseT_Full)) {
9113 "100M full not supported\n");
9117 advertising = (ADVERTISED_100baseT_Full |
9120 if (!(bp->port.supported &
9121 SUPPORTED_100baseT_Half)) {
9123 "100M half not supported\n");
9127 advertising = (ADVERTISED_100baseT_Half |
9133 if (cmd->duplex != DUPLEX_FULL) {
9134 DP(NETIF_MSG_LINK, "1G half not supported\n");
9138 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9139 DP(NETIF_MSG_LINK, "1G full not supported\n");
9143 advertising = (ADVERTISED_1000baseT_Full |
9148 if (cmd->duplex != DUPLEX_FULL) {
9150 "2.5G half not supported\n");
9154 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9156 "2.5G full not supported\n");
9160 advertising = (ADVERTISED_2500baseX_Full |
9165 if (cmd->duplex != DUPLEX_FULL) {
9166 DP(NETIF_MSG_LINK, "10G half not supported\n");
9170 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9171 DP(NETIF_MSG_LINK, "10G full not supported\n");
9175 advertising = (ADVERTISED_10000baseT_Full |
9180 DP(NETIF_MSG_LINK, "Unsupported speed\n");
9184 bp->link_params.req_line_speed = cmd->speed;
9185 bp->link_params.req_duplex = cmd->duplex;
9186 bp->port.advertising = advertising;
9189 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9190 DP_LEVEL " req_duplex %d advertising 0x%x\n",
9191 bp->link_params.req_line_speed, bp->link_params.req_duplex,
9192 bp->port.advertising);
9194 if (netif_running(dev)) {
9195 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
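/* The forced-speed branch above is a table lookup in disguise: every legal
 * (speed, duplex) pair requires exactly one SUPPORTED_* capability bit, and
 * anything else is rejected.  A compact sketch of the same validation; the
 * bit assignments below are illustrative, not the kernel's SUPPORTED_*
 * values. */
#include <stdio.h>

enum { DUP_HALF, DUP_FULL };

static const struct {
	int speed, duplex;
	unsigned supported_bit;
} caps[] = {
	{ 10, DUP_FULL, 1u << 0 },    { 10, DUP_HALF, 1u << 1 },
	{ 100, DUP_FULL, 1u << 2 },   { 100, DUP_HALF, 1u << 3 },
	{ 1000, DUP_FULL, 1u << 4 },  { 2500, DUP_FULL, 1u << 5 },
	{ 10000, DUP_FULL, 1u << 6 },
};

static int speed_ok(unsigned supported, int speed, int duplex)
{
	unsigned i;

	for (i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
		if (caps[i].speed == speed && caps[i].duplex == duplex)
			return !!(supported & caps[i].supported_bit);
	return 0;	/* no half duplex at 1G and above, unknown speeds */
}

int main(void)
{
	printf("%d %d\n", speed_ok(1u << 4, 1000, DUP_FULL),
	       speed_ok(1u << 4, 1000, DUP_HALF));	/* prints "1 0" */
	return 0;
}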
9202 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9203 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9205 static int bnx2x_get_regs_len(struct net_device *dev)
9207 struct bnx2x *bp = netdev_priv(dev);
9208 int regdump_len = 0;
9211 if (CHIP_IS_E1(bp)) {
9212 for (i = 0; i < REGS_COUNT; i++)
9213 if (IS_E1_ONLINE(reg_addrs[i].info))
9214 regdump_len += reg_addrs[i].size;
9216 for (i = 0; i < WREGS_COUNT_E1; i++)
9217 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9218 regdump_len += wreg_addrs_e1[i].size *
9219 (1 + wreg_addrs_e1[i].read_regs_count);
9222 for (i = 0; i < REGS_COUNT; i++)
9223 if (IS_E1H_ONLINE(reg_addrs[i].info))
9224 regdump_len += reg_addrs[i].size;
9226 for (i = 0; i < WREGS_COUNT_E1H; i++)
9227 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9228 regdump_len += wreg_addrs_e1h[i].size *
9229 (1 + wreg_addrs_e1h[i].read_regs_count);
9232 regdump_len += sizeof(struct dump_hdr);
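/* bnx2x_get_regs_len() sums two kinds of table entries: a plain register
 * range contributes 'size' dwords, while a wreg entry describes a register
 * whose single write fans out to read_regs_count readable registers, hence
 * the (1 + read_regs_count) factor.  A sketch with an invented table: */
#include <stdio.h>

struct wreg { int size, read_regs_count; };

int main(void)
{
	static const struct wreg tbl[] = { { 2, 3 }, { 1, 7 } };
	int i, len = 0;

	for (i = 0; i < 2; i++)
		len += tbl[i].size * (1 + tbl[i].read_regs_count);
	printf("%d dwords\n", len);	/* 2*(1+3) + 1*(1+7) = 16 */
	return 0;
}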
9237 static void bnx2x_get_regs(struct net_device *dev,
9238 struct ethtool_regs *regs, void *_p)
9241 struct bnx2x *bp = netdev_priv(dev);
9242 struct dump_hdr dump_hdr = {0};
9245 memset(p, 0, regs->len);
9247 if (!netif_running(bp->dev))
9250 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9251 dump_hdr.dump_sign = dump_sign_all;
9252 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9253 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9254 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9255 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9256 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9258 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9259 p += dump_hdr.hdr_size + 1;
9261 if (CHIP_IS_E1(bp)) {
9262 for (i = 0; i < REGS_COUNT; i++)
9263 if (IS_E1_ONLINE(reg_addrs[i].info))
9264 for (j = 0; j < reg_addrs[i].size; j++)
9266 reg_addrs[i].addr + j*4);
9269 for (i = 0; i < REGS_COUNT; i++)
9270 if (IS_E1H_ONLINE(reg_addrs[i].info))
9271 for (j = 0; j < reg_addrs[i].size; j++)
9273 reg_addrs[i].addr + j*4);
9277 #define PHY_FW_VER_LEN 10
9279 static void bnx2x_get_drvinfo(struct net_device *dev,
9280 struct ethtool_drvinfo *info)
9282 struct bnx2x *bp = netdev_priv(dev);
9283 u8 phy_fw_ver[PHY_FW_VER_LEN];
9285 strcpy(info->driver, DRV_MODULE_NAME);
9286 strcpy(info->version, DRV_MODULE_VERSION);
9288 phy_fw_ver[0] = '\0';
9290 bnx2x_acquire_phy_lock(bp);
9291 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9292 (bp->state != BNX2X_STATE_CLOSED),
9293 phy_fw_ver, PHY_FW_VER_LEN);
9294 bnx2x_release_phy_lock(bp);
9297 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9298 (bp->common.bc_ver & 0xff0000) >> 16,
9299 (bp->common.bc_ver & 0xff00) >> 8,
9300 (bp->common.bc_ver & 0xff),
9301 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9302 strcpy(info->bus_info, pci_name(bp->pdev));
9303 info->n_stats = BNX2X_NUM_STATS;
9304 info->testinfo_len = BNX2X_NUM_TESTS;
9305 info->eedump_len = bp->common.flash_size;
9306 info->regdump_len = bnx2x_get_regs_len(dev);
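/* The bootcode version is packed one byte per field into a 24-bit value,
 * so formatting it is plain shift-and-mask, as in the snprintf() above.
 * The value used here is made up for illustration: */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bc_ver = 0x070a03;	/* example: major 7, minor 10, patch 3 */

	printf("BC:%u.%u.%u\n", (unsigned)((bc_ver >> 16) & 0xff),
	       (unsigned)((bc_ver >> 8) & 0xff),
	       (unsigned)(bc_ver & 0xff));	/* prints "BC:7.10.3" */
	return 0;
}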
9309 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9311 struct bnx2x *bp = netdev_priv(dev);
9313 if (bp->flags & NO_WOL_FLAG) {
9317 wol->supported = WAKE_MAGIC;
9319 wol->wolopts = WAKE_MAGIC;
9323 memset(&wol->sopass, 0, sizeof(wol->sopass));
9326 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9328 struct bnx2x *bp = netdev_priv(dev);
9330 if (wol->wolopts & ~WAKE_MAGIC)
9333 if (wol->wolopts & WAKE_MAGIC) {
9334 if (bp->flags & NO_WOL_FLAG)
9344 static u32 bnx2x_get_msglevel(struct net_device *dev)
9346 struct bnx2x *bp = netdev_priv(dev);
9348 return bp->msglevel;
9351 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9353 struct bnx2x *bp = netdev_priv(dev);
9355 if (capable(CAP_NET_ADMIN))
9356 bp->msglevel = level;
9359 static int bnx2x_nway_reset(struct net_device *dev)
9361 struct bnx2x *bp = netdev_priv(dev);
9366 if (netif_running(dev)) {
9367 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9374 static u32 bnx2x_get_link(struct net_device *dev)
9376 struct bnx2x *bp = netdev_priv(dev);
9378 if (bp->flags & MF_FUNC_DIS)
9381 return bp->link_vars.link_up;
9384 static int bnx2x_get_eeprom_len(struct net_device *dev)
9386 struct bnx2x *bp = netdev_priv(dev);
9388 return bp->common.flash_size;
9391 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9393 int port = BP_PORT(bp);
9397 /* adjust timeout for emulation/FPGA */
9398 count = NVRAM_TIMEOUT_COUNT;
9399 if (CHIP_REV_IS_SLOW(bp))
9402 /* request access to nvram interface */
9403 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9404 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9406 for (i = 0; i < count*10; i++) {
9407 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9408 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9414 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9415 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9422 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9424 int port = BP_PORT(bp);
9428 /* adjust timeout for emulation/FPGA */
9429 count = NVRAM_TIMEOUT_COUNT;
9430 if (CHIP_REV_IS_SLOW(bp))
9433 /* relinquish nvram interface */
9434 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9435 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9437 for (i = 0; i < count*10; i++) {
9438 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9439 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9445 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9446 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9453 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9457 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9459 /* enable both bits, even on read */
9460 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9461 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9462 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9465 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9469 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9471 /* disable both bits, even after read */
9472 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9473 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9474 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9477 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9483 /* build the command word */
9484 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9486 /* need to clear DONE bit separately */
9487 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9489 /* address of the NVRAM to read from */
9490 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9491 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9493 /* issue a read command */
9494 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9496 /* adjust timeout for emulation/FPGA */
9497 count = NVRAM_TIMEOUT_COUNT;
9498 if (CHIP_REV_IS_SLOW(bp))
9501 /* wait for completion */
9504 for (i = 0; i < count; i++) {
9506 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9508 if (val & MCPR_NVM_COMMAND_DONE) {
9509 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9510 /* we read nvram data in cpu order,
9511 * but ethtool sees it as an array of bytes;
9512 * converting to big-endian does the work */
9513 *ret_val = cpu_to_be32(val);
9522 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9529 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9531 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9536 if (offset + buf_size > bp->common.flash_size) {
9537 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9538 " buf_size (0x%x) > flash_size (0x%x)\n",
9539 offset, buf_size, bp->common.flash_size);
9543 /* request access to nvram interface */
9544 rc = bnx2x_acquire_nvram_lock(bp);
9548 /* enable access to nvram interface */
9549 bnx2x_enable_nvram_access(bp);
9551 /* read the first word(s) */
9552 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9553 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9554 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9555 memcpy(ret_buf, &val, 4);
9557 /* advance to the next dword */
9558 offset += sizeof(u32);
9559 ret_buf += sizeof(u32);
9560 buf_size -= sizeof(u32);
9561 cmd_flags = 0;
9564 if (rc == 0) {
9565 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9566 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9567 memcpy(ret_buf, &val, 4);
9570 /* disable access to nvram interface */
9571 bnx2x_disable_nvram_access(bp);
9572 bnx2x_release_nvram_lock(bp);
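/* The read path returns each dword via cpu_to_be32() so the byte ethtool
 * sees at buffer index 0 is the byte at the lowest flash address on any
 * host.  A demonstration; htonl() is the user-space analogue of
 * cpu_to_be32(): */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t raw = 0x669955aa;	/* dword as read, in cpu order */
	uint32_t be = htonl(raw);
	uint8_t buf[4];

	memcpy(buf, &be, 4);
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	/* prints "66 99 55 aa" regardless of host endianness */
	return 0;
}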
9577 static int bnx2x_get_eeprom(struct net_device *dev,
9578 struct ethtool_eeprom *eeprom, u8 *eebuf)
9580 struct bnx2x *bp = netdev_priv(dev);
9583 if (!netif_running(dev))
9586 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9587 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9588 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9589 eeprom->len, eeprom->len);
9591 /* parameters already validated in ethtool_get_eeprom */
9593 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9598 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9603 /* build the command word */
9604 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9606 /* need to clear DONE bit separately */
9607 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9609 /* write the data */
9610 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9612 /* address of the NVRAM to write to */
9613 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9614 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9616 /* issue the write command */
9617 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9619 /* adjust timeout for emulation/FPGA */
9620 count = NVRAM_TIMEOUT_COUNT;
9621 if (CHIP_REV_IS_SLOW(bp))
9624 /* wait for completion */
9626 for (i = 0; i < count; i++) {
9628 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9629 if (val & MCPR_NVM_COMMAND_DONE) {
9638 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
9640 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9648 if (offset + buf_size > bp->common.flash_size) {
9649 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9650 " buf_size (0x%x) > flash_size (0x%x)\n",
9651 offset, buf_size, bp->common.flash_size);
9655 /* request access to nvram interface */
9656 rc = bnx2x_acquire_nvram_lock(bp);
9660 /* enable access to nvram interface */
9661 bnx2x_enable_nvram_access(bp);
9663 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9664 align_offset = (offset & ~0x03);
9665 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9668 val &= ~(0xff << BYTE_OFFSET(offset));
9669 val |= (*data_buf << BYTE_OFFSET(offset));
9671 /* nvram data is returned as an array of bytes
9672 * convert it back to cpu order */
9673 val = be32_to_cpu(val);
9675 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9679 /* disable access to nvram interface */
9680 bnx2x_disable_nvram_access(bp);
9681 bnx2x_release_nvram_lock(bp);
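/* bnx2x_nvram_write1() implements a one-byte write as a read-modify-write
 * of the containing aligned dword, with BYTE_OFFSET() turning the byte
 * address into a bit shift.  The merge step in isolation (values are
 * illustrative): */
#include <stdio.h>
#include <stdint.h>

#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))

int main(void)
{
	uint32_t val = 0x11223344;	/* dword read back from flash */
	uint32_t offset = 0x102;	/* byte address being patched */
	uint8_t data = 0xab;

	val &= ~(0xffu << BYTE_OFFSET(offset));
	val |= (uint32_t)data << BYTE_OFFSET(offset);
	printf("0x%08x\n", val);	/* prints 0x11ab3344 */
	return 0;
}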
9686 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9694 if (buf_size == 1) /* ethtool */
9695 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9697 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9699 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9704 if (offset + buf_size > bp->common.flash_size) {
9705 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9706 " buf_size (0x%x) > flash_size (0x%x)\n",
9707 offset, buf_size, bp->common.flash_size);
9711 /* request access to nvram interface */
9712 rc = bnx2x_acquire_nvram_lock(bp);
9716 /* enable access to nvram interface */
9717 bnx2x_enable_nvram_access(bp);
9720 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9721 while ((written_so_far < buf_size) && (rc == 0)) {
9722 if (written_so_far == (buf_size - sizeof(u32)))
9723 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9724 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9725 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9726 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9727 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9729 memcpy(&val, data_buf, 4);
9731 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9733 /* advance to the next dword */
9734 offset += sizeof(u32);
9735 data_buf += sizeof(u32);
9736 written_so_far += sizeof(u32);
9740 /* disable access to nvram interface */
9741 bnx2x_disable_nvram_access(bp);
9742 bnx2x_release_nvram_lock(bp);
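/* The write loop above also breaks bursts at NVRAM page boundaries: a
 * dword that ends a page is tagged LAST and one that starts a page is
 * tagged FIRST.  A sketch of the flag selection; the 256-byte page size
 * and flag values are assumptions for illustration: */
#include <stdio.h>

#define NV_PAGE   256
#define CMD_FIRST 0x1
#define CMD_LAST  0x2

static unsigned burst_flags(unsigned offset, unsigned written, unsigned total)
{
	unsigned flags = 0;

	if (written == 0 || offset % NV_PAGE == 0)
		flags |= CMD_FIRST;
	if (written + 4 == total || (offset + 4) % NV_PAGE == 0)
		flags |= CMD_LAST;
	return flags;
}

int main(void)
{
	/* an 8-byte write straddling offset 256 becomes two bursts */
	printf("0x%x 0x%x\n", burst_flags(252, 0, 8), burst_flags(256, 4, 8));
	/* prints "0x3 0x3": each dword is FIRST and LAST of its own burst */
	return 0;
}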
9747 static int bnx2x_set_eeprom(struct net_device *dev,
9748 struct ethtool_eeprom *eeprom, u8 *eebuf)
9750 struct bnx2x *bp = netdev_priv(dev);
9751 int port = BP_PORT(bp);
9754 if (!netif_running(dev))
9757 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9758 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9759 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9760 eeprom->len, eeprom->len);
9762 /* parameters already validated in ethtool_set_eeprom */
9764 /* PHY eeprom can be accessed only by the PMF */
9765 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9766 !bp->port.pmf)
9767 return -EINVAL;
9769 if (eeprom->magic == 0x50485950) {
9770 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9771 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9773 bnx2x_acquire_phy_lock(bp);
9774 rc |= bnx2x_link_reset(&bp->link_params,
9776 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9777 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9778 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9779 MISC_REGISTERS_GPIO_HIGH, port);
9780 bnx2x_release_phy_lock(bp);
9781 bnx2x_link_report(bp);
9783 } else if (eeprom->magic == 0x50485952) {
9784 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9785 if (bp->state == BNX2X_STATE_OPEN) {
9786 bnx2x_acquire_phy_lock(bp);
9787 rc |= bnx2x_link_reset(&bp->link_params,
9790 rc |= bnx2x_phy_init(&bp->link_params,
9792 bnx2x_release_phy_lock(bp);
9793 bnx2x_calc_fc_adv(bp);
9795 } else if (eeprom->magic == 0x53985943) {
9796 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9797 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9798 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9800 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9802 /* DSP Remove Download Mode */
9803 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9804 MISC_REGISTERS_GPIO_LOW, port);
9806 bnx2x_acquire_phy_lock(bp);
9808 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9810 /* wait 0.5 sec to allow it to run */
9811 msleep(500);
9812 bnx2x_ext_phy_hw_reset(bp, port);
9814 bnx2x_release_phy_lock(bp);
9817 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
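/* The eeprom->magic selectors above are ASCII tags packed big-endian:
 * 0x50485950 spells "PHYP" and 0x50485952 "PHYR".  A quick check: */
#include <stdio.h>

int main(void)
{
	unsigned magic = 0x50485950;

	printf("%c%c%c%c\n", magic >> 24, (magic >> 16) & 0xff,
	       (magic >> 8) & 0xff, magic & 0xff);	/* prints "PHYP" */
	return 0;
}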
9822 static int bnx2x_get_coalesce(struct net_device *dev,
9823 struct ethtool_coalesce *coal)
9825 struct bnx2x *bp = netdev_priv(dev);
9827 memset(coal, 0, sizeof(struct ethtool_coalesce));
9829 coal->rx_coalesce_usecs = bp->rx_ticks;
9830 coal->tx_coalesce_usecs = bp->tx_ticks;
9835 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9836 static int bnx2x_set_coalesce(struct net_device *dev,
9837 struct ethtool_coalesce *coal)
9839 struct bnx2x *bp = netdev_priv(dev);
9841 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9842 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9843 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9845 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9846 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9847 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9849 if (netif_running(dev))
9850 bnx2x_update_coalesce(bp);
9855 static void bnx2x_get_ringparam(struct net_device *dev,
9856 struct ethtool_ringparam *ering)
9858 struct bnx2x *bp = netdev_priv(dev);
9860 ering->rx_max_pending = MAX_RX_AVAIL;
9861 ering->rx_mini_max_pending = 0;
9862 ering->rx_jumbo_max_pending = 0;
9864 ering->rx_pending = bp->rx_ring_size;
9865 ering->rx_mini_pending = 0;
9866 ering->rx_jumbo_pending = 0;
9868 ering->tx_max_pending = MAX_TX_AVAIL;
9869 ering->tx_pending = bp->tx_ring_size;
9872 static int bnx2x_set_ringparam(struct net_device *dev,
9873 struct ethtool_ringparam *ering)
9875 struct bnx2x *bp = netdev_priv(dev);
9878 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9879 (ering->tx_pending > MAX_TX_AVAIL) ||
9880 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9883 bp->rx_ring_size = ering->rx_pending;
9884 bp->tx_ring_size = ering->tx_pending;
9886 if (netif_running(dev)) {
9887 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9888 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9894 static void bnx2x_get_pauseparam(struct net_device *dev,
9895 struct ethtool_pauseparam *epause)
9897 struct bnx2x *bp = netdev_priv(dev);
9899 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9900 BNX2X_FLOW_CTRL_AUTO) &&
9901 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9903 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9904 BNX2X_FLOW_CTRL_RX);
9905 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9906 BNX2X_FLOW_CTRL_TX);
9908 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9909 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9910 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9913 static int bnx2x_set_pauseparam(struct net_device *dev,
9914 struct ethtool_pauseparam *epause)
9916 struct bnx2x *bp = netdev_priv(dev);
9921 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9922 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9923 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9925 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9927 if (epause->rx_pause)
9928 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9930 if (epause->tx_pause)
9931 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9933 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9934 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9936 if (epause->autoneg) {
9937 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9938 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9942 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9943 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9947 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9949 if (netif_running(dev)) {
9950 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9957 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9959 struct bnx2x *bp = netdev_priv(dev);
9963 /* TPA requires Rx CSUM offloading */
9964 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9965 if (!(dev->features & NETIF_F_LRO)) {
9966 dev->features |= NETIF_F_LRO;
9967 bp->flags |= TPA_ENABLE_FLAG;
9971 } else if (dev->features & NETIF_F_LRO) {
9972 dev->features &= ~NETIF_F_LRO;
9973 bp->flags &= ~TPA_ENABLE_FLAG;
9977 if (changed && netif_running(dev)) {
9978 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9979 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9985 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9987 struct bnx2x *bp = netdev_priv(dev);
9992 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9994 struct bnx2x *bp = netdev_priv(dev);
9999 /* Disable TPA when Rx CSUM is disabled; otherwise all
10000 TPA'ed packets will be discarded due to a wrong TCP CSUM */
10002 u32 flags = ethtool_op_get_flags(dev);
10004 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10010 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10013 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10014 dev->features |= NETIF_F_TSO6;
10016 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10017 dev->features &= ~NETIF_F_TSO6;
10023 static const struct {
10024 char string[ETH_GSTRING_LEN];
10025 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10026 { "register_test (offline)" },
10027 { "memory_test (offline)" },
10028 { "loopback_test (offline)" },
10029 { "nvram_test (online)" },
10030 { "interrupt_test (online)" },
10031 { "link_test (online)" },
10032 { "idle check (online)" }
10035 static int bnx2x_test_registers(struct bnx2x *bp)
10037 int idx, i, rc = -ENODEV;
10039 int port = BP_PORT(bp);
10040 static const struct {
10045 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10046 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10047 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10048 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10049 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10050 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10051 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10052 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10053 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10054 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10055 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10056 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10057 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10058 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10059 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10060 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10061 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10062 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
10063 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10064 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10065 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10066 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10067 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10068 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10069 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10070 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10071 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10072 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10073 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10074 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10075 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10076 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10077 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10078 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10079 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10080 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10081 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10083 { 0xffffffff, 0, 0x00000000 }
10086 if (!netif_running(bp->dev))
10089 /* Run the test twice:
10090 first writing 0x00000000, then writing 0xffffffff */
10091 for (idx = 0; idx < 2; idx++) {
10098 wr_val = 0xffffffff;
10102 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10103 u32 offset, mask, save_val, val;
10105 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10106 mask = reg_tbl[i].mask;
10108 save_val = REG_RD(bp, offset);
10110 REG_WR(bp, offset, wr_val);
10111 val = REG_RD(bp, offset);
10113 /* Restore the original register's value */
10114 REG_WR(bp, offset, save_val);
10116 /* verify the value is as expected */
10117 if ((val & mask) != (wr_val & mask))
10118 goto test_reg_exit;
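/* The register test writes a pattern, reads it back through the mask of
 * implemented bits and restores the saved value; running once with
 * all-zeros and once with all-ones catches stuck-at-one and stuck-at-zero
 * bits alike.  The comparison step in isolation: */
#include <stdio.h>
#include <stdint.h>

static int reg_ok(uint32_t readback, uint32_t wrote, uint32_t mask)
{
	/* only bits inside the mask exist in hardware */
	return (readback & mask) == (wrote & mask);
}

int main(void)
{
	/* a healthy 10-bit register latches both patterns */
	printf("%d %d\n", reg_ok(0x000, 0x00000000, 0x3ff),
	       reg_ok(0x3ff, 0xffffffff, 0x3ff));	/* prints "1 1" */
	return 0;
}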
10128 static int bnx2x_test_memory(struct bnx2x *bp)
10130 int i, j, rc = -ENODEV;
10132 static const struct {
10136 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10137 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10138 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10139 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10140 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10141 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10142 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10146 static const struct {
10152 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10153 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10154 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10155 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10156 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10157 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10159 { NULL, 0xffffffff, 0, 0 }
10162 if (!netif_running(bp->dev))
10165 /* Go through all the memories */
10166 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10167 for (j = 0; j < mem_tbl[i].size; j++)
10168 REG_RD(bp, mem_tbl[i].offset + j*4);
10170 /* Check the parity status */
10171 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10172 val = REG_RD(bp, prty_tbl[i].offset);
10173 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10174 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10176 "%s is 0x%x\n", prty_tbl[i].name, val);
10177 goto test_mem_exit;
10187 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10192 while (bnx2x_link_test(bp) && cnt--)
10196 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10198 unsigned int pkt_size, num_pkts, i;
10199 struct sk_buff *skb;
10200 unsigned char *packet;
10201 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10202 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10203 u16 tx_start_idx, tx_idx;
10204 u16 rx_start_idx, rx_idx;
10205 u16 pkt_prod, bd_prod;
10206 struct sw_tx_bd *tx_buf;
10207 struct eth_tx_start_bd *tx_start_bd;
10208 struct eth_tx_parse_bd *pbd = NULL;
10209 dma_addr_t mapping;
10210 union eth_rx_cqe *cqe;
10212 struct sw_rx_bd *rx_buf;
10216 /* check the loopback mode */
10217 switch (loopback_mode) {
10218 case BNX2X_PHY_LOOPBACK:
10219 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10222 case BNX2X_MAC_LOOPBACK:
10223 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10224 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10230 /* prepare the loopback packet */
10231 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10232 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10233 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10236 goto test_loopback_exit;
10238 packet = skb_put(skb, pkt_size);
10239 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10240 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10241 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10242 for (i = ETH_HLEN; i < pkt_size; i++)
10243 packet[i] = (unsigned char) (i & 0xff);
10245 /* send the loopback packet */
10247 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10248 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10250 pkt_prod = fp_tx->tx_pkt_prod++;
10251 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10252 tx_buf->first_bd = fp_tx->tx_bd_prod;
10256 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10257 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10258 mapping = pci_map_single(bp->pdev, skb->data,
10259 skb_headlen(skb), PCI_DMA_TODEVICE);
10260 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10261 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10262 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10263 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10264 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10265 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10266 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10267 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10269 /* turn on parsing and get a BD */
10270 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10271 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10273 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10277 fp_tx->tx_db.data.prod += 2;
10279 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10284 fp_tx->tx_bd_prod += 2; /* start + pbd */
10288 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10289 if (tx_idx != tx_start_idx + num_pkts)
10290 goto test_loopback_exit;
10292 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10293 if (rx_idx != rx_start_idx + num_pkts)
10294 goto test_loopback_exit;
10296 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10297 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10298 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10299 goto test_loopback_rx_exit;
10301 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10302 if (len != pkt_size)
10303 goto test_loopback_rx_exit;
10305 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10307 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10308 for (i = ETH_HLEN; i < pkt_size; i++)
10309 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10310 goto test_loopback_rx_exit;
10314 test_loopback_rx_exit:
10316 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10317 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10318 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10319 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10321 /* Update producers */
10322 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10323 fp_rx->rx_sge_prod);
10325 test_loopback_exit:
10326 bp->link_params.loopback_mode = LOOPBACK_NONE;
10331 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10335 if (!netif_running(bp->dev))
10336 return BNX2X_LOOPBACK_FAILED;
10338 bnx2x_netif_stop(bp, 1);
10339 bnx2x_acquire_phy_lock(bp);
10341 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10343 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10344 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10347 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10349 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10350 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10353 bnx2x_release_phy_lock(bp);
10354 bnx2x_netif_start(bp);
10359 #define CRC32_RESIDUAL 0xdebb20e3
10361 static int bnx2x_test_nvram(struct bnx2x *bp)
10363 static const struct {
10367 { 0, 0x14 }, /* bootstrap */
10368 { 0x14, 0xec }, /* dir */
10369 { 0x100, 0x350 }, /* manuf_info */
10370 { 0x450, 0xf0 }, /* feature_info */
10371 { 0x640, 0x64 }, /* upgrade_key_info */
10373 { 0x708, 0x70 }, /* manuf_key_info */
10377 __be32 buf[0x350 / 4];
10378 u8 *data = (u8 *)buf;
10382 rc = bnx2x_nvram_read(bp, 0, data, 4);
10384 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10385 goto test_nvram_exit;
10388 magic = be32_to_cpu(buf[0]);
10389 if (magic != 0x669955aa) {
10390 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10392 goto test_nvram_exit;
10395 for (i = 0; nvram_tbl[i].size; i++) {
10397 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10398 nvram_tbl[i].size);
10400 DP(NETIF_MSG_PROBE,
10401 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10402 goto test_nvram_exit;
10405 crc = ether_crc_le(nvram_tbl[i].size, data);
10406 if (crc != CRC32_RESIDUAL) {
10407 DP(NETIF_MSG_PROBE,
10408 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10410 goto test_nvram_exit;
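/* bnx2x_test_nvram() leans on the CRC-32 residual property: when a region
 * is stored with its (complemented, little-endian) CRC appended, the CRC
 * register computed over data-plus-CRC is always 0xdebb20e3, so no
 * per-region expected value is needed.  A self-contained demonstration;
 * crc32_le_reg() mirrors what the kernel's ether_crc_le() computes: */
#include <stdio.h>
#include <stdint.h>

static uint32_t crc32_le_reg(const uint8_t *p, int len)
{
	uint32_t crc = 0xffffffff;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t blk[16] = "nvram region";	/* 12 data bytes + 4 CRC bytes */
	uint32_t crc = ~crc32_le_reg(blk, 12);	/* CRC as it would be stored */
	int i;

	for (i = 0; i < 4; i++)			/* append little-endian */
		blk[12 + i] = (uint8_t)(crc >> (8 * i));
	printf("0x%08x\n", crc32_le_reg(blk, 16));	/* prints 0xdebb20e3 */
	return 0;
}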
10418 static int bnx2x_test_intr(struct bnx2x *bp)
10420 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10423 if (!netif_running(bp->dev))
10426 config->hdr.length = 0;
10427 if (CHIP_IS_E1(bp))
10428 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10430 config->hdr.offset = BP_FUNC(bp);
10431 config->hdr.client_id = bp->fp->cl_id;
10432 config->hdr.reserved1 = 0;
10434 bp->set_mac_pending++;
10436 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10437 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10438 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10440 for (i = 0; i < 10; i++) {
10441 if (!bp->set_mac_pending)
10444 msleep_interruptible(10);
10453 static void bnx2x_self_test(struct net_device *dev,
10454 struct ethtool_test *etest, u64 *buf)
10456 struct bnx2x *bp = netdev_priv(dev);
10458 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10460 if (!netif_running(dev))
10463 /* offline tests are not supported in MF mode */
10465 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10467 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10468 int port = BP_PORT(bp);
10472 /* save current value of input enable for TX port IF */
10473 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10474 /* disable input for TX port IF */
10475 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10477 link_up = (bnx2x_link_test(bp) == 0);
10478 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10479 bnx2x_nic_load(bp, LOAD_DIAG);
10480 /* wait until link state is restored */
10481 bnx2x_wait_for_link(bp, link_up);
10483 if (bnx2x_test_registers(bp) != 0) {
10485 etest->flags |= ETH_TEST_FL_FAILED;
10487 if (bnx2x_test_memory(bp) != 0) {
10489 etest->flags |= ETH_TEST_FL_FAILED;
10491 buf[2] = bnx2x_test_loopback(bp, link_up);
10493 etest->flags |= ETH_TEST_FL_FAILED;
10495 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10497 /* restore input for TX port IF */
10498 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10500 bnx2x_nic_load(bp, LOAD_NORMAL);
10501 /* wait until link state is restored */
10502 bnx2x_wait_for_link(bp, link_up);
10504 if (bnx2x_test_nvram(bp) != 0) {
10506 etest->flags |= ETH_TEST_FL_FAILED;
10508 if (bnx2x_test_intr(bp) != 0) {
10510 etest->flags |= ETH_TEST_FL_FAILED;
10513 if (bnx2x_link_test(bp) != 0) {
10515 etest->flags |= ETH_TEST_FL_FAILED;
10518 #ifdef BNX2X_EXTRA_DEBUG
10519 bnx2x_panic_dump(bp);
10523 static const struct {
10526 u8 string[ETH_GSTRING_LEN];
10527 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10528 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10529 { Q_STATS_OFFSET32(error_bytes_received_hi),
10530 8, "[%d]: rx_error_bytes" },
10531 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10532 8, "[%d]: rx_ucast_packets" },
10533 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10534 8, "[%d]: rx_mcast_packets" },
10535 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10536 8, "[%d]: rx_bcast_packets" },
10537 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10538 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10539 4, "[%d]: rx_phy_ip_err_discards"},
10540 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10541 4, "[%d]: rx_skb_alloc_discard" },
10542 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10544 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10545 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10546 8, "[%d]: tx_packets" }
10549 static const struct {
10553 #define STATS_FLAGS_PORT 1
10554 #define STATS_FLAGS_FUNC 2
10555 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10556 u8 string[ETH_GSTRING_LEN];
10557 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10558 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10559 8, STATS_FLAGS_BOTH, "rx_bytes" },
10560 { STATS_OFFSET32(error_bytes_received_hi),
10561 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10562 { STATS_OFFSET32(total_unicast_packets_received_hi),
10563 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10564 { STATS_OFFSET32(total_multicast_packets_received_hi),
10565 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10566 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10567 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10568 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10569 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10570 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10571 8, STATS_FLAGS_PORT, "rx_align_errors" },
10572 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10573 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10574 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10575 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10576 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10577 8, STATS_FLAGS_PORT, "rx_fragments" },
10578 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10579 8, STATS_FLAGS_PORT, "rx_jabbers" },
10580 { STATS_OFFSET32(no_buff_discard_hi),
10581 8, STATS_FLAGS_BOTH, "rx_discards" },
10582 { STATS_OFFSET32(mac_filter_discard),
10583 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10584 { STATS_OFFSET32(xxoverflow_discard),
10585 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10586 { STATS_OFFSET32(brb_drop_hi),
10587 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10588 { STATS_OFFSET32(brb_truncate_hi),
10589 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10590 { STATS_OFFSET32(pause_frames_received_hi),
10591 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10592 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10593 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10594 { STATS_OFFSET32(nig_timer_max),
10595 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10596 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10597 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10598 { STATS_OFFSET32(rx_skb_alloc_failed),
10599 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10600 { STATS_OFFSET32(hw_csum_err),
10601 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10603 { STATS_OFFSET32(total_bytes_transmitted_hi),
10604 8, STATS_FLAGS_BOTH, "tx_bytes" },
10605 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10606 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10607 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10608 8, STATS_FLAGS_BOTH, "tx_packets" },
10609 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10610 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10611 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10612 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10613 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10614 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10615 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10616 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10617 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10618 8, STATS_FLAGS_PORT, "tx_deferred" },
10619 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10620 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10621 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10622 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10623 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10624 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10625 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10626 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10627 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10628 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10629 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10630 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10631 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10632 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10633 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10634 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10635 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10636 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10637 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10638 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10639 { STATS_OFFSET32(pause_frames_sent_hi),
10640 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10643 #define IS_PORT_STAT(i) \
10644 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10645 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10646 #define IS_E1HMF_MODE_STAT(bp) \
10647 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10649 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10651 struct bnx2x *bp = netdev_priv(dev);
10654 switch (stringset) {
10656 if (is_multi(bp)) {
10657 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10658 if (!IS_E1HMF_MODE_STAT(bp))
10659 num_stats += BNX2X_NUM_STATS;
10661 if (IS_E1HMF_MODE_STAT(bp)) {
10663 for (i = 0; i < BNX2X_NUM_STATS; i++)
10664 if (IS_FUNC_STAT(i))
10667 num_stats = BNX2X_NUM_STATS;
10672 return BNX2X_NUM_TESTS;
10679 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10681 struct bnx2x *bp = netdev_priv(dev);
10684 switch (stringset) {
10686 if (is_multi(bp)) {
10688 for_each_queue(bp, i) {
10689 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10690 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10691 bnx2x_q_stats_arr[j].string, i);
10692 k += BNX2X_NUM_Q_STATS;
10694 if (IS_E1HMF_MODE_STAT(bp))
10696 for (j = 0; j < BNX2X_NUM_STATS; j++)
10697 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10698 bnx2x_stats_arr[j].string);
10700 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10701 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10703 strcpy(buf + j*ETH_GSTRING_LEN,
10704 bnx2x_stats_arr[i].string);
10711 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10716 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10717 struct ethtool_stats *stats, u64 *buf)
10719 struct bnx2x *bp = netdev_priv(dev);
10720 u32 *hw_stats, *offset;
10723 if (is_multi(bp)) {
10725 for_each_queue(bp, i) {
10726 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10727 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10728 if (bnx2x_q_stats_arr[j].size == 0) {
10729 /* skip this counter */
10733 offset = (hw_stats +
10734 bnx2x_q_stats_arr[j].offset);
10735 if (bnx2x_q_stats_arr[j].size == 4) {
10736 /* 4-byte counter */
10737 buf[k + j] = (u64) *offset;
10740 /* 8-byte counter */
10741 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10743 k += BNX2X_NUM_Q_STATS;
10745 if (IS_E1HMF_MODE_STAT(bp))
10747 hw_stats = (u32 *)&bp->eth_stats;
10748 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10749 if (bnx2x_stats_arr[j].size == 0) {
10750 /* skip this counter */
10754 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10755 if (bnx2x_stats_arr[j].size == 4) {
10756 /* 4-byte counter */
10757 buf[k + j] = (u64) *offset;
10760 /* 8-byte counter */
10761 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10764 hw_stats = (u32 *)&bp->eth_stats;
10765 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10766 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10768 if (bnx2x_stats_arr[i].size == 0) {
10769 /* skip this counter */
10774 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10775 if (bnx2x_stats_arr[i].size == 4) {
10776 /* 4-byte counter */
10777 buf[j] = (u64) *offset;
10781 /* 8-byte counter */
10782 buf[j] = HILO_U64(*offset, *(offset + 1));
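/* The statistics blocks store 64-bit counters as {hi, lo} pairs of 32-bit
 * words; HILO_U64() above splices them back together.  The equivalent
 * arithmetic in user space: */
#include <stdio.h>
#include <stdint.h>

#define HILO_U64(hi, lo) (((uint64_t)(hi) << 32) + (lo))

int main(void)
{
	uint32_t ctr[2] = { 0x1, 0x80000000 };	/* hi word, lo word */

	printf("%llu\n", (unsigned long long)HILO_U64(ctr[0], ctr[1]));
	/* prints 6442450944, i.e. 0x180000000 */
	return 0;
}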
10788 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10790 struct bnx2x *bp = netdev_priv(dev);
10793 if (!netif_running(dev))
10802 for (i = 0; i < (data * 2); i++) {
10804 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10807 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10809 msleep_interruptible(500);
10810 if (signal_pending(current))
10814 if (bp->link_vars.link_up)
10815 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10816 bp->link_vars.line_speed);
10821 static const struct ethtool_ops bnx2x_ethtool_ops = {
10822 .get_settings = bnx2x_get_settings,
10823 .set_settings = bnx2x_set_settings,
10824 .get_drvinfo = bnx2x_get_drvinfo,
10825 .get_regs_len = bnx2x_get_regs_len,
10826 .get_regs = bnx2x_get_regs,
10827 .get_wol = bnx2x_get_wol,
10828 .set_wol = bnx2x_set_wol,
10829 .get_msglevel = bnx2x_get_msglevel,
10830 .set_msglevel = bnx2x_set_msglevel,
10831 .nway_reset = bnx2x_nway_reset,
10832 .get_link = bnx2x_get_link,
10833 .get_eeprom_len = bnx2x_get_eeprom_len,
10834 .get_eeprom = bnx2x_get_eeprom,
10835 .set_eeprom = bnx2x_set_eeprom,
10836 .get_coalesce = bnx2x_get_coalesce,
10837 .set_coalesce = bnx2x_set_coalesce,
10838 .get_ringparam = bnx2x_get_ringparam,
10839 .set_ringparam = bnx2x_set_ringparam,
10840 .get_pauseparam = bnx2x_get_pauseparam,
10841 .set_pauseparam = bnx2x_set_pauseparam,
10842 .get_rx_csum = bnx2x_get_rx_csum,
10843 .set_rx_csum = bnx2x_set_rx_csum,
10844 .get_tx_csum = ethtool_op_get_tx_csum,
10845 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10846 .set_flags = bnx2x_set_flags,
10847 .get_flags = ethtool_op_get_flags,
10848 .get_sg = ethtool_op_get_sg,
10849 .set_sg = ethtool_op_set_sg,
10850 .get_tso = ethtool_op_get_tso,
10851 .set_tso = bnx2x_set_tso,
10852 .self_test = bnx2x_self_test,
10853 .get_sset_count = bnx2x_get_sset_count,
10854 .get_strings = bnx2x_get_strings,
10855 .phys_id = bnx2x_phys_id,
10856 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10859 /* end of ethtool_ops */
10861 /****************************************************************************
10862 * General service functions
10863 ****************************************************************************/
10865 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10869 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10873 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10874 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10875 PCI_PM_CTRL_PME_STATUS));
10877 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10878 /* delay required during transition out of D3hot */
10883 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10887 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10889 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10892 /* No more memory access after this point until
10893 * device is brought back to D0.
10903 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10907 /* Tell compiler that status block fields can change */
10909 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10910 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10912 return (fp->rx_comp_cons != rx_cons_sb);
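/* The completion ring leaves the last entry of each page unused as a
 * next-page link element, so an index that lands on it must be bumped past
 * it before comparing producer and consumer, which is what the
 * MAX_RCQ_DESC_CNT check above does.  A sketch with an assumed 128-entry
 * page: */
#include <stdio.h>

#define DESC_CNT  128
#define DESC_MASK (DESC_CNT - 1)

static unsigned skip_link_entry(unsigned idx)
{
	if ((idx & DESC_MASK) == DESC_MASK)	/* last entry of a page */
		idx++;
	return idx;
}

int main(void)
{
	printf("%u %u\n", skip_link_entry(126), skip_link_entry(127));
	/* prints "126 128": entry 127 is the link element */
	return 0;
}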
10916 /* net_device service functions */
10919 static int bnx2x_poll(struct napi_struct *napi, int budget)
10922 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10924 struct bnx2x *bp = fp->bp;
10927 #ifdef BNX2X_STOP_ON_ERROR
10928 if (unlikely(bp->panic)) {
10929 napi_complete(napi);
10934 if (bnx2x_has_tx_work(fp))
	bnx2x_tx_int(fp);
10937 if (bnx2x_has_rx_work(fp)) {
10938 work_done += bnx2x_rx_int(fp, budget - work_done);
10940 /* must not complete if we consumed full budget */
10941 if (work_done >= budget)
10945 /* Fall out from the NAPI loop if needed */
10946 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10947 bnx2x_update_fpsb_idx(fp);
10948 /* bnx2x_has_rx_work() reads the status block, thus we need
10949 * to ensure that status block indices have been actually read
10950 * (bnx2x_update_fpsb_idx) prior to this check
10951 * (bnx2x_has_rx_work) so that we won't write the "newer"
10952 * value of the status block to IGU (if there was a DMA right
10953 * after bnx2x_has_rx_work and if there is no rmb, the memory
10954 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10955 * before bnx2x_ack_sb). In this case there will never be
10956 * another interrupt until there is another update of the
10957 * status block, while there is still unhandled work.
	 */
	rmb();
10961 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10962 napi_complete(napi);
10963 /* Re-enable interrupts */
10964 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10965 le16_to_cpu(fp->fp_c_idx),
	IGU_INT_NOP, 1);
10967 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10968 le16_to_cpu(fp->fp_u_idx),
10969 IGU_INT_ENABLE, 1);
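/* Illustrative sketch, not driver code, of the ordering rule spelled out in
 * the comment above: snapshot the status-block indices, order those reads
 * with a read barrier, and only then run the final has-work check before
 * acking the IGU; otherwise a stale index could be written and the last
 * interrupt lost.
 */
#if 0
	bnx2x_update_fpsb_idx(fp);		/* 1. read SB indices */
	rmb();					/* 2. order reads vs. the check */
	if (!bnx2x_has_rx_work(fp))		/* 3. decide with fresh data */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
#endif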
10979 /* we split the first BD into headers and data BDs
10980 * to ease the pain of our fellow microcode engineers;
10981 * we use one mapping for both BDs.
10982 * So far this has only been observed to happen
10983 * in Other Operating Systems(TM)
10985 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10986 struct bnx2x_fastpath *fp,
10987 struct sw_tx_bd *tx_buf,
10988 struct eth_tx_start_bd **tx_bd, u16 hlen,
10989 u16 bd_prod, int nbd)
10991 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10992 struct eth_tx_bd *d_tx_bd;
10993 dma_addr_t mapping;
10994 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10996 /* first fix first BD */
10997 h_tx_bd->nbd = cpu_to_le16(nbd);
10998 h_tx_bd->nbytes = cpu_to_le16(hlen);
11000 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11001 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11002 h_tx_bd->addr_lo, h_tx_bd->nbd);
11004 /* now get a new data BD
11005 * (after the pbd) and fill it */
11006 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11007 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11009 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11010 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11012 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11013 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11014 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11016 /* this marks the BD as one that has no individual mapping */
11017 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11019 DP(NETIF_MSG_TX_QUEUED,
11020 "TSO split data size is %d (%x:%x)\n",
11021 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11024 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11029 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11032 csum = (u16) ~csum_fold(csum_sub(csum,
11033 csum_partial(t_header - fix, fix, 0)));
11036 csum = (u16) ~csum_fold(csum_add(csum,
11037 csum_partial(t_header, -fix, 0)));
11039 return swab16(csum);
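/* Illustrative sketch, not driver code: bnx2x_csum_fix() re-bases a partial
 * checksum onto the range the HW expects.  A positive 'fix' means the
 * stack's sum started 'fix' bytes before the transport header, so those
 * bytes are subtracted back out; a negative 'fix' adds the missing bytes.
 * Hypothetical call for a sum that began 2 bytes early:
 */
#if 0
	u16 fixed = bnx2x_csum_fix(skb_transport_header(skb),
				   SKB_CS(skb), 2 /* drop 2 leading bytes */);
#endif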
11042 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11046 if (skb->ip_summed != CHECKSUM_PARTIAL)
	rc = XMIT_PLAIN;
	else {
11050 if (skb->protocol == htons(ETH_P_IPV6)) {
	rc = XMIT_CSUM_V6;
11052 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11053 rc |= XMIT_CSUM_TCP;
	} else {
	rc = XMIT_CSUM_V4;
11057 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11058 rc |= XMIT_CSUM_TCP;
	}
	}
11062 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11063 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
11065 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11066 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
11071 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11072 /* check if packet requires linearization (packet is too fragmented)
11073 no need to check fragmentation if page size > 8K (there will be no
11074 violation of FW restrictions) */
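/* Illustrative sketch, not driver code: the check below slides a window of
 * wnd_size = MAX_FETCH_BD - 3 consecutive BDs over the frag list; if any
 * window carries less than one MSS of payload, the FW limit would be hit
 * and the skb must be linearized.  Assuming a flat frag_size[] array:
 */
#if 0
	int w, i, sum, wnd = MAX_FETCH_BD - 3;
	for (w = 0; w + wnd <= nr_frags; w++) {
		for (sum = 0, i = w; i < w + wnd; i++)
			sum += frag_size[i];	/* hypothetical sizes */
		if (sum < lso_mss)
			return 1;		/* needs skb_linearize() */
	}
#endif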
11075 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11080 int first_bd_sz = 0;
11082 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11083 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11085 if (xmit_type & XMIT_GSO) {
11086 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11087 /* Check if LSO packet needs to be copied:
11088 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11089 int wnd_size = MAX_FETCH_BD - 3;
11090 /* Number of windows to check */
11091 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11096 /* Headers length */
11097 hlen = (int)(skb_transport_header(skb) - skb->data) +
11100 /* Amount of data (w/o headers) on linear part of SKB*/
11101 first_bd_sz = skb_headlen(skb) - hlen;
11103 wnd_sum = first_bd_sz;
11105 /* Calculate the first sum - it's special */
11106 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11108 skb_shinfo(skb)->frags[frag_idx].size;
11110 /* If there was data on linear skb data - check it */
11111 if (first_bd_sz > 0) {
11112 if (unlikely(wnd_sum < lso_mss)) {
11117 wnd_sum -= first_bd_sz;
11120 /* Others are easier: run through the frag list and
11121 check all windows */
11122 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11124 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11126 if (unlikely(wnd_sum < lso_mss)) {
11131 skb_shinfo(skb)->frags[wnd_idx].size;
11134 /* a non-LSO packet that is too fragmented must always be linearized */
11141 if (unlikely(to_copy))
11142 DP(NETIF_MSG_TX_QUEUED,
11143 "Linearization IS REQUIRED for %s packet. "
11144 "num_frags %d hlen %d first_bd_sz %d\n",
11145 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11146 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11152 /* called with netif_tx_lock
11153 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11154 * netif_wake_queue()
11156 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11158 struct bnx2x *bp = netdev_priv(dev);
11159 struct bnx2x_fastpath *fp;
11160 struct netdev_queue *txq;
11161 struct sw_tx_bd *tx_buf;
11162 struct eth_tx_start_bd *tx_start_bd;
11163 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11164 struct eth_tx_parse_bd *pbd = NULL;
11165 u16 pkt_prod, bd_prod;
11167 dma_addr_t mapping;
11168 u32 xmit_type = bnx2x_xmit_type(bp, skb);
11171 __le16 pkt_size = 0;
11173 #ifdef BNX2X_STOP_ON_ERROR
11174 if (unlikely(bp->panic))
11175 return NETDEV_TX_BUSY;
#endif
11178 fp_index = skb_get_queue_mapping(skb);
11179 txq = netdev_get_tx_queue(dev, fp_index);
11181 fp = &bp->fp[fp_index];
11183 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11184 fp->eth_q_stats.driver_xoff++;
11185 netif_tx_stop_queue(txq);
11186 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11187 return NETDEV_TX_BUSY;
11190 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11191 " gso type %x xmit_type %x\n",
11192 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11193 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11195 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11196 /* First, check if we need to linearize the skb (due to FW
11197 restrictions). No need to check fragmentation if page size > 8K
11198 (there will be no violation of FW restrictions) */
11199 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11200 /* Statistics of linearization */
11202 if (skb_linearize(skb) != 0) {
11203 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11204 "silently dropping this SKB\n");
11205 dev_kfree_skb_any(skb);
11206 return NETDEV_TX_OK;
11212 Please read carefully. First we use one BD which we mark as start,
11213 then we have a parsing info BD (used for TSO or xsum),
11214 and only then we have the rest of the TSO BDs.
11215 (don't forget to mark the last one as last,
11216 and to unmap only AFTER you write to the BD ...)
11217 And above all, all pbd sizes are in words - NOT DWORDS!
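/* Illustrative sketch, not driver code: the resulting BD chain for a TSO
 * packet, with all parse-BD lengths counted in 16-bit words:
 *
 *   start BD (headers, ETH_TX_BD_FLAGS_START_BD)
 *     -> parse BD (total_hlen, pseudo csum, lso_mss)
 *       -> data BD (linear remainder, if the first BD was split)
 *         -> one data BD per frag, the last one closing the packet
 */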
11220 pkt_prod = fp->tx_pkt_prod++;
11221 bd_prod = TX_BD(fp->tx_bd_prod);
11223 /* get a tx_buf and first BD */
11224 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11225 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11227 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11228 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11229 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11231 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11233 /* remember the first BD of the packet */
11234 tx_buf->first_bd = fp->tx_bd_prod;
11238 DP(NETIF_MSG_TX_QUEUED,
11239 "sending pkt %u @%p next_idx %u bd %u @%p\n",
11240 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11243 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11244 (bp->flags & HW_VLAN_TX_FLAG)) {
11245 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11246 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11249 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11251 /* turn on parsing and get a BD */
11252 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11253 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11255 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11257 if (xmit_type & XMIT_CSUM) {
11258 hlen = (skb_network_header(skb) - skb->data) / 2;
11260 /* for now NS flag is not used in Linux */
	pbd->global_data =
11262 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11263 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11265 pbd->ip_hlen = (skb_transport_header(skb) -
11266 skb_network_header(skb)) / 2;
11268 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11270 pbd->total_hlen = cpu_to_le16(hlen);
11273 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11275 if (xmit_type & XMIT_CSUM_V4)
11276 tx_start_bd->bd_flags.as_bitfield |=
11277 ETH_TX_BD_FLAGS_IP_CSUM;
	else
11279 tx_start_bd->bd_flags.as_bitfield |=
11280 ETH_TX_BD_FLAGS_IPV6;
11282 if (xmit_type & XMIT_CSUM_TCP) {
11283 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11286 s8 fix = SKB_CS_OFF(skb); /* signed! */
11288 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11290 DP(NETIF_MSG_TX_QUEUED,
11291 "hlen %d fix %d csum before fix %x\n",
11292 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11294 /* HW bug: fixup the CSUM */
11295 pbd->tcp_pseudo_csum =
11296 bnx2x_csum_fix(skb_transport_header(skb),
11299 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11300 pbd->tcp_pseudo_csum);
11304 mapping = pci_map_single(bp->pdev, skb->data,
11305 skb_headlen(skb), PCI_DMA_TODEVICE);
11307 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11308 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11309 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11310 tx_start_bd->nbd = cpu_to_le16(nbd);
11311 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11312 pkt_size = tx_start_bd->nbytes;
11314 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11315 " nbytes %d flags %x vlan %x\n",
11316 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11317 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11318 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11320 if (xmit_type & XMIT_GSO) {
11322 DP(NETIF_MSG_TX_QUEUED,
11323 "TSO packet len %d hlen %d total len %d tso size %d\n",
11324 skb->len, hlen, skb_headlen(skb),
11325 skb_shinfo(skb)->gso_size);
11327 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11329 if (unlikely(skb_headlen(skb) > hlen))
11330 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11331 hlen, bd_prod, ++nbd);
11333 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11334 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11335 pbd->tcp_flags = pbd_tcp_flags(skb);
11337 if (xmit_type & XMIT_GSO_V4) {
11338 pbd->ip_id = swab16(ip_hdr(skb)->id);
11339 pbd->tcp_pseudo_csum =
11340 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11341 ip_hdr(skb)->daddr,
11342 0, IPPROTO_TCP, 0));
11345 pbd->tcp_pseudo_csum =
11346 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11347 &ipv6_hdr(skb)->daddr,
11348 0, IPPROTO_TCP, 0));
11350 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11352 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11354 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11355 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11357 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11358 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11359 if (total_pkt_bd == NULL)
11360 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11362 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11363 frag->size, PCI_DMA_TODEVICE);
11365 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11366 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11367 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11368 le16_add_cpu(&pkt_size, frag->size);
11370 DP(NETIF_MSG_TX_QUEUED,
11371 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11372 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11373 le16_to_cpu(tx_data_bd->nbytes));
11376 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11378 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11380 /* now send a tx doorbell, counting the next BD
11381 * if the packet contains or ends with it
11383 if (TX_BD_POFF(bd_prod) < nbd)
	nbd++;
11386 if (total_pkt_bd != NULL)
11387 total_pkt_bd->total_pkt_bytes = pkt_size;
11390 DP(NETIF_MSG_TX_QUEUED,
11391 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11392 " tcp_flags %x xsum %x seq %u hlen %u\n",
11393 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11394 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11395 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11397 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11400 * Make sure that the BD data is updated before updating the producer
11401 * since FW might read the BD right after the producer is updated.
11402 * This is only applicable for weak-ordered memory model archs such
11403 * as IA-64. The following barrier is also mandatory since FW
11404 * assumes packets must have BDs.
	 */
	wmb();
11408 fp->tx_db.data.prod += nbd;
11410 DOORBELL(bp, fp->index, fp->tx_db.raw);
11414 fp->tx_bd_prod += nbd;
11416 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11417 netif_tx_stop_queue(txq);
11418 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11419 if we put Tx into XOFF state. */
	smp_mb();
11421 fp->eth_q_stats.driver_xoff++;
11422 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11423 netif_tx_wake_queue(txq);
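/* Illustrative note, not driver code: the block above is the classic
 * stop-then-recheck pattern.  The queue is stopped first, the barrier makes
 * the new producer visible to bnx2x_tx_int(), and availability is tested
 * again because the completion path may have freed BDs in the meantime:
 */
#if 0
	netif_tx_stop_queue(txq);		/* 1. stop */
	smp_mb();				/* 2. publish tx_bd_prod */
	if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
		netif_tx_wake_queue(txq);	/* 3. undo a lost race */
#endif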
11427 return NETDEV_TX_OK;
11430 /* called with rtnl_lock */
11431 static int bnx2x_open(struct net_device *dev)
11433 struct bnx2x *bp = netdev_priv(dev);
11435 netif_carrier_off(dev);
11437 bnx2x_set_power_state(bp, PCI_D0);
11439 return bnx2x_nic_load(bp, LOAD_OPEN);
11442 /* called with rtnl_lock */
11443 static int bnx2x_close(struct net_device *dev)
11445 struct bnx2x *bp = netdev_priv(dev);
11447 /* Unload the driver, release IRQs */
11448 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11449 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11450 if (!CHIP_REV_IS_SLOW(bp))
11451 bnx2x_set_power_state(bp, PCI_D3hot);
11456 /* called with netif_tx_lock from dev_mcast.c */
11457 static void bnx2x_set_rx_mode(struct net_device *dev)
11459 struct bnx2x *bp = netdev_priv(dev);
11460 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11461 int port = BP_PORT(bp);
11463 if (bp->state != BNX2X_STATE_OPEN) {
11464 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11468 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11470 if (dev->flags & IFF_PROMISC)
11471 rx_mode = BNX2X_RX_MODE_PROMISC;
11473 else if ((dev->flags & IFF_ALLMULTI) ||
11474 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11475 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11477 else { /* some multicasts */
11478 if (CHIP_IS_E1(bp)) {
11479 int i, old, offset;
11480 struct dev_mc_list *mclist;
11481 struct mac_configuration_cmd *config =
11482 bnx2x_sp(bp, mcast_config);
11484 for (i = 0, mclist = dev->mc_list;
11485 mclist && (i < dev->mc_count);
11486 i++, mclist = mclist->next) {
11488 config->config_table[i].
11489 cam_entry.msb_mac_addr =
11490 swab16(*(u16 *)&mclist->dmi_addr[0]);
11491 config->config_table[i].
11492 cam_entry.middle_mac_addr =
11493 swab16(*(u16 *)&mclist->dmi_addr[2]);
11494 config->config_table[i].
11495 cam_entry.lsb_mac_addr =
11496 swab16(*(u16 *)&mclist->dmi_addr[4]);
11497 config->config_table[i].cam_entry.flags =
	cpu_to_le16(port);
11499 config->config_table[i].
11500 target_table_entry.flags = 0;
11501 config->config_table[i].target_table_entry.
11502 clients_bit_vector =
11503 cpu_to_le32(1 << BP_L_ID(bp));
11504 config->config_table[i].
11505 target_table_entry.vlan_id = 0;
11508 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11509 config->config_table[i].
11510 cam_entry.msb_mac_addr,
11511 config->config_table[i].
11512 cam_entry.middle_mac_addr,
11513 config->config_table[i].
11514 cam_entry.lsb_mac_addr);
11516 old = config->hdr.length;
11518 for (; i < old; i++) {
11519 if (CAM_IS_INVALID(config->
11520 config_table[i])) {
11521 /* already invalidated */
	break;
11525 CAM_INVALIDATE(config->
	config_table[i]);
11530 if (CHIP_REV_IS_SLOW(bp))
11531 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
	else
11533 offset = BNX2X_MAX_MULTICAST*(1 + port);
11535 config->hdr.length = i;
11536 config->hdr.offset = offset;
11537 config->hdr.client_id = bp->fp->cl_id;
11538 config->hdr.reserved1 = 0;
11540 bp->set_mac_pending++;
11543 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11544 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11545 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
	0);
11548 /* Accept one or more multicasts */
11549 struct dev_mc_list *mclist;
11550 u32 mc_filter[MC_HASH_SIZE];
11551 u32 crc, bit, regidx;
11554 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11556 for (i = 0, mclist = dev->mc_list;
11557 mclist && (i < dev->mc_count);
11558 i++, mclist = mclist->next) {
11560 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
	mclist->dmi_addr);
11563 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11564 bit = (crc >> 24) & 0xff;
	regidx = bit >> 5;
	bit &= 0x1f;
11567 mc_filter[regidx] |= (1 << bit);
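/* Illustrative sketch, not driver code: mapping a multicast MAC into the
 * 256-bit hash filter.  The top byte of the little-endian CRC32C selects one
 * of 256 bits, spread across MC_HASH_SIZE 32-bit registers.  Hypothetical
 * CRC value:
 */
#if 0
	u32 crc = 0x9b000000;		/* crc32c_le(0, mac, ETH_ALEN), say */
	u32 b = (crc >> 24) & 0xff;	/* bit 155 of the filter */
	mc_filter[b >> 5] |= 1 << (b & 0x1f);	/* register 4, bit 27 */
#endif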
11570 for (i = 0; i < MC_HASH_SIZE; i++)
11571 REG_WR(bp, MC_HASH_OFFSET(bp, i),
	mc_filter[i]);
11576 bp->rx_mode = rx_mode;
11577 bnx2x_set_storm_rx_mode(bp);
11580 /* called with rtnl_lock */
11581 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11583 struct sockaddr *addr = p;
11584 struct bnx2x *bp = netdev_priv(dev);
11586 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
	return -EINVAL;
11589 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11590 if (netif_running(dev)) {
11591 if (CHIP_IS_E1(bp))
11592 bnx2x_set_eth_mac_addr_e1(bp, 1);
	else
11594 bnx2x_set_eth_mac_addr_e1h(bp, 1);
11600 /* called with rtnl_lock */
11601 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11602 int devad, u16 addr)
11604 struct bnx2x *bp = netdev_priv(netdev);
11607 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11609 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11610 prtad, devad, addr);
11612 if (prtad != bp->mdio.prtad) {
11613 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11614 prtad, bp->mdio.prtad);
11618 /* The HW expects different devad if CL22 is used */
11619 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11621 bnx2x_acquire_phy_lock(bp);
11622 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11623 devad, addr, &value);
11624 bnx2x_release_phy_lock(bp);
11625 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11632 /* called with rtnl_lock */
11633 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11634 u16 addr, u16 value)
11636 struct bnx2x *bp = netdev_priv(netdev);
11637 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11640 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11641 " value 0x%x\n", prtad, devad, addr, value);
11643 if (prtad != bp->mdio.prtad) {
11644 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11645 prtad, bp->mdio.prtad);
11649 /* The HW expects different devad if CL22 is used */
11650 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11652 bnx2x_acquire_phy_lock(bp);
11653 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11654 devad, addr, value);
11655 bnx2x_release_phy_lock(bp);
11659 /* called with rtnl_lock */
11660 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11662 struct bnx2x *bp = netdev_priv(dev);
11663 struct mii_ioctl_data *mdio = if_mii(ifr);
11665 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11666 mdio->phy_id, mdio->reg_num, mdio->val_in);
11668 if (!netif_running(dev))
	return -EAGAIN;
11671 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11674 /* called with rtnl_lock */
11675 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11677 struct bnx2x *bp = netdev_priv(dev);
11680 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11681 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
	return -EINVAL;
11684 /* This does not race with packet allocation
11685 * because the actual alloc size is
11686 * only updated as part of load
11688 dev->mtu = new_mtu;
11690 if (netif_running(dev)) {
11691 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11692 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11698 static void bnx2x_tx_timeout(struct net_device *dev)
11700 struct bnx2x *bp = netdev_priv(dev);
11702 #ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
11706 /* This allows the netif to be shutdown gracefully before resetting */
11707 schedule_work(&bp->reset_task);
11711 /* called with rtnl_lock */
11712 static void bnx2x_vlan_rx_register(struct net_device *dev,
11713 struct vlan_group *vlgrp)
11715 struct bnx2x *bp = netdev_priv(dev);
	bp->vlgrp = vlgrp;
11719 /* Set flags according to the required capabilities */
11720 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11722 if (dev->features & NETIF_F_HW_VLAN_TX)
11723 bp->flags |= HW_VLAN_TX_FLAG;
11725 if (dev->features & NETIF_F_HW_VLAN_RX)
11726 bp->flags |= HW_VLAN_RX_FLAG;
11728 if (netif_running(dev))
11729 bnx2x_set_client_config(bp);
11734 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11735 static void poll_bnx2x(struct net_device *dev)
11737 struct bnx2x *bp = netdev_priv(dev);
11739 disable_irq(bp->pdev->irq);
11740 bnx2x_interrupt(bp->pdev->irq, dev);
11741 enable_irq(bp->pdev->irq);
11745 static const struct net_device_ops bnx2x_netdev_ops = {
11746 .ndo_open = bnx2x_open,
11747 .ndo_stop = bnx2x_close,
11748 .ndo_start_xmit = bnx2x_start_xmit,
11749 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11750 .ndo_set_mac_address = bnx2x_change_mac_addr,
11751 .ndo_validate_addr = eth_validate_addr,
11752 .ndo_do_ioctl = bnx2x_ioctl,
11753 .ndo_change_mtu = bnx2x_change_mtu,
11754 .ndo_tx_timeout = bnx2x_tx_timeout,
11756 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11758 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11759 .ndo_poll_controller = poll_bnx2x,
11763 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11764 struct net_device *dev)
11769 SET_NETDEV_DEV(dev, &pdev->dev);
11770 bp = netdev_priv(dev);
	bp->dev = dev;
	bp->pdev = pdev;
11775 bp->func = PCI_FUNC(pdev->devfn);
11777 rc = pci_enable_device(pdev);
11779 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11783 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11784 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11787 goto err_out_disable;
11790 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11791 printk(KERN_ERR PFX "Cannot find second PCI device"
11792 " base address, aborting\n");
11794 goto err_out_disable;
11797 if (atomic_read(&pdev->enable_cnt) == 1) {
11798 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11800 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11802 goto err_out_disable;
11805 pci_set_master(pdev);
11806 pci_save_state(pdev);
11809 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11810 if (bp->pm_cap == 0) {
11811 printk(KERN_ERR PFX "Cannot find power management"
11812 " capability, aborting\n");
11814 goto err_out_release;
11817 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11818 if (bp->pcie_cap == 0) {
11819 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11822 goto err_out_release;
11825 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11826 bp->flags |= USING_DAC_FLAG;
11827 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11828 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11829 " failed, aborting\n");
11831 goto err_out_release;
11834 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11835 printk(KERN_ERR PFX "System does not support DMA,"
11838 goto err_out_release;
11841 dev->mem_start = pci_resource_start(pdev, 0);
11842 dev->base_addr = dev->mem_start;
11843 dev->mem_end = pci_resource_end(pdev, 0);
11845 dev->irq = pdev->irq;
11847 bp->regview = pci_ioremap_bar(pdev, 0);
11848 if (!bp->regview) {
11849 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11851 goto err_out_release;
11854 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11855 min_t(u64, BNX2X_DB_SIZE,
11856 pci_resource_len(pdev, 2)));
11857 if (!bp->doorbells) {
11858 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11860 goto err_out_unmap;
11863 bnx2x_set_power_state(bp, PCI_D0);
11865 /* clean indirect addresses */
11866 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11867 PCICFG_VENDOR_ID_OFFSET);
11868 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11869 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11870 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11871 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11873 dev->watchdog_timeo = TX_TIMEOUT;
11875 dev->netdev_ops = &bnx2x_netdev_ops;
11876 dev->ethtool_ops = &bnx2x_ethtool_ops;
11877 dev->features |= NETIF_F_SG;
11878 dev->features |= NETIF_F_HW_CSUM;
11879 if (bp->flags & USING_DAC_FLAG)
11880 dev->features |= NETIF_F_HIGHDMA;
11881 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11882 dev->features |= NETIF_F_TSO6;
11884 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11885 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11887 dev->vlan_features |= NETIF_F_SG;
11888 dev->vlan_features |= NETIF_F_HW_CSUM;
11889 if (bp->flags & USING_DAC_FLAG)
11890 dev->vlan_features |= NETIF_F_HIGHDMA;
11891 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11892 dev->vlan_features |= NETIF_F_TSO6;
11895 /* get_port_hwinfo() will set prtad and mmds properly */
11896 bp->mdio.prtad = MDIO_PRTAD_NONE;
11898 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11899 bp->mdio.dev = dev;
11900 bp->mdio.mdio_read = bnx2x_mdio_read;
11901 bp->mdio.mdio_write = bnx2x_mdio_write;
11907 iounmap(bp->regview);
11908 bp->regview = NULL;
11910 if (bp->doorbells) {
11911 iounmap(bp->doorbells);
11912 bp->doorbells = NULL;
11916 if (atomic_read(&pdev->enable_cnt) == 1)
11917 pci_release_regions(pdev);
11920 pci_disable_device(pdev);
11921 pci_set_drvdata(pdev, NULL);
11927 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11928 int *width, int *speed)
11930 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11932 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11934 /* return value of 1=2.5GHz 2=5GHz */
11935 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11938 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11940 const struct firmware *firmware = bp->firmware;
11941 struct bnx2x_fw_file_hdr *fw_hdr;
11942 struct bnx2x_fw_file_section *sections;
11943 u32 offset, len, num_ops;
11948 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
	return -EINVAL;
11951 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11952 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11954 /* Make sure none of the offsets and sizes make us read beyond
11955 * the end of the firmware data */
11956 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11957 offset = be32_to_cpu(sections[i].offset);
11958 len = be32_to_cpu(sections[i].len);
11959 if (offset + len > firmware->size) {
11960 printk(KERN_ERR PFX "Section %d length is out of "
11966 /* Likewise for the init_ops offsets */
11967 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11968 ops_offsets = (u16 *)(firmware->data + offset);
11969 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11971 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11972 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11973 printk(KERN_ERR PFX "Section offset %d is out of "
11979 /* Check FW version */
11980 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11981 fw_ver = firmware->data + offset;
11982 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11983 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11984 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11985 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11986 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11987 " Should be %d.%d.%d.%d\n",
11988 fw_ver[0], fw_ver[1], fw_ver[2],
11989 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11990 BCM_5710_FW_MINOR_VERSION,
11991 BCM_5710_FW_REVISION_VERSION,
11992 BCM_5710_FW_ENGINEERING_VERSION);
11999 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12001 const __be32 *source = (const __be32 *)_source;
12002 u32 *target = (u32 *)_target;
12005 for (i = 0; i < n/4; i++)
12006 target[i] = be32_to_cpu(source[i]);
12010 Ops array is stored in the following format:
12011 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12013 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12015 const __be32 *source = (const __be32 *)_source;
12016 struct raw_op *target = (struct raw_op *)_target;
12019 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12020 tmp = be32_to_cpu(source[j]);
12021 target[i].op = (tmp >> 24) & 0xff;
12022 target[i].offset = tmp & 0xffffff;
12023 target[i].raw_data = be32_to_cpu(source[j+1]);
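/* Illustrative sketch, not driver code: unpacking one 8-byte op record as
 * done above.  A hypothetical first word of 0x05AABBCC yields op 0x05 and
 * offset 0xAABBCC; the second word is the raw 32-bit data:
 */
#if 0
	u32 tmp = be32_to_cpu(source[0]);	/* 0x05AABBCC */
	u8 op = (tmp >> 24) & 0xff;		/* 0x05 */
	u32 off = tmp & 0xffffff;		/* 0xAABBCC */
	u32 data = be32_to_cpu(source[1]);	/* taken as-is */
#endif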
12027 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12029 const __be16 *source = (const __be16 *)_source;
12030 u16 *target = (u16 *)_target;
12033 for (i = 0; i < n/2; i++)
12034 target[i] = be16_to_cpu(source[i]);
12037 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
12039 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12040 bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
12042 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12043 "for "#arr"\n", len); \
	goto lbl; \
	} \
12046 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12047 (u8 *)bp->arr, len); \
	} while (0)
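/* Illustrative note, not driver code: a BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) invocation, as used below, expands
 * to roughly:
 */
#if 0
	u32 len = be32_to_cpu(fw_hdr->init_data.len);
	bp->init_data = kmalloc(len, GFP_KERNEL);
	if (!bp->init_data)
		goto request_firmware_exit;	/* after logging the failure */
	be32_to_cpu_n(bp->firmware->data +
		      be32_to_cpu(fw_hdr->init_data.offset),
		      (u8 *)bp->init_data, len);
#endif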
12050 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12052 const char *fw_file_name;
12053 struct bnx2x_fw_file_hdr *fw_hdr;
12056 if (CHIP_IS_E1(bp))
12057 fw_file_name = FW_FILE_NAME_E1;
12059 fw_file_name = FW_FILE_NAME_E1H;
12061 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12063 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12065 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12067 goto request_firmware_exit;
12070 rc = bnx2x_check_firmware(bp);
12072 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12073 goto request_firmware_exit;
12076 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12078 /* Initialize the pointers to the init arrays */
12080 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12083 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12086 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12089 /* STORMs firmware */
12090 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12091 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12092 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12093 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12094 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12095 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12096 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12097 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12098 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12099 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12100 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12101 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12102 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12103 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12104 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12105 be32_to_cpu(fw_hdr->csem_pram_data.offset);
12109 init_offsets_alloc_err:
12110 kfree(bp->init_ops);
12111 init_ops_alloc_err:
12112 kfree(bp->init_data);
12113 request_firmware_exit:
12114 release_firmware(bp->firmware);
12120 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12121 const struct pci_device_id *ent)
12123 struct net_device *dev = NULL;
12125 int pcie_width, pcie_speed;
12128 /* dev zeroed in init_etherdev */
12129 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12131 printk(KERN_ERR PFX "Cannot allocate net device\n");
12135 bp = netdev_priv(dev);
12136 bp->msglevel = debug;
12138 pci_set_drvdata(pdev, dev);
12140 rc = bnx2x_init_dev(pdev, dev);
12146 rc = bnx2x_init_bp(bp);
12148 goto init_one_exit;
12150 /* Set init arrays */
12151 rc = bnx2x_init_firmware(bp, &pdev->dev);
12153 printk(KERN_ERR PFX "Error loading firmware\n");
12154 goto init_one_exit;
12157 rc = register_netdev(dev);
12159 dev_err(&pdev->dev, "Cannot register net device\n");
12160 goto init_one_exit;
12163 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12164 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12165 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12166 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12167 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12168 dev->base_addr, bp->pdev->irq);
12169 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12175 iounmap(bp->regview);
12178 iounmap(bp->doorbells);
12182 if (atomic_read(&pdev->enable_cnt) == 1)
12183 pci_release_regions(pdev);
12185 pci_disable_device(pdev);
12186 pci_set_drvdata(pdev, NULL);
12191 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12193 struct net_device *dev = pci_get_drvdata(pdev);
12197 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12200 bp = netdev_priv(dev);
12202 unregister_netdev(dev);
12204 kfree(bp->init_ops_offsets);
12205 kfree(bp->init_ops);
12206 kfree(bp->init_data);
12207 release_firmware(bp->firmware);
12210 iounmap(bp->regview);
12213 iounmap(bp->doorbells);
12217 if (atomic_read(&pdev->enable_cnt) == 1)
12218 pci_release_regions(pdev);
12220 pci_disable_device(pdev);
12221 pci_set_drvdata(pdev, NULL);
12224 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12226 struct net_device *dev = pci_get_drvdata(pdev);
12230 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12233 bp = netdev_priv(dev);
12237 pci_save_state(pdev);
12239 if (!netif_running(dev)) {
12244 netif_device_detach(dev);
12246 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12248 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12255 static int bnx2x_resume(struct pci_dev *pdev)
12257 struct net_device *dev = pci_get_drvdata(pdev);
12262 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12265 bp = netdev_priv(dev);
12269 pci_restore_state(pdev);
12271 if (!netif_running(dev)) {
12276 bnx2x_set_power_state(bp, PCI_D0);
12277 netif_device_attach(dev);
12279 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12286 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12290 bp->state = BNX2X_STATE_ERROR;
12292 bp->rx_mode = BNX2X_RX_MODE_NONE;
12294 bnx2x_netif_stop(bp, 0);
12296 del_timer_sync(&bp->timer);
12297 bp->stats_state = STATS_STATE_DISABLED;
12298 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12301 bnx2x_free_irq(bp);
12303 if (CHIP_IS_E1(bp)) {
12304 struct mac_configuration_cmd *config =
12305 bnx2x_sp(bp, mcast_config);
12307 for (i = 0; i < config->hdr.length; i++)
12308 CAM_INVALIDATE(config->config_table[i]);
12311 /* Free SKBs, SGEs, TPA pool and driver internals */
12312 bnx2x_free_skbs(bp);
12313 for_each_queue(bp, i)
12314 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12315 for_each_queue(bp, i)
12316 netif_napi_del(&bnx2x_fp(bp, i, napi));
12317 bnx2x_free_mem(bp);
12319 bp->state = BNX2X_STATE_CLOSED;
12321 netif_carrier_off(bp->dev);
12326 static void bnx2x_eeh_recover(struct bnx2x *bp)
12330 mutex_init(&bp->port.phy_mutex);
12332 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12333 bp->link_params.shmem_base = bp->common.shmem_base;
12334 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12336 if (!bp->common.shmem_base ||
12337 (bp->common.shmem_base < 0xA0000) ||
12338 (bp->common.shmem_base >= 0xC0000)) {
12339 BNX2X_DEV_INFO("MCP not active\n");
12340 bp->flags |= NO_MCP_FLAG;
12344 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12345 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12346 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12347 BNX2X_ERR("BAD MCP validity signature\n");
12349 if (!BP_NOMCP(bp)) {
12350 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12351 & DRV_MSG_SEQ_NUMBER_MASK);
12352 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12357 * bnx2x_io_error_detected - called when PCI error is detected
12358 * @pdev: Pointer to PCI device
12359 * @state: The current pci connection state
12361 * This function is called after a PCI bus error affecting
12362 * this device has been detected.
12364 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12365 pci_channel_state_t state)
12367 struct net_device *dev = pci_get_drvdata(pdev);
12368 struct bnx2x *bp = netdev_priv(dev);
12372 netif_device_detach(dev);
12374 if (state == pci_channel_io_perm_failure) {
12376 return PCI_ERS_RESULT_DISCONNECT;
12379 if (netif_running(dev))
12380 bnx2x_eeh_nic_unload(bp);
12382 pci_disable_device(pdev);
12386 /* Request a slot reset */
12387 return PCI_ERS_RESULT_NEED_RESET;
12391 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12392 * @pdev: Pointer to PCI device
12394 * Restart the card from scratch, as if from a cold boot.
12396 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12398 struct net_device *dev = pci_get_drvdata(pdev);
12399 struct bnx2x *bp = netdev_priv(dev);
12403 if (pci_enable_device(pdev)) {
12404 dev_err(&pdev->dev,
12405 "Cannot re-enable PCI device after reset\n");
12407 return PCI_ERS_RESULT_DISCONNECT;
12410 pci_set_master(pdev);
12411 pci_restore_state(pdev);
12413 if (netif_running(dev))
12414 bnx2x_set_power_state(bp, PCI_D0);
12418 return PCI_ERS_RESULT_RECOVERED;
12422 * bnx2x_io_resume - called when traffic can start flowing again
12423 * @pdev: Pointer to PCI device
12425 * This callback is called when the error recovery driver tells us that
12426 * it's OK to resume normal operation.
12428 static void bnx2x_io_resume(struct pci_dev *pdev)
12430 struct net_device *dev = pci_get_drvdata(pdev);
12431 struct bnx2x *bp = netdev_priv(dev);
12435 bnx2x_eeh_recover(bp);
12437 if (netif_running(dev))
12438 bnx2x_nic_load(bp, LOAD_NORMAL);
12440 netif_device_attach(dev);
12445 static struct pci_error_handlers bnx2x_err_handler = {
12446 .error_detected = bnx2x_io_error_detected,
12447 .slot_reset = bnx2x_io_slot_reset,
12448 .resume = bnx2x_io_resume,
12451 static struct pci_driver bnx2x_pci_driver = {
12452 .name = DRV_MODULE_NAME,
12453 .id_table = bnx2x_pci_tbl,
12454 .probe = bnx2x_init_one,
12455 .remove = __devexit_p(bnx2x_remove_one),
12456 .suspend = bnx2x_suspend,
12457 .resume = bnx2x_resume,
12458 .err_handler = &bnx2x_err_handler,
12461 static int __init bnx2x_init(void)
12465 printk(KERN_INFO "%s", version);
12467 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12468 if (bnx2x_wq == NULL) {
12469 printk(KERN_ERR PFX "Cannot create workqueue\n");
12473 ret = pci_register_driver(&bnx2x_pci_driver);
12475 printk(KERN_ERR PFX "Cannot register driver\n");
12476 destroy_workqueue(bnx2x_wq);
12481 static void __exit bnx2x_cleanup(void)
12483 pci_unregister_driver(&bnx2x_pci_driver);
12485 destroy_workqueue(bnx2x_wq);
12488 module_init(bnx2x_init);
12489 module_exit(bnx2x_cleanup);
12493 /* count denotes the number of new completions we have seen */
12494 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12496 struct eth_spe *spe;
12498 #ifdef BNX2X_STOP_ON_ERROR
12499 if (unlikely(bp->panic))
	return;
#endif
12503 spin_lock_bh(&bp->spq_lock);
12504 bp->cnic_spq_pending -= count;
12506 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12507 bp->cnic_spq_pending++) {
12509 if (!bp->cnic_kwq_pending)
12512 spe = bnx2x_sp_get_next(bp);
12513 *spe = *bp->cnic_kwq_cons;
12515 bp->cnic_kwq_pending--;
12517 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12518 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12520 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12521 bp->cnic_kwq_cons = bp->cnic_kwq;
12523 bp->cnic_kwq_cons++;
12525 bnx2x_sp_prod_update(bp);
12526 spin_unlock_bh(&bp->spq_lock);
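/* Illustrative note, not driver code: the SPQ credit flow above.  'count'
 * completions free that many slowpath slots, and queued CNIC KWQEs are then
 * drained from the kwq ring into the SPQ until the ring empties or
 * cnic_spq_pending reaches max_kwqe_pending again.  E.g. with
 * max_kwqe_pending = 8, a post(count = 3) drops pending from 8 to 5 and
 * refills up to 3 entries from the kwq ring.
 */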
12529 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12530 struct kwqe_16 *kwqes[], u32 count)
12532 struct bnx2x *bp = netdev_priv(dev);
12535 #ifdef BNX2X_STOP_ON_ERROR
12536 if (unlikely(bp->panic))
	return -EIO;
#endif
12540 spin_lock_bh(&bp->spq_lock);
12542 for (i = 0; i < count; i++) {
12543 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12545 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12548 *bp->cnic_kwq_prod = *spe;
12550 bp->cnic_kwq_pending++;
12552 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12553 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12554 spe->data.mac_config_addr.hi,
12555 spe->data.mac_config_addr.lo,
12556 bp->cnic_kwq_pending);
12558 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12559 bp->cnic_kwq_prod = bp->cnic_kwq;
12561 bp->cnic_kwq_prod++;
12564 spin_unlock_bh(&bp->spq_lock);
12566 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12567 bnx2x_cnic_sp_post(bp, 0);
12572 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12574 struct cnic_ops *c_ops;
12577 mutex_lock(&bp->cnic_mutex);
12578 c_ops = bp->cnic_ops;
12580 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12581 mutex_unlock(&bp->cnic_mutex);
12586 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12588 struct cnic_ops *c_ops;
12592 c_ops = rcu_dereference(bp->cnic_ops);
12594 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12601 * for commands that have no data
12603 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12605 struct cnic_ctl_info ctl = {0};
	ctl.cmd = cmd;
12609 return bnx2x_cnic_ctl_send(bp, &ctl);
12612 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12614 struct cnic_ctl_info ctl;
12616 /* first we tell CNIC and only then we count this as a completion */
12617 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12618 ctl.data.comp.cid = cid;
12620 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12621 bnx2x_cnic_sp_post(bp, 1);
12624 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12626 struct bnx2x *bp = netdev_priv(dev);
12629 switch (ctl->cmd) {
12630 case DRV_CTL_CTXTBL_WR_CMD: {
12631 u32 index = ctl->data.io.offset;
12632 dma_addr_t addr = ctl->data.io.dma_addr;
12634 bnx2x_ilt_wr(bp, index, addr);
12638 case DRV_CTL_COMPLETION_CMD: {
12639 int count = ctl->data.comp.comp_count;
12641 bnx2x_cnic_sp_post(bp, count);
12645 /* rtnl_lock is held. */
12646 case DRV_CTL_START_L2_CMD: {
12647 u32 cli = ctl->data.ring.client_id;
12649 bp->rx_mode_cl_mask |= (1 << cli);
12650 bnx2x_set_storm_rx_mode(bp);
12654 /* rtnl_lock is held. */
12655 case DRV_CTL_STOP_L2_CMD: {
12656 u32 cli = ctl->data.ring.client_id;
12658 bp->rx_mode_cl_mask &= ~(1 << cli);
12659 bnx2x_set_storm_rx_mode(bp);
12664 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12671 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12673 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12675 if (bp->flags & USING_MSIX_FLAG) {
12676 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12677 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12678 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12680 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12681 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12683 cp->irq_arr[0].status_blk = bp->cnic_sb;
12684 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12685 cp->irq_arr[1].status_blk = bp->def_status_blk;
12686 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12691 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12694 struct bnx2x *bp = netdev_priv(dev);
12695 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12700 if (atomic_read(&bp->intr_sem) != 0)
12703 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12707 bp->cnic_kwq_cons = bp->cnic_kwq;
12708 bp->cnic_kwq_prod = bp->cnic_kwq;
12709 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12711 bp->cnic_spq_pending = 0;
12712 bp->cnic_kwq_pending = 0;
12714 bp->cnic_data = data;
12717 cp->drv_state = CNIC_DRV_STATE_REGD;
12719 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12721 bnx2x_setup_cnic_irq_info(bp);
12722 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12723 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12724 rcu_assign_pointer(bp->cnic_ops, ops);
12729 static int bnx2x_unregister_cnic(struct net_device *dev)
12731 struct bnx2x *bp = netdev_priv(dev);
12732 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12734 mutex_lock(&bp->cnic_mutex);
12735 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12736 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12737 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12740 rcu_assign_pointer(bp->cnic_ops, NULL);
12741 mutex_unlock(&bp->cnic_mutex);
12743 kfree(bp->cnic_kwq);
12744 bp->cnic_kwq = NULL;
12749 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12751 struct bnx2x *bp = netdev_priv(dev);
12752 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12754 cp->drv_owner = THIS_MODULE;
12755 cp->chip_id = CHIP_ID(bp);
12756 cp->pdev = bp->pdev;
12757 cp->io_base = bp->regview;
12758 cp->io_base2 = bp->doorbells;
12759 cp->max_kwqe_pending = 8;
12760 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12761 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12762 cp->ctx_tbl_len = CNIC_ILT_LINES;
12763 cp->starting_cid = BCM_CNIC_CID_START;
12764 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12765 cp->drv_ctl = bnx2x_drv_ctl;
12766 cp->drv_register_cnic = bnx2x_register_cnic;
12767 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12771 EXPORT_SYMBOL(bnx2x_cnic_probe);
12773 #endif /* BCM_CNIC */