1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2011 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
40 #include <linux/if_vlan.h>
43 #include <net/checksum.h>
44 #include <linux/workqueue.h>
45 #include <linux/crc32.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/firmware.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
52 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
/* Driver identity, firmware image names, and module parameters.
 * The FW_* paths are the request_firmware() names for the MIPS and
 * RV2P processor images used by the 5706/5708 (06) and 5709 (09) chips.
 */
59 #define DRV_MODULE_NAME "bnx2"
60 #define DRV_MODULE_VERSION "2.2.1"
61 #define DRV_MODULE_RELDATE "Dec 18, 2011"
62 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
63 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
64 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
65 #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
66 #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
/* Helper for scheduling timer work relative to now. */
68 #define RUN_AT(x) (jiffies + (x))
70 /* Time in jiffies before concluding the transmitter is hung. */
71 #define TX_TIMEOUT (5*HZ)
/* Banner printed once at module load. */
73 static char version[] __devinitdata =
74 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
/* Declare the firmware files so tooling can bundle them with the module. */
80 MODULE_FIRMWARE(FW_MIPS_FILE_06);
81 MODULE_FIRMWARE(FW_RV2P_FILE_06);
82 MODULE_FIRMWARE(FW_MIPS_FILE_09);
83 MODULE_FIRMWARE(FW_RV2P_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
/* disable_msi=1 forces legacy INTx interrupts instead of MSI. */
86 static int disable_msi = 0;
88 module_param(disable_msi, int, 0);
89 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* Human-readable board names, indexed by the board_t enum (declared above
 * this excerpt).  The PCI table below stores a board_t in driver_data and
 * probe uses it to look up the name here.
 * NOTE(review): the struct head for this array is missing from this excerpt.
 */
105 /* indexed by board_t, above */
108 } board_info[] __devinitdata = {
109 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
110 { "HP NC370T Multifunction Gigabit Server Adapter" },
111 { "HP NC370i Multifunction Gigabit Server Adapter" },
112 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
113 { "HP NC370F Multifunction Gigabit Server Adapter" },
114 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
115 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
116 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
117 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
118 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
119 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
/* PCI device ID table.  HP OEM boards (subvendor PCI_VENDOR_ID_HP with a
 * specific subdevice) are matched before the catch-all PCI_ANY_ID entries
 * for the same device ID, so ordering here is significant.  The last field
 * is the board_t index into board_info[].
 * NOTE(review): the terminating { 0, } sentinel appears to be missing from
 * this excerpt.
 */
122 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
123 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
124 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
125 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
127 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
129 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
131 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
132 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
133 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
135 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
139 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
/* 0x163b/0x163c: 5716 copper/SerDes — raw IDs, no PCI_DEVICE_ID_* macro. */
141 { PCI_VENDOR_ID_BROADCOM, 0x163b,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
143 { PCI_VENDOR_ID_BROADCOM, 0x163c,
144 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
/* NVRAM device descriptor table for pre-5709 chips.  Each entry holds the
 * strap/config/write register patterns plus geometry (page bits, page size,
 * byte address mask, total size) for one supported flash/EEPROM part.
 * "Expansion entry" slots are placeholders for strap values with no known
 * part.  NOTE(review): several entries in this excerpt are missing their
 * trailing name string and closing brace — lines appear dropped.
 */
148 static const struct flash_spec flash_table[] =
150 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
151 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
/* Slow EEPROM */
153 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
154 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
155 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157 /* Expansion entry 0001 */
158 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
159 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
160 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162 /* Saifun SA25F010 (non-buffered flash) */
163 /* strap, cfg1, & write1 need updates */
164 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
165 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
166 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
167 "Non-buffered flash (128kB)"},
168 /* Saifun SA25F020 (non-buffered flash) */
169 /* strap, cfg1, & write1 need updates */
170 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
171 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
172 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
173 "Non-buffered flash (256kB)"},
174 /* Expansion entry 0100 */
175 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
176 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
177 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
180 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
181 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
182 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
183 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
184 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
185 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
186 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
187 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
188 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
189 /* Saifun SA25F005 (non-buffered flash) */
190 /* strap, cfg1, & write1 need updates */
191 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
192 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
194 "Non-buffered flash (64kB)"},
/* Fast EEPROM */
196 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
197 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
198 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200 /* Expansion entry 1001 */
201 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
202 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
203 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 /* Expansion entry 1010 */
206 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
207 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
208 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 /* ATMEL AT45DB011B (buffered flash) */
211 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
212 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
213 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
214 "Buffered flash (128kB)"},
215 /* Expansion entry 1100 */
216 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
217 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 /* Expansion entry 1101 */
221 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
222 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
223 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 /* Ateml Expansion entry 1110 */
226 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
227 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
228 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
229 "Entry 1110 (Atmel)"},
230 /* ATMEL AT45DB021B (buffered flash) */
231 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
232 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
233 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
234 "Buffered flash (256kB)"},
/* 5709 chips use a single, always-buffered NVRAM layout rather than the
 * strap-probed flash_table above, so its spec is a standalone constant.
 * NOTE(review): the closing brace of this initializer is missing from
 * this excerpt.
 */
237 static const struct flash_spec flash_5709 = {
238 .flags = BNX2_NV_BUFFERED,
239 .page_bits = BCM5709_FLASH_PAGE_BITS,
240 .page_size = BCM5709_FLASH_PAGE_SIZE,
241 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
242 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
243 .name = "5709 Buffered flash (256kB)",
/* Export the PCI ID table for module autoloading (modalias). */
246 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
/* Forward declarations for NAPI setup/teardown helpers defined later. */
248 static void bnx2_init_napi(struct bnx2 *bp);
249 static void bnx2_del_napi(struct bnx2 *bp);
/* bnx2_tx_avail - number of free TX descriptors in @txr.
 * prod - cons can momentarily exceed the ring size because the 256-entry
 * ring only holds 255 usable BDs; the clamp below corrects for that.
 * NOTE(review): local declarations, a compiler barrier, and closing braces
 * appear to be missing from this excerpt.
 */
251 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
255 /* Tell compiler to fetch tx_prod and tx_cons from memory. */
258 /* The ring uses 256 indices for 255 entries, one of them
259 * needs to be skipped.
261 diff = txr->tx_prod - txr->tx_cons;
262 if (unlikely(diff >= TX_DESC_CNT)) {
264 if (diff == TX_DESC_CNT)
265 diff = MAX_TX_DESC_CNT;
267 return bp->tx_ring_size - diff;
/* Indirect register accessors: the window address register selects the
 * target offset and the window data register carries the value.  The
 * address/data pair must be updated atomically, hence indirect_lock
 * (taken _bh, so these must not be called from hard-IRQ context).
 * NOTE(review): return types, braces, and local declarations are missing
 * from this excerpt.
 */
271 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
275 spin_lock_bh(&bp->indirect_lock);
276 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
277 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
278 spin_unlock_bh(&bp->indirect_lock);
/* Indirect register write — same windowed protocol as the read above. */
283 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
285 spin_lock_bh(&bp->indirect_lock);
286 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
287 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
288 spin_unlock_bh(&bp->indirect_lock);
/* Shared-memory (driver<->firmware mailbox) accessors: thin wrappers that
 * rebase @offset onto bp->shmem_base and use the indirect accessors. */
292 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
294 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
298 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
300 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
/* Write one 32-bit word into on-chip context memory for connection
 * @cid_addr.  5709 uses a request/ack protocol (write data, post a
 * WRITE_REQ, then poll up to 5 times for completion); older chips have a
 * simple address/data register pair.  Serialized by indirect_lock.
 * NOTE(review): the loop-exit break/udelay and 5709 address setup lines
 * appear to be missing from this excerpt.
 */
304 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
307 spin_lock_bh(&bp->indirect_lock);
308 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
311 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
312 REG_WR(bp, BNX2_CTX_CTX_CTRL,
313 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
314 for (i = 0; i < 5; i++) {
315 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
/* Hardware clears WRITE_REQ when the context write has landed. */
316 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
321 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
322 REG_WR(bp, BNX2_CTX_DATA, val);
324 spin_unlock_bh(&bp->indirect_lock);
/* Control hook exposed to the CNIC (iSCSI/FCoE offload) driver: dispatches
 * indirect register read/write and context-write requests on its behalf.
 * NOTE(review): the switch statement head, break statements, default case,
 * and return are missing from this excerpt.
 */
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331 struct bnx2 *bp = netdev_priv(dev);
332 struct drv_ctl_io *io = &info->data.io;
335 case DRV_CTL_IO_WR_CMD:
336 bnx2_reg_wr_ind(bp, io->offset, io->data);
338 case DRV_CTL_IO_RD_CMD:
339 io->data = bnx2_reg_rd_ind(bp, io->offset);
341 case DRV_CTL_CTX_WR_CMD:
342 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
/* Hand the CNIC driver its interrupt resources.  With MSI-X the CNIC gets
 * its own vector (index bp->irq_nvecs) and per-vector status block; with
 * INTx/MSI it shares vector 0 and polls via bnapi->cnic_tag/cnic_present.
 */
350 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
352 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
353 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
356 if (bp->flags & BNX2_FLAG_USING_MSIX) {
357 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
358 bnapi->cnic_present = 0;
/* CNIC takes the vector just past the ones the net driver uses. */
359 sb_id = bp->irq_nvecs;
360 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
362 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
/* Shared-interrupt mode: CNIC piggybacks on status block 0. */
363 bnapi->cnic_tag = bnapi->last_status_idx;
364 bnapi->cnic_present = 1;
366 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
369 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
/* Per-vector status blocks are laid out contiguously at fixed stride. */
370 cp->irq_arr[0].status_blk = (void *)
371 ((unsigned long) bnapi->status_blk.msi +
372 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
373 cp->irq_arr[0].status_blk_num = sb_id;
/* Register the CNIC driver's ops with this netdev.  Fails when already
 * registered or when firmware reports no iSCSI connection resources.
 * NOTE(review): error-return lines and the cnic_lock acquisition appear
 * to be missing from this excerpt.
 */
377 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
380 struct bnx2 *bp = netdev_priv(dev);
381 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
386 if (cp->drv_state & CNIC_DRV_STATE_REGD)
389 if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
392 bp->cnic_data = data;
/* Publish ops with the RCU write-side helper so readers see fully
 * initialized data. */
393 rcu_assign_pointer(bp->cnic_ops, ops);
396 cp->drv_state = CNIC_DRV_STATE_REGD;
398 bnx2_setup_cnic_irq_info(bp);
/* Tear down the CNIC registration made above; readers of cnic_ops are
 * excluded via cnic_lock. */
403 static int bnx2_unregister_cnic(struct net_device *dev)
405 struct bnx2 *bp = netdev_priv(dev);
406 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
407 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
409 mutex_lock(&bp->cnic_lock);
411 bnapi->cnic_present = 0;
412 RCU_INIT_POINTER(bp->cnic_ops, NULL);
413 mutex_unlock(&bp->cnic_lock);
/* Entry point the CNIC module calls to discover this device's offload
 * capabilities.  Returns the populated cnic_eth_dev, or (per the
 * max_iscsi_conn check) bails out when no iSCSI resources exist —
 * NOTE(review): the early-return and final return lines are missing from
 * this excerpt.
 */
418 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
420 struct bnx2 *bp = netdev_priv(dev);
421 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
423 if (!cp->max_iscsi_conn)
426 cp->drv_owner = THIS_MODULE;
427 cp->chip_id = bp->chip_id;
429 cp->io_base = bp->regview;
/* Callbacks the CNIC driver uses to drive this device. */
430 cp->drv_ctl = bnx2_drv_ctl;
431 cp->drv_register_cnic = bnx2_register_cnic;
432 cp->drv_unregister_cnic = bnx2_unregister_cnic;
436 EXPORT_SYMBOL(bnx2_cnic_probe);
/* Notify a registered CNIC driver that the NIC is stopping.  cnic_lock
 * protects cnic_ops against concurrent unregistration, so the plain
 * rcu_dereference_protected() is sufficient here.
 * NOTE(review): the NULL check on c_ops appears to be missing from this
 * excerpt.
 */
439 bnx2_cnic_stop(struct bnx2 *bp)
441 struct cnic_ops *c_ops;
442 struct cnic_ctl_info info;
444 mutex_lock(&bp->cnic_lock);
445 c_ops = rcu_dereference_protected(bp->cnic_ops,
446 lockdep_is_held(&bp->cnic_lock));
448 info.cmd = CNIC_CTL_STOP_CMD;
449 c_ops->cnic_ctl(bp->cnic_data, &info);
451 mutex_unlock(&bp->cnic_lock);
/* Counterpart of bnx2_cnic_stop: tell the CNIC driver the NIC is up again.
 * In shared-interrupt mode, re-arm the CNIC's status-block tag first. */
455 bnx2_cnic_start(struct bnx2 *bp)
457 struct cnic_ops *c_ops;
458 struct cnic_ctl_info info;
460 mutex_lock(&bp->cnic_lock);
461 c_ops = rcu_dereference_protected(bp->cnic_ops,
462 lockdep_is_held(&bp->cnic_lock));
464 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
465 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
467 bnapi->cnic_tag = bnapi->last_status_idx;
469 info.cmd = CNIC_CTL_START_CMD;
470 c_ops->cnic_ctl(bp->cnic_data, &info);
472 mutex_unlock(&bp->cnic_lock);
/* Empty stubs used when CNIC support is compiled out (the #else branch of
 * the CONFIG_CNIC conditional above). */
478 bnx2_cnic_stop(struct bnx2 *bp)
483 bnx2_cnic_start(struct bnx2 *bp)
/* Read PHY register @reg over MDIO into *@val.  If hardware auto-polling
 * is active it must be paused around the manual access and restored
 * afterwards.  Polls BNX2_EMAC_MDIO_COMM up to 50 times for BUSY to clear.
 * NOTE(review): udelay calls, the timeout/-EBUSY error path, braces, and
 * the final return are missing from this excerpt.
 */
490 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
495 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
496 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
497 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
499 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
/* Read back to flush the posted write before issuing the command. */
500 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
/* Compose the MDIO read command: PHY address, register, start/busy. */
505 val1 = (bp->phy_addr << 21) | (reg << 16) |
506 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
507 BNX2_EMAC_MDIO_COMM_START_BUSY;
508 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
510 for (i = 0; i < 50; i++) {
513 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
514 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
/* Re-read to latch the returned data, then mask to the 16-bit field. */
517 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
518 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
524 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
/* Restore hardware auto-polling if we disabled it above. */
533 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
534 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
535 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
537 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
538 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
/* Write @val to PHY register @reg over MDIO.  Mirrors bnx2_read_phy:
 * pause auto-polling, issue the command, poll BUSY (50 iterations),
 * restore auto-polling.
 * NOTE(review): udelay calls, the -EBUSY path, braces, and the final
 * return are missing from this excerpt.
 */
547 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
552 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
553 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
554 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
556 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
557 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
/* Compose the MDIO write command with the data in the low 16 bits. */
562 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
563 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
564 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
565 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
567 for (i = 0; i < 50; i++) {
570 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
571 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
577 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
/* Restore hardware auto-polling if we disabled it above. */
582 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
583 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
584 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
586 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
587 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
/* Mask interrupts on every vector by writing MASK_INT to the per-vector
 * INT_ACK_CMD register; the final read flushes the posted writes.
 */
596 bnx2_disable_int(struct bnx2 *bp)
599 struct bnx2_napi *bnapi;
601 for (i = 0; i < bp->irq_nvecs; i++) {
602 bnapi = &bp->bnx2_napi[i];
603 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
604 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
606 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
/* Re-enable interrupts on every vector.  The first (masked) ack write
 * updates the status index while still masked, the second unmasks; the
 * COAL_NOW kick forces the host coalescing block to generate an interrupt
 * if events are pending.
 */
610 bnx2_enable_int(struct bnx2 *bp)
613 struct bnx2_napi *bnapi;
615 for (i = 0; i < bp->irq_nvecs; i++) {
616 bnapi = &bp->bnx2_napi[i];
618 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
619 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
620 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
621 bnapi->last_status_idx);
623 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
624 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
625 bnapi->last_status_idx);
627 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
/* Disable interrupts and wait for in-flight handlers on all vectors to
 * finish.  intr_sem is bumped first so a racing bnx2_netif_start() will
 * not re-enable the device; if the netdev is already down there is
 * nothing further to quiesce.
 */
631 bnx2_disable_int_sync(struct bnx2 *bp)
635 atomic_inc(&bp->intr_sem);
636 if (!netif_running(bp->dev))
639 bnx2_disable_int(bp);
640 for (i = 0; i < bp->irq_nvecs; i++)
641 synchronize_irq(bp->irq_tbl[i].vector);
/* Stop NAPI polling on every vector (blocks until each poll completes). */
645 bnx2_napi_disable(struct bnx2 *bp)
649 for (i = 0; i < bp->irq_nvecs; i++)
650 napi_disable(&bp->bnx2_napi[i].napi);
/* Re-enable NAPI polling on every vector. */
654 bnx2_napi_enable(struct bnx2 *bp)
658 for (i = 0; i < bp->irq_nvecs; i++)
659 napi_enable(&bp->bnx2_napi[i].napi);
/* Quiesce the data path: stop NAPI, halt the TX queues, sync off the
 * interrupts, and drop carrier so the stack's TX watchdog cannot fire
 * while the device is deliberately stopped.
 * NOTE(review): the stop_cnic handling (bnx2_cnic_stop call) appears to
 * be missing from this excerpt.
 */
663 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
667 if (netif_running(bp->dev)) {
668 bnx2_napi_disable(bp);
669 netif_tx_disable(bp->dev);
671 bnx2_disable_int_sync(bp);
672 netif_carrier_off(bp->dev); /* prevent tx timeout */
/* Reverse of bnx2_netif_stop.  Only the call that balances the last
 * outstanding bnx2_disable_int_sync() (intr_sem reaching zero) restarts
 * the device; carrier is restored under phy_lock based on link state.
 * NOTE(review): the bp->link_up check, bnx2_enable_int call, and
 * start_cnic handling appear to be missing from this excerpt.
 */
676 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
678 if (atomic_dec_and_test(&bp->intr_sem)) {
679 if (netif_running(bp->dev)) {
680 netif_tx_wake_all_queues(bp->dev);
681 spin_lock_bh(&bp->phy_lock);
683 netif_carrier_on(bp->dev);
684 spin_unlock_bh(&bp->phy_lock);
685 bnx2_napi_enable(bp);
/* Free per-ring TX memory: the DMA-coherent descriptor ring and the
 * kmalloc'd software buffer-tracking ring.  Pointers are NULLed so a
 * repeated call is safe.
 */
694 bnx2_free_tx_mem(struct bnx2 *bp)
698 for (i = 0; i < bp->num_tx_rings; i++) {
699 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
700 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
702 if (txr->tx_desc_ring) {
703 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
705 txr->tx_desc_mapping);
706 txr->tx_desc_ring = NULL;
708 kfree(txr->tx_buf_ring);
709 txr->tx_buf_ring = NULL;
/* Free per-ring RX memory: the DMA descriptor page arrays for both the
 * normal RX ring and the jumbo page ring, plus their vmalloc'd software
 * tracking rings.
 */
714 bnx2_free_rx_mem(struct bnx2 *bp)
718 for (i = 0; i < bp->num_rx_rings; i++) {
719 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
720 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
723 for (j = 0; j < bp->rx_max_ring; j++) {
724 if (rxr->rx_desc_ring[j])
725 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
726 rxr->rx_desc_ring[j],
727 rxr->rx_desc_mapping[j]);
728 rxr->rx_desc_ring[j] = NULL;
730 vfree(rxr->rx_buf_ring);
731 rxr->rx_buf_ring = NULL;
733 for (j = 0; j < bp->rx_max_pg_ring; j++) {
734 if (rxr->rx_pg_desc_ring[j])
735 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
736 rxr->rx_pg_desc_ring[j],
737 rxr->rx_pg_desc_mapping[j]);
738 rxr->rx_pg_desc_ring[j] = NULL;
740 vfree(rxr->rx_pg_ring);
741 rxr->rx_pg_ring = NULL;
/* Allocate per-ring TX memory (software ring + DMA descriptor ring).
 * On failure the caller is expected to run the matching free routine;
 * NOTE(review): the -ENOMEM returns and final return 0 are missing from
 * this excerpt.
 */
746 bnx2_alloc_tx_mem(struct bnx2 *bp)
750 for (i = 0; i < bp->num_tx_rings; i++) {
751 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
752 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
754 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
755 if (txr->tx_buf_ring == NULL)
759 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
760 &txr->tx_desc_mapping, GFP_KERNEL);
761 if (txr->tx_desc_ring == NULL)
/* Allocate per-ring RX memory: vzalloc'd software rings plus one DMA
 * descriptor page per rx_max_ring (and per rx_max_pg_ring when the jumbo
 * page ring is in use).
 * NOTE(review): error returns and some allocation argument lines are
 * missing from this excerpt.
 */
768 bnx2_alloc_rx_mem(struct bnx2 *bp)
772 for (i = 0; i < bp->num_rx_rings; i++) {
773 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
774 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
778 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
779 if (rxr->rx_buf_ring == NULL)
782 for (j = 0; j < bp->rx_max_ring; j++) {
783 rxr->rx_desc_ring[j] =
784 dma_alloc_coherent(&bp->pdev->dev,
786 &rxr->rx_desc_mapping[j],
788 if (rxr->rx_desc_ring[j] == NULL)
/* The page ring is only needed when jumbo/page-mode RX is configured. */
793 if (bp->rx_pg_ring_size) {
794 rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
796 if (rxr->rx_pg_ring == NULL)
801 for (j = 0; j < bp->rx_max_pg_ring; j++) {
802 rxr->rx_pg_desc_ring[j] =
803 dma_alloc_coherent(&bp->pdev->dev,
805 &rxr->rx_pg_desc_mapping[j],
807 if (rxr->rx_pg_desc_ring[j] == NULL)
816 bnx2_free_mem(struct bnx2 *bp)
819 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
821 bnx2_free_tx_mem(bp);
822 bnx2_free_rx_mem(bp);
824 for (i = 0; i < bp->ctx_pages; i++) {
825 if (bp->ctx_blk[i]) {
826 dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
828 bp->ctx_blk_mapping[i]);
829 bp->ctx_blk[i] = NULL;
832 if (bnapi->status_blk.msi) {
833 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
834 bnapi->status_blk.msi,
835 bp->status_blk_mapping);
836 bnapi->status_blk.msi = NULL;
837 bp->stats_blk = NULL;
/* Top-level allocation of all device DMA memory.  The status block(s)
 * and the statistics block share a single coherent allocation; with
 * MSI-X capability, room is reserved for BNX2_MAX_MSIX_HW_VEC per-vector
 * status blocks at BNX2_SBLK_MSIX_ALIGN_SIZE stride.  5709 additionally
 * needs host-resident context pages.
 * NOTE(review): error-path gotos/returns and some declarations are
 * missing from this excerpt.
 */
842 bnx2_alloc_mem(struct bnx2 *bp)
844 int i, status_blk_size, err;
845 struct bnx2_napi *bnapi;
848 /* Combine status and statistics blocks into one allocation. */
849 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
850 if (bp->flags & BNX2_FLAG_MSIX_CAP)
851 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
852 BNX2_SBLK_MSIX_ALIGN_SIZE);
853 bp->status_stats_size = status_blk_size +
854 sizeof(struct statistics_block);
856 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
857 &bp->status_blk_mapping, GFP_KERNEL);
858 if (status_blk == NULL)
861 memset(status_blk, 0, bp->status_stats_size);
/* Vector 0 uses the base status block; wire its consumer pointers. */
863 bnapi = &bp->bnx2_napi[0];
864 bnapi->status_blk.msi = status_blk;
865 bnapi->hw_tx_cons_ptr =
866 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
867 bnapi->hw_rx_cons_ptr =
868 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
869 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
870 for (i = 1; i < bp->irq_nvecs; i++) {
871 struct status_block_msix *sblk;
873 bnapi = &bp->bnx2_napi[i];
/* Each extra vector gets its own aligned slice of the allocation. */
875 sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
876 bnapi->status_blk.msix = sblk;
877 bnapi->hw_tx_cons_ptr =
878 &sblk->status_tx_quick_consumer_index;
879 bnapi->hw_rx_cons_ptr =
880 &sblk->status_rx_quick_consumer_index;
/* int_num identifies this vector in INT_ACK_CMD writes. */
881 bnapi->int_num = i << 24;
/* Statistics block lives immediately after the status block(s). */
885 bp->stats_blk = status_blk + status_blk_size;
887 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
889 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
890 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
891 if (bp->ctx_pages == 0)
893 for (i = 0; i < bp->ctx_pages; i++) {
894 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
896 &bp->ctx_blk_mapping[i],
898 if (bp->ctx_blk[i] == NULL)
903 err = bnx2_alloc_rx_mem(bp);
907 err = bnx2_alloc_tx_mem(bp);
/* Report the current link state to bootcode/management firmware via the
 * shared-memory BNX2_LINK_STATUS word: speed/duplex encoding, link-up,
 * autoneg enable/complete, and parallel-detect bits.  Skipped entirely
 * when a remote-PHY manages the link.
 * NOTE(review): the link_up test, default switch case, autoneg branch
 * structure, and some braces are missing from this excerpt.
 */
919 bnx2_report_fw_link(struct bnx2 *bp)
921 u32 fw_link_status = 0;
923 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
929 switch (bp->line_speed) {
931 if (bp->duplex == DUPLEX_HALF)
932 fw_link_status = BNX2_LINK_STATUS_10HALF;
934 fw_link_status = BNX2_LINK_STATUS_10FULL;
937 if (bp->duplex == DUPLEX_HALF)
938 fw_link_status = BNX2_LINK_STATUS_100HALF;
940 fw_link_status = BNX2_LINK_STATUS_100FULL;
943 if (bp->duplex == DUPLEX_HALF)
944 fw_link_status = BNX2_LINK_STATUS_1000HALF;
946 fw_link_status = BNX2_LINK_STATUS_1000FULL;
949 if (bp->duplex == DUPLEX_HALF)
950 fw_link_status = BNX2_LINK_STATUS_2500HALF;
952 fw_link_status = BNX2_LINK_STATUS_2500FULL;
956 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
959 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
/* BMSR latches low; read twice to get the current state. */
961 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
962 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
964 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
965 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
966 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
968 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
972 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
974 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
/* Return a short transceiver description for log messages: "SerDes" for
 * fibre ports, otherwise "Remote Copper" for SerDes-flagged PHYs (the
 * plain-copper string follows on a line missing from this excerpt).
 */
978 bnx2_xceiver_str(struct bnx2 *bp)
980 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
981 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
/* Log link state to the kernel log and set netif carrier accordingly,
 * appending the negotiated flow-control directions with pr_cont, then
 * forward the state to firmware via bnx2_report_fw_link().
 * NOTE(review): the bp->link_up branch structure and some closing braces
 * are missing from this excerpt.
 */
986 bnx2_report_link(struct bnx2 *bp)
989 netif_carrier_on(bp->dev);
990 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
991 bnx2_xceiver_str(bp),
993 bp->duplex == DUPLEX_FULL ? "full" : "half");
996 if (bp->flow_ctrl & FLOW_CTRL_RX) {
997 pr_cont(", receive ");
998 if (bp->flow_ctrl & FLOW_CTRL_TX)
999 pr_cont("& transmit ");
1002 pr_cont(", transmit ");
1004 pr_cont("flow control ON");
1008 netif_carrier_off(bp->dev);
1009 netdev_err(bp->dev, "NIC %s Link is Down\n",
1010 bnx2_xceiver_str(bp));
1013 bnx2_report_fw_link(bp);
/* Resolve the pause (flow-control) configuration into bp->flow_ctrl.
 * Without full autoneg the requested setting is used directly (full
 * duplex only).  The 5708 SerdDes path reads the resolved result straight
 * from the PHY status register.  Otherwise the advertisement registers of
 * both link partners are compared per the IEEE 802.3 Annex 28B pause
 * resolution table; 1000Base-X pause bits are first translated to the
 * common ADVERTISE_PAUSE_* encoding.
 * NOTE(review): the initial bp->flow_ctrl = 0 reset, early returns, and
 * several closing braces are missing from this excerpt.
 */
1017 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1019 u32 local_adv, remote_adv;
1022 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1023 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1025 if (bp->duplex == DUPLEX_FULL) {
1026 bp->flow_ctrl = bp->req_flow_ctrl;
/* Pause is only meaningful on full-duplex links. */
1031 if (bp->duplex != DUPLEX_FULL) {
1035 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1036 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1039 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1040 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1041 bp->flow_ctrl |= FLOW_CTRL_TX;
1042 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1043 bp->flow_ctrl |= FLOW_CTRL_RX;
1047 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1048 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1050 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1051 u32 new_local_adv = 0;
1052 u32 new_remote_adv = 0;
/* Map 1000Base-X pause advertisement bits onto the copper encoding
 * so one resolution table below handles both media types. */
1054 if (local_adv & ADVERTISE_1000XPAUSE)
1055 new_local_adv |= ADVERTISE_PAUSE_CAP;
1056 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1057 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1058 if (remote_adv & ADVERTISE_1000XPAUSE)
1059 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1060 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1061 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1063 local_adv = new_local_adv;
1064 remote_adv = new_remote_adv;
1067 /* See Table 28B-3 of 802.3ab-1999 spec. */
1068 if (local_adv & ADVERTISE_PAUSE_CAP) {
1069 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1070 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1071 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1073 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1074 bp->flow_ctrl = FLOW_CTRL_RX;
1078 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1079 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1083 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1084 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1085 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1087 bp->flow_ctrl = FLOW_CTRL_TX;
/* Derive line_speed/duplex after link-up on a 5709 SerDes PHY by reading
 * the GP_STATUS top-level autoneg status word (the block address register
 * is switched to GP_STATUS and restored around the read).  With autoneg
 * disabled the forced request values are used directly.
 * NOTE(review): bp->link_up assignment, switch head, break/return lines,
 * and closing braces are missing from this excerpt.
 */
1093 bnx2_5709s_linkup(struct bnx2 *bp)
1099 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1100 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1101 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1103 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1104 bp->line_speed = bp->req_line_speed;
1105 bp->duplex = bp->req_duplex;
1108 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1110 case MII_BNX2_GP_TOP_AN_SPEED_10:
1111 bp->line_speed = SPEED_10;
1113 case MII_BNX2_GP_TOP_AN_SPEED_100:
1114 bp->line_speed = SPEED_100;
1116 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1117 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1118 bp->line_speed = SPEED_1000;
1120 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1121 bp->line_speed = SPEED_2500;
1124 if (val & MII_BNX2_GP_TOP_AN_FD)
1125 bp->duplex = DUPLEX_FULL;
1127 bp->duplex = DUPLEX_HALF;
/* Same derivation for the 5708 SerDes PHY, which reports resolved
 * speed/duplex in its 1000X_STAT1 register.
 * NOTE(review): break statements and closing braces are missing from
 * this excerpt.
 */
1132 bnx2_5708s_linkup(struct bnx2 *bp)
1137 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1138 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1139 case BCM5708S_1000X_STAT1_SPEED_10:
1140 bp->line_speed = SPEED_10;
1142 case BCM5708S_1000X_STAT1_SPEED_100:
1143 bp->line_speed = SPEED_100;
1145 case BCM5708S_1000X_STAT1_SPEED_1G:
1146 bp->line_speed = SPEED_1000;
1148 case BCM5708S_1000X_STAT1_SPEED_2G5:
1149 bp->line_speed = SPEED_2500;
1152 if (val & BCM5708S_1000X_STAT1_FD)
1153 bp->duplex = DUPLEX_FULL;
1155 bp->duplex = DUPLEX_HALF;
/* Derive speed/duplex after link-up on the 5706 SerDes PHY.  The link is
 * always 1000 Mbps; duplex comes from the forced BMCR bit when autoneg is
 * off, otherwise from the intersection of local and partner 1000Base-X
 * advertisements.
 * NOTE(review): bp->link_up assignment, some returns, and closing braces
 * are missing from this excerpt.
 */
1161 bnx2_5706s_linkup(struct bnx2 *bp)
1163 u32 bmcr, local_adv, remote_adv, common;
1166 bp->line_speed = SPEED_1000;
1168 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1169 if (bmcr & BMCR_FULLDPLX) {
1170 bp->duplex = DUPLEX_FULL;
1173 bp->duplex = DUPLEX_HALF;
1176 if (!(bmcr & BMCR_ANENABLE)) {
1180 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1181 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1183 common = local_adv & remote_adv;
1184 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1186 if (common & ADVERTISE_1000XFULL) {
1187 bp->duplex = DUPLEX_FULL;
1190 bp->duplex = DUPLEX_HALF;
/* Derive speed/duplex after link-up on a copper PHY.  With autoneg on,
 * the 1000 Mbps result comes from CTRL1000/STAT1000 (partner bits are
 * shifted by 2 to align with the local advertisement layout), then
 * 100/10 from the standard ADV/LPA intersection.  With autoneg off, the
 * forced BMCR speed/duplex bits are decoded directly.
 * NOTE(review): the else branch joining the two halves and several
 * closing braces are missing from this excerpt.
 */
1198 bnx2_copper_linkup(struct bnx2 *bp)
1202 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1203 if (bmcr & BMCR_ANENABLE) {
1204 u32 local_adv, remote_adv, common;
1206 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1207 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
/* STAT1000 LP bits sit 2 positions above the CTRL1000 bits. */
1209 common = local_adv & (remote_adv >> 2);
1210 if (common & ADVERTISE_1000FULL) {
1211 bp->line_speed = SPEED_1000;
1212 bp->duplex = DUPLEX_FULL;
1214 else if (common & ADVERTISE_1000HALF) {
1215 bp->line_speed = SPEED_1000;
1216 bp->duplex = DUPLEX_HALF;
1219 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1220 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1222 common = local_adv & remote_adv;
1223 if (common & ADVERTISE_100FULL) {
1224 bp->line_speed = SPEED_100;
1225 bp->duplex = DUPLEX_FULL;
1227 else if (common & ADVERTISE_100HALF) {
1228 bp->line_speed = SPEED_100;
1229 bp->duplex = DUPLEX_HALF;
1231 else if (common & ADVERTISE_10FULL) {
1232 bp->line_speed = SPEED_10;
1233 bp->duplex = DUPLEX_FULL;
1235 else if (common & ADVERTISE_10HALF) {
1236 bp->line_speed = SPEED_10;
1237 bp->duplex = DUPLEX_HALF;
/* Autoneg disabled: decode the forced speed/duplex from BMCR. */
1246 if (bmcr & BMCR_SPEED100) {
1247 bp->line_speed = SPEED_100;
1250 bp->line_speed = SPEED_10;
1252 if (bmcr & BMCR_FULLDPLX) {
1253 bp->duplex = DUPLEX_FULL;
1256 bp->duplex = DUPLEX_HALF;
/* Program the L2 RX context for connection @cid: set the BD-chain context
 * type/size and enable hardware pause generation when TX flow control is
 * negotiated.
 */
1264 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1266 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1268 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1269 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1272 if (bp->flow_ctrl & FLOW_CTRL_TX)
1273 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1275 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
/* Apply bnx2_init_rx_context() to every RX ring, one CID per ring
 * starting at RX_CID.
 */
1279 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1284 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1287 bnx2_init_rx_context(bp, cid);
/* Program the EMAC to match the negotiated link: inter-frame gap/slot
 * time tweak for 1000HD, port mode (MII/GMII/2.5G) by speed, duplex bit,
 * RX/TX pause enables, and finally ack the link-change interrupt and
 * refresh the RX contexts so their flow-control enable tracks the new
 * state.
 * NOTE(review): some switch case labels, break statements, and braces are
 * missing from this excerpt.
 */
1292 bnx2_set_mac_link(struct bnx2 *bp)
1296 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
/* 1000 Mbps half duplex needs a larger slot-time value. */
1297 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1298 (bp->duplex == DUPLEX_HALF)) {
1299 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1302 /* Configure the EMAC mode register. */
1303 val = REG_RD(bp, BNX2_EMAC_MODE);
1305 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1306 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1307 BNX2_EMAC_MODE_25G_MODE);
1310 switch (bp->line_speed) {
/* 5706 cannot use the dedicated 10M MII mode. */
1312 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1313 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1318 val |= BNX2_EMAC_MODE_PORT_MII;
1321 val |= BNX2_EMAC_MODE_25G_MODE;
1324 val |= BNX2_EMAC_MODE_PORT_GMII;
1329 val |= BNX2_EMAC_MODE_PORT_GMII;
1332 /* Set the MAC to operate in the appropriate duplex mode. */
1333 if (bp->duplex == DUPLEX_HALF)
1334 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1335 REG_WR(bp, BNX2_EMAC_MODE, val);
1337 /* Enable/disable rx PAUSE. */
1338 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1340 if (bp->flow_ctrl & FLOW_CTRL_RX)
1341 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1342 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1344 /* Enable/disable tx PAUSE. */
1345 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1346 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1348 if (bp->flow_ctrl & FLOW_CTRL_TX)
1349 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1350 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1352 /* Acknowledge the interrupt. */
1353 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1355 bnx2_init_all_rx_contexts(bp);
/* On 5709 SerDes, the "BMSR1" status lives in the GP_STATUS block, so
 * switch the PHY block-address register there before reading it...
 */
1359 bnx2_enable_bmsr1(struct bnx2 *bp)
1361 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1362 (CHIP_NUM(bp) == CHIP_NUM_5709))
1363 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1364 MII_BNX2_BLK_ADDR_GP_STATUS);
/* ...and restore the default COMBO_IEEEB0 block afterwards.  Both are
 * no-ops on non-5709 or non-SerDes PHYs. */
1368 bnx2_disable_bmsr1(struct bnx2 *bp)
1370 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1371 (CHIP_NUM(bp) == CHIP_NUM_5709))
1372 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1373 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
/* Enable 2.5G advertisement (UP1 register bit) on 2.5G-capable SerDes
 * PHYs; on 5709 the OVER1G block must be selected around the UP1 access.
 * Also adds 2500baseX to the software advertising mask under autoneg.
 * NOTE(review): the return value ("did it change") handling and early
 * return are missing from this excerpt.
 */
1377 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1382 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1385 if (bp->autoneg & AUTONEG_SPEED)
1386 bp->advertising |= ADVERTISED_2500baseX_Full;
1388 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1389 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1391 bnx2_read_phy(bp, bp->mii_up1, &up1);
1392 if (!(up1 & BCM5708S_UP1_2G5)) {
1393 up1 |= BCM5708S_UP1_2G5;
1394 bnx2_write_phy(bp, bp->mii_up1, up1);
1398 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1399 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1400 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
/* Inverse of the above: clear the 2.5G advertisement bit in UP1, with
 * the same 5709 block-address dance.
 */
1406 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1411 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1414 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1415 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1417 bnx2_read_phy(bp, bp->mii_up1, &up1);
1418 if (up1 & BCM5708S_UP1_2G5) {
1419 up1 &= ~BCM5708S_UP1_2G5;
1420 bnx2_write_phy(bp, bp->mii_up1, up1);
1424 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1425 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1426 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
/* Force the SerDes link to 2.5G (no autoneg) on 2.5G-capable PHYs.
 * 5709: program SERDES_DIG MISC1 force bits; 5708: set the FORCE_2500
 * bit directly in the (shadowed) BMCR.  Finally, if autoneg was
 * requested, strip ANENABLE and apply the requested duplex before
 * writing BMCR back.
 */
1432 bnx2_enable_forced_2g5(struct bnx2 *bp)
1434 u32 uninitialized_var(bmcr);
1437 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1440 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
/* Select the SERDES_DIG block to reach the MISC1 force register. */
1443 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1444 MII_BNX2_BLK_ADDR_SERDES_DIG);
1445 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1446 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1447 val |= MII_BNX2_SD_MISC1_FORCE |
1448 MII_BNX2_SD_MISC1_FORCE_2_5G;
1449 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
/* Restore the default register block before the BMCR access. */
1452 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1453 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1454 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1456 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1457 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1459 bmcr |= BCM5708S_BMCR_FORCE_2500;
1467 if (bp->autoneg & AUTONEG_SPEED) {
/* Forced speed: autoneg must be off; honor requested duplex. */
1468 bmcr &= ~BMCR_ANENABLE;
1469 if (bp->req_duplex == DUPLEX_FULL)
1470 bmcr |= BMCR_FULLDPLX;
1472 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
/* Undo bnx2_enable_forced_2g5(): clear the 2.5G force bits and, when
 * autoneg is requested, re-enable and restart autonegotiation at 1G.
 */
1476 bnx2_disable_forced_2g5(struct bnx2 *bp)
1478 u32 uninitialized_var(bmcr);
1481 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1484 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
/* Clear the force bit in the SERDES_DIG MISC1 register. */
1487 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1488 MII_BNX2_BLK_ADDR_SERDES_DIG);
1489 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1490 val &= ~MII_BNX2_SD_MISC1_FORCE;
1491 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1494 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1495 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1496 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1498 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1499 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1501 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1509 if (bp->autoneg & AUTONEG_SPEED)
/* Back to autoneg: advertise 1000 and restart negotiation. */
1510 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1511 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
/* Force the 5706 SerDes link down (start != 0) or release the forced-down
 * state (start == 0) by read-modify-writing the SERDES_CTL expansion
 * register through the DSP access port.  Which write corresponds to which
 * branch of @start is not visible in this extract — TODO confirm.
 */
1515 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1519 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1520 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val)
1522 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1524 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
/* Re-evaluate the link state from the PHY and program the MAC to match.
 * Reads BMSR twice (latched status register: first read returns the
 * latched value, second the current one), applies 5706-SerDes specific
 * workarounds, dispatches to the per-chip linkup handler, and reports
 * any link-state transition.  Caller is expected to hold phy_lock —
 * NOTE(review): not visible in this extract, confirm against callers.
 */
1528 bnx2_set_link(struct bnx2 *bp)
1533 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1538 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1541 link_up = bp->link_up;
/* bmsr1 is latched; read twice to get the current link status. */
1543 bnx2_enable_bmsr1(bp);
1544 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1545 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1546 bnx2_disable_bmsr1(bp);
1548 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1549 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
/* 5706 SerDes: derive link from EMAC status + AN debug shadow
 * register instead of trusting BMSR alone.
 */
1552 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1553 bnx2_5706s_force_link_dn(bp, 0);
1554 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1556 val = REG_RD(bp, BNX2_EMAC_STATUS);
1558 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1559 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1560 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1562 if ((val & BNX2_EMAC_STATUS_LINK) &&
1563 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1564 bmsr |= BMSR_LSTATUS;
1566 bmsr &= ~BMSR_LSTATUS;
1569 if (bmsr & BMSR_LSTATUS) {
/* Link is up: resolve speed/duplex via the chip-specific path. */
1572 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1573 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1574 bnx2_5706s_linkup(bp);
1575 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1576 bnx2_5708s_linkup(bp);
1577 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1578 bnx2_5709s_linkup(bp);
1581 bnx2_copper_linkup(bp);
1583 bnx2_resolve_flow_ctrl(bp);
/* Link is down: drop any forced 2.5G and re-enable autoneg if we
 * had locked on via parallel detection.
 */
1586 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1587 (bp->autoneg & AUTONEG_SPEED))
1588 bnx2_disable_forced_2g5(bp);
1590 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1593 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1594 bmcr |= BMCR_ANENABLE;
1595 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1597 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1602 if (bp->link_up != link_up) {
1603 bnx2_report_link(bp);
1606 bnx2_set_mac_link(bp);
/* Reset the PHY by writing BMCR_RESET and polling until the self-clearing
 * reset bit drops.  Presumably returns 0 on success and an error code when
 * the PHY does not come out of reset within PHY_RESET_MAX_WAIT iterations;
 * the return statements are not visible in this extract — TODO confirm.
 */
1612 bnx2_reset_phy(struct bnx2 *bp)
1617 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1619 #define PHY_RESET_MAX_WAIT 100
1620 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
/* Fixed mojibake: the argument had been corrupted to "®"; the PHY read
 * takes the address of the local status variable.
 */
1623 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1624 if (!(reg & BMCR_RESET)) {
1629 if (i == PHY_RESET_MAX_WAIT) {
/* Translate the requested flow-control setting (req_flow_ctrl) into the
 * MII autoneg advertisement bits for pause frames.  SerDes PHYs use the
 * 1000BASE-X pause bits; copper PHYs use the standard PAUSE_CAP/ASYM
 * bits.  Returns the computed advertisement value via @adv (return not
 * visible in this extract).
 */
1636 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1640 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1641 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
/* Symmetric pause requested in both directions. */
1643 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644 adv = ADVERTISE_1000XPAUSE;
1647 adv = ADVERTISE_PAUSE_CAP;
1650 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
/* TX-only pause: asymmetric advertisement. */
1651 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652 adv = ADVERTISE_1000XPSE_ASYM;
1655 adv = ADVERTISE_PAUSE_ASYM;
1658 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
/* RX-only pause: advertise both symmetric and asymmetric. */
1659 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1660 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1663 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1669 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
/* Configure link parameters on a firmware-managed ("remote") PHY: encode
 * speed/duplex/autoneg/pause settings into a NETLINK_SET_LINK argument
 * word, write it to the shared-memory mailbox, and issue the SET_LINK
 * firmware command.  Drops and re-acquires phy_lock around the firmware
 * sync (see __releases/__acquires annotations).
 */
1672 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1673 __releases(&bp->phy_lock)
1674 __acquires(&bp->phy_lock)
1676 u32 speed_arg = 0, pause_adv;
1678 pause_adv = bnx2_phy_get_pause_adv(bp);
1680 if (bp->autoneg & AUTONEG_SPEED) {
/* Autoneg: OR together every advertised speed. */
1681 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1682 if (bp->advertising & ADVERTISED_10baseT_Half)
1683 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1684 if (bp->advertising & ADVERTISED_10baseT_Full)
1685 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1686 if (bp->advertising & ADVERTISED_100baseT_Half)
1687 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1688 if (bp->advertising & ADVERTISED_100baseT_Full)
1689 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1690 if (bp->advertising & ADVERTISED_1000baseT_Full)
1691 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1692 if (bp->advertising & ADVERTISED_2500baseX_Full)
1693 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
/* Forced speed: encode exactly one speed/duplex combination. */
1695 if (bp->req_line_speed == SPEED_2500)
1696 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1697 else if (bp->req_line_speed == SPEED_1000)
1698 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1699 else if (bp->req_line_speed == SPEED_100) {
1700 if (bp->req_duplex == DUPLEX_FULL)
1701 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1703 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1704 } else if (bp->req_line_speed == SPEED_10) {
1705 if (bp->req_duplex == DUPLEX_FULL)
1706 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1708 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1712 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1713 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1714 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1715 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1717 if (port == PORT_TP)
1718 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1719 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1721 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
/* The firmware sync may sleep-wait; cannot hold phy_lock across it. */
1723 spin_unlock_bh(&bp->phy_lock);
1724 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1725 spin_lock_bh(&bp->phy_lock);
/* Configure a SerDes PHY for the requested link settings.  Delegates to
 * the remote-PHY path when firmware manages the PHY.  Otherwise handles
 * two cases: forced speed (autoneg off) — program BMCR/advertisement
 * directly, forcing a visible link-down when settings change — and
 * autoneg — rewrite the advertisement and restart negotiation, arming a
 * timer to speed up link-up against non-autoneg partners.  Temporarily
 * drops phy_lock in both link-down paths (see annotations).
 */
1731 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1732 __releases(&bp->phy_lock)
1733 __acquires(&bp->phy_lock)
1738 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1739 return bnx2_setup_remote_phy(bp, port);
1741 if (!(bp->autoneg & AUTONEG_SPEED)) {
/* Forced-speed path. */
1743 int force_link_down = 0;
1745 if (bp->req_line_speed == SPEED_2500) {
1746 if (!bnx2_test_and_enable_2g5(bp))
1747 force_link_down = 1;
1748 } else if (bp->req_line_speed == SPEED_1000) {
1749 if (bnx2_test_and_disable_2g5(bp))
1750 force_link_down = 1;
1752 bnx2_read_phy(bp, bp->mii_adv, &adv);
1753 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1755 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1756 new_bmcr = bmcr & ~BMCR_ANENABLE;
1757 new_bmcr |= BMCR_SPEED1000;
1759 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1760 if (bp->req_line_speed == SPEED_2500)
1761 bnx2_enable_forced_2g5(bp);
1762 else if (bp->req_line_speed == SPEED_1000) {
1763 bnx2_disable_forced_2g5(bp);
/* NOTE(review): 0x2000 looks like a chip-specific BMCR bit
 * cleared for forced 1G on 5709 — confirm against the datasheet.
 */
1764 new_bmcr &= ~0x2000;
1767 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1768 if (bp->req_line_speed == SPEED_2500)
1769 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1771 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1774 if (bp->req_duplex == DUPLEX_FULL) {
1775 adv |= ADVERTISE_1000XFULL;
1776 new_bmcr |= BMCR_FULLDPLX;
1779 adv |= ADVERTISE_1000XHALF;
1780 new_bmcr &= ~BMCR_FULLDPLX;
1782 if ((new_bmcr != bmcr) || (force_link_down)) {
1783 /* Force a link down visible on the other side */
1785 bnx2_write_phy(bp, bp->mii_adv, adv &
1786 ~(ADVERTISE_1000XFULL |
1787 ADVERTISE_1000XHALF));
1788 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1789 BMCR_ANRESTART | BMCR_ANENABLE);
1792 netif_carrier_off(bp->dev);
1793 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1794 bnx2_report_link(bp);
1796 bnx2_write_phy(bp, bp->mii_adv, adv);
1797 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1799 bnx2_resolve_flow_ctrl(bp);
1800 bnx2_set_mac_link(bp);
/* Autoneg path. */
1805 bnx2_test_and_enable_2g5(bp);
1807 if (bp->advertising & ADVERTISED_1000baseT_Full)
1808 new_adv |= ADVERTISE_1000XFULL;
1810 new_adv |= bnx2_phy_get_pause_adv(bp);
1812 bnx2_read_phy(bp, bp->mii_adv, &adv);
1813 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1815 bp->serdes_an_pending = 0;
1816 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1817 /* Force a link down visible on the other side */
1819 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1820 spin_unlock_bh(&bp->phy_lock);
1822 spin_lock_bh(&bp->phy_lock);
1825 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1826 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1828 /* Speed up link-up time when the link partner
1829 * does not autonegotiate which is very common
1830 * in blade servers. Some blade servers use
1831 * IPMI for keyboard input and it's important
1832 * to minimize link disruptions. Autoneg. involves
1833 * exchanging base pages plus 3 next pages and
1834 * normally completes in about 120 msec.
1836 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1837 bp->serdes_an_pending = 1;
1838 mod_timer(&bp->timer, jiffies + bp->current_interval);
1840 bnx2_resolve_flow_ctrl(bp);
1841 bnx2_set_mac_link(bp);
/* Advertisement-mask helpers: the full set of speeds advertised by
 * default for fibre (conditional on 2.5G capability, evaluates bp in the
 * expansion context) and copper links, plus the raw MII advertisement
 * bit groups for 10/100 and 1000 Mbps.
 */
1847 #define ETHTOOL_ALL_FIBRE_SPEED \
1848 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1849 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1850 (ADVERTISED_1000baseT_Full)
1852 #define ETHTOOL_ALL_COPPER_SPEED \
1853 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1854 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1855 ADVERTISED_1000baseT_Full)
1857 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1858 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1860 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
/* Read the firmware's default link configuration for a remote PHY from
 * shared memory (copper or SerDes word depending on phy_port) and decode
 * it into the driver's autoneg/advertising or forced speed/duplex state.
 */
1863 bnx2_set_default_remote_link(struct bnx2 *bp)
1867 if (bp->phy_port == PORT_TP)
1868 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1870 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1872 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
/* Autoneg default: rebuild the advertising mask bit by bit. */
1873 bp->req_line_speed = 0;
1874 bp->autoneg |= AUTONEG_SPEED;
1875 bp->advertising = ADVERTISED_Autoneg;
1876 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1877 bp->advertising |= ADVERTISED_10baseT_Half;
1878 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1879 bp->advertising |= ADVERTISED_10baseT_Full;
1880 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1881 bp->advertising |= ADVERTISED_100baseT_Half;
1882 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1883 bp->advertising |= ADVERTISED_100baseT_Full;
1884 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1885 bp->advertising |= ADVERTISED_1000baseT_Full;
1886 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1887 bp->advertising |= ADVERTISED_2500baseX_Full;
/* Forced default: decode one speed, full duplex unless a HALF bit
 * accompanies the speed.
 */
1890 bp->advertising = 0;
1891 bp->req_duplex = DUPLEX_FULL;
1892 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1893 bp->req_line_speed = SPEED_10;
1894 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1895 bp->req_duplex = DUPLEX_HALF;
1897 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1898 bp->req_line_speed = SPEED_100;
1899 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1900 bp->req_duplex = DUPLEX_HALF;
1902 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1903 bp->req_line_speed = SPEED_1000;
1904 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1905 bp->req_line_speed = SPEED_2500;
/* Initialize the driver's default link settings: remote PHYs take theirs
 * from firmware; local PHYs default to full autoneg, with SerDes links
 * optionally forced to 1G full duplex when the hardware-config word in
 * shared memory says so.
 */
1910 bnx2_set_default_link(struct bnx2 *bp)
1912 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1913 bnx2_set_default_remote_link(bp);
1917 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1918 bp->req_line_speed = 0;
1919 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1922 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
/* Hardware config may force a 1G default on SerDes ports. */
1924 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1925 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1926 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928 bp->req_line_speed = bp->line_speed = SPEED_1000;
1929 bp->req_duplex = DUPLEX_FULL;
1932 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
/* Write the next driver-pulse sequence number to the firmware's pulse
 * mailbox so the bootcode knows the driver is alive.  Uses the indirect
 * register window under indirect_lock since the window address register
 * is shared state.
 */
1936 bnx2_send_heart_beat(struct bnx2 *bp)
1941 spin_lock(&bp->indirect_lock);
1942 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1943 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1944 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1945 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1946 spin_unlock(&bp->indirect_lock);
/* Handle a link event from a firmware-managed PHY: read the link status
 * word from shared memory, answer any heartbeat request, decode
 * speed/duplex/flow-control, switch phy_port if the medium changed, and
 * report + apply the new link state to the MAC.
 */
1950 bnx2_remote_phy_event(struct bnx2 *bp)
1953 u8 link_up = bp->link_up;
1956 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1958 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1959 bnx2_send_heart_beat(bp);
1961 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1963 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1969 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1970 bp->duplex = DUPLEX_FULL;
/* Each xxHALF case sets half duplex then falls through to the
 * matching speed assignment (break lines not visible in this extract).
 */
1972 case BNX2_LINK_STATUS_10HALF:
1973 bp->duplex = DUPLEX_HALF;
/* fall through */
1974 case BNX2_LINK_STATUS_10FULL:
1975 bp->line_speed = SPEED_10;
1977 case BNX2_LINK_STATUS_100HALF:
1978 bp->duplex = DUPLEX_HALF;
/* fall through */
1979 case BNX2_LINK_STATUS_100BASE_T4:
1980 case BNX2_LINK_STATUS_100FULL:
1981 bp->line_speed = SPEED_100;
1983 case BNX2_LINK_STATUS_1000HALF:
1984 bp->duplex = DUPLEX_HALF;
/* fall through */
1985 case BNX2_LINK_STATUS_1000FULL:
1986 bp->line_speed = SPEED_1000;
1988 case BNX2_LINK_STATUS_2500HALF:
1989 bp->duplex = DUPLEX_HALF;
/* fall through */
1990 case BNX2_LINK_STATUS_2500FULL:
1991 bp->line_speed = SPEED_2500;
1999 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2000 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
/* Not fully autonegotiated: use the requested flow control. */
2001 if (bp->duplex == DUPLEX_FULL)
2002 bp->flow_ctrl = bp->req_flow_ctrl;
2004 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2005 bp->flow_ctrl |= FLOW_CTRL_TX;
2006 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2007 bp->flow_ctrl |= FLOW_CTRL_RX;
2010 old_port = bp->phy_port;
2011 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2012 bp->phy_port = PORT_FIBRE;
2014 bp->phy_port = PORT_TP;
2016 if (old_port != bp->phy_port)
2017 bnx2_set_default_link(bp);
2020 if (bp->link_up != link_up)
2021 bnx2_report_link(bp);
2023 bnx2_set_mac_link(bp);
/* Dispatch a firmware event from the event-code mailbox: link events go
 * to bnx2_remote_phy_event(); timer-expiration events just answer with a
 * heartbeat.
 */
2027 bnx2_set_remote_link(struct bnx2 *bp)
2031 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2033 case BNX2_FW_EVT_CODE_LINK_EVENT:
2034 bnx2_remote_phy_event(bp);
2036 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2038 bnx2_send_heart_beat(bp);
/* Configure a copper PHY for the requested link settings.  Autoneg path:
 * compute new advertisement registers and restart negotiation only if
 * they differ from what is programmed.  Forced path: build the new BMCR,
 * force a brief link-down if the link is up so the partner notices the
 * change, and program the new speed/duplex.  Drops phy_lock around the
 * forced link-down wait (see annotations).
 */
2045 bnx2_setup_copper_phy(struct bnx2 *bp)
2046 __releases(&bp->phy_lock)
2047 __acquires(&bp->phy_lock)
2052 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2054 if (bp->autoneg & AUTONEG_SPEED) {
/* Autoneg path. */
2055 u32 adv_reg, adv1000_reg;
2057 u32 new_adv1000 = 0;
2059 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2060 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2061 ADVERTISE_PAUSE_ASYM);
2063 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2064 adv1000_reg &= PHY_ALL_1000_SPEED;
2066 new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
2067 new_adv |= ADVERTISE_CSMA;
2068 new_adv |= bnx2_phy_get_pause_adv(bp);
2070 new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2072 if ((adv1000_reg != new_adv1000) ||
2073 (adv_reg != new_adv) ||
2074 ((bmcr & BMCR_ANENABLE) == 0)) {
/* Advertisement changed (or autoneg was off): reprogram
 * and restart negotiation.
 */
2076 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2077 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2078 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2081 else if (bp->link_up) {
2082 /* Flow ctrl may have changed from auto to forced */
2083 /* or vice-versa. */
2085 bnx2_resolve_flow_ctrl(bp);
2086 bnx2_set_mac_link(bp);
/* Forced-speed path. */
2092 if (bp->req_line_speed == SPEED_100) {
2093 new_bmcr |= BMCR_SPEED100;
2095 if (bp->req_duplex == DUPLEX_FULL) {
2096 new_bmcr |= BMCR_FULLDPLX;
2098 if (new_bmcr != bmcr) {
/* Latched BMSR: read twice to get current link status. */
2101 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2102 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2104 if (bmsr & BMSR_LSTATUS) {
2105 /* Force link down */
2106 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2107 spin_unlock_bh(&bp->phy_lock);
2109 spin_lock_bh(&bp->phy_lock);
2111 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2112 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2115 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2117 /* Normally, the new speed is setup after the link has
2118 * gone down and up again. In some cases, link will not go
2119 * down so we need to set up the new speed here.
2121 if (bmsr & BMSR_LSTATUS) {
2122 bp->line_speed = bp->req_line_speed;
2123 bp->duplex = bp->req_duplex;
2124 bnx2_resolve_flow_ctrl(bp);
2125 bnx2_set_mac_link(bp);
2128 bnx2_resolve_flow_ctrl(bp);
2129 bnx2_set_mac_link(bp);
/* Top-level PHY setup dispatcher: no-op under MAC loopback, otherwise
 * route to the SerDes or copper setup path.  Both callees may drop and
 * re-acquire phy_lock, hence the annotations here as well.
 */
2135 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2136 __releases(&bp->phy_lock)
2137 __acquires(&bp->phy_lock)
2139 if (bp->loopback == MAC_LOOPBACK)
2142 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2143 return bnx2_setup_serdes_phy(bp, port);
2146 return bnx2_setup_copper_phy(bp);
/* Initialize a 5709 SerDes PHY.  The 5709 places the standard MII
 * registers at offset 0x10 within its register blocks, so the mii_*
 * shadow offsets are remapped first; then fibre mode, 2.5G capability,
 * next-page and CL73 BAM autoneg features are programmed block by block.
 */
2151 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2155 bp->mii_bmcr = MII_BMCR + 0x10;
2156 bp->mii_bmsr = MII_BMSR + 0x10;
2157 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2158 bp->mii_adv = MII_ADVERTISE + 0x10;
2159 bp->mii_lpa = MII_LPA + 0x10;
2160 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
/* Route register accesses to the autoneg MMD via the AER block. */
2162 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2163 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2165 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
/* Force fibre mode, disabling auto-detection of the medium. */
2169 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2171 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2172 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2173 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2174 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
/* Enable or disable 2.5G in UP1 according to PHY capability. */
2176 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2177 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2178 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2179 val |= BCM5708S_UP1_2G5;
2181 val &= ~BCM5708S_UP1_2G5;
2182 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2184 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2185 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2186 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2187 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2189 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2191 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2192 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2193 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
/* Leave the block address at the default for later MII accesses. */
2195 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
/* Initialize a 5708 SerDes PHY: select IEEE register compatibility and
 * fibre auto-detect mode, enable parallel (PLL) detection and 2.5G when
 * capable, and apply board-specific TX amplitude tuning for early chip
 * revisions and backplane designs.
 */
2201 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2208 bp->mii_up1 = BCM5708S_UP1;
2210 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2211 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2212 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2214 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2215 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2216 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2218 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2219 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2220 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2222 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2223 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2224 val |= BCM5708S_UP1_2G5;
2225 bnx2_write_phy(bp, BCM5708S_UP1, val);
2228 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2229 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2230 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2231 /* increase tx signal amplitude */
2232 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2233 BCM5708S_BLK_ADDR_TX_MISC);
2234 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2235 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2236 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2237 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
/* Board-specific TX control from NVRAM: applied only on backplane
 * designs.
 */
2240 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2241 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2246 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2247 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2248 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2249 BCM5708S_BLK_ADDR_TX_MISC);
2250 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2251 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2252 BCM5708S_BLK_ADDR_DIG);
/* Initialize a 5706 SerDes PHY.  Clears parallel-detect bookkeeping,
 * applies a 5706-specific GP_HW_CTL0 setting, and tunes undocumented PHY
 * registers (0x18/0x1c) differently for jumbo (mtu > 1500) vs standard
 * frames.  The magic constants come from vendor tuning — NOTE(review):
 * exact bit meanings are not documented here.
 */
2259 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2264 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2266 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2267 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2269 if (bp->dev->mtu > 1500) {
2272 /* Set extended packet length bit */
2273 bnx2_write_phy(bp, 0x18, 0x7);
2274 bnx2_read_phy(bp, 0x18, &val);
2275 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2277 bnx2_write_phy(bp, 0x1c, 0x6c00);
2278 bnx2_read_phy(bp, 0x1c, &val);
2279 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
/* Standard MTU: clear the extended-length configuration. */
2284 bnx2_write_phy(bp, 0x18, 0x7);
2285 bnx2_read_phy(bp, 0x18, &val);
2286 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2288 bnx2_write_phy(bp, 0x1c, 0x6c00);
2289 bnx2_read_phy(bp, 0x1c, &val);
2290 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
/* Initialize a copper PHY: apply the CRC-fix register sequence when the
 * board needs it, optionally disable early DAC, configure extended packet
 * length for jumbo frames, and enable ethernet@wirespeed (link at lower
 * speeds over marginal cables).  Registers 0x10/0x15/0x17/0x18/0x1c are
 * vendor-specific; the write sequences come from Broadcom tuning.
 */
2297 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2304 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
/* Vendor-prescribed DSP write sequence for the CRC workaround. */
2305 bnx2_write_phy(bp, 0x18, 0x0c00);
2306 bnx2_write_phy(bp, 0x17, 0x000a);
2307 bnx2_write_phy(bp, 0x15, 0x310b);
2308 bnx2_write_phy(bp, 0x17, 0x201f);
2309 bnx2_write_phy(bp, 0x15, 0x9506);
2310 bnx2_write_phy(bp, 0x17, 0x401f);
2311 bnx2_write_phy(bp, 0x15, 0x14e2);
2312 bnx2_write_phy(bp, 0x18, 0x0400);
2315 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2316 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2317 MII_BNX2_DSP_EXPAND_REG | 0x8);
2318 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2320 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2323 if (bp->dev->mtu > 1500) {
2324 /* Set extended packet length bit */
2325 bnx2_write_phy(bp, 0x18, 0x7);
2326 bnx2_read_phy(bp, 0x18, &val);
2327 bnx2_write_phy(bp, 0x18, val | 0x4000);
2329 bnx2_read_phy(bp, 0x10, &val);
2330 bnx2_write_phy(bp, 0x10, val | 0x1);
/* Standard MTU: clear extended-length bits. */
2333 bnx2_write_phy(bp, 0x18, 0x7);
2334 bnx2_read_phy(bp, 0x18, &val);
2335 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2337 bnx2_read_phy(bp, 0x10, &val);
2338 bnx2_write_phy(bp, 0x10, val & ~0x1);
2341 /* ethernet@wirespeed */
2342 bnx2_write_phy(bp, 0x18, 0x7007);
2343 bnx2_read_phy(bp, 0x18, &val);
2344 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
/* Common PHY initialization entry point: set default MII register
 * offsets and interrupt mode, read the PHY ID (skipped for remote PHYs),
 * dispatch to the chip-specific init routine, then program the link via
 * bnx2_setup_phy().  May drop phy_lock transitively through
 * bnx2_setup_phy() (see annotations).
 */
2350 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2351 __releases(&bp->phy_lock)
2352 __acquires(&bp->phy_lock)
2357 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2358 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
/* Default (standard) MII register map; 5709 SerDes overrides these. */
2360 bp->mii_bmcr = MII_BMCR;
2361 bp->mii_bmsr = MII_BMSR;
2362 bp->mii_bmsr1 = MII_BMSR;
2363 bp->mii_adv = MII_ADVERTISE;
2364 bp->mii_lpa = MII_LPA;
2366 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2368 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2371 bnx2_read_phy(bp, MII_PHYSID1, &val);
2372 bp->phy_id = val << 16;
2373 bnx2_read_phy(bp, MII_PHYSID2, &val);
2374 bp->phy_id |= val & 0xffff;
2376 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2377 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2378 rc = bnx2_init_5706s_phy(bp, reset_phy);
2379 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2380 rc = bnx2_init_5708s_phy(bp, reset_phy);
2381 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2382 rc = bnx2_init_5709s_phy(bp, reset_phy);
2385 rc = bnx2_init_copper_phy(bp, reset_phy);
2390 rc = bnx2_setup_phy(bp, bp->phy_port);
/* Put the EMAC into internal MAC loopback with a forced link, clearing
 * the port-mode field first.  Used by the loopback self-test.
 */
2396 bnx2_set_mac_loopback(struct bnx2 *bp)
2400 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2401 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2402 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2403 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2408 static int bnx2_test_link(struct bnx2 *);
/* Put the PHY into loopback (full duplex, 1000 Mb/s per the BMCR write),
 * wait up to 10 polls for the link test to pass, then clear conflicting
 * EMAC mode bits and select GMII for the loopback self-test.
 */
2411 bnx2_set_phy_loopback(struct bnx2 *bp)
2416 spin_lock_bh(&bp->phy_lock);
2417 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2419 spin_unlock_bh(&bp->phy_lock);
2423 for (i = 0; i < 10; i++) {
2424 if (bnx2_test_link(bp) == 0)
2429 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2430 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2431 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2432 BNX2_EMAC_MODE_25G_MODE);
2434 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2435 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
/* Dump management-CPU (MCP) and shared-memory state to the kernel log
 * for firmware-hang diagnosis (called e.g. on fw_sync timeout).
 */
2441 bnx2_dump_mcp_state(struct bnx2 *bp)
2443 struct net_device *dev = bp->dev;
2446 netdev_err(dev, "<--- start MCP states dump --->\n");
/* MCP state registers live at different offsets on pre-5709 chips. */
2447 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2448 mcp_p0 = BNX2_MCP_STATE_P0;
2449 mcp_p1 = BNX2_MCP_STATE_P1;
2451 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2452 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2454 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2455 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2456 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2457 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2458 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2459 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
/* Program counter is read twice — presumably two samples to show
 * whether the MCP is making progress; confirm against upstream.
 */
2460 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2461 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2462 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2463 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2464 netdev_err(dev, "DEBUG: shmem states:\n");
2465 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2466 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2467 bnx2_shmem_rd(bp, BNX2_FW_MB),
2468 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2469 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2470 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2471 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2472 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE))
2473 pr_cont(" condition[%08x]\n",
2474 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2475 DP_SHMEM_LINE(bp, 0x3cc);
2476 DP_SHMEM_LINE(bp, 0x3dc);
2477 DP_SHMEM_LINE(bp, 0x3ec);
2478 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2479 netdev_err(dev, "<--- end MCP states dump --->\n");
/* Send a command to the bootcode firmware through the driver mailbox and,
 * when @ack is set, poll up to BNX2_FW_ACK_TIME_OUT_MS for the matching
 * sequence number in the firmware mailbox.  On timeout, notify firmware
 * with a FW_TIMEOUT code (unless the command was a WAIT0 heartbeat or
 * @silent suppresses it) and dump MCP state for diagnosis.
 */
2483 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2489 msg_data |= bp->fw_wr_seq;
2491 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2496 /* wait for an acknowledgement. */
2497 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2500 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2502 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2505 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2508 /* If we timed out, inform the firmware that this is the case. */
2509 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2510 msg_data &= ~BNX2_DRV_MSG_CODE;
2511 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2513 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2515 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2516 bnx2_dump_mcp_state(bp);
/* Firmware acknowledged but reported a non-OK status. */
2522 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
/* Initialize the 5709's host-based context memory: trigger the hardware
 * MEM_INIT, then register each context page's DMA address in the host
 * page table, polling for each WRITE_REQ to complete.  Error paths on
 * poll timeout are not visible in this extract.
 */
2529 bnx2_init_5709_context(struct bnx2 *bp)
2534 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2535 val |= (BCM_PAGE_BITS - 8) << 16;
2536 REG_WR(bp, BNX2_CTX_COMMAND, val);
/* Wait for the self-clearing MEM_INIT bit. */
2537 for (i = 0; i < 10; i++) {
2538 val = REG_RD(bp, BNX2_CTX_COMMAND);
2539 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2543 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2546 for (i = 0; i < bp->ctx_pages; i++) {
2550 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
/* Program the 64-bit DMA address of this context page. */
2554 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2555 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2556 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2557 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2558 (u64) bp->ctx_blk_mapping[i] >> 32);
2559 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2560 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2561 for (j = 0; j < 10; j++) {
2563 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2564 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2568 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
/* Zero-initialize on-chip context memory for every VCID on pre-5709
 * chips.  5706 A0 silicon maps context IDs differently, so the physical
 * CID address is remapped before use (A0 workaround).
 */
2577 bnx2_init_context(struct bnx2 *bp)
2583 u32 vcid_addr, pcid_addr, offset;
2588 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
/* A0 erratum: recompute the physical CID from a remapped VCID. */
2591 vcid_addr = GET_PCID_ADDR(vcid);
2593 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2598 pcid_addr = GET_PCID_ADDR(new_vcid);
2601 vcid_addr = GET_CID_ADDR(vcid);
2602 pcid_addr = vcid_addr;
2605 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2606 vcid_addr += (i << PHY_CTX_SHIFT);
2607 pcid_addr += (i << PHY_CTX_SHIFT);
2609 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2610 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2612 /* Zero out the context. */
2613 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2614 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
/* Work around bad RX-buffer memory blocks: allocate every free mbuf from
 * the chip's internal pool, remember the good ones (bit 9 of the handle
 * clear), then free only the good ones back — permanently retiring the
 * bad blocks.  Uses a temporary kernel array of up to 512 u16 handles.
 */
2620 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2626 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2627 if (good_mbuf == NULL)
2630 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2631 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2635 /* Allocate a bunch of mbufs and save the good ones in an array. */
2636 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2637 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2638 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2639 BNX2_RBUF_COMMAND_ALLOC_REQ);
2641 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2643 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2645 /* The addresses with Bit 9 set are bad memory blocks. */
2646 if (!(val & (1 << 9))) {
2647 good_mbuf[good_mbuf_cnt] = (u16) val;
2651 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2654 /* Free the good ones back to the mbuf pool thus discarding
2655 * all the bad ones. */
2656 while (good_mbuf_cnt) {
2659 val = good_mbuf[good_mbuf_cnt];
/* Encode the handle into the free-command format expected by HW. */
2660 val = (val << 9) | val | 1;
2662 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
/* Program a 6-byte MAC address into EMAC match-register pair @pos:
 * bytes 0-1 go in MATCH0, bytes 2-5 in MATCH1, each pair 8 bytes apart.
 */
2669 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2673 val = (mac_addr[0] << 8) | mac_addr[1];
2675 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2677 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2678 (mac_addr[4] << 8) | mac_addr[5];
2680 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
/* Allocate one page for the RX page ring at @index, DMA-map it for
 * device reads, and publish the 64-bit bus address in the corresponding
 * rx_bd descriptor.  Error paths (alloc/mapping failure) are not fully
 * visible in this extract.
 */
2684 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2687 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2688 struct rx_bd *rxbd =
2689 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2690 struct page *page = alloc_page(gfp);
2694 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2695 PCI_DMA_FROMDEVICE);
2696 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2702 dma_unmap_addr_set(rx_pg, mapping, mapping);
/* Split the bus address into the descriptor's hi/lo words. */
2703 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2704 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
/* Unmap and release the page held at RX page ring slot @index, if any
 * (early-return on empty slot is outside this extract).
 */
2711 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2712 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2713 struct page *page = rx_pg->page;
2717 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2718 PAGE_SIZE, PCI_DMA_FROMDEVICE);
/* Allocate an RX data buffer for ring slot @index, DMA-map it for device
 * writes, publish the bus address in the rx_bd, and advance the
 * producer byte-sequence counter.  Allocation/mapping failure paths are
 * not fully visible in this extract.
 */
2725 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2728 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2730 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2732 data = kmalloc(bp->rx_buf_size, gfp);
2736 mapping = dma_map_single(&bp->pdev->dev,
2738 bp->rx_buf_use_size,
2739 PCI_DMA_FROMDEVICE);
2740 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2745 rx_buf->data = data;
2746 dma_unmap_addr_set(rx_buf, mapping, mapping);
2748 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2749 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2751 rxr->rx_prod_bseq += bp->rx_buf_use_size;
/* Check whether attention @event is pending in the status block (raw
 * bits differ from the acked bits) and acknowledge it by setting or
 * clearing the bit through PCICFG so raw and acked agree again.
 * Presumably returns whether the event was pending; returns are outside
 * this extract.
 */
2757 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2759 struct status_block *sblk = bnapi->status_blk.msi;
2760 u32 new_link_state, old_link_state;
2763 new_link_state = sblk->status_attn_bits & event;
2764 old_link_state = sblk->status_attn_bits_ack & event;
2765 if (new_link_state != old_link_state) {
2767 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2769 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
/* Service PHY-related attention events (link state change, remote-link
 * timer abort) under phy_lock.
 */
2777 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2779 spin_lock(&bp->phy_lock);
2781 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2783 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2784 bnx2_set_remote_link(bp);
2786 spin_unlock(&bp->phy_lock);
/* Read the hardware TX consumer index from the status block.  The
 * index is adjusted when it lands exactly on the last descriptor of a
 * ring page (MAX_TX_DESC_CNT), since that slot holds a chain bd, not a
 * packet bd.
 */
2791 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2795 /* Tell compiler that status block fields can change. */
2797 cons = *bnapi->hw_tx_cons_ptr;
2799 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
/* Reclaim completed TX descriptors for the ring owned by 'bnapi':
 * unmap the head and fragment DMA mappings, free the skbs, update BQL
 * accounting, and wake the tx queue if it was stopped and enough
 * descriptors are now free.  Returns the number of packets reclaimed
 * (return statement elided in this view).
 */
2805 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2807 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2808 u16 hw_cons, sw_cons, sw_ring_cons;
2809 int tx_pkt = 0, index;
2810 unsigned int tx_bytes = 0;
2811 struct netdev_queue *txq;
/* Each bnx2_napi instance maps 1:1 to a netdev tx queue. */
2813 index = (bnapi - bp->bnx2_napi);
2814 txq = netdev_get_tx_queue(bp->dev, index);
2816 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2817 sw_cons = txr->tx_cons;
2819 while (sw_cons != hw_cons) {
2820 struct sw_tx_bd *tx_buf;
2821 struct sk_buff *skb;
2824 sw_ring_cons = TX_RING_IDX(sw_cons);
2826 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2829 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2830 prefetch(&skb->end);
2832 /* partial BD completions possible with TSO packets */
2833 if (tx_buf->is_gso) {
2834 u16 last_idx, last_ring_idx;
/* Last descriptor of this packet; +1 skips the header bd. */
2836 last_idx = sw_cons + tx_buf->nr_frags + 1;
2837 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2838 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
/* If hw_cons has not yet passed the last bd, the packet is
 * only partially completed; stop here and retry later.
 */
2841 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2846 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2847 skb_headlen(skb), PCI_DMA_TODEVICE);
2850 last = tx_buf->nr_frags;
2852 for (i = 0; i < last; i++) {
2853 sw_cons = NEXT_TX_BD(sw_cons);
2855 dma_unmap_page(&bp->pdev->dev,
2857 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2859 skb_frag_size(&skb_shinfo(skb)->frags[i]),
2863 sw_cons = NEXT_TX_BD(sw_cons);
2865 tx_bytes += skb->len;
2868 if (tx_pkt == budget)
/* Re-read hw_cons once we catch up, in case more completed. */
2871 if (hw_cons == sw_cons)
2872 hw_cons = bnx2_get_hw_tx_cons(bnapi);
/* Byte Queue Limits accounting for this completion batch. */
2875 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2876 txr->hw_tx_cons = hw_cons;
2877 txr->tx_cons = sw_cons;
2879 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2880 * before checking for netif_tx_queue_stopped(). Without the
2881 * memory barrier, there is a small possibility that bnx2_start_xmit()
2882 * will miss it and cause the queue to be stopped forever.
/* Wake the queue under the tx lock, re-checking the condition to
 * avoid racing with a concurrent bnx2_start_xmit() that may be
 * stopping the queue at the same time.
 */
2886 if (unlikely(netif_tx_queue_stopped(txq)) &&
2887 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2888 __netif_tx_lock(txq, smp_processor_id());
2889 if ((netif_tx_queue_stopped(txq)) &&
2890 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2891 netif_tx_wake_queue(txq);
2892 __netif_tx_unlock(txq);
/* Recycle 'count' pages from the RX page ring back to the producer
 * side without reallocating: move each consumer page (and its DMA
 * mapping / descriptor address) to the next producer slot.  When 'skb'
 * is non-NULL, the caller failed to replace the skb's last frag page,
 * so that page is first returned to the consumer slot.
 */
2899 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2900 struct sk_buff *skb, int count)
2902 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2903 struct rx_bd *cons_bd, *prod_bd;
2906 u16 cons = rxr->rx_pg_cons;
2908 cons_rx_pg = &rxr->rx_pg_ring[cons];
2910 /* The caller was unable to allocate a new page to replace the
2911 * last one in the frags array, so we need to recycle that page
2912 * and then free the skb.
2916 struct skb_shared_info *shinfo;
2918 shinfo = skb_shinfo(skb);
2920 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2921 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2923 cons_rx_pg->page = page;
2927 hw_prod = rxr->rx_pg_prod;
2929 for (i = 0; i < count; i++) {
2930 prod = RX_PG_RING_IDX(hw_prod);
2932 prod_rx_pg = &rxr->rx_pg_ring[prod];
2933 cons_rx_pg = &rxr->rx_pg_ring[cons];
2934 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2935 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
/* Transfer page, DMA mapping and descriptor address cons -> prod. */
2938 prod_rx_pg->page = cons_rx_pg->page;
2939 cons_rx_pg->page = NULL;
2940 dma_unmap_addr_set(prod_rx_pg, mapping,
2941 dma_unmap_addr(cons_rx_pg, mapping));
2943 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2944 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2947 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2948 hw_prod = NEXT_RX_BD(hw_prod);
2950 rxr->rx_pg_prod = hw_prod;
2951 rxr->rx_pg_cons = cons;
/* Recycle an RX data buffer from consumer slot 'cons' to producer slot
 * 'prod': hand the DMA mapping and descriptor address over so the chip
 * can refill the same buffer.  Used when the packet was copied or when
 * replacement allocation failed.
 */
2955 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2956 u8 *data, u16 cons, u16 prod)
2958 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2959 struct rx_bd *cons_bd, *prod_bd;
2961 cons_rx_buf = &rxr->rx_buf_ring[cons];
2962 prod_rx_buf = &rxr->rx_buf_ring[prod];
/* Give the (CPU-synced) header area back to the device. */
2964 dma_sync_single_for_device(&bp->pdev->dev,
2965 dma_unmap_addr(cons_rx_buf, mapping),
2966 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2968 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2970 prod_rx_buf->data = data;
2975 dma_unmap_addr_set(prod_rx_buf, mapping,
2976 dma_unmap_addr(cons_rx_buf, mapping));
2978 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2979 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2980 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2981 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
/* Build an sk_buff for a received frame.  First replace the data buffer
 * in the producer slot (recycling on failure); then wrap 'data' with
 * build_skb().  For jumbo/split frames (hdr_len != 0), attach the
 * remaining payload as page frags from the RX page ring, replacing each
 * consumed page as we go and unwinding via page recycling on allocation
 * failure.  'ring_idx' packs the consumer index in the high 16 bits and
 * the producer index in the low 16 bits.
 */
2984 static struct sk_buff *
2985 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
2986 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2990 u16 prod = ring_idx & 0xffff;
2991 struct sk_buff *skb;
2993 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
2994 if (unlikely(err)) {
2995 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
/* raw_len includes the 4-byte FCS appended by the hardware. */
2998 unsigned int raw_len = len + 4;
2999 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3001 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3006 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3007 PCI_DMA_FROMDEVICE);
3008 skb = build_skb(data, 0);
/* Skip the l2_fhdr and padding so skb->data points at the frame. */
3013 skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3018 unsigned int i, frag_len, frag_size, pages;
3019 struct sw_pg *rx_pg;
3020 u16 pg_cons = rxr->rx_pg_cons;
3021 u16 pg_prod = rxr->rx_pg_prod;
3023 frag_size = len + 4 - hdr_len;
3024 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3025 skb_put(skb, hdr_len);
3027 for (i = 0; i < pages; i++) {
3028 dma_addr_t mapping_old;
3030 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
/* The last fragment may contain only (part of) the FCS, which is
 * trimmed off; adjust the previous frag instead of adding one.
 */
3031 if (unlikely(frag_len <= 4)) {
3032 unsigned int tail = 4 - frag_len;
3034 rxr->rx_pg_cons = pg_cons;
3035 rxr->rx_pg_prod = pg_prod;
3036 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3043 &skb_shinfo(skb)->frags[i - 1];
3044 skb_frag_size_sub(frag, tail);
3045 skb->data_len -= tail;
3049 rx_pg = &rxr->rx_pg_ring[pg_cons];
3051 /* Don't unmap yet. If we're unable to allocate a new
3052 * page, we need to recycle the page and the DMA addr.
3054 mapping_old = dma_unmap_addr(rx_pg, mapping);
3058 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3061 err = bnx2_alloc_rx_page(bp, rxr,
3062 RX_PG_RING_IDX(pg_prod),
3064 if (unlikely(err)) {
3065 rxr->rx_pg_cons = pg_cons;
3066 rxr->rx_pg_prod = pg_prod;
3067 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3072 dma_unmap_page(&bp->pdev->dev, mapping_old,
3073 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3075 frag_size -= frag_len;
3076 skb->data_len += frag_len;
3077 skb->truesize += PAGE_SIZE;
3078 skb->len += frag_len;
3080 pg_prod = NEXT_RX_BD(pg_prod);
3081 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3083 rxr->rx_pg_prod = pg_prod;
3084 rxr->rx_pg_cons = pg_cons;
/* Read the hardware RX consumer index from the status block, adjusting
 * when it lands on the per-page chain-bd slot (MAX_RX_DESC_CNT), which
 * never carries a packet.
 */
3090 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3094 /* Tell compiler that status block fields can change. */
3096 cons = *bnapi->hw_rx_cons_ptr;
3098 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
/* NAPI RX handler: process up to 'budget' received frames.  For each
 * frame: sync the header for CPU access, validate the l2_fhdr status,
 * either copy small packets into a fresh skb (recycling the buffer) or
 * build a full skb via bnx2_rx_skb(), apply VLAN/checksum/rxhash
 * offload results, and hand the skb to GRO.  Finally publish the new
 * producer indices/bseq to the chip.  Returns packets processed
 * (return elided in this view).
 */
3104 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3106 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3107 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3108 struct l2_fhdr *rx_hdr;
3109 int rx_pkt = 0, pg_ring_used = 0;
3111 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3112 sw_cons = rxr->rx_cons;
3113 sw_prod = rxr->rx_prod;
3115 /* Memory barrier necessary as speculative reads of the rx
3116 * buffer can be ahead of the index in the status block
3119 while (sw_cons != hw_cons) {
3120 unsigned int len, hdr_len;
3122 struct sw_bd *rx_buf, *next_rx_buf;
3123 struct sk_buff *skb;
3124 dma_addr_t dma_addr;
3127 sw_ring_cons = RX_RING_IDX(sw_cons);
3128 sw_ring_prod = RX_RING_IDX(sw_prod);
3130 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3131 data = rx_buf->data;
3132 rx_buf->data = NULL;
3134 rx_hdr = get_l2_fhdr(data);
3137 dma_addr = dma_unmap_addr(rx_buf, mapping);
/* Only the header region is synced here; the rest is unmapped or
 * copied by the slow/fast paths below as appropriate.
 */
3139 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3140 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3141 PCI_DMA_FROMDEVICE);
3144 &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3145 prefetch(get_l2_fhdr(next_rx_buf->data));
3147 len = rx_hdr->l2_fhdr_pkt_len;
3148 status = rx_hdr->l2_fhdr_status;
/* Split frames carry the header length in the ip_xsum field;
 * otherwise jumbo frames are truncated at rx_jumbo_thresh and the
 * remainder lives in the page ring.
 */
3151 if (status & L2_FHDR_STATUS_SPLIT) {
3152 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3154 } else if (len > bp->rx_jumbo_thresh) {
3155 hdr_len = bp->rx_jumbo_thresh;
3159 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3160 L2_FHDR_ERRORS_PHY_DECODE |
3161 L2_FHDR_ERRORS_ALIGNMENT |
3162 L2_FHDR_ERRORS_TOO_SHORT |
3163 L2_FHDR_ERRORS_GIANT_FRAME))) {
/* Bad frame: recycle the buffer (and any pages) and move on. */
3165 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3170 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3172 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
/* Copy-break path: small packets are memcpy'd into a fresh skb
 * so the original DMA buffer can be recycled in place.
 */
3179 if (len <= bp->rx_copy_thresh) {
3180 skb = netdev_alloc_skb(bp->dev, len + 6);
3182 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3189 (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3191 skb_reserve(skb, 6);
3194 bnx2_reuse_rx_data(bp, rxr, data,
3195 sw_ring_cons, sw_ring_prod);
3198 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3199 (sw_ring_cons << 16) | sw_ring_prod);
3203 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3204 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3205 __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag)
3207 skb->protocol = eth_type_trans(skb, bp->dev);
/* Drop over-MTU frames unless VLAN-tagged (0x8100 = ETH_P_8021Q). */
3209 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3210 (ntohs(skb->protocol) != 0x8100)) {
3217 skb_checksum_none_assert(skb);
3218 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3219 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3220 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3222 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3223 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3224 skb->ip_summed = CHECKSUM_UNNECESSARY;
3226 if ((bp->dev->features & NETIF_F_RXHASH) &&
3227 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3228 L2_FHDR_STATUS_USE_RXHASH))
3229 skb->rxhash = rx_hdr->l2_fhdr_hash;
3231 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3232 napi_gro_receive(&bnapi->napi, skb);
3236 sw_cons = NEXT_RX_BD(sw_cons);
3237 sw_prod = NEXT_RX_BD(sw_prod);
3239 if ((rx_pkt == budget))
3242 /* Refresh hw_cons to see if there is new work */
3243 if (sw_cons == hw_cons) {
3244 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3248 rxr->rx_cons = sw_cons;
3249 rxr->rx_prod = sw_prod;
/* Tell the chip about new producer positions and byte sequence. */
3252 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3254 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3256 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
/* MSI ISR - The only difference between this and the INTx ISR
3265 * is that the MSI interrupt is always serviced.
3268 bnx2_msi(int irq, void *dev_instance)
3270 struct bnx2_napi *bnapi = dev_instance;
3271 struct bnx2 *bp = bnapi->bp;
3273 prefetch(bnapi->status_blk.msi);
/* Mask further interrupts until NAPI poll re-enables them. */
3274 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3275 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3276 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3278 /* Return here if interrupt is disabled. */
3279 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3282 napi_schedule(&bnapi->napi);
/* One-shot MSI ISR: the hardware auto-masks the interrupt, so no
 * explicit INT_ACK masking write is needed before scheduling NAPI.
 */
3288 bnx2_msi_1shot(int irq, void *dev_instance)
3290 struct bnx2_napi *bnapi = dev_instance;
3291 struct bnx2 *bp = bnapi->bp;
3293 prefetch(bnapi->status_blk.msi);
3295 /* Return here if interrupt is disabled. */
3296 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3299 napi_schedule(&bnapi->napi);
/* Legacy INTx ISR.  Detects and ignores interrupts not meant for us
 * (shared line), masks/deasserts the IRQ, and schedules NAPI.
 */
3305 bnx2_interrupt(int irq, void *dev_instance)
3307 struct bnx2_napi *bnapi = dev_instance;
3308 struct bnx2 *bp = bnapi->bp;
3309 struct status_block *sblk = bnapi->status_blk.msi;
3311 /* When using INTx, it is possible for the interrupt to arrive
3312 * at the CPU before the status block posted prior to the
3313 * interrupt. Reading a register will flush the status block.
3314 * When using MSI, the MSI message will always complete after
3315 * the status block write.
/* Not our interrupt: status index unchanged and INTA not asserted. */
3317 if ((sblk->status_idx == bnapi->last_status_idx) &&
3318 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3319 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3322 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3323 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3324 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3326 /* Read back to deassert IRQ immediately to avoid too many
3327 * spurious interrupts.
3329 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3331 /* Return here if interrupt is shared and is disabled. */
3332 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3335 if (napi_schedule_prep(&bnapi->napi)) {
3336 bnapi->last_status_idx = sblk->status_idx;
3337 __napi_schedule(&bnapi->napi);
/* True if either the RX or TX hardware consumer index has advanced
 * beyond what the driver has processed (i.e. fast-path work pending).
 */
3344 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3346 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3347 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3349 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3350 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
/* Attention events the driver services: link state and timer abort. */
#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3356 STATUS_ATTN_BITS_TIMER_ABORT)
/* True if any work is pending: fast-path RX/TX, CNIC (offload), or an
 * unacknowledged attention event.
 */
3359 bnx2_has_work(struct bnx2_napi *bnapi)
3361 struct status_block *sblk = bnapi->status_blk.msi;
3363 if (bnx2_has_fast_work(bnapi))
3367 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3371 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3372 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
/* Workaround for lost MSI: if work is pending but the status index has
 * not moved since the last idle check, toggle the MSI enable bit and
 * invoke the ISR by hand to kick processing.  Called periodically
 * (presumably from the driver timer — confirm against caller).
 */
3379 bnx2_chk_missed_msi(struct bnx2 *bp)
3381 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3384 if (bnx2_has_work(bnapi)) {
3385 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3386 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3389 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
/* Toggle MSI off/on, then service manually. */
3390 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3391 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3392 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3393 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3397 bp->idle_chk_status_idx = bnapi->last_status_idx;
/* Invoke the registered CNIC (iSCSI/RDMA offload) handler, if any,
 * recording its returned tag for the has-work check.  cnic_ops is
 * RCU-protected.
 */
3401 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3403 struct cnic_ops *c_ops;
3405 if (!bnapi->cnic_present)
3409 c_ops = rcu_dereference(bp->cnic_ops);
3411 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3412 bnapi->status_blk.msi);
/* Handle pending attention (link/timer) events during NAPI poll, then
 * force a host-coalescing pass (without interrupt) so transient status
 * during link changes is picked up.
 */
3417 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3419 struct status_block *sblk = bnapi->status_blk.msi;
3420 u32 status_attn_bits = sblk->status_attn_bits;
3421 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3423 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3424 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3426 bnx2_phy_int(bp, bnapi);
3428 /* This is needed to take care of transient status
3429 * during link changes.
3431 REG_WR(bp, BNX2_HC_COMMAND,
3432 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3433 REG_RD(bp, BNX2_HC_COMMAND);
/* Common NAPI work body: reap TX completions (unbudgeted) and process
 * RX up to the remaining budget.  Returns the updated work_done count.
 */
3437 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3438 int work_done, int budget)
3440 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3441 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3443 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3444 bnx2_tx_int(bp, bnapi, 0);
3446 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3447 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
/* NAPI poll handler for MSI-X vectors (fast path only, no link/CNIC
 * handling).  Loops until budget is exhausted or no fast work remains,
 * then completes NAPI and re-arms the vector's interrupt with the last
 * seen status index.
 */
3452 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3454 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3455 struct bnx2 *bp = bnapi->bp;
3457 struct status_block_msix *sblk = bnapi->status_blk.msix;
3460 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3461 if (unlikely(work_done >= budget))
3464 bnapi->last_status_idx = sblk->status_idx;
3465 /* status idx must be read before checking for more work. */
3467 if (likely(!bnx2_has_fast_work(bnapi))) {
3469 napi_complete(napi);
3470 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3471 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3472 bnapi->last_status_idx);
/* NAPI poll handler for INTx/MSI (vector 0): handles link attention
 * and CNIC work in addition to fast-path RX/TX.  On completion,
 * re-enables interrupts — a single ACK write for MSI/MSI-X, or the
 * two-write mask/unmask sequence required for INTx.
 */
3479 static int bnx2_poll(struct napi_struct *napi, int budget)
3481 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3482 struct bnx2 *bp = bnapi->bp;
3484 struct status_block *sblk = bnapi->status_blk.msi;
3487 bnx2_poll_link(bp, bnapi);
3489 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3492 bnx2_poll_cnic(bp, bnapi);
3495 /* bnapi->last_status_idx is used below to tell the hw how
3496 * much work has been processed, so we must read it before
3497 * checking for more work.
3499 bnapi->last_status_idx = sblk->status_idx;
3501 if (unlikely(work_done >= budget))
3505 if (likely(!bnx2_has_work(bnapi))) {
3506 napi_complete(napi);
3507 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3508 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3509 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3510 bnapi->last_status_idx);
/* INTx: write once with MASK_INT, then again without, to
 * deassert and re-enable in the required order.
 */
3513 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3514 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3515 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3516 bnapi->last_status_idx);
3518 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3519 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3520 bnapi->last_status_idx);
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
3529 * from set_multicast.
/* Configure RX filtering from dev->flags and the device address lists:
 * promiscuous / all-multicast / multicast hash / unicast perfect-match
 * entries, plus VLAN tag stripping.  Programs EMAC_RX_MODE, the
 * multicast hash registers, and the RPM sort-user registers.
 */
3532 bnx2_set_rx_mode(struct net_device *dev)
3534 struct bnx2 *bp = netdev_priv(dev);
3535 u32 rx_mode, sort_mode;
3536 struct netdev_hw_addr *ha;
3539 if (!netif_running(dev))
3542 spin_lock_bh(&bp->phy_lock);
3544 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3545 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3546 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
/* Keep VLAN tags in-frame only when HW stripping is off and the
 * chip/firmware combination allows it.
 */
3547 if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3548 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3549 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3550 if (dev->flags & IFF_PROMISC) {
3551 /* Promiscuous mode. */
3552 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3553 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3554 BNX2_RPM_SORT_USER0_PROM_VLAN;
3556 else if (dev->flags & IFF_ALLMULTI) {
3557 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3558 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3561 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3564 /* Accept one or more multicast(s). */
3565 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3570 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3572 netdev_for_each_mc_addr(ha, dev) {
3573 crc = ether_crc_le(ETH_ALEN, ha->addr);
/* CRC-derived bit index: top 3 bits select the register,
 * remaining bits select the bit within it.
 */
3575 regidx = (bit & 0xe0) >> 5;
3577 mc_filter[regidx] |= (1 << bit);
3580 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3581 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3585 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
/* Too many unicast addresses for perfect filtering: fall back
 * to promiscuous mode.
 */
3588 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3589 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3590 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3591 BNX2_RPM_SORT_USER0_PROM_VLAN;
3592 } else if (!(dev->flags & IFF_PROMISC)) {
3593 /* Add all entries into to the match filter list */
3595 netdev_for_each_uc_addr(ha, dev) {
3596 bnx2_set_mac_addr(bp, ha->addr,
3597 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3599 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3605 if (rx_mode != bp->rx_mode) {
3606 bp->rx_mode = rx_mode;
3607 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
/* Disable, program, then re-enable the sort-user filter. */
3610 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3611 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3612 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3614 spin_unlock_bh(&bp->phy_lock);
/* Validate a firmware-file section header: offset and length must be
 * in-bounds for the blob, 4-byte aligned (offset) and aligned to
 * 'alignment' (length); 'non_empty' additionally requires len != 0.
 */
3618 check_fw_section(const struct firmware *fw,
3619 const struct bnx2_fw_file_section *section,
3620 u32 alignment, bool non_empty)
3622 u32 offset = be32_to_cpu(section->offset);
3623 u32 len = be32_to_cpu(section->len);
3625 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3627 if ((non_empty && len == 0) || len > fw->size - offset ||
3628 len & (alignment - 1))
/* Validate one MIPS firmware entry: text must be present, data and
 * rodata may be empty; all must be 4-byte aligned and in-bounds.
 */
3634 check_mips_fw_entry(const struct firmware *fw,
3635 const struct bnx2_mips_fw_file_entry *entry)
3637 if (check_fw_section(fw, &entry->text, 4, true) ||
3638 check_fw_section(fw, &entry->data, 4, false) ||
3639 check_fw_section(fw, &entry->rodata, 4, false))
/* Release both cached firmware blobs; rv2p_firmware doubles as the
 * "firmware is loaded" flag, so it is both the guard and is NULLed.
 */
3644 static void bnx2_release_firmware(struct bnx2 *bp)
3646 if (bp->rv2p_firmware) {
3647 release_firmware(bp->mips_firmware);
3648 release_firmware(bp->rv2p_firmware);
3649 bp->rv2p_firmware = NULL;
/* Load the MIPS and RV2P firmware blobs appropriate for the chip
 * revision and validate their headers/sections.  On any failure, all
 * acquired firmware is released via the goto cleanup chain.
 */
3653 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3655 const char *mips_fw_file, *rv2p_fw_file;
3656 const struct bnx2_mips_fw_file *mips_fw;
3657 const struct bnx2_rv2p_fw_file *rv2p_fw;
/* 5709 uses its own firmware; A0/A1 steppings need a special RV2P. */
3660 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3661 mips_fw_file = FW_MIPS_FILE_09;
3662 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3663 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3664 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3666 rv2p_fw_file = FW_RV2P_FILE_09;
3668 mips_fw_file = FW_MIPS_FILE_06;
3669 rv2p_fw_file = FW_RV2P_FILE_06;
3672 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3674 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3678 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3680 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3681 goto err_release_mips_firmware;
/* Sanity-check headers and every processor's sections. */
3683 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3684 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3685 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3686 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3687 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3688 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3689 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3690 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3691 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3693 goto err_release_firmware;
3695 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3696 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3697 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3698 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3700 goto err_release_firmware;
3705 err_release_firmware:
3706 release_firmware(bp->rv2p_firmware);
3707 bp->rv2p_firmware = NULL;
3708 err_release_mips_firmware:
3709 release_firmware(bp->mips_firmware);
/* Fetch firmware only if not already cached (rv2p_firmware != NULL). */
3713 static int bnx2_request_firmware(struct bnx2 *bp)
3715 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
/* Patch an RV2P instruction word at a known fixup index; currently
 * only the BD page size field is rewritten to match this build.
 */
3719 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3722 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3723 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3724 rv2p_code |= RV2P_BD_PAGE_SIZE;
/* Download RV2P processor firmware: write each 64-bit instruction via
 * the INSTR_HIGH/INSTR_LOW register pair and commit it with an address
 * command, apply any fixup entries, then reset the processor (un-stall
 * happens later).
 */
3731 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3732 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3734 u32 rv2p_code_len, file_offset;
3739 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3740 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3742 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3744 if (rv2p_proc == RV2P_PROC1) {
3745 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3746 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3748 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3749 addr = BNX2_RV2P_PROC2_ADDR_CMD;
/* Instructions are 8 bytes; stream them one at a time. */
3752 for (i = 0; i < rv2p_code_len; i += 8) {
3753 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3755 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3758 val = (i / 8) | cmd;
3759 REG_WR(bp, addr, val);
/* Second pass: re-apply fixed-up instruction words in place. */
3762 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3763 for (i = 0; i < 8; i++) {
3766 loc = be32_to_cpu(fw_entry->fixup[i]);
3767 if (loc && ((loc * 4) < rv2p_code_len)) {
3768 code = be32_to_cpu(*(rv2p_code + loc - 1));
3769 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3770 code = be32_to_cpu(*(rv2p_code + loc));
3771 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3772 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3774 val = (loc / 2) | cmd;
3775 REG_WR(bp, addr, val);
3779 /* Reset the processor, un-stall is done later. */
3780 if (rv2p_proc == RV2P_PROC1) {
3781 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3784 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
/* Download firmware to one on-chip MIPS CPU: halt it, copy the text,
 * data and read-only sections into its scratchpad view, clear the
 * prefetch instruction, set the PC to the entry point, and restart.
 */
3791 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3792 const struct bnx2_mips_fw_file_entry *fw_entry)
3794 u32 addr, len, file_offset;
/* Halt the CPU and clear its state before loading. */
3800 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3801 val |= cpu_reg->mode_value_halt;
3802 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3803 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3805 /* Load the Text area. */
3806 addr = be32_to_cpu(fw_entry->text.addr);
3807 len = be32_to_cpu(fw_entry->text.len);
3808 file_offset = be32_to_cpu(fw_entry->text.offset);
3809 data = (__be32 *)(bp->mips_firmware->data + file_offset);
/* Translate the MIPS-view address into the scratchpad window. */
3811 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3815 for (j = 0; j < (len / 4); j++, offset += 4)
3816 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3819 /* Load the Data area. */
3820 addr = be32_to_cpu(fw_entry->data.addr);
3821 len = be32_to_cpu(fw_entry->data.len);
3822 file_offset = be32_to_cpu(fw_entry->data.offset);
3823 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3825 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3829 for (j = 0; j < (len / 4); j++, offset += 4)
3830 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3833 /* Load the Read-Only area. */
3834 addr = be32_to_cpu(fw_entry->rodata.addr);
3835 len = be32_to_cpu(fw_entry->rodata.len);
3836 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3837 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3839 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3843 for (j = 0; j < (len / 4); j++, offset += 4)
3844 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3847 /* Clear the pre-fetch instruction. */
3848 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3850 val = be32_to_cpu(fw_entry->start_addr);
3851 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3853 /* Start the CPU. */
3854 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3855 val &= ~cpu_reg->mode_value_halt;
3856 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3857 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
/* Load firmware into every on-chip processor: both RV2P engines, then
 * the RX, TX, TX patch-up, completion, and command MIPS CPUs (each
 * load_cpu_fw return code is checked; cleanup elided in this view).
 */
3863 bnx2_init_cpus(struct bnx2 *bp)
3865 const struct bnx2_mips_fw_file *mips_fw =
3866 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3867 const struct bnx2_rv2p_fw_file *rv2p_fw =
3868 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3871 /* Initialize the RV2P processor. */
3872 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3873 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3875 /* Initialize the RX Processor. */
3876 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3880 /* Initialize the TX Processor. */
3881 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3885 /* Initialize the TX Patch-up Processor. */
3886 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3890 /* Initialize the Completion Processor. */
3891 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3895 /* Initialize the Command Processor. */
3896 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
/* Transition the device between PCI power states via the PM control
 * register.  The D0 path clears PME status and delays coming out of
 * D3hot; the suspend path optionally arms Wake-on-LAN (forcing 10/100
 * autoneg on copper, programming the MAC address match, magic/ACPI
 * packet reception, multicast acceptance, and EMAC/RPM enables),
 * notifies firmware, and finally writes the target power state.
 */
3903 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3907 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
/* Enter D0: clear the state field and any pending PME status. */
3913 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3914 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3915 PCI_PM_CTRL_PME_STATUS);
3917 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3918 /* delay required during transition out of D3hot */
3921 val = REG_RD(bp, BNX2_EMAC_MODE);
3922 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3923 val &= ~BNX2_EMAC_MODE_MPKT;
3924 REG_WR(bp, BNX2_EMAC_MODE, val);
3926 val = REG_RD(bp, BNX2_RPM_CONFIG);
3927 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3928 REG_WR(bp, BNX2_RPM_CONFIG, val);
/* WOL path: temporarily force 10/100 autoneg on copper so the
 * link can come up at low power, restoring settings afterwards.
 */
3939 autoneg = bp->autoneg;
3940 advertising = bp->advertising;
3942 if (bp->phy_port == PORT_TP) {
3943 bp->autoneg = AUTONEG_SPEED;
3944 bp->advertising = ADVERTISED_10baseT_Half |
3945 ADVERTISED_10baseT_Full |
3946 ADVERTISED_100baseT_Half |
3947 ADVERTISED_100baseT_Full |
3951 spin_lock_bh(&bp->phy_lock);
3952 bnx2_setup_phy(bp, bp->phy_port);
3953 spin_unlock_bh(&bp->phy_lock);
3955 bp->autoneg = autoneg;
3956 bp->advertising = advertising;
3958 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3960 val = REG_RD(bp, BNX2_EMAC_MODE);
3962 /* Enable port mode. */
3963 val &= ~BNX2_EMAC_MODE_PORT;
3964 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3965 BNX2_EMAC_MODE_ACPI_RCVD |
3966 BNX2_EMAC_MODE_MPKT;
3967 if (bp->phy_port == PORT_TP)
3968 val |= BNX2_EMAC_MODE_PORT_MII;
3970 val |= BNX2_EMAC_MODE_PORT_GMII;
3971 if (bp->line_speed == SPEED_2500)
3972 val |= BNX2_EMAC_MODE_25G_MODE;
3975 REG_WR(bp, BNX2_EMAC_MODE, val);
3977 /* receive all multicast */
3978 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3979 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3982 REG_WR(bp, BNX2_EMAC_RX_MODE,
3983 BNX2_EMAC_RX_MODE_SORT_MODE);
3985 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3986 BNX2_RPM_SORT_USER0_MC_EN;
3987 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3988 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3989 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3990 BNX2_RPM_SORT_USER0_ENA);
3992 /* Need to enable EMAC and RPM for WOL. */
3993 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3994 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3995 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3996 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3998 val = REG_RD(bp, BNX2_RPM_CONFIG);
3999 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4000 REG_WR(bp, BNX2_RPM_CONFIG, val);
4002 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4005 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
/* Tell firmware we are suspending, with or without WOL armed. */
4008 if (!(bp->flags & BNX2_FLAG_NO_WOL))
4009 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
4012 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
/* 5706 A0/A1 have a PM quirk handled here (details elided). */
4013 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4014 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
4023 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4025 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4028 /* No more memory access after this point until
4029 * device is brought back to D0.
/* Acquire the NVRAM hardware arbitration lock (ARB2), polling up to
 * NVRAM_TIMEOUT_COUNT times; fails if the grant never appears.
 */
4041 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4046 /* Request access to the flash interface. */
4047 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4048 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4049 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4050 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4056 if (j >= NVRAM_TIMEOUT_COUNT)
/* Release the NVRAM arbitration lock and poll until the grant bit
 * clears; fails on timeout.
 */
4063 bnx2_release_nvram_lock(struct bnx2 *bp)
4068 /* Relinquish nvram interface. */
4069 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4071 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4072 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4073 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4079 if (j >= NVRAM_TIMEOUT_COUNT)
/* Enable NVRAM writes in MISC_CFG; for flash parts that require a
 * write-enable opcode (BNX2_NV_WREN), issue WREN and poll for DONE.
 */
4087 bnx2_enable_nvram_write(struct bnx2 *bp)
4091 val = REG_RD(bp, BNX2_MISC_CFG);
4092 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4094 if (bp->flash_info->flags & BNX2_NV_WREN) {
4097 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4098 REG_WR(bp, BNX2_NVM_COMMAND,
4099 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4101 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4104 val = REG_RD(bp, BNX2_NVM_COMMAND);
4105 if (val & BNX2_NVM_COMMAND_DONE)
4109 if (j >= NVRAM_TIMEOUT_COUNT)
/* Clear the NVRAM write-enable bit in MISC_CFG. */
4116 bnx2_disable_nvram_write(struct bnx2 *bp)
4120 val = REG_RD(bp, BNX2_MISC_CFG);
4121 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
/* Grant the host access to the NVRAM interface. */
4126 bnx2_enable_nvram_access(struct bnx2 *bp)
4130 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4131 /* Enable both bits, even on read. */
4132 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4133 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
/* Revoke host access to the NVRAM interface. */
4137 bnx2_disable_nvram_access(struct bnx2 *bp)
4141 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4142 /* Disable both bits, even after read. */
4143 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4144 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4145 BNX2_NVM_ACCESS_ENABLE_WR_EN));
/* Erase one flash page at 'offset'.  Buffered flash parts need no
 * erase; otherwise issue the ERASE command and poll for DONE, failing
 * on timeout.
 */
4149 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4154 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4155 /* Buffered flash, no erase needed */
4158 /* Build an erase command */
4159 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4160 BNX2_NVM_COMMAND_DOIT;
4162 /* Need to clear DONE bit separately. */
4163 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4165 /* Address of the NVRAM to read from. */
4166 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4168 /* Issue an erase command. */
4169 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4171 /* Wait for completion. */
4172 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4177 val = REG_RD(bp, BNX2_NVM_COMMAND);
4178 if (val & BNX2_NVM_COMMAND_DONE)
4182 if (j >= NVRAM_TIMEOUT_COUNT)
/* Read one 32-bit word from NVRAM at @offset into @ret_val (4 bytes,
 * big-endian as stored in flash).  @cmd_flags carries FIRST/LAST sequencing
 * bits supplied by the caller.  For page-translated parts the linear offset
 * is converted to a page/column address first.
 * NOTE(review): poll delay and the break/timeout return are elided here.
 */
4189 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4194 /* Build the command word. */
4195 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4197 /* Calculate an offset of a buffered flash, not needed for 5709. */
4198 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4199 offset = ((offset / bp->flash_info->page_size) <<
4200 bp->flash_info->page_bits) +
4201 (offset % bp->flash_info->page_size);
4204 /* Need to clear DONE bit separately. */
4205 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4207 /* Address of the NVRAM to read from. */
4208 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4210 /* Issue a read command. */
4211 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4213 /* Wait for completion. */
4214 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4219 val = REG_RD(bp, BNX2_NVM_COMMAND);
4220 if (val & BNX2_NVM_COMMAND_DONE) {
/* Data register is CPU-endian; store to caller big-endian via memcpy
 * so the byte order matches the flash image layout. */
4221 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4222 memcpy(ret_val, &v, 4);
4226 if (j >= NVRAM_TIMEOUT_COUNT)
/* Write one 32-bit word (@val, 4 bytes, flash byte order) to NVRAM at
 * @offset.  Mirror of bnx2_nvram_read_dword: translate the offset for
 * paged parts, clear DONE, load data and address, issue WR|DOIT, and poll
 * for completion.
 * NOTE(review): poll delay and the break/timeout return are elided here.
 */
4234 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4240 /* Build the command word. */
4241 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4243 /* Calculate an offset of a buffered flash, not needed for 5709. */
4244 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4245 offset = ((offset / bp->flash_info->page_size) <<
4246 bp->flash_info->page_bits) +
4247 (offset % bp->flash_info->page_size);
4250 /* Need to clear DONE bit separately. */
4251 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4253 memcpy(&val32, val, 4);
4255 /* Write the data. */
4256 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4258 /* Address of the NVRAM to write to. */
4259 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4261 /* Issue the write command. */
4262 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4264 /* Wait for completion. */
4265 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4268 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4271 if (j >= NVRAM_TIMEOUT_COUNT)
/* Detect the attached flash/EEPROM part and record it in bp->flash_info,
 * then determine bp->flash_size.
 *
 * 5709 chips always use the fixed flash_5709 descriptor and skip straight
 * to the size query.  Other chips read NVM_CFG1: if bit 30 is set the
 * interface was already reconfigured and is matched by backup-strap value;
 * otherwise the strapping is matched against flash_table and the interface
 * is reconfigured (under the NVRAM lock) with that entry's config words.
 * An unmatched strap leaves flash_info NULL and logs an alert.
 * NOTE(review): loop increments, break statements, the "get_flash_size"
 * label, and several closing braces are elided in this extract.
 */
4278 bnx2_init_nvram(struct bnx2 *bp)
4281 int j, entry_count, rc = 0;
4282 const struct flash_spec *flash;
4284 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4285 bp->flash_info = &flash_5709;
4286 goto get_flash_size;
4289 /* Determine the selected interface. */
4290 val = REG_RD(bp, BNX2_NVM_CFG1);
4292 entry_count = ARRAY_SIZE(flash_table);
4294 if (val & 0x40000000) {
4296 /* Flash interface has been reconfigured */
4297 for (j = 0, flash = &flash_table[0]; j < entry_count;
4299 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4300 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4301 bp->flash_info = flash;
4308 /* Not yet been reconfigured */
4310 if (val & (1 << 23))
4311 mask = FLASH_BACKUP_STRAP_MASK;
4313 mask = FLASH_STRAP_MASK;
4315 for (j = 0, flash = &flash_table[0]; j < entry_count;
4318 if ((val & mask) == (flash->strapping & mask)) {
4319 bp->flash_info = flash;
4321 /* Request access to the flash interface. */
4322 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4325 /* Enable access to flash interface */
4326 bnx2_enable_nvram_access(bp);
4328 /* Reconfigure the flash interface */
4329 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4330 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4331 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4332 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4334 /* Disable access to flash interface */
4335 bnx2_disable_nvram_access(bp);
4336 bnx2_release_nvram_lock(bp);
4341 } /* if (val & 0x40000000) */
4343 if (j == entry_count) {
4344 bp->flash_info = NULL;
4345 pr_alert("Unknown flash/EEPROM type\n");
/* Flash size: prefer the size advertised in shared-memory config,
 * falling back to the table entry's total_size. */
4350 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4351 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4353 bp->flash_size = val;
4355 bp->flash_size = bp->flash_info->total_size;
/* Read an arbitrary byte range from NVRAM into @ret_buf.
 *
 * The hardware reads whole dwords, so the routine handles three cases:
 * an unaligned head (read the containing dword, copy the tail bytes),
 * an unaligned length (read the final dword into a bounce buffer and copy
 * only the needed bytes), and the aligned middle (dword loop with
 * FIRST/LAST sequencing flags).  The NVRAM lock and access enable bracket
 * the whole transfer.
 * NOTE(review): several offset/length adjustments, buffer advances, and
 * closing braces between the visible lines are elided in this extract.
 */
4361 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4365 u32 cmd_flags, offset32, len32, extra;
4370 /* Request access to the flash interface. */
4371 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4374 /* Enable access to flash interface */
4375 bnx2_enable_nvram_access(bp);
4388 pre_len = 4 - (offset & 3);
4390 if (pre_len >= len32) {
4392 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4393 BNX2_NVM_COMMAND_LAST;
4396 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4399 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4404 memcpy(ret_buf, buf + (offset & 3), pre_len);
4411 extra = 4 - (len32 & 3);
4412 len32 = (len32 + 4) & ~3;
4419 cmd_flags = BNX2_NVM_COMMAND_LAST;
4421 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4422 BNX2_NVM_COMMAND_LAST;
4424 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4426 memcpy(ret_buf, buf, 4 - extra);
4428 else if (len32 > 0) {
4431 /* Read the first word. */
4435 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4437 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4439 /* Advance to the next dword. */
4444 while (len32 > 4 && rc == 0) {
4445 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4447 /* Advance to the next dword. */
4456 cmd_flags = BNX2_NVM_COMMAND_LAST;
4457 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4459 memcpy(ret_buf, buf, 4 - extra);
4462 /* Disable access to flash interface */
4463 bnx2_disable_nvram_access(bp);
4465 bnx2_release_nvram_lock(bp);
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Strategy: widen the request to dword/page alignment by reading back the
 * partial head and tail dwords and merging them with the new data in a
 * kmalloc'd align_buf.  Then write page by page: for non-buffered flash,
 * read the whole page into flash_buffer, erase the page, and rewrite the
 * preserved leading bytes, the new data, and the preserved trailing bytes;
 * buffered flash writes the data range directly.  Each page iteration
 * takes and releases the NVRAM lock and access enables.  goto-based
 * cleanup (nvram_write_end) frees both temporary buffers on any failure.
 * NOTE(review): numerous intermediate statements (rc checks, index
 * initialisation, buf selection, closing braces, the nvram_write_end
 * label, final return) are elided in this extract.
 */
4471 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4474 u32 written, offset32, len32;
4475 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4477 int align_start, align_end;
4482 align_start = align_end = 0;
4484 if ((align_start = (offset32 & 3))) {
4486 len32 += align_start;
4489 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4494 align_end = 4 - (len32 & 3);
4496 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4500 if (align_start || align_end) {
4501 align_buf = kmalloc(len32, GFP_KERNEL);
4502 if (align_buf == NULL)
4505 memcpy(align_buf, start, 4);
4508 memcpy(align_buf + len32 - 4, end, 4);
4510 memcpy(align_buf + align_start, data_buf, buf_size);
/* Non-buffered flash needs a page-sized (264-byte) staging buffer for
 * the read/erase/rewrite cycle below. */
4514 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4515 flash_buffer = kmalloc(264, GFP_KERNEL);
4516 if (flash_buffer == NULL) {
4518 goto nvram_write_end;
4523 while ((written < len32) && (rc == 0)) {
4524 u32 page_start, page_end, data_start, data_end;
4525 u32 addr, cmd_flags;
4528 /* Find the page_start addr */
4529 page_start = offset32 + written;
4530 page_start -= (page_start % bp->flash_info->page_size);
4531 /* Find the page_end addr */
4532 page_end = page_start + bp->flash_info->page_size;
4533 /* Find the data_start addr */
4534 data_start = (written == 0) ? offset32 : page_start;
4535 /* Find the data_end addr */
4536 data_end = (page_end > offset32 + len32) ?
4537 (offset32 + len32) : page_end;
4539 /* Request access to the flash interface. */
4540 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4541 goto nvram_write_end;
4543 /* Enable access to flash interface */
4544 bnx2_enable_nvram_access(bp);
4546 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4547 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4550 /* Read the whole page into the buffer
4551 * (non-buffer flash only) */
4552 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4553 if (j == (bp->flash_info->page_size - 4)) {
4554 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4556 rc = bnx2_nvram_read_dword(bp,
4562 goto nvram_write_end;
4568 /* Enable writes to flash interface (unlock write-protect) */
4569 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4570 goto nvram_write_end;
4572 /* Loop to write back the buffer data from page_start to
4575 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4576 /* Erase the page */
4577 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4578 goto nvram_write_end;
4580 /* Re-enable the write again for the actual write */
4581 bnx2_enable_nvram_write(bp);
4583 for (addr = page_start; addr < data_start;
4584 addr += 4, i += 4) {
4586 rc = bnx2_nvram_write_dword(bp, addr,
4587 &flash_buffer[i], cmd_flags);
4590 goto nvram_write_end;
4596 /* Loop to write the new data from data_start to data_end */
4597 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4598 if ((addr == page_end - 4) ||
4599 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4600 (addr == data_end - 4))) {
4602 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4604 rc = bnx2_nvram_write_dword(bp, addr, buf,
4608 goto nvram_write_end;
4614 /* Loop to write back the buffer data from data_end
4616 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4617 for (addr = data_end; addr < page_end;
4618 addr += 4, i += 4) {
4620 if (addr == page_end-4) {
4621 cmd_flags = BNX2_NVM_COMMAND_LAST;
4623 rc = bnx2_nvram_write_dword(bp, addr,
4624 &flash_buffer[i], cmd_flags);
4627 goto nvram_write_end;
4633 /* Disable writes to flash interface (lock write-protect) */
4634 bnx2_disable_nvram_write(bp);
4636 /* Disable access to flash interface */
4637 bnx2_disable_nvram_access(bp);
4638 bnx2_release_nvram_lock(bp);
4640 /* Increment written */
4641 written += data_end - data_start;
4645 kfree(flash_buffer);
/* Query bootcode firmware capabilities from shared memory and sync flags.
 *
 * Clears the remote-PHY and keep-VLAN flags, re-derives keep-VLAN from the
 * ASF state, then reads FW_CAP_MB.  If the capability signature matches,
 * picks up keep-VLAN support and (for SerDes ports) remote-PHY capability,
 * choosing PORT_FIBRE/PORT_TP from the current link status.  Finally, if
 * the interface is running, acknowledges the accepted capabilities back to
 * firmware via DRV_ACK_CAP_MB.
 * NOTE(review): the early return on signature mismatch and some
 * declarations are elided in this extract.
 */
4651 bnx2_init_fw_cap(struct bnx2 *bp)
4655 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4656 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4658 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4659 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4661 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4662 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4665 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4666 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4667 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4670 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4671 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4674 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4676 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4677 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4678 bp->phy_port = PORT_FIBRE;
4680 bp->phy_port = PORT_TP;
4682 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4683 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4686 if (netif_running(bp->dev) && sig)
4687 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
/* Point the two separate GRC windows at the MSI-X vector table and the
 * pending-bit array so the host can access them through BAR windows. */
4691 bnx2_setup_msix_tbl(struct bnx2 *bp)
4693 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4695 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4696 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
/* Perform a coordinated soft reset of the chip.
 *
 * Sequence: quiesce DMA (per-chip method — MISC_ENABLE clears on
 * 5706/5708, NEW_CORE_CTL DMA disable plus a pending-transaction poll on
 * newer parts), handshake WAIT0 with firmware, deposit the driver-reset
 * signature in shared memory, issue the reset (MISC_COMMAND SW_RESET on
 * 5709, CORE_RST_REQ through PCICFG_MISC_CONFIG otherwise), poll the reset
 * bits out, sanity-check byte swapping via PCI_SWAP_DIAG0, handshake WAIT1,
 * and re-read firmware capabilities under phy_lock.  Applies the 5706 A0
 * voltage-regulator and bad-rbuf workarounds and reprograms the MSI-X
 * windows when MSI-X is in use.
 * NOTE(review): delay statements, some error returns, and closing braces
 * are elided in this extract.
 */
4700 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4706 /* Wait for the current PCI transaction to complete before
4707 * issuing a reset. */
4708 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4709 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4710 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4711 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4712 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4713 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4714 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4715 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4718 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4719 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4720 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4721 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4723 for (i = 0; i < 100; i++) {
4725 val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4726 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4731 /* Wait for the firmware to tell us it is ok to issue a reset. */
4732 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4734 /* Deposit a driver reset signature so the firmware knows that
4735 * this is a soft reset. */
4736 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4737 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4739 /* Do a dummy read to force the chip to complete all current transaction
4740 * before we issue a reset. */
4741 val = REG_RD(bp, BNX2_MISC_ID);
4743 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4744 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4745 REG_RD(bp, BNX2_MISC_COMMAND);
4748 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4749 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4751 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4754 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4755 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4756 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4759 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4761 /* Reading back any register after chip reset will hang the
4762 * bus on 5706 A0 and A1. The msleep below provides plenty
4763 * of margin for write posting.
4765 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4766 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4769 /* Reset takes approximate 30 usec */
4770 for (i = 0; i < 10; i++) {
4771 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4772 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4773 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4778 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4779 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4780 pr_err("Chip reset did not complete\n");
4785 /* Make sure byte swapping is properly configured. */
4786 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4787 if (val != 0x01020304) {
4788 pr_err("Chip not in correct endian mode\n");
4792 /* Wait for the firmware to finish its initialization. */
4793 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4797 spin_lock_bh(&bp->phy_lock);
4798 old_port = bp->phy_port;
4799 bnx2_init_fw_cap(bp);
4800 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4801 old_port != bp->phy_port)
4802 bnx2_set_default_remote_link(bp);
4803 spin_unlock_bh(&bp->phy_lock);
4805 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4806 /* Adjust the voltage regular to two steps lower. The default
4807 * of this register is 0x0000000e. */
4808 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4810 /* Remove bad rbuf memory from the free pool. */
4811 rc = bnx2_alloc_bad_rbuf(bp);
4814 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4815 bnx2_setup_msix_tbl(bp);
4816 /* Prevent MSIX table reads and write from timing out */
4817 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4818 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
/* Bring the chip from post-reset state to fully operational.
 *
 * Programs DMA byte/word swapping and channel counts, applies PCI-X
 * workarounds, enables host coalescing and context blocks, initializes the
 * 5709 context memory or legacy contexts, loads the on-chip CPUs, probes
 * NVRAM, programs the MAC address/backoff seed/MTU and RBUF thresholds,
 * zeroes the status/stats blocks, sets up the host coalescing (HC) block —
 * including per-vector parameters when MSI-X is active — programs the RSS
 * indirection state, re-enables DMA on 5709, and completes the WAIT2/RESET
 * firmware handshake before enabling the remaining blocks.
 * NOTE(review): variable declarations, mtu derivation, some returns, and
 * closing braces are elided in this extract.
 */
4825 bnx2_init_chip(struct bnx2 *bp)
4830 /* Make sure the interrupt is not active. */
4831 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4833 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4834 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4836 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4838 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4839 DMA_READ_CHANS << 12 |
4840 DMA_WRITE_CHANS << 16;
4842 val |= (0x2 << 20) | (1 << 11);
4844 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4847 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4848 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4849 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4851 REG_WR(bp, BNX2_DMA_CONFIG, val);
4853 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4854 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4855 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4856 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4859 if (bp->flags & BNX2_FLAG_PCIX) {
/* Disable PCI-X relaxed ordering (ERO) in the command register. */
4862 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4864 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4865 val16 & ~PCI_X_CMD_ERO);
4868 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4869 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4870 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4871 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4873 /* Initialize context mapping and zero out the quick contexts. The
4874 * context block must have already been enabled. */
4875 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4876 rc = bnx2_init_5709_context(bp);
4880 bnx2_init_context(bp);
4882 if ((rc = bnx2_init_cpus(bp)) != 0)
4885 bnx2_init_nvram(bp);
4887 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4889 val = REG_RD(bp, BNX2_MQ_CONFIG);
4890 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4891 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4892 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4893 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4894 if (CHIP_REV(bp) == CHIP_REV_Ax)
4895 val |= BNX2_MQ_CONFIG_HALT_DIS;
4898 REG_WR(bp, BNX2_MQ_CONFIG, val);
4900 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4901 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4902 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4904 val = (BCM_PAGE_BITS - 8) << 24;
4905 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4907 /* Configure page size. */
4908 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4909 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4910 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4911 REG_WR(bp, BNX2_TBDR_CONFIG, val);
/* Seed the EMAC backoff generator from the MAC address bytes. */
4913 val = bp->mac_addr[0] +
4914 (bp->mac_addr[1] << 8) +
4915 (bp->mac_addr[2] << 16) +
4917 (bp->mac_addr[4] << 8) +
4918 (bp->mac_addr[5] << 16);
4919 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4921 /* Program the MTU. Also include 4 bytes for CRC32. */
4923 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4924 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4925 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4926 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4931 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4932 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4933 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4935 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4936 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4937 bp->bnx2_napi[i].last_status_idx = 0;
4939 bp->idle_chk_status_idx = 0xffff;
4941 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4943 /* Set up how to generate a link change interrupt. */
4944 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4946 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4947 (u64) bp->status_blk_mapping & 0xffffffff);
4948 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4950 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4951 (u64) bp->stats_blk_mapping & 0xffffffff);
4952 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4953 (u64) bp->stats_blk_mapping >> 32);
4955 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4956 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4958 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4959 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4961 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4962 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4964 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4966 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4968 REG_WR(bp, BNX2_HC_COM_TICKS,
4969 (bp->com_ticks_int << 16) | bp->com_ticks);
4971 REG_WR(bp, BNX2_HC_CMD_TICKS,
4972 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4974 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4975 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4977 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4978 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4980 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4981 val = BNX2_HC_CONFIG_COLLECT_STATS;
4983 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4984 BNX2_HC_CONFIG_COLLECT_STATS;
4987 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4988 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4989 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4991 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4994 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4995 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4997 REG_WR(bp, BNX2_HC_CONFIG, val);
4999 if (bp->rx_ticks < 25)
5000 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5002 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
/* Per-vector HC configuration for additional MSI-X vectors. */
5004 for (i = 1; i < bp->irq_nvecs; i++) {
5005 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5006 BNX2_HC_SB_CONFIG_1;
5009 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5010 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5011 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5013 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5014 (bp->tx_quick_cons_trip_int << 16) |
5015 bp->tx_quick_cons_trip);
5017 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5018 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5020 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5021 (bp->rx_quick_cons_trip_int << 16) |
5022 bp->rx_quick_cons_trip);
5024 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5025 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5028 /* Clear internal stats counters. */
5029 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5031 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5033 /* Initialize the receive filter. */
5034 bnx2_set_rx_mode(bp->dev);
5036 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5037 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5038 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5039 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5041 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5044 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5045 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5049 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
/* Reset the software-side producer/consumer bookkeeping for every
 * possible MSI-X vector's TX and RX rings (hardware state untouched).
 * NOTE(review): some zeroed fields and closing braces are elided here.
 */
5055 bnx2_clear_ring_states(struct bnx2 *bp)
5057 struct bnx2_napi *bnapi;
5058 struct bnx2_tx_ring_info *txr;
5059 struct bnx2_rx_ring_info *rxr;
5062 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5063 bnapi = &bp->bnx2_napi[i];
5064 txr = &bnapi->tx_ring;
5065 rxr = &bnapi->rx_ring;
5068 txr->hw_tx_cons = 0;
5069 rxr->rx_prod_bseq = 0;
5072 rxr->rx_pg_prod = 0;
5073 rxr->rx_pg_cons = 0;
/* Program the L2 TX context for connection @cid: context type/size, command
 * type, and the high/low halves of the TX descriptor ring's DMA address.
 * 5709 (XI) chips use a different set of context offsets than older parts.
 */
5078 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5080 u32 val, offset0, offset1, offset2, offset3;
5081 u32 cid_addr = GET_CID_ADDR(cid);
5083 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5084 offset0 = BNX2_L2CTX_TYPE_XI;
5085 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5086 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5087 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5089 offset0 = BNX2_L2CTX_TYPE;
5090 offset1 = BNX2_L2CTX_CMD_TYPE;
5091 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5092 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5094 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5095 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5097 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5098 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5100 val = (u64) txr->tx_desc_mapping >> 32;
5101 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5103 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5104 bnx2_ctx_wr(bp, cid_addr, offset3, val);
/* Initialize TX ring @ring_num: pick its CID (TX_TSS_CID block for rings
 * past the first), set the wake threshold to half the ring size, chain the
 * last BD back to the ring base, record the doorbell mailbox addresses,
 * and program the hardware context via bnx2_init_tx_context().
 * NOTE(review): the ring-0 CID assignment and producer resets between the
 * visible lines are elided in this extract.
 */
5108 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5112 struct bnx2_napi *bnapi;
5113 struct bnx2_tx_ring_info *txr;
5115 bnapi = &bp->bnx2_napi[ring_num];
5116 txr = &bnapi->tx_ring;
5121 cid = TX_TSS_CID + ring_num - 1;
5123 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5125 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5127 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5128 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5131 txr->tx_prod_bseq = 0;
5133 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5134 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5136 bnx2_init_tx_context(bp, cid, txr);
/* Fill @num_rings RX BD pages: every BD gets @buf_size and START|END
 * flags, and each page's final BD is chained to the next page (wrapping
 * the last page back to the first).
 * NOTE(review): the next-page index computation and closing braces are
 * elided in this extract.
 */
5140 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5146 for (i = 0; i < num_rings; i++) {
5149 rxbd = &rx_ring[i][0];
5150 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5151 rxbd->rx_bd_len = buf_size;
5152 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5154 if (i == (num_rings - 1))
5158 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5159 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
/* Initialize RX ring @ring_num: choose its CID (RX_RSS_CID block for
 * rings past the first), build the BD chains for the normal and — when
 * jumbo pages are enabled — page rings, program the ring base addresses
 * and page-buffer parameters into the L2 context, pre-fill both rings
 * with page/data buffers (warning if allocation falls short), record the
 * mailbox doorbell addresses, and kick the initial producer indices to
 * hardware.
 * NOTE(review): the ring-0 CID assignment and some closing braces are
 * elided in this extract.
 */
5164 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5167 u16 prod, ring_prod;
5168 u32 cid, rx_cid_addr, val;
5169 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5170 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5175 cid = RX_RSS_CID + ring_num - 1;
5177 rx_cid_addr = GET_CID_ADDR(cid);
5179 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5180 bp->rx_buf_use_size, bp->rx_max_ring);
5182 bnx2_init_rx_context(bp, cid);
5184 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5185 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5186 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5189 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5190 if (bp->rx_pg_ring_size) {
5191 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5192 rxr->rx_pg_desc_mapping,
5193 PAGE_SIZE, bp->rx_max_pg_ring);
5194 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5195 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5196 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5197 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5199 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5200 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5202 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5203 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5205 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5206 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5209 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5210 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5212 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5213 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
/* Pre-allocate pages for the jumbo page ring; stop early on failure. */
5215 ring_prod = prod = rxr->rx_pg_prod;
5216 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5217 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5218 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5219 ring_num, i, bp->rx_pg_ring_size);
5222 prod = NEXT_RX_BD(prod);
5223 ring_prod = RX_PG_RING_IDX(prod);
5225 rxr->rx_pg_prod = prod;
/* Pre-allocate data buffers for the normal RX ring. */
5227 ring_prod = prod = rxr->rx_prod;
5228 for (i = 0; i < bp->rx_ring_size; i++) {
5229 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5230 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5231 ring_num, i, bp->rx_ring_size);
5234 prod = NEXT_RX_BD(prod);
5235 ring_prod = RX_RING_IDX(prod);
5237 rxr->rx_prod = prod;
5239 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5240 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5241 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5243 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5244 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5246 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
/* Initialize every TX and RX ring and, when multiple rings are in use,
 * program the TSS configuration and the RSS indirection table (entries
 * distributed round-robin over the non-default RX rings, written to the
 * RLUP block eight entries at a time) plus the RSS hash-type config.
 * NOTE(review): variable declarations, the tbl_32 reset between writes,
 * and closing braces are elided in this extract.
 */
5250 bnx2_init_all_rings(struct bnx2 *bp)
5255 bnx2_clear_ring_states(bp);
5257 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5258 for (i = 0; i < bp->num_tx_rings; i++)
5259 bnx2_init_tx_ring(bp, i);
5261 if (bp->num_tx_rings > 1)
5262 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5265 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5266 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5268 for (i = 0; i < bp->num_rx_rings; i++)
5269 bnx2_init_rx_ring(bp, i);
5271 if (bp->num_rx_rings > 1) {
5274 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5275 int shift = (i % 8) << 2;
5277 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5279 REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5280 REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5281 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5282 BNX2_RLUP_RSS_COMMAND_WRITE |
5283 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5288 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5289 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5291 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
/* Compute how many BD pages are needed to hold @ring_size descriptors,
 * rounded up to the next power of two and clamped to @max_size.
 * NOTE(review): the increment/rounding statements and the return are
 * elided in this extract.
 */
5296 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5298 u32 max, num_rings = 1;
5300 while (ring_size > MAX_RX_DESC_CNT) {
5301 ring_size -= MAX_RX_DESC_CNT;
5304 /* round to next power of 2 */
5306 while ((max & num_rings) == 0)
5309 if (num_rings != max)
/* Derive all RX buffer/ring sizing from the requested ring @size and the
 * current MTU.  If a single buffer (plus skb overhead) would exceed one
 * page and the chip supports it, enable the jumbo page ring: data beyond
 * the copy threshold lands in attached pages, so the main buffer shrinks
 * to header size.  Records buffer sizes, ring counts (power-of-two via
 * bnx2_find_max_ring), and the corresponding index masks in @bp.
 */
5316 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5318 u32 rx_size, rx_space, jumbo_size;
5320 /* 8 for CRC and VLAN */
5321 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5323 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5324 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5326 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5327 bp->rx_pg_ring_size = 0;
5328 bp->rx_max_pg_ring = 0;
5329 bp->rx_max_pg_ring_idx = 0;
5330 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5331 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5333 jumbo_size = size * pages;
5334 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5335 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5337 bp->rx_pg_ring_size = jumbo_size;
5338 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5340 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5341 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5342 bp->rx_copy_thresh = 0;
5345 bp->rx_buf_use_size = rx_size;
5346 /* hw alignment + build_skb() overhead*/
5347 bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5348 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5349 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5350 bp->rx_ring_size = size;
5351 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5352 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
/* Release every in-flight TX skb on all TX rings: unmap the head DMA
 * mapping, unmap each fragment page, free the skb, and reset the netdev
 * BQL queue state for the ring.
 * NOTE(review): the empty-slot skip, skb free, and index advances between
 * the visible lines are elided in this extract.
 */
5356 bnx2_free_tx_skbs(struct bnx2 *bp)
5360 for (i = 0; i < bp->num_tx_rings; i++) {
5361 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5362 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5365 if (txr->tx_buf_ring == NULL)
5368 for (j = 0; j < TX_DESC_CNT; ) {
5369 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5370 struct sk_buff *skb = tx_buf->skb;
5378 dma_unmap_single(&bp->pdev->dev,
5379 dma_unmap_addr(tx_buf, mapping),
5385 last = tx_buf->nr_frags;
5387 for (k = 0; k < last; k++, j++) {
5388 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5389 dma_unmap_page(&bp->pdev->dev,
5390 dma_unmap_addr(tx_buf, mapping),
5391 skb_frag_size(&skb_shinfo(skb)->frags[k]),
5396 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
/* Release every posted RX buffer on all RX rings: unmap and free each
 * data buffer, then free any attached jumbo pages.
 * NOTE(review): the empty-slot skip and the data-free call between the
 * visible lines are elided in this extract.
 */
5401 bnx2_free_rx_skbs(struct bnx2 *bp)
5405 for (i = 0; i < bp->num_rx_rings; i++) {
5406 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5407 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5410 if (rxr->rx_buf_ring == NULL)
5413 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5414 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5415 u8 *data = rx_buf->data;
5420 dma_unmap_single(&bp->pdev->dev,
5421 dma_unmap_addr(rx_buf, mapping),
5422 bp->rx_buf_use_size,
5423 PCI_DMA_FROMDEVICE);
5425 rx_buf->data = NULL;
5429 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5430 bnx2_free_rx_page(bp, rxr, j);
/* Free all queued TX and RX buffers across every ring. */
5435 bnx2_free_skbs(struct bnx2 *bp)
5437 bnx2_free_tx_skbs(bp);
5438 bnx2_free_rx_skbs(bp);
/* Reset the chip with @reset_code, then reprogram it and rebuild all
 * rings.  NOTE(review): the error-return checks between the visible calls
 * are elided in this extract. */
5442 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5446 rc = bnx2_reset_chip(bp, reset_code);
5451 if ((rc = bnx2_init_chip(bp)) != 0)
5454 bnx2_init_all_rings(bp);
/* Full NIC (re)initialization: reset and reprogram the chip, then bring
 * up the PHY under phy_lock (optionally resetting it), replaying any
 * pending remote-PHY event when that capability is active. */
5459 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5463 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5466 spin_lock_bh(&bp->phy_lock);
5467 bnx2_init_phy(bp, reset_phy);
5469 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5470 bnx2_remote_phy_event(bp);
5471 spin_unlock_bh(&bp->phy_lock);
/* Quiesce the chip for suspend/shutdown, choosing the firmware reset code
 * from the WoL configuration (link-down when WoL is unavailable, otherwise
 * suspend with or without WoL).
 * NOTE(review): the WoL-enabled condition between the first two codes is
 * elided in this extract.
 */
5476 bnx2_shutdown_chip(struct bnx2 *bp)
5480 if (bp->flags & BNX2_FLAG_NO_WOL)
5481 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5483 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5485 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5487 return bnx2_reset_chip(bp, reset_code);
5491 bnx2_test_registers(struct bnx2 *bp)
5495 static const struct {
5498 #define BNX2_FL_NOT_5709 1
5502 { 0x006c, 0, 0x00000000, 0x0000003f },
5503 { 0x0090, 0, 0xffffffff, 0x00000000 },
5504 { 0x0094, 0, 0x00000000, 0x00000000 },
5506 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5507 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5508 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5509 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5510 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5511 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5512 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5513 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5514 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5516 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5517 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5518 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5519 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5520 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5521 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5523 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5524 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5525 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5527 { 0x1000, 0, 0x00000000, 0x00000001 },
5528 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5530 { 0x1408, 0, 0x01c00800, 0x00000000 },
5531 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5532 { 0x14a8, 0, 0x00000000, 0x000001ff },
5533 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5534 { 0x14b0, 0, 0x00000002, 0x00000001 },
5535 { 0x14b8, 0, 0x00000000, 0x00000000 },
5536 { 0x14c0, 0, 0x00000000, 0x00000009 },
5537 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5538 { 0x14cc, 0, 0x00000000, 0x00000001 },
5539 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5541 { 0x1800, 0, 0x00000000, 0x00000001 },
5542 { 0x1804, 0, 0x00000000, 0x00000003 },
5544 { 0x2800, 0, 0x00000000, 0x00000001 },
5545 { 0x2804, 0, 0x00000000, 0x00003f01 },
5546 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5547 { 0x2810, 0, 0xffff0000, 0x00000000 },
5548 { 0x2814, 0, 0xffff0000, 0x00000000 },
5549 { 0x2818, 0, 0xffff0000, 0x00000000 },
5550 { 0x281c, 0, 0xffff0000, 0x00000000 },
5551 { 0x2834, 0, 0xffffffff, 0x00000000 },
5552 { 0x2840, 0, 0x00000000, 0xffffffff },
5553 { 0x2844, 0, 0x00000000, 0xffffffff },
5554 { 0x2848, 0, 0xffffffff, 0x00000000 },
5555 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5557 { 0x2c00, 0, 0x00000000, 0x00000011 },
5558 { 0x2c04, 0, 0x00000000, 0x00030007 },
5560 { 0x3c00, 0, 0x00000000, 0x00000001 },
5561 { 0x3c04, 0, 0x00000000, 0x00070000 },
5562 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5563 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5564 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5565 { 0x3c14, 0, 0x00000000, 0xffffffff },
5566 { 0x3c18, 0, 0x00000000, 0xffffffff },
5567 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5568 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5570 { 0x5004, 0, 0x00000000, 0x0000007f },
5571 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5573 { 0x5c00, 0, 0x00000000, 0x00000001 },
5574 { 0x5c04, 0, 0x00000000, 0x0003000f },
5575 { 0x5c08, 0, 0x00000003, 0x00000000 },
5576 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5577 { 0x5c10, 0, 0x00000000, 0xffffffff },
5578 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5579 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5580 { 0x5c88, 0, 0x00000000, 0x00077373 },
5581 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5583 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5584 { 0x680c, 0, 0xffffffff, 0x00000000 },
5585 { 0x6810, 0, 0xffffffff, 0x00000000 },
5586 { 0x6814, 0, 0xffffffff, 0x00000000 },
5587 { 0x6818, 0, 0xffffffff, 0x00000000 },
5588 { 0x681c, 0, 0xffffffff, 0x00000000 },
5589 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5590 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5591 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5592 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5593 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5594 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5595 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5596 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5597 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5598 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5599 { 0x684c, 0, 0xffffffff, 0x00000000 },
5600 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5601 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5602 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5603 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5604 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5605 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5607 { 0xffff, 0, 0x00000000, 0x00000000 },
5612 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5615 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5616 u32 offset, rw_mask, ro_mask, save_val, val;
5617 u16 flags = reg_tbl[i].flags;
5619 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5622 offset = (u32) reg_tbl[i].offset;
5623 rw_mask = reg_tbl[i].rw_mask;
5624 ro_mask = reg_tbl[i].ro_mask;
5626 save_val = readl(bp->regview + offset);
5628 writel(0, bp->regview + offset);
5630 val = readl(bp->regview + offset);
5631 if ((val & rw_mask) != 0) {
5635 if ((val & ro_mask) != (save_val & ro_mask)) {
5639 writel(0xffffffff, bp->regview + offset);
5641 val = readl(bp->regview + offset);
5642 if ((val & rw_mask) != rw_mask) {
5646 if ((val & ro_mask) != (save_val & ro_mask)) {
5650 writel(save_val, bp->regview + offset);
5654 writel(save_val, bp->regview + offset);
5662 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5664 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5665 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5668 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5671 for (offset = 0; offset < size; offset += 4) {
5673 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5675 if (bnx2_reg_rd_ind(bp, start + offset) !=
5685 bnx2_test_memory(struct bnx2 *bp)
5689 static struct mem_entry {
5692 } mem_tbl_5706[] = {
5693 { 0x60000, 0x4000 },
5694 { 0xa0000, 0x3000 },
5695 { 0xe0000, 0x4000 },
5696 { 0x120000, 0x4000 },
5697 { 0x1a0000, 0x4000 },
5698 { 0x160000, 0x4000 },
5702 { 0x60000, 0x4000 },
5703 { 0xa0000, 0x3000 },
5704 { 0xe0000, 0x4000 },
5705 { 0x120000, 0x4000 },
5706 { 0x1a0000, 0x4000 },
5709 struct mem_entry *mem_tbl;
5711 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5712 mem_tbl = mem_tbl_5709;
5714 mem_tbl = mem_tbl_5706;
5716 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5717 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5718 mem_tbl[i].len)) != 0) {
5726 #define BNX2_MAC_LOOPBACK 0
5727 #define BNX2_PHY_LOOPBACK 1
5730 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5732 unsigned int pkt_size, num_pkts, i;
5733 struct sk_buff *skb;
5735 unsigned char *packet;
5736 u16 rx_start_idx, rx_idx;
5739 struct sw_bd *rx_buf;
5740 struct l2_fhdr *rx_hdr;
5742 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5743 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5744 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5748 txr = &tx_napi->tx_ring;
5749 rxr = &bnapi->rx_ring;
5750 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5751 bp->loopback = MAC_LOOPBACK;
5752 bnx2_set_mac_loopback(bp);
5754 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5755 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5758 bp->loopback = PHY_LOOPBACK;
5759 bnx2_set_phy_loopback(bp);
5764 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5765 skb = netdev_alloc_skb(bp->dev, pkt_size);
5768 packet = skb_put(skb, pkt_size);
5769 memcpy(packet, bp->dev->dev_addr, 6);
5770 memset(packet + 6, 0x0, 8);
5771 for (i = 14; i < pkt_size; i++)
5772 packet[i] = (unsigned char) (i & 0xff);
5774 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5776 if (dma_mapping_error(&bp->pdev->dev, map)) {
5781 REG_WR(bp, BNX2_HC_COMMAND,
5782 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5784 REG_RD(bp, BNX2_HC_COMMAND);
5787 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5791 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5793 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5794 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5795 txbd->tx_bd_mss_nbytes = pkt_size;
5796 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5799 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5800 txr->tx_prod_bseq += pkt_size;
5802 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5803 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5807 REG_WR(bp, BNX2_HC_COMMAND,
5808 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5810 REG_RD(bp, BNX2_HC_COMMAND);
5814 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5817 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5818 goto loopback_test_done;
5820 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5821 if (rx_idx != rx_start_idx + num_pkts) {
5822 goto loopback_test_done;
5825 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5826 data = rx_buf->data;
5828 rx_hdr = get_l2_fhdr(data);
5829 data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5831 dma_sync_single_for_cpu(&bp->pdev->dev,
5832 dma_unmap_addr(rx_buf, mapping),
5833 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5835 if (rx_hdr->l2_fhdr_status &
5836 (L2_FHDR_ERRORS_BAD_CRC |
5837 L2_FHDR_ERRORS_PHY_DECODE |
5838 L2_FHDR_ERRORS_ALIGNMENT |
5839 L2_FHDR_ERRORS_TOO_SHORT |
5840 L2_FHDR_ERRORS_GIANT_FRAME)) {
5842 goto loopback_test_done;
5845 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5846 goto loopback_test_done;
5849 for (i = 14; i < pkt_size; i++) {
5850 if (*(data + i) != (unsigned char) (i & 0xff)) {
5851 goto loopback_test_done;
5862 #define BNX2_MAC_LOOPBACK_FAILED 1
5863 #define BNX2_PHY_LOOPBACK_FAILED 2
5864 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5865 BNX2_PHY_LOOPBACK_FAILED)
5868 bnx2_test_loopback(struct bnx2 *bp)
5872 if (!netif_running(bp->dev))
5873 return BNX2_LOOPBACK_FAILED;
5875 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5876 spin_lock_bh(&bp->phy_lock);
5877 bnx2_init_phy(bp, 1);
5878 spin_unlock_bh(&bp->phy_lock);
5879 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5880 rc |= BNX2_MAC_LOOPBACK_FAILED;
5881 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5882 rc |= BNX2_PHY_LOOPBACK_FAILED;
5886 #define NVRAM_SIZE 0x200
5887 #define CRC32_RESIDUAL 0xdebb20e3
5890 bnx2_test_nvram(struct bnx2 *bp)
5892 __be32 buf[NVRAM_SIZE / 4];
5893 u8 *data = (u8 *) buf;
5897 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5898 goto test_nvram_done;
5900 magic = be32_to_cpu(buf[0]);
5901 if (magic != 0x669955aa) {
5903 goto test_nvram_done;
5906 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5907 goto test_nvram_done;
5909 csum = ether_crc_le(0x100, data);
5910 if (csum != CRC32_RESIDUAL) {
5912 goto test_nvram_done;
5915 csum = ether_crc_le(0x100, data + 0x100);
5916 if (csum != CRC32_RESIDUAL) {
5925 bnx2_test_link(struct bnx2 *bp)
5929 if (!netif_running(bp->dev))
5932 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5937 spin_lock_bh(&bp->phy_lock);
5938 bnx2_enable_bmsr1(bp);
5939 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5940 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5941 bnx2_disable_bmsr1(bp);
5942 spin_unlock_bh(&bp->phy_lock);
5944 if (bmsr & BMSR_LSTATUS) {
5951 bnx2_test_intr(struct bnx2 *bp)
5956 if (!netif_running(bp->dev))
5959 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5961 /* This register is not touched during run-time. */
5962 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5963 REG_RD(bp, BNX2_HC_COMMAND);
5965 for (i = 0; i < 10; i++) {
5966 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5972 msleep_interruptible(10);
5980 /* Determining link for parallel detection. */
5982 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5984 u32 mode_ctl, an_dbg, exp;
5986 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5989 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5990 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5992 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5995 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5996 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5997 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5999 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6002 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6003 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6004 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6006 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
6013 bnx2_5706_serdes_timer(struct bnx2 *bp)
6017 spin_lock(&bp->phy_lock);
6018 if (bp->serdes_an_pending) {
6019 bp->serdes_an_pending--;
6021 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6024 bp->current_interval = BNX2_TIMER_INTERVAL;
6026 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6028 if (bmcr & BMCR_ANENABLE) {
6029 if (bnx2_5706_serdes_has_link(bp)) {
6030 bmcr &= ~BMCR_ANENABLE;
6031 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6032 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6033 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6037 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6038 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6041 bnx2_write_phy(bp, 0x17, 0x0f01);
6042 bnx2_read_phy(bp, 0x15, &phy2);
6046 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6047 bmcr |= BMCR_ANENABLE;
6048 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6050 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6053 bp->current_interval = BNX2_TIMER_INTERVAL;
6058 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6059 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6060 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6062 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6063 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6064 bnx2_5706s_force_link_dn(bp, 1);
6065 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6068 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6071 spin_unlock(&bp->phy_lock);
6075 bnx2_5708_serdes_timer(struct bnx2 *bp)
6077 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6080 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6081 bp->serdes_an_pending = 0;
6085 spin_lock(&bp->phy_lock);
6086 if (bp->serdes_an_pending)
6087 bp->serdes_an_pending--;
6088 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6091 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6092 if (bmcr & BMCR_ANENABLE) {
6093 bnx2_enable_forced_2g5(bp);
6094 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6096 bnx2_disable_forced_2g5(bp);
6097 bp->serdes_an_pending = 2;
6098 bp->current_interval = BNX2_TIMER_INTERVAL;
6102 bp->current_interval = BNX2_TIMER_INTERVAL;
6104 spin_unlock(&bp->phy_lock);
6108 bnx2_timer(unsigned long data)
6110 struct bnx2 *bp = (struct bnx2 *) data;
6112 if (!netif_running(bp->dev))
6115 if (atomic_read(&bp->intr_sem) != 0)
6116 goto bnx2_restart_timer;
6118 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6119 BNX2_FLAG_USING_MSI)
6120 bnx2_chk_missed_msi(bp);
6122 bnx2_send_heart_beat(bp);
6124 bp->stats_blk->stat_FwRxDrop =
6125 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6127 /* workaround occasional corrupted counters */
6128 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6129 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6130 BNX2_HC_COMMAND_STATS_NOW);
6132 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6133 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6134 bnx2_5706_serdes_timer(bp);
6136 bnx2_5708_serdes_timer(bp);
6140 mod_timer(&bp->timer, jiffies + bp->current_interval);
6144 bnx2_request_irq(struct bnx2 *bp)
6146 unsigned long flags;
6147 struct bnx2_irq *irq;
6150 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6153 flags = IRQF_SHARED;
6155 for (i = 0; i < bp->irq_nvecs; i++) {
6156 irq = &bp->irq_tbl[i];
6157 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6167 __bnx2_free_irq(struct bnx2 *bp)
6169 struct bnx2_irq *irq;
6172 for (i = 0; i < bp->irq_nvecs; i++) {
6173 irq = &bp->irq_tbl[i];
6175 free_irq(irq->vector, &bp->bnx2_napi[i]);
6181 bnx2_free_irq(struct bnx2 *bp)
6184 __bnx2_free_irq(bp);
6185 if (bp->flags & BNX2_FLAG_USING_MSI)
6186 pci_disable_msi(bp->pdev);
6187 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6188 pci_disable_msix(bp->pdev);
6190 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6194 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6196 int i, total_vecs, rc;
6197 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6198 struct net_device *dev = bp->dev;
6199 const int len = sizeof(bp->irq_tbl[0].name);
6201 bnx2_setup_msix_tbl(bp);
6202 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6203 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6204 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6206 /* Need to flush the previous three writes to ensure MSI-X
6207 * is setup properly */
6208 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6210 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6211 msix_ent[i].entry = i;
6212 msix_ent[i].vector = 0;
6215 total_vecs = msix_vecs;
6220 while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6221 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6231 msix_vecs = total_vecs;
6235 bp->irq_nvecs = msix_vecs;
6236 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6237 for (i = 0; i < total_vecs; i++) {
6238 bp->irq_tbl[i].vector = msix_ent[i].vector;
6239 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6240 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6245 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6247 int cpus = num_online_cpus();
6250 if (!bp->num_req_rx_rings)
6251 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6252 else if (!bp->num_req_tx_rings)
6253 msix_vecs = max(cpus, bp->num_req_rx_rings);
6255 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6257 msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6259 bp->irq_tbl[0].handler = bnx2_interrupt;
6260 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6262 bp->irq_tbl[0].vector = bp->pdev->irq;
6264 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6265 bnx2_enable_msix(bp, msix_vecs);
6267 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6268 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6269 if (pci_enable_msi(bp->pdev) == 0) {
6270 bp->flags |= BNX2_FLAG_USING_MSI;
6271 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6272 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6273 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6275 bp->irq_tbl[0].handler = bnx2_msi;
6277 bp->irq_tbl[0].vector = bp->pdev->irq;
6281 if (!bp->num_req_tx_rings)
6282 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6284 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6286 if (!bp->num_req_rx_rings)
6287 bp->num_rx_rings = bp->irq_nvecs;
6289 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6291 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6293 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6296 /* Called with rtnl_lock */
6298 bnx2_open(struct net_device *dev)
6300 struct bnx2 *bp = netdev_priv(dev);
6303 rc = bnx2_request_firmware(bp);
6307 netif_carrier_off(dev);
6309 bnx2_set_power_state(bp, PCI_D0);
6310 bnx2_disable_int(bp);
6312 rc = bnx2_setup_int_mode(bp, disable_msi);
6316 bnx2_napi_enable(bp);
6317 rc = bnx2_alloc_mem(bp);
6321 rc = bnx2_request_irq(bp);
6325 rc = bnx2_init_nic(bp, 1);
6329 mod_timer(&bp->timer, jiffies + bp->current_interval);
6331 atomic_set(&bp->intr_sem, 0);
6333 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6335 bnx2_enable_int(bp);
6337 if (bp->flags & BNX2_FLAG_USING_MSI) {
6338 /* Test MSI to make sure it is working
6339 * If MSI test fails, go back to INTx mode
6341 if (bnx2_test_intr(bp) != 0) {
6342 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6344 bnx2_disable_int(bp);
6347 bnx2_setup_int_mode(bp, 1);
6349 rc = bnx2_init_nic(bp, 0);
6352 rc = bnx2_request_irq(bp);
6355 del_timer_sync(&bp->timer);
6358 bnx2_enable_int(bp);
6361 if (bp->flags & BNX2_FLAG_USING_MSI)
6362 netdev_info(dev, "using MSI\n");
6363 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6364 netdev_info(dev, "using MSIX\n");
6366 netif_tx_start_all_queues(dev);
6371 bnx2_napi_disable(bp);
6376 bnx2_release_firmware(bp);
6381 bnx2_reset_task(struct work_struct *work)
6383 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6387 if (!netif_running(bp->dev)) {
6392 bnx2_netif_stop(bp, true);
6394 rc = bnx2_init_nic(bp, 1);
6396 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6397 bnx2_napi_enable(bp);
6403 atomic_set(&bp->intr_sem, 1);
6404 bnx2_netif_start(bp, true);
6409 bnx2_dump_state(struct bnx2 *bp)
6411 struct net_device *dev = bp->dev;
6414 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6415 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6416 atomic_read(&bp->intr_sem), val1);
6417 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6418 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6419 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6420 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6421 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6422 REG_RD(bp, BNX2_EMAC_RX_STATUS));
6423 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6424 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6425 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6426 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6427 if (bp->flags & BNX2_FLAG_USING_MSIX)
6428 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6429 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6433 bnx2_tx_timeout(struct net_device *dev)
6435 struct bnx2 *bp = netdev_priv(dev);
6437 bnx2_dump_state(bp);
6438 bnx2_dump_mcp_state(bp);
6440 /* This allows the netif to be shutdown gracefully before resetting */
6441 schedule_work(&bp->reset_task);
6444 /* Called with netif_tx_lock.
6445 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6446 * netif_wake_queue().
6449 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6451 struct bnx2 *bp = netdev_priv(dev);
6454 struct sw_tx_bd *tx_buf;
6455 u32 len, vlan_tag_flags, last_frag, mss;
6456 u16 prod, ring_prod;
6458 struct bnx2_napi *bnapi;
6459 struct bnx2_tx_ring_info *txr;
6460 struct netdev_queue *txq;
6462 /* Determine which tx ring we will be placed on */
6463 i = skb_get_queue_mapping(skb);
6464 bnapi = &bp->bnx2_napi[i];
6465 txr = &bnapi->tx_ring;
6466 txq = netdev_get_tx_queue(dev, i);
6468 if (unlikely(bnx2_tx_avail(bp, txr) <
6469 (skb_shinfo(skb)->nr_frags + 1))) {
6470 netif_tx_stop_queue(txq);
6471 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6473 return NETDEV_TX_BUSY;
6475 len = skb_headlen(skb);
6476 prod = txr->tx_prod;
6477 ring_prod = TX_RING_IDX(prod);
6480 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6481 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6484 if (vlan_tx_tag_present(skb)) {
6486 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6489 if ((mss = skb_shinfo(skb)->gso_size)) {
6493 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6495 tcp_opt_len = tcp_optlen(skb);
6497 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6498 u32 tcp_off = skb_transport_offset(skb) -
6499 sizeof(struct ipv6hdr) - ETH_HLEN;
6501 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6502 TX_BD_FLAGS_SW_FLAGS;
6503 if (likely(tcp_off == 0))
6504 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6507 vlan_tag_flags |= ((tcp_off & 0x3) <<
6508 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6509 ((tcp_off & 0x10) <<
6510 TX_BD_FLAGS_TCP6_OFF4_SHL);
6511 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6515 if (tcp_opt_len || (iph->ihl > 5)) {
6516 vlan_tag_flags |= ((iph->ihl - 5) +
6517 (tcp_opt_len >> 2)) << 8;
6523 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6524 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6526 return NETDEV_TX_OK;
6529 tx_buf = &txr->tx_buf_ring[ring_prod];
6531 dma_unmap_addr_set(tx_buf, mapping, mapping);
6533 txbd = &txr->tx_desc_ring[ring_prod];
6535 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6536 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6537 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6538 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6540 last_frag = skb_shinfo(skb)->nr_frags;
6541 tx_buf->nr_frags = last_frag;
6542 tx_buf->is_gso = skb_is_gso(skb);
6544 for (i = 0; i < last_frag; i++) {
6545 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6547 prod = NEXT_TX_BD(prod);
6548 ring_prod = TX_RING_IDX(prod);
6549 txbd = &txr->tx_desc_ring[ring_prod];
6551 len = skb_frag_size(frag);
6552 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6554 if (dma_mapping_error(&bp->pdev->dev, mapping))
6556 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6559 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6560 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6561 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6562 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6565 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6567 /* Sync BD data before updating TX mailbox */
6570 netdev_tx_sent_queue(txq, skb->len);
6572 prod = NEXT_TX_BD(prod);
6573 txr->tx_prod_bseq += skb->len;
6575 REG_WR16(bp, txr->tx_bidx_addr, prod);
6576 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6580 txr->tx_prod = prod;
6582 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6583 netif_tx_stop_queue(txq);
6585 /* netif_tx_stop_queue() must be done before checking
6586 * tx index in bnx2_tx_avail() below, because in
6587 * bnx2_tx_int(), we update tx index before checking for
6588 * netif_tx_queue_stopped().
6591 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6592 netif_tx_wake_queue(txq);
6595 return NETDEV_TX_OK;
6597 /* save value of frag that failed */
6600 /* start back at beginning and unmap skb */
6601 prod = txr->tx_prod;
6602 ring_prod = TX_RING_IDX(prod);
6603 tx_buf = &txr->tx_buf_ring[ring_prod];
6605 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6606 skb_headlen(skb), PCI_DMA_TODEVICE);
6608 /* unmap remaining mapped pages */
6609 for (i = 0; i < last_frag; i++) {
6610 prod = NEXT_TX_BD(prod);
6611 ring_prod = TX_RING_IDX(prod);
6612 tx_buf = &txr->tx_buf_ring[ring_prod];
6613 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6614 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6619 return NETDEV_TX_OK;
6622 /* Called with rtnl_lock */
6624 bnx2_close(struct net_device *dev)
6626 struct bnx2 *bp = netdev_priv(dev);
6628 bnx2_disable_int_sync(bp);
6629 bnx2_napi_disable(bp);
6630 del_timer_sync(&bp->timer);
6631 bnx2_shutdown_chip(bp);
6637 netif_carrier_off(bp->dev);
6638 bnx2_set_power_state(bp, PCI_D3hot);
6643 bnx2_save_stats(struct bnx2 *bp)
6645 u32 *hw_stats = (u32 *) bp->stats_blk;
6646 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6649 /* The 1st 10 counters are 64-bit counters */
6650 for (i = 0; i < 20; i += 2) {
6654 hi = temp_stats[i] + hw_stats[i];
6655 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6656 if (lo > 0xffffffff)
6659 temp_stats[i + 1] = lo & 0xffffffff;
6662 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6663 temp_stats[i] += hw_stats[i];
6666 #define GET_64BIT_NET_STATS64(ctr) \
6667 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6669 #define GET_64BIT_NET_STATS(ctr) \
6670 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6671 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6673 #define GET_32BIT_NET_STATS(ctr) \
6674 (unsigned long) (bp->stats_blk->ctr + \
6675 bp->temp_stats_blk->ctr)
6677 static struct rtnl_link_stats64 *
6678 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6680 struct bnx2 *bp = netdev_priv(dev);
6682 if (bp->stats_blk == NULL)
6685 net_stats->rx_packets =
6686 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6687 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6688 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6690 net_stats->tx_packets =
6691 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6692 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6693 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6695 net_stats->rx_bytes =
6696 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6698 net_stats->tx_bytes =
6699 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6701 net_stats->multicast =
6702 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6704 net_stats->collisions =
6705 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6707 net_stats->rx_length_errors =
6708 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6709 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6711 net_stats->rx_over_errors =
6712 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6713 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6715 net_stats->rx_frame_errors =
6716 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6718 net_stats->rx_crc_errors =
6719 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6721 net_stats->rx_errors = net_stats->rx_length_errors +
6722 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6723 net_stats->rx_crc_errors;
6725 net_stats->tx_aborted_errors =
6726 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6727 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6729 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6730 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6731 net_stats->tx_carrier_errors = 0;
6733 net_stats->tx_carrier_errors =
6734 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6737 net_stats->tx_errors =
6738 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6739 net_stats->tx_aborted_errors +
6740 net_stats->tx_carrier_errors;
6742 net_stats->rx_missed_errors =
6743 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6744 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6745 GET_32BIT_NET_STATS(stat_FwRxDrop);
6750 /* All ethtool functions called with rtnl_lock */
6753 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6755 struct bnx2 *bp = netdev_priv(dev);
6756 int support_serdes = 0, support_copper = 0;
6758 cmd->supported = SUPPORTED_Autoneg;
6759 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6762 } else if (bp->phy_port == PORT_FIBRE)
6767 if (support_serdes) {
6768 cmd->supported |= SUPPORTED_1000baseT_Full |
6770 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6771 cmd->supported |= SUPPORTED_2500baseX_Full;
6774 if (support_copper) {
6775 cmd->supported |= SUPPORTED_10baseT_Half |
6776 SUPPORTED_10baseT_Full |
6777 SUPPORTED_100baseT_Half |
6778 SUPPORTED_100baseT_Full |
6779 SUPPORTED_1000baseT_Full |
6784 spin_lock_bh(&bp->phy_lock);
6785 cmd->port = bp->phy_port;
6786 cmd->advertising = bp->advertising;
6788 if (bp->autoneg & AUTONEG_SPEED) {
6789 cmd->autoneg = AUTONEG_ENABLE;
6791 cmd->autoneg = AUTONEG_DISABLE;
6794 if (netif_carrier_ok(dev)) {
6795 ethtool_cmd_speed_set(cmd, bp->line_speed);
6796 cmd->duplex = bp->duplex;
6799 ethtool_cmd_speed_set(cmd, -1);
6802 spin_unlock_bh(&bp->phy_lock);
6804 cmd->transceiver = XCVR_INTERNAL;
6805 cmd->phy_address = bp->phy_addr;
6811 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6813 struct bnx2 *bp = netdev_priv(dev);
6814 u8 autoneg = bp->autoneg;
6815 u8 req_duplex = bp->req_duplex;
6816 u16 req_line_speed = bp->req_line_speed;
6817 u32 advertising = bp->advertising;
6820 spin_lock_bh(&bp->phy_lock);
6822 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6823 goto err_out_unlock;
6825 if (cmd->port != bp->phy_port &&
6826 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6827 goto err_out_unlock;
6829 /* If device is down, we can store the settings only if the user
6830 * is setting the currently active port.
6832 if (!netif_running(dev) && cmd->port != bp->phy_port)
6833 goto err_out_unlock;
6835 if (cmd->autoneg == AUTONEG_ENABLE) {
6836 autoneg |= AUTONEG_SPEED;
6838 advertising = cmd->advertising;
6839 if (cmd->port == PORT_TP) {
6840 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6842 advertising = ETHTOOL_ALL_COPPER_SPEED;
6844 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6846 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6848 advertising |= ADVERTISED_Autoneg;
6851 u32 speed = ethtool_cmd_speed(cmd);
6852 if (cmd->port == PORT_FIBRE) {
6853 if ((speed != SPEED_1000 &&
6854 speed != SPEED_2500) ||
6855 (cmd->duplex != DUPLEX_FULL))
6856 goto err_out_unlock;
6858 if (speed == SPEED_2500 &&
6859 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6860 goto err_out_unlock;
6861 } else if (speed == SPEED_1000 || speed == SPEED_2500)
6862 goto err_out_unlock;
6864 autoneg &= ~AUTONEG_SPEED;
6865 req_line_speed = speed;
6866 req_duplex = cmd->duplex;
6870 bp->autoneg = autoneg;
6871 bp->advertising = advertising;
6872 bp->req_line_speed = req_line_speed;
6873 bp->req_duplex = req_duplex;
6876 /* If device is down, the new settings will be picked up when it is
6879 if (netif_running(dev))
6880 err = bnx2_setup_phy(bp, cmd->port);
6883 spin_unlock_bh(&bp->phy_lock);
6889 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6891 struct bnx2 *bp = netdev_priv(dev);
6893 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6894 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6895 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6896 strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6899 #define BNX2_REGDUMP_LEN (32 * 1024)
6902 bnx2_get_regs_len(struct net_device *dev)
6904 return BNX2_REGDUMP_LEN;
/* ethtool .get_regs: copy readable register ranges into the user buffer.
 * reg_boundaries[] holds (start, end) offset pairs; offsets between an
 * end and the next start are unreadable and left zeroed by the memset. */
6908 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6910 u32 *p = _p, i, offset;
6912 struct bnx2 *bp = netdev_priv(dev);
6913 static const u32 reg_boundaries[] = {
6914 0x0000, 0x0098, 0x0400, 0x045c,
6915 0x0800, 0x0880, 0x0c00, 0x0c10,
6916 0x0c30, 0x0d08, 0x1000, 0x101c,
6917 0x1040, 0x1048, 0x1080, 0x10a4,
6918 0x1400, 0x1490, 0x1498, 0x14f0,
6919 0x1500, 0x155c, 0x1580, 0x15dc,
6920 0x1600, 0x1658, 0x1680, 0x16d8,
6921 0x1800, 0x1820, 0x1840, 0x1854,
6922 0x1880, 0x1894, 0x1900, 0x1984,
6923 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6924 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6925 0x2000, 0x2030, 0x23c0, 0x2400,
6926 0x2800, 0x2820, 0x2830, 0x2850,
6927 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6928 0x3c00, 0x3c94, 0x4000, 0x4010,
6929 0x4080, 0x4090, 0x43c0, 0x4458,
6930 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6931 0x4fc0, 0x5010, 0x53c0, 0x5444,
6932 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6933 0x5fc0, 0x6000, 0x6400, 0x6428,
6934 0x6800, 0x6848, 0x684c, 0x6860,
6935 0x6888, 0x6910, 0x8000
/* Zero the whole buffer first so skipped (unreadable) ranges read as 0. */
6940 memset(p, 0, BNX2_REGDUMP_LEN)
6942 if (!netif_running(bp->dev))
6946 offset = reg_boundaries[0];
6948 while (offset < BNX2_REGDUMP_LEN) {
6949 *p++ = REG_RD(bp, offset);
/* Reached the end of a readable range: jump to the next range and
 * advance the output pointer to the matching buffer position. */
6951 if (offset == reg_boundaries[i + 1]) {
6952 offset = reg_boundaries[i + 2];
6953 p = (u32 *) (orig_p + offset);
/* ethtool .get_wol: advertise Wake-on-Magic-Packet unless the board has
 * the NO_WOL flag set. */
6960 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6962 struct bnx2 *bp = netdev_priv(dev);
6964 if (bp->flags & BNX2_FLAG_NO_WOL) {
6969 wol->supported = WAKE_MAGIC;
6971 wol->wolopts = WAKE_MAGIC;
6975 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool .set_wol: only WAKE_MAGIC is supported; reject anything else,
 * and reject WAKE_MAGIC too when the board cannot do WoL at all. */
6979 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6981 struct bnx2 *bp = netdev_priv(dev);
6983 if (wol->wolopts & ~WAKE_MAGIC)
6986 if (wol->wolopts & WAKE_MAGIC) {
6987 if (bp->flags & BNX2_FLAG_NO_WOL)
/* ethtool .nway_reset: restart autonegotiation.  Only valid while the
 * device is up and autoneg is enabled.  Remote-PHY capable boards defer
 * to the management firmware; SerDes boards first force the link down
 * (BMCR loopback) so the peer sees the transition, then rely on the
 * driver timer to complete the restart. */
6999 bnx2_nway_reset(struct net_device *dev)
7001 struct bnx2 *bp = netdev_priv(dev);
7004 if (!netif_running(dev))
7007 if (!(bp->autoneg & AUTONEG_SPEED)) {
7011 spin_lock_bh(&bp->phy_lock);
7013 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7016 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7017 spin_unlock_bh(&bp->phy_lock);
7021 /* Force a link down visible on the other side */
7022 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7023 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7024 spin_unlock_bh(&bp->phy_lock);
7028 spin_lock_bh(&bp->phy_lock);
/* Arm the SerDes autoneg timeout so the timer can fall back if
 * negotiation does not complete. */
7030 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7031 bp->serdes_an_pending = 1;
7032 mod_timer(&bp->timer, jiffies + bp->current_interval);
7035 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7036 bmcr &= ~BMCR_LOOPBACK;
7037 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7039 spin_unlock_bh(&bp->phy_lock);
/* ethtool .get_link: report current link state. */
7045 bnx2_get_link(struct net_device *dev)
7047 struct bnx2 *bp = netdev_priv(dev);
/* ethtool .get_eeprom_len: size of the NVRAM flash, 0 if none detected. */
7053 bnx2_get_eeprom_len(struct net_device *dev)
7055 struct bnx2 *bp = netdev_priv(dev);
7057 if (bp->flash_info == NULL)
7060 return (int) bp->flash_size;
/* ethtool .get_eeprom: read a range of NVRAM into eebuf.  Requires the
 * device to be up; offset/len were already range-checked by the core. */
7064 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7067 struct bnx2 *bp = netdev_priv(dev);
7070 if (!netif_running(dev))
7073 /* parameters already validated in ethtool_get_eeprom */
7075 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
/* ethtool .set_eeprom: write a range of NVRAM from eebuf.  Requires the
 * device to be up; offset/len were already range-checked by the core. */
7081 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7084 struct bnx2 *bp = netdev_priv(dev);
7087 if (!netif_running(dev))
7090 /* parameters already validated in ethtool_set_eeprom */
7092 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
/* ethtool .get_coalesce: return the current host-coalescing parameters
 * (tick timers and quick-consumer-trip frame counts, plus the stats
 * block update interval). */
7098 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7100 struct bnx2 *bp = netdev_priv(dev);
7102 memset(coal, 0, sizeof(struct ethtool_coalesce));
7104 coal->rx_coalesce_usecs = bp->rx_ticks;
7105 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7106 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7107 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7109 coal->tx_coalesce_usecs = bp->tx_ticks;
7110 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7111 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7112 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7114 coal->stats_block_coalesce_usecs = bp->stats_ticks;
/* ethtool .set_coalesce: clamp the requested values to hardware limits
 * (tick timers to 0x3ff, frame trips to 0xff), apply them, and re-init
 * the NIC if it is running so the host coalescing block picks them up. */
7120 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7122 struct bnx2 *bp = netdev_priv(dev);
7124 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7125 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7127 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7128 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7130 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7131 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7133 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7134 if (bp->rx_quick_cons_trip_int > 0xff)
7135 bp->rx_quick_cons_trip_int = 0xff;
7137 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7138 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7140 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7141 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7143 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7144 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7146 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7147 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
/* Chips with the broken-stats erratum only support a 1 s stats period
 * (or disabled); others are clamped/aligned to the HC register field. */
7150 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7151 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7152 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7153 bp->stats_ticks = USEC_PER_SEC;
7155 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7156 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7157 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
/* Restart the NIC so the new coalescing values take effect. */
7159 if (netif_running(bp->dev)) {
7160 bnx2_netif_stop(bp, true);
7161 bnx2_init_nic(bp, 0);
7162 bnx2_netif_start(bp, true);
/* ethtool .get_ringparam: report maximum and currently configured RX,
 * RX-jumbo (page ring) and TX descriptor counts. */
7169 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7171 struct bnx2 *bp = netdev_priv(dev);
7173 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7174 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7176 ering->rx_pending = bp->rx_ring_size;
7177 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7179 ering->tx_max_pending = MAX_TX_DESC_CNT;
7180 ering->tx_pending = bp->tx_ring_size;
/* Resize the RX/TX rings.  If the interface is up this tears the NIC
 * down (saving chip stats first, since the reset clears them), applies
 * the new sizes, then rebuilds interrupts/memory/NIC state and restarts.
 * reset_irq additionally releases and re-acquires the IRQ vectors so a
 * changed ring count can change the MSI-X layout. */
7184 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7186 if (netif_running(bp->dev)) {
7187 /* Reset will erase chipset stats; save them */
7188 bnx2_save_stats(bp);
7190 bnx2_netif_stop(bp, true);
7191 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7196 __bnx2_free_irq(bp);
7202 bnx2_set_rx_ring_size(bp, rx);
7203 bp->tx_ring_size = tx;
7205 if (netif_running(bp->dev)) {
7209 rc = bnx2_setup_int_mode(bp, disable_msi);
7214 rc = bnx2_alloc_mem(bp);
7217 rc = bnx2_request_irq(bp);
7220 rc = bnx2_init_nic(bp, 0);
7223 bnx2_napi_enable(bp);
/* Tell a registered cnic driver where the new status block lives. */
7228 mutex_lock(&bp->cnic_lock);
7229 /* Let cnic know about the new status block. */
7230 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7231 bnx2_setup_cnic_irq_info(bp);
7232 mutex_unlock(&bp->cnic_lock);
7234 bnx2_netif_start(bp, true);
/* ethtool .set_ringparam: validate requested ring sizes (TX must leave
 * room for a maximally-fragmented skb) and apply via
 * bnx2_change_ring_size(). */
7240 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7242 struct bnx2 *bp = netdev_priv(dev);
7245 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7246 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7247 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7251 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
/* ethtool .get_pauseparam: report flow-control autoneg and the current
 * RX/TX pause state. */
7257 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7259 struct bnx2 *bp = netdev_priv(dev);
7261 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7262 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7263 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
/* ethtool .set_pauseparam: record the requested flow-control settings
 * and, if the device is up, reprogram the PHY under phy_lock. */
7267 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7269 struct bnx2 *bp = netdev_priv(dev);
7271 bp->req_flow_ctrl = 0;
7272 if (epause->rx_pause)
7273 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7274 if (epause->tx_pause)
7275 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7277 if (epause->autoneg) {
7278 bp->autoneg |= AUTONEG_FLOW_CTRL;
7281 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7284 if (netif_running(dev)) {
7285 spin_lock_bh(&bp->phy_lock);
7286 bnx2_setup_phy(bp, bp->phy_port);
7287 spin_unlock_bh(&bp->phy_lock);
/* ethtool statistics name table.  Order must match
 * bnx2_stats_offset_arr[] and the 5706/5708 length arrays below. */
7294 char string[ETH_GSTRING_LEN];
7295 } bnx2_stats_str_arr[] = {
7297 { "rx_error_bytes" },
7299 { "tx_error_bytes" },
7300 { "rx_ucast_packets" },
7301 { "rx_mcast_packets" },
7302 { "rx_bcast_packets" },
7303 { "tx_ucast_packets" },
7304 { "tx_mcast_packets" },
7305 { "tx_bcast_packets" },
7306 { "tx_mac_errors" },
7307 { "tx_carrier_errors" },
7308 { "rx_crc_errors" },
7309 { "rx_align_errors" },
7310 { "tx_single_collisions" },
7311 { "tx_multi_collisions" },
7313 { "tx_excess_collisions" },
7314 { "tx_late_collisions" },
7315 { "tx_total_collisions" },
7318 { "rx_undersize_packets" },
7319 { "rx_oversize_packets" },
7320 { "rx_64_byte_packets" },
7321 { "rx_65_to_127_byte_packets" },
7322 { "rx_128_to_255_byte_packets" },
7323 { "rx_256_to_511_byte_packets" },
7324 { "rx_512_to_1023_byte_packets" },
7325 { "rx_1024_to_1522_byte_packets" },
7326 { "rx_1523_to_9022_byte_packets" },
7327 { "tx_64_byte_packets" },
7328 { "tx_65_to_127_byte_packets" },
7329 { "tx_128_to_255_byte_packets" },
7330 { "tx_256_to_511_byte_packets" },
7331 { "tx_512_to_1023_byte_packets" },
7332 { "tx_1024_to_1522_byte_packets" },
7333 { "tx_1523_to_9022_byte_packets" },
7334 { "rx_xon_frames" },
7335 { "rx_xoff_frames" },
7336 { "tx_xon_frames" },
7337 { "tx_xoff_frames" },
7338 { "rx_mac_ctrl_frames" },
7339 { "rx_filtered_packets" },
7340 { "rx_ftq_discards" },
7342 { "rx_fw_discards" },
7345 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
/* Convert a statistics_block field offset (bytes) to a u32 index. */
7347 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
/* Per-stat u32 offsets into the hardware statistics block; order matches
 * bnx2_stats_str_arr[].  64-bit counters point at their _hi word. */
7349 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7350 STATS_OFFSET32(stat_IfHCInOctets_hi),
7351 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7352 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7353 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7354 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7355 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7356 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7357 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7358 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7359 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7360 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7361 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7362 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7363 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7364 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7365 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7366 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7367 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7368 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7369 STATS_OFFSET32(stat_EtherStatsCollisions),
7370 STATS_OFFSET32(stat_EtherStatsFragments),
7371 STATS_OFFSET32(stat_EtherStatsJabbers),
7372 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7373 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7374 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7375 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7376 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7377 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7378 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7379 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7380 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7381 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7382 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7383 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7384 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7385 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7386 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7387 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7388 STATS_OFFSET32(stat_XonPauseFramesReceived),
7389 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7390 STATS_OFFSET32(stat_OutXonSent),
7391 STATS_OFFSET32(stat_OutXoffSent),
7392 STATS_OFFSET32(stat_MacControlFramesReceived),
7393 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7394 STATS_OFFSET32(stat_IfInFTQDiscards),
7395 STATS_OFFSET32(stat_IfInMBUFDiscards),
7396 STATS_OFFSET32(stat_FwRxDrop),
/* Per-stat counter widths in bytes (8 = 64-bit, 4 = 32-bit, 0 = skip),
 * indexed like bnx2_stats_str_arr[]. */
7399 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7400 * skipped because of errata.
7402 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7403 8,0,8,8,8,8,8,8,8,8,
7404 4,0,4,4,4,4,4,4,4,4,
7405 4,4,4,4,4,4,4,4,4,4,
7406 4,4,4,4,4,4,4,4,4,4,
/* 5708 has no carrier-sense erratum, so only slot 1 is skipped. */
7410 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7411 8,0,8,8,8,8,8,8,8,8,
7412 4,4,4,4,4,4,4,4,4,4,
7413 4,4,4,4,4,4,4,4,4,4,
7414 4,4,4,4,4,4,4,4,4,4,
/* Names for the ethtool self-test results, in result-buffer order. */
7418 #define BNX2_NUM_TESTS 6
7421 char string[ETH_GSTRING_LEN];
7422 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7423 { "register_test (offline)" },
7424 { "memory_test (offline)" },
7425 { "loopback_test (offline)" },
7426 { "nvram_test (online)" },
7427 { "interrupt_test (online)" },
7428 { "link_test (online)" },
/* ethtool .get_sset_count: number of self-tests or statistics strings. */
7432 bnx2_get_sset_count(struct net_device *dev, int sset)
7436 return BNX2_NUM_TESTS;
7438 return BNX2_NUM_STATS;
/* ethtool .self_test: run the diagnostic suite.  Offline tests (register,
 * memory, loopback) require stopping the NIC and resetting the chip in
 * diagnostic mode; online tests (NVRAM, interrupt, link) run afterwards.
 * buf[] slots correspond to bnx2_tests_str_arr[]. */
7445 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7447 struct bnx2 *bp = netdev_priv(dev);
7449 bnx2_set_power_state(bp, PCI_D0);
7451 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7452 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7455 bnx2_netif_stop(bp, true);
7456 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7459 if (bnx2_test_registers(bp) != 0) {
7461 etest->flags |= ETH_TEST_FL_FAILED;
7463 if (bnx2_test_memory(bp) != 0) {
7465 etest->flags |= ETH_TEST_FL_FAILED;
7467 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7468 etest->flags |= ETH_TEST_FL_FAILED;
/* If the interface was down, just quiesce the chip; otherwise
 * fully re-initialize and restart it. */
7470 if (!netif_running(bp->dev))
7471 bnx2_shutdown_chip(bp);
7473 bnx2_init_nic(bp, 1);
7474 bnx2_netif_start(bp, true);
7477 /* wait for link up */
7478 for (i = 0; i < 7; i++) {
7481 msleep_interruptible(1000);
7485 if (bnx2_test_nvram(bp) != 0) {
7487 etest->flags |= ETH_TEST_FL_FAILED;
7489 if (bnx2_test_intr(bp) != 0) {
7491 etest->flags |= ETH_TEST_FL_FAILED;
7494 if (bnx2_test_link(bp) != 0) {
7496 etest->flags |= ETH_TEST_FL_FAILED;
/* Drop back to low power if the interface is down. */
7499 if (!netif_running(bp->dev))
7500 bnx2_set_power_state(bp, PCI_D3hot);
/* ethtool .get_strings: copy the stats or self-test name tables. */
7504 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7506 switch (stringset) {
7508 memcpy(buf, bnx2_stats_str_arr,
7509 sizeof(bnx2_stats_str_arr));
7512 memcpy(buf, bnx2_tests_str_arr,
7513 sizeof(bnx2_tests_str_arr));
/* ethtool .get_ethtool_stats: fold the live hardware stats block and the
 * saved (pre-reset) snapshot together into buf[].  Counter widths come
 * from the chip-specific length array; width 0 means the counter is
 * skipped due to errata. */
7519 bnx2_get_ethtool_stats(struct net_device *dev,
7520 struct ethtool_stats *stats, u64 *buf)
7522 struct bnx2 *bp = netdev_priv(dev);
7524 u32 *hw_stats = (u32 *) bp->stats_blk;
7525 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7526 u8 *stats_len_arr = NULL;
7528 if (hw_stats == NULL) {
7529 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
/* Early 5706/5708 steppings share the 5706 errata table. */
7533 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7534 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7535 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7536 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7537 stats_len_arr = bnx2_5706_stats_len_arr;
7539 stats_len_arr = bnx2_5708_stats_len_arr;
7541 for (i = 0; i < BNX2_NUM_STATS; i++) {
7542 unsigned long offset;
7544 if (stats_len_arr[i] == 0) {
7545 /* skip this counter */
7550 offset = bnx2_stats_offset_arr[i];
7551 if (stats_len_arr[i] == 4) {
7552 /* 4-byte counter */
7553 buf[i] = (u64) *(hw_stats + offset) +
7554 *(temp_stats + offset);
7557 /* 8-byte counter */
7558 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7559 *(hw_stats + offset + 1) +
7560 (((u64) *(temp_stats + offset)) << 32) +
7561 *(temp_stats + offset + 1);
/* ethtool .set_phys_id: blink the port LED for identification.  ACTIVE
 * saves the LED config and switches to MAC-driven mode; ON/OFF toggle
 * the override bits; INACTIVE restores the saved config and, if the
 * interface is down, returns the chip to low power. */
7566 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7568 struct bnx2 *bp = netdev_priv(dev);
7571 case ETHTOOL_ID_ACTIVE:
7572 bnx2_set_power_state(bp, PCI_D0);
7574 bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
7575 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7576 return 1; /* cycle on/off once per second */
7579 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7580 BNX2_EMAC_LED_1000MB_OVERRIDE |
7581 BNX2_EMAC_LED_100MB_OVERRIDE |
7582 BNX2_EMAC_LED_10MB_OVERRIDE |
7583 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7584 BNX2_EMAC_LED_TRAFFIC);
7587 case ETHTOOL_ID_OFF:
7588 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7591 case ETHTOOL_ID_INACTIVE:
7592 REG_WR(bp, BNX2_EMAC_LED, 0);
7593 REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7595 if (!netif_running(dev))
7596 bnx2_set_power_state(bp, PCI_D3hot);
/* ndo_fix_features: when the chip cannot strip VLAN tags in firmware
 * (no CAN_KEEP_VLAN), RX VLAN acceleration must stay enabled. */
7603 static netdev_features_t
7604 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7606 struct bnx2 *bp = netdev_priv(dev);
7608 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7609 features |= NETIF_F_HW_VLAN_RX;
/* ndo_set_features: keep vlan_features consistent with VLAN TX tagging
 * (firmware cannot do TSO on VLAN-tagged frames without HW tagging), and
 * if the RX VLAN-strip setting changed while running, reprogram the RX
 * mode and notify the firmware. */
7615 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7617 struct bnx2 *bp = netdev_priv(dev);
7619 /* TSO with VLAN tag won't work with current firmware */
7620 if (features & NETIF_F_HW_VLAN_TX)
7621 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7623 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7625 if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7626 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7627 netif_running(dev)) {
7628 bnx2_netif_stop(bp, false);
7629 dev->features = features;
7630 bnx2_set_rx_mode(dev);
7631 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7632 bnx2_netif_start(bp, false);
/* ethtool .get_channels: report ring counts.  More than one RX/TX ring
 * is only possible with MSI-X capability and MSI not disabled. */
7639 static void bnx2_get_channels(struct net_device *dev,
7640 struct ethtool_channels *channels)
7642 struct bnx2 *bp = netdev_priv(dev);
7643 u32 max_rx_rings = 1;
7644 u32 max_tx_rings = 1;
7646 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7647 max_rx_rings = RX_MAX_RINGS;
7648 max_tx_rings = TX_MAX_RINGS;
7651 channels->max_rx = max_rx_rings;
7652 channels->max_tx = max_tx_rings;
7653 channels->max_other = 0;
7654 channels->max_combined = 0;
7655 channels->rx_count = bp->num_rx_rings;
7656 channels->tx_count = bp->num_tx_rings;
7657 channels->other_count = 0;
7658 channels->combined_count = 0;
/* ethtool .set_channels: validate the requested ring counts against the
 * MSI-X-dependent maximums, store them, and rebuild the rings (same
 * sizes, new counts) if the interface is running. */
7661 static int bnx2_set_channels(struct net_device *dev,
7662 struct ethtool_channels *channels)
7664 struct bnx2 *bp = netdev_priv(dev);
7665 u32 max_rx_rings = 1;
7666 u32 max_tx_rings = 1;
7669 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7670 max_rx_rings = RX_MAX_RINGS;
7671 max_tx_rings = TX_MAX_RINGS;
7673 if (channels->rx_count > max_rx_rings ||
7674 channels->tx_count > max_tx_rings)
7677 bp->num_req_rx_rings = channels->rx_count;
7678 bp->num_req_tx_rings = channels->tx_count;
7680 if (netif_running(dev))
7681 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7682 bp->tx_ring_size, true);
/* ethtool operations table for the bnx2 driver. */
7687 static const struct ethtool_ops bnx2_ethtool_ops = {
7688 .get_settings = bnx2_get_settings,
7689 .set_settings = bnx2_set_settings,
7690 .get_drvinfo = bnx2_get_drvinfo,
7691 .get_regs_len = bnx2_get_regs_len,
7692 .get_regs = bnx2_get_regs,
7693 .get_wol = bnx2_get_wol,
7694 .set_wol = bnx2_set_wol,
7695 .nway_reset = bnx2_nway_reset,
7696 .get_link = bnx2_get_link,
7697 .get_eeprom_len = bnx2_get_eeprom_len,
7698 .get_eeprom = bnx2_get_eeprom,
7699 .set_eeprom = bnx2_set_eeprom,
7700 .get_coalesce = bnx2_get_coalesce,
7701 .set_coalesce = bnx2_set_coalesce,
7702 .get_ringparam = bnx2_get_ringparam,
7703 .set_ringparam = bnx2_set_ringparam,
7704 .get_pauseparam = bnx2_get_pauseparam,
7705 .set_pauseparam = bnx2_set_pauseparam,
7706 .self_test = bnx2_self_test,
7707 .get_strings = bnx2_get_strings,
7708 .set_phys_id = bnx2_set_phys_id,
7709 .get_ethtool_stats = bnx2_get_ethtool_stats,
7710 .get_sset_count = bnx2_get_sset_count,
7711 .get_channels = bnx2_get_channels,
7712 .set_channels = bnx2_set_channels,
7715 /* Called with rtnl_lock */
/* ndo_do_ioctl: MII register access (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Direct PHY access is refused on remote-PHY boards and when the device
 * is down; register reads/writes are serialized by phy_lock. */
7717 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7719 struct mii_ioctl_data *data = if_mii(ifr);
7720 struct bnx2 *bp = netdev_priv(dev);
7725 data->phy_id = bp->phy_addr;
7731 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7734 if (!netif_running(dev))
7737 spin_lock_bh(&bp->phy_lock);
7738 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7739 spin_unlock_bh(&bp->phy_lock);
7741 data->val_out = mii_regval;
7747 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7750 if (!netif_running(dev))
7753 spin_lock_bh(&bp->phy_lock);
7754 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7755 spin_unlock_bh(&bp->phy_lock);
7766 /* Called with rtnl_lock */
/* ndo_set_mac_address: validate and store the new MAC; program it into
 * the hardware immediately if the interface is up. */
7768 bnx2_change_mac_addr(struct net_device *dev, void *p)
7770 struct sockaddr *addr = p;
7771 struct bnx2 *bp = netdev_priv(dev);
7773 if (!is_valid_ether_addr(addr->sa_data))
7774 return -EADDRNOTAVAIL;
7776 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7777 if (netif_running(dev))
7778 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7783 /* Called with rtnl_lock */
/* ndo_change_mtu: bounds-check the new MTU (plus Ethernet header) and
 * rebuild the rings at their current sizes so buffers match the MTU. */
7785 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7787 struct bnx2 *bp = netdev_priv(dev);
7789 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7790 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7794 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7798 #ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller (netconsole): invoke each interrupt handler
 * directly with its IRQ disabled to simulate interrupts. */
7800 poll_bnx2(struct net_device *dev)
7802 struct bnx2 *bp = netdev_priv(dev);
7805 for (i = 0; i < bp->irq_nvecs; i++) {
7806 struct bnx2_irq *irq = &bp->irq_tbl[i];
7808 disable_irq(irq->vector);
7809 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7810 enable_irq(irq->vector);
7815 static void __devinit
/* Detect copper vs. SerDes media on 5709 from the dual-media bond id,
 * falling back to the strap bits (optionally overridden) decoded per
 * PCI function. */
7816 bnx2_get_5709_media(struct bnx2 *bp)
7818 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7819 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7822 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7824 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7825 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
/* Strap override bit selects the PHY_CTRL field instead of the
 * hardware strap field. */
7829 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7830 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7832 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7834 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7839 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7847 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7853 static void __devinit
/* Determine the PCI/PCI-X bus speed and width from the chip's misc
 * status and clock-control registers, recording the PCIX and 32-bit
 * flags in bp->flags and the speed in bp->bus_speed_mhz. */
7854 bnx2_get_pci_speed(struct bnx2 *bp)
7858 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7859 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7862 bp->flags |= BNX2_FLAG_PCIX;
7864 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7866 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7868 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7869 bp->bus_speed_mhz = 133;
7872 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7873 bp->bus_speed_mhz = 100;
7876 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7877 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7878 bp->bus_speed_mhz = 66;
7881 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7882 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7883 bp->bus_speed_mhz = 50;
7886 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7887 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7888 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7889 bp->bus_speed_mhz = 33;
/* Plain PCI: speed follows the M66EN pin. */
7894 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7895 bp->bus_speed_mhz = 66;
7897 bp->bus_speed_mhz = 33;
7900 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7901 bp->flags |= BNX2_FLAG_PCI_32BIT;
7905 static void __devinit
/* Read the VPD area from NVRAM and, on Dell-branded boards (MFR_ID
 * "1028"), copy the VENDOR0 keyword into bp->fw_version.  The raw NVRAM
 * bytes are stored 32-bit byte-swapped and are un-swapped in place. */
7906 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7910 unsigned int block_end, rosize, len;
7912 #define BNX2_VPD_NVRAM_OFFSET 0x300
7913 #define BNX2_VPD_LEN 128
7914 #define BNX2_MAX_VER_SLEN 30
7916 data = kmalloc(256, GFP_KERNEL);
/* Read into the upper half of the buffer, then byte-swap each 32-bit
 * word into the lower half. */
7920 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7925 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7926 data[i] = data[i + BNX2_VPD_LEN + 3];
7927 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7928 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7929 data[i + 3] = data[i + BNX2_VPD_LEN];
7932 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7936 rosize = pci_vpd_lrdt_size(&data[i]);
7937 i += PCI_VPD_LRDT_TAG_SIZE;
7938 block_end = i + rosize;
7940 if (block_end > BNX2_VPD_LEN)
7943 j = pci_vpd_find_info_keyword(data, i, rosize,
7944 PCI_VPD_RO_KEYWORD_MFR_ID);
7948 len = pci_vpd_info_field_size(&data[j]);
7950 j += PCI_VPD_INFO_FLD_HDR_SIZE;
/* Only Dell (PCI vendor id string "1028") boards carry the version. */
7951 if (j + len > block_end || len != 4 ||
7952 memcmp(&data[j], "1028", 4))
7955 j = pci_vpd_find_info_keyword(data, i, rosize,
7956 PCI_VPD_RO_KEYWORD_VENDOR0);
7960 len = pci_vpd_info_field_size(&data[j]);
7962 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7963 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7966 memcpy(bp->fw_version, &data[j], len);
7967 bp->fw_version[len] = ' ';
7973 static int __devinit
/* One-time board initialization at probe: enable the PCI device, map
 * registers, detect chip/bus/PHY capabilities, read the MAC address and
 * firmware version from shared memory, and set default ring/coalescing
 * parameters.  Returns 0 or a negative errno, unwinding via the
 * err_out_* labels. */
7974 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7979 u64 dma_mask, persist_dma_mask;
7982 SET_NETDEV_DEV(dev, &pdev->dev);
7983 bp = netdev_priv(dev);
/* Scratch stats block used to accumulate counters across chip resets. */
7988 bp->temp_stats_blk =
7989 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7991 if (bp->temp_stats_blk == NULL) {
7996 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7997 rc = pci_enable_device(pdev);
7999 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8003 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8005 "Cannot find PCI device base address, aborting\n");
8007 goto err_out_disable;
8010 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8012 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8013 goto err_out_disable;
8016 pci_set_master(pdev);
8018 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8019 if (bp->pm_cap == 0) {
8021 "Cannot find power management capability, aborting\n");
8023 goto err_out_release;
8029 spin_lock_init(&bp->phy_lock);
8030 spin_lock_init(&bp->indirect_lock);
8032 mutex_init(&bp->cnic_lock);
8034 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8036 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8037 TX_MAX_TSS_RINGS + 1));
8039 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8041 goto err_out_release;
8044 bnx2_set_power_state(bp, PCI_D0);
8046 /* Configure byte swap and enable write to the reg_window registers.
8047 * Rely on CPU to do target byte swapping on big endian systems
8048 * The chip's target access swapping will not swap all accesses
8050 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8051 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8052 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8054 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
8056 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8057 if (!pci_is_pcie(pdev)) {
8058 dev_err(&pdev->dev, "Not PCIE, aborting\n");
8062 bp->flags |= BNX2_FLAG_PCIE;
8063 if (CHIP_REV(bp) == CHIP_REV_Ax)
8064 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8066 /* AER (Advanced Error Reporting) hooks */
8067 err = pci_enable_pcie_error_reporting(pdev);
8069 bp->flags |= BNX2_FLAG_AER_ENABLED;
8072 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8073 if (bp->pcix_cap == 0) {
8075 "Cannot find PCIX capability, aborting\n");
8079 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8082 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
8083 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8084 bp->flags |= BNX2_FLAG_MSIX_CAP;
8087 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8088 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8089 bp->flags |= BNX2_FLAG_MSI_CAP;
8092 /* 5708 cannot support DMA addresses > 40-bit. */
8093 if (CHIP_NUM(bp) == CHIP_NUM_5708)
8094 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8096 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8098 /* Configure DMA attributes. */
8099 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8100 dev->features |= NETIF_F_HIGHDMA;
8101 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8104 "pci_set_consistent_dma_mask failed, aborting\n");
8107 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8108 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8112 if (!(bp->flags & BNX2_FLAG_PCIE))
8113 bnx2_get_pci_speed(bp);
8115 /* 5706A0 may falsely detect SERR and PERR. */
8116 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8117 reg = REG_RD(bp, PCI_COMMAND);
8118 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8119 REG_WR(bp, PCI_COMMAND, reg);
8121 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8122 !(bp->flags & BNX2_FLAG_PCIX)) {
8125 "5706 A1 can only be used in a PCIX bus, aborting\n");
8129 bnx2_init_nvram(bp);
8131 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
/* Newer bootcode publishes per-function shared memory bases. */
8133 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8134 BNX2_SHM_HDR_SIGNATURE_SIG) {
8135 u32 off = PCI_FUNC(pdev->devfn) << 2;
8137 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8139 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8141 /* Get the permanent MAC address. First we need to make sure the
8142 * firmware is actually running.
8144 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8146 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8147 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8148 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8153 bnx2_read_vpd_fw_ver(bp);
/* Append "bc x.y.z" (bootcode version) to fw_version, decoding one
 * decimal byte per iteration with leading-zero suppression. */
8155 j = strlen(bp->fw_version);
8156 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8157 for (i = 0; i < 3 && j < 24; i++) {
8161 bp->fw_version[j++] = 'b';
8162 bp->fw_version[j++] = 'c';
8163 bp->fw_version[j++] = ' ';
8165 num = (u8) (reg >> (24 - (i * 8)));
8166 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8167 if (num >= k || !skip0 || k == 1) {
8168 bp->fw_version[j++] = (num / k) + '0';
8173 bp->fw_version[j++] = '.';
8175 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8176 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8179 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8180 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8182 for (i = 0; i < 30; i++) {
8183 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8184 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8189 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8190 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8191 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8192 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8193 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
/* Append the management firmware version (up to 12 chars). */
8196 bp->fw_version[j++] = ' ';
8197 for (i = 0; i < 3 && j < 28; i++) {
8198 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8199 reg = be32_to_cpu(reg);
/* Fixed mojibake: "®" was a mangled "&reg" (HTML entity). */
8200 memcpy(&bp->fw_version[j], &reg, 4);
8205 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8206 bp->mac_addr[0] = (u8) (reg >> 8);
8207 bp->mac_addr[1] = (u8) reg;
8209 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8210 bp->mac_addr[2] = (u8) (reg >> 24);
8211 bp->mac_addr[3] = (u8) (reg >> 16);
8212 bp->mac_addr[4] = (u8) (reg >> 8);
8213 bp->mac_addr[5] = (u8) reg;
8215 bp->tx_ring_size = MAX_TX_DESC_CNT;
8216 bnx2_set_rx_ring_size(bp, 255);
8218 bp->tx_quick_cons_trip_int = 2;
8219 bp->tx_quick_cons_trip = 20;
8220 bp->tx_ticks_int = 18;
8223 bp->rx_quick_cons_trip_int = 2;
8224 bp->rx_quick_cons_trip = 12;
8225 bp->rx_ticks_int = 18;
8228 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8230 bp->current_interval = BNX2_TIMER_INTERVAL;
8234 /* Disable WOL support if we are running on a SERDES chip. */
8235 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8236 bnx2_get_5709_media(bp);
8237 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8238 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8240 bp->phy_port = PORT_TP;
8241 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8242 bp->phy_port = PORT_FIBRE;
8243 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8244 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8245 bp->flags |= BNX2_FLAG_NO_WOL;
8248 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8249 /* Don't do parallel detect on this board because of
8250 * some board problems. The link will not go down
8251 * if we do parallel detect.
8253 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8254 pdev->subsystem_device == 0x310c)
8255 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8258 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8259 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8261 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8262 CHIP_NUM(bp) == CHIP_NUM_5708)
8263 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8264 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8265 (CHIP_REV(bp) == CHIP_REV_Ax ||
8266 CHIP_REV(bp) == CHIP_REV_Bx))
8267 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8269 bnx2_init_fw_cap(bp);
8271 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8272 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8273 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8274 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8275 bp->flags |= BNX2_FLAG_NO_WOL;
/* 5706 A0 cannot use separate interrupt-context coalescing values. */
8279 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8280 bp->tx_quick_cons_trip_int =
8281 bp->tx_quick_cons_trip;
8282 bp->tx_ticks_int = bp->tx_ticks;
8283 bp->rx_quick_cons_trip_int =
8284 bp->rx_quick_cons_trip;
8285 bp->rx_ticks_int = bp->rx_ticks;
8286 bp->comp_prod_trip_int = bp->comp_prod_trip;
8287 bp->com_ticks_int = bp->com_ticks;
8288 bp->cmd_ticks_int = bp->cmd_ticks;
8291 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8293 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8294 * with byte enables disabled on the unused 32-bit word. This is legal
8295 * but causes problems on the AMD 8132 which will eventually stop
8296 * responding after a while.
8298 * AMD believes this incompatibility is unique to the 5706, and
8299 * prefers to locally disable MSI rather than globally disabling it.
8301 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8302 struct pci_dev *amd_8132 = NULL;
8304 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8305 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8308 if (amd_8132->revision >= 0x10 &&
8309 amd_8132->revision <= 0x13) {
8311 pci_dev_put(amd_8132);
8317 bnx2_set_default_link(bp);
8318 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8320 init_timer(&bp->timer);
8321 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8322 bp->timer.data = (unsigned long) bp;
8323 bp->timer.function = bnx2_timer;
8326 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8327 bp->cnic_eth_dev.max_iscsi_conn =
8328 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8329 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8331 pci_save_state(pdev);
/* Error unwind: undo AER, unmap, release regions, disable device. */
8336 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8337 pci_disable_pcie_error_reporting(pdev);
8338 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8341 pci_iounmap(pdev, bp->regview);
8345 pci_release_regions(pdev);
8348 pci_disable_device(pdev);
8349 pci_set_drvdata(pdev, NULL);
8355 static char * __devinit
8356 bnx2_bus_string(struct bnx2 *bp, char *str)
8360 if (bp->flags & BNX2_FLAG_PCIE) {
8361 s += sprintf(s, "PCI Express");
8363 s += sprintf(s, "PCI");
8364 if (bp->flags & BNX2_FLAG_PCIX)
8365 s += sprintf(s, "-X");
8366 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8367 s += sprintf(s, " 32-bit");
8369 s += sprintf(s, " 64-bit");
8370 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8376 bnx2_del_napi(struct bnx2 *bp)
8380 for (i = 0; i < bp->irq_nvecs; i++)
8381 netif_napi_del(&bp->bnx2_napi[i].napi);
8385 bnx2_init_napi(struct bnx2 *bp)
8389 for (i = 0; i < bp->irq_nvecs; i++) {
8390 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8391 int (*poll)(struct napi_struct *, int);
8396 poll = bnx2_poll_msix;
8398 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8403 static const struct net_device_ops bnx2_netdev_ops = {
8404 .ndo_open = bnx2_open,
8405 .ndo_start_xmit = bnx2_start_xmit,
8406 .ndo_stop = bnx2_close,
8407 .ndo_get_stats64 = bnx2_get_stats64,
8408 .ndo_set_rx_mode = bnx2_set_rx_mode,
8409 .ndo_do_ioctl = bnx2_ioctl,
8410 .ndo_validate_addr = eth_validate_addr,
8411 .ndo_set_mac_address = bnx2_change_mac_addr,
8412 .ndo_change_mtu = bnx2_change_mtu,
8413 .ndo_fix_features = bnx2_fix_features,
8414 .ndo_set_features = bnx2_set_features,
8415 .ndo_tx_timeout = bnx2_tx_timeout,
8416 #ifdef CONFIG_NET_POLL_CONTROLLER
8417 .ndo_poll_controller = poll_bnx2,
8421 static int __devinit
8422 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8424 static int version_printed = 0;
8425 struct net_device *dev;
8430 if (version_printed++ == 0)
8431 pr_info("%s", version);
8433 /* dev zeroed in init_etherdev */
8434 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8438 rc = bnx2_init_board(pdev, dev);
8442 dev->netdev_ops = &bnx2_netdev_ops;
8443 dev->watchdog_timeo = TX_TIMEOUT;
8444 dev->ethtool_ops = &bnx2_ethtool_ops;
8446 bp = netdev_priv(dev);
8448 pci_set_drvdata(pdev, dev);
8450 memcpy(dev->dev_addr, bp->mac_addr, 6);
8451 memcpy(dev->perm_addr, bp->mac_addr, 6);
8453 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8454 NETIF_F_TSO | NETIF_F_TSO_ECN |
8455 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8457 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8458 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8460 dev->vlan_features = dev->hw_features;
8461 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8462 dev->features |= dev->hw_features;
8463 dev->priv_flags |= IFF_UNICAST_FLT;
8465 if ((rc = register_netdev(dev))) {
8466 dev_err(&pdev->dev, "Cannot register net device\n");
8470 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8471 "node addr %pM\n", board_info[ent->driver_data].name,
8472 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8473 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8474 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8475 pdev->irq, dev->dev_addr);
8480 iounmap(bp->regview);
8481 pci_release_regions(pdev);
8482 pci_disable_device(pdev);
8483 pci_set_drvdata(pdev, NULL);
8489 static void __devexit
8490 bnx2_remove_one(struct pci_dev *pdev)
8492 struct net_device *dev = pci_get_drvdata(pdev);
8493 struct bnx2 *bp = netdev_priv(dev);
8495 unregister_netdev(dev);
8497 del_timer_sync(&bp->timer);
8498 cancel_work_sync(&bp->reset_task);
8500 pci_iounmap(bp->pdev, bp->regview);
8502 kfree(bp->temp_stats_blk);
8504 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8505 pci_disable_pcie_error_reporting(pdev);
8506 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8509 bnx2_release_firmware(bp);
8513 pci_release_regions(pdev);
8514 pci_disable_device(pdev);
8515 pci_set_drvdata(pdev, NULL);
8519 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8521 struct net_device *dev = pci_get_drvdata(pdev);
8522 struct bnx2 *bp = netdev_priv(dev);
8524 /* PCI register 4 needs to be saved whether netif_running() or not.
8525 * MSI address and data need to be saved if using MSI and
8528 pci_save_state(pdev);
8529 if (!netif_running(dev))
8532 cancel_work_sync(&bp->reset_task);
8533 bnx2_netif_stop(bp, true);
8534 netif_device_detach(dev);
8535 del_timer_sync(&bp->timer);
8536 bnx2_shutdown_chip(bp);
8538 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8543 bnx2_resume(struct pci_dev *pdev)
8545 struct net_device *dev = pci_get_drvdata(pdev);
8546 struct bnx2 *bp = netdev_priv(dev);
8548 pci_restore_state(pdev);
8549 if (!netif_running(dev))
8552 bnx2_set_power_state(bp, PCI_D0);
8553 netif_device_attach(dev);
8554 bnx2_init_nic(bp, 1);
8555 bnx2_netif_start(bp, true);
8560 * bnx2_io_error_detected - called when PCI error is detected
8561 * @pdev: Pointer to PCI device
8562 * @state: The current pci connection state
8564 * This function is called after a PCI bus error affecting
8565 * this device has been detected.
8567 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8568 pci_channel_state_t state)
8570 struct net_device *dev = pci_get_drvdata(pdev);
8571 struct bnx2 *bp = netdev_priv(dev);
8574 netif_device_detach(dev);
8576 if (state == pci_channel_io_perm_failure) {
8578 return PCI_ERS_RESULT_DISCONNECT;
8581 if (netif_running(dev)) {
8582 bnx2_netif_stop(bp, true);
8583 del_timer_sync(&bp->timer);
8584 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8587 pci_disable_device(pdev);
8590 /* Request a slot slot reset. */
8591 return PCI_ERS_RESULT_NEED_RESET;
8595 * bnx2_io_slot_reset - called after the pci bus has been reset.
8596 * @pdev: Pointer to PCI device
8598 * Restart the card from scratch, as if from a cold-boot.
8600 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8602 struct net_device *dev = pci_get_drvdata(pdev);
8603 struct bnx2 *bp = netdev_priv(dev);
8604 pci_ers_result_t result;
8608 if (pci_enable_device(pdev)) {
8610 "Cannot re-enable PCI device after reset\n");
8611 result = PCI_ERS_RESULT_DISCONNECT;
8613 pci_set_master(pdev);
8614 pci_restore_state(pdev);
8615 pci_save_state(pdev);
8617 if (netif_running(dev)) {
8618 bnx2_set_power_state(bp, PCI_D0);
8619 bnx2_init_nic(bp, 1);
8621 result = PCI_ERS_RESULT_RECOVERED;
8625 if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8628 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8631 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8632 err); /* non-fatal, continue */
8639 * bnx2_io_resume - called when traffic can start flowing again.
8640 * @pdev: Pointer to PCI device
8642 * This callback is called when the error recovery driver tells us that
8643 * its OK to resume normal operation.
8645 static void bnx2_io_resume(struct pci_dev *pdev)
8647 struct net_device *dev = pci_get_drvdata(pdev);
8648 struct bnx2 *bp = netdev_priv(dev);
8651 if (netif_running(dev))
8652 bnx2_netif_start(bp, true);
8654 netif_device_attach(dev);
8658 static struct pci_error_handlers bnx2_err_handler = {
8659 .error_detected = bnx2_io_error_detected,
8660 .slot_reset = bnx2_io_slot_reset,
8661 .resume = bnx2_io_resume,
8664 static struct pci_driver bnx2_pci_driver = {
8665 .name = DRV_MODULE_NAME,
8666 .id_table = bnx2_pci_tbl,
8667 .probe = bnx2_init_one,
8668 .remove = __devexit_p(bnx2_remove_one),
8669 .suspend = bnx2_suspend,
8670 .resume = bnx2_resume,
8671 .err_handler = &bnx2_err_handler,
8674 static int __init bnx2_init(void)
8676 return pci_register_driver(&bnx2_pci_driver);
8679 static void __exit bnx2_cleanup(void)
8681 pci_unregister_driver(&bnx2_pci_driver);
/* Wire the entry/exit functions into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);