ff5d3c1f121738307a1bcd969cf1de6fe6c94824
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / broadcom / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if.h>
40 #include <linux/if_vlan.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/checksum.h>
44 #include <linux/workqueue.h>
45 #include <linux/crc32.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/firmware.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51
52 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
53 #define BCM_CNIC 1
54 #include "cnic_if.h"
55 #endif
56 #include "bnx2.h"
57 #include "bnx2_fw.h"
58
59 #define DRV_MODULE_NAME         "bnx2"
60 #define DRV_MODULE_VERSION      "2.2.1"
61 #define DRV_MODULE_RELDATE      "Dec 18, 2011"
62 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.3.fw"
63 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
64 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1b.fw"
65 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
66 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"
67
68 #define RUN_AT(x) (jiffies + (x))
69
70 /* Time in jiffies before concluding the transmitter is hung. */
71 #define TX_TIMEOUT  (5*HZ)
72
/* Banner string printed when the driver initializes. */
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Firmware image names advertised in the module metadata so userspace
 * tooling knows which files this driver may request at runtime. */
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90
/* Board type identifiers; used as the driver_data value in
 * bnx2_pci_tbl and as the index into board_info[] below, so the
 * ordering of the two must stay in sync. */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
104
/* indexed by board_t, above */
/* Human-readable adapter names, one entry per board_t value. */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
121
/* PCI IDs this driver claims.  The HP NC370x entries (specific
 * subsystem vendor/device) are listed before the generic Broadcom
 * wildcard entries so the more specific match is found first. */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b/0x163c: BCM5716 / BCM5716S device IDs (no
	 * PCI_DEVICE_ID_NX2_* macro available for these). */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
147
/* NVRAM part auto-detection table.  Each entry starts with five raw
 * hex words (strapping value plus NVM controller configuration --
 * NOTE(review): the exact register each word programs is defined by
 * struct flash_spec in bnx2.h, not visible here; confirm there),
 * followed by the access flags, page geometry, byte-address mask,
 * total size and a descriptive name. */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
236
/* The 5709 has a single fixed NVRAM configuration, so it bypasses the
 * auto-detection table above and uses this spec directly. */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
245
246 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
247
248 static void bnx2_init_napi(struct bnx2 *bp);
249 static void bnx2_del_napi(struct bnx2 *bp);
250
251 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252 {
253         u32 diff;
254
255         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
256         barrier();
257
258         /* The ring uses 256 indices for 255 entries, one of them
259          * needs to be skipped.
260          */
261         diff = txr->tx_prod - txr->tx_cons;
262         if (unlikely(diff >= TX_DESC_CNT)) {
263                 diff &= 0xffff;
264                 if (diff == TX_DESC_CNT)
265                         diff = MAX_TX_DESC_CNT;
266         }
267         return bp->tx_ring_size - diff;
268 }
269
/* Read a device register indirectly through the PCICFG register
 * window.  indirect_lock serializes the address-write / data-read
 * pair against other indirect accessors. */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
281
/* Write @val to a device register indirectly through the PCICFG
 * register window; counterpart of bnx2_reg_rd_ind(). */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
290
/* Write @val at @offset within the driver/firmware shared memory
 * region (relative to bp->shmem_base). */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
296
/* Read the word at @offset within the driver/firmware shared memory
 * region (relative to bp->shmem_base). */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
302
/* Write @val into context memory at @cid_addr + @offset.
 *
 * 5709 chips use the CTX_CTX_DATA/CTX_CTX_CTRL register pair and the
 * write must be polled for completion (up to 5 polls, 5us apart);
 * earlier chips use a simple address/data pair.  indirect_lock
 * serializes the multi-register sequence.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the hardware to clear the write-request bit. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
326
327 #ifdef BCM_CNIC
328 static int
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330 {
331         struct bnx2 *bp = netdev_priv(dev);
332         struct drv_ctl_io *io = &info->data.io;
333
334         switch (info->cmd) {
335         case DRV_CTL_IO_WR_CMD:
336                 bnx2_reg_wr_ind(bp, io->offset, io->data);
337                 break;
338         case DRV_CTL_IO_RD_CMD:
339                 io->data = bnx2_reg_rd_ind(bp, io->offset);
340                 break;
341         case DRV_CTL_CTX_WR_CMD:
342                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343                 break;
344         default:
345                 return -EINVAL;
346         }
347         return 0;
348 }
349
/* Fill in the single IRQ/status-block descriptor handed to the CNIC
 * driver.
 *
 * With MSI-X, CNIC gets the vector at index bp->irq_nvecs of irq_tbl
 * plus the correspondingly aligned MSI-X status block, and
 * cnic_present is cleared.  Without MSI-X it shares vector/status
 * block 0 with the net driver: cnic_present is set and cnic_tag is
 * seeded with the last status index already consumed.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are packed at BNX2_SBLK_MSIX_ALIGN_SIZE strides
	 * after the base MSI block; sb_id selects the right one. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
376
/* cnic_eth_dev callback: attach a CNIC driver to this device.
 *
 * Rejects a NULL ops table (-EINVAL), double registration (-EBUSY),
 * and hardware whose firmware reports zero iSCSI connections
 * (-ENODEV).  @ops is published with rcu_assign_pointer() so RCU
 * readers observe fully-initialized data; cnic_data must be set
 * before that publication, as done here.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
402
/* cnic_eth_dev callback: detach the CNIC driver.
 *
 * Clears the registration state and the cnic_ops pointer under
 * cnic_lock, then waits for any in-flight RCU readers of cnic_ops to
 * drain before returning, so the caller may safely free its ops.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
417
418 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
419 {
420         struct bnx2 *bp = netdev_priv(dev);
421         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
422
423         if (!cp->max_iscsi_conn)
424                 return NULL;
425
426         cp->drv_owner = THIS_MODULE;
427         cp->chip_id = bp->chip_id;
428         cp->pdev = bp->pdev;
429         cp->io_base = bp->regview;
430         cp->drv_ctl = bnx2_drv_ctl;
431         cp->drv_register_cnic = bnx2_register_cnic;
432         cp->drv_unregister_cnic = bnx2_unregister_cnic;
433
434         return cp;
435 }
436 EXPORT_SYMBOL(bnx2_cnic_probe);
437
/* Send a STOP control command to the attached CNIC driver, if any.
 * cnic_lock protects cnic_ops against concurrent (un)registration;
 * this is a no-op when no CNIC is registered. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
453
/* Send a START control command to the attached CNIC driver, if any.
 * In non-MSI-X mode (CNIC shares status block 0) cnic_tag is
 * re-seeded with the current last_status_idx before starting. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
474
475 #else
476
/* CNIC support not compiled in: no-op stub. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
481
/* CNIC support not compiled in: no-op stub. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
486
487 #endif
488
/* Read PHY register @reg over the EMAC MDIO interface.
 *
 * If the chip is auto-polling the PHY, polling is disabled around the
 * manual access and restored afterwards, with 40us settle delays and
 * read-backs to flush each mode write.  The transaction is started by
 * writing the COMM register and polled for completion for up to
 * 50 * 10us.
 *
 * Returns 0 with the register value stored in *@val, or -EBUSY (and
 * *@val set to 0) if the transaction did not complete in time.
 * NOTE(review): callers presumably hold bp->phy_lock around this --
 * confirm against the call sites.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Start the read: PHY address, register, and the busy bit. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data bits after the
			 * busy bit has cleared. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
545
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * manual access if active, the transaction is started via the COMM
 * register, and completion is polled for up to 50 * 10us.
 *
 * Returns 0 on success or -EBUSY on transaction timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Start the write: PHY address, register, data, busy bit. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
594
595 static void
596 bnx2_disable_int(struct bnx2 *bp)
597 {
598         int i;
599         struct bnx2_napi *bnapi;
600
601         for (i = 0; i < bp->irq_nvecs; i++) {
602                 bnapi = &bp->bnx2_napi[i];
603                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
604                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
605         }
606         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
607 }
608
/* Re-enable interrupts on all vectors.
 *
 * For each vector, the first INT_ACK_CMD write acknowledges events up
 * to last_status_idx while keeping the interrupt masked; the second
 * (without MASK_INT) unmasks it.  The final HC_COMMAND write with
 * COAL_NOW triggers an immediate host-coalescing pass -- apparently
 * so events that arrived while masked produce a fresh interrupt.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
629
/* Mask all interrupts and wait for any running handlers to finish.
 *
 * intr_sem is incremented unconditionally (even when the device is
 * down) so that bnx2_netif_start() will not re-enable interrupts
 * until a matching call balances it.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
643
644 static void
645 bnx2_napi_disable(struct bnx2 *bp)
646 {
647         int i;
648
649         for (i = 0; i < bp->irq_nvecs; i++)
650                 napi_disable(&bp->bnx2_napi[i].napi);
651 }
652
653 static void
654 bnx2_napi_enable(struct bnx2 *bp)
655 {
656         int i;
657
658         for (i = 0; i < bp->irq_nvecs; i++)
659                 napi_enable(&bp->bnx2_napi[i].napi);
660 }
661
/* Quiesce the interface: optionally stop the CNIC, disable NAPI and
 * the tx queues if the device is up, then mask and synchronize
 * interrupts.  Carrier is forced off so the stack does not declare a
 * tx timeout while the device is quiesced. */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
674
/* Counterpart of bnx2_netif_stop(): decrement intr_sem and, only when
 * it reaches zero (balancing the last bnx2_disable_int_sync()), wake
 * the tx queues, restore carrier state under phy_lock, re-enable NAPI
 * and interrupts, and optionally restart the CNIC. */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
692
693 static void
694 bnx2_free_tx_mem(struct bnx2 *bp)
695 {
696         int i;
697
698         for (i = 0; i < bp->num_tx_rings; i++) {
699                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
700                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
701
702                 if (txr->tx_desc_ring) {
703                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
704                                           txr->tx_desc_ring,
705                                           txr->tx_desc_mapping);
706                         txr->tx_desc_ring = NULL;
707                 }
708                 kfree(txr->tx_buf_ring);
709                 txr->tx_buf_ring = NULL;
710         }
711 }
712
/* Release all receive-side memory: the DMA-coherent descriptor pages
 * (normal and page rings) and the vmalloc'ed software buffer rings.
 * Safe to call on partially-allocated state -- NULL entries are
 * skipped and vfree(NULL) is a no-op. */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
744
745 static int
746 bnx2_alloc_tx_mem(struct bnx2 *bp)
747 {
748         int i;
749
750         for (i = 0; i < bp->num_tx_rings; i++) {
751                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
752                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
753
754                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
755                 if (txr->tx_buf_ring == NULL)
756                         return -ENOMEM;
757
758                 txr->tx_desc_ring =
759                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
760                                            &txr->tx_desc_mapping, GFP_KERNEL);
761                 if (txr->tx_desc_ring == NULL)
762                         return -ENOMEM;
763         }
764         return 0;
765 }
766
/* Allocate receive-side memory for every rx ring: a vzalloc'ed
 * software buffer ring, one DMA-coherent descriptor page per
 * rx_max_ring, and -- when rx_pg_ring_size is nonzero -- the page-ring
 * equivalents.  Returns 0 on success or -ENOMEM with partial
 * allocations left in place for the caller to release (see
 * bnx2_free_rx_mem()). */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		/* rx_max_pg_ring is 0 when page rings are unused, so
		 * this loop is a no-op in that case. */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
814
/* Release all device memory: tx/rx rings, 5709 context pages, and the
 * combined status+statistics block.  stats_blk lives inside the same
 * allocation as the status block (see bnx2_alloc_mem()), so it is
 * only cleared here, not freed separately. */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
840
/* Allocate all coherent DMA memory the device needs: the combined
 * status + statistics block, the 5709 host context pages, and the
 * RX/TX ring memory.  On any failure everything allocated so far is
 * freed and -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* With MSI-X, reserve one aligned status block per
		 * possible HW vector.
		 */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Each additional vector gets its own aligned
			 * sub-block within the same allocation.
			 */
			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* The statistics block follows the status block(s) in the same
	 * DMA allocation; derive both CPU and bus addresses.
	 */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB (0x2000) of host context memory, split
		 * into BCM_PAGE_SIZE pages.
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	/* bnx2_free_mem() tolerates a partially-completed allocation. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
917
/* Mirror the driver's resolved link state (speed, duplex, autoneg
 * result) into shared memory so the bootcode firmware sees it.  Skipped
 * entirely when the remote (management) firmware owns the PHY.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode the resolved speed/duplex combination. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read BMSR twice: the first read can return a
			 * latched (stale) value per MII convention.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			/* Autoneg not complete (or parallel detection in
			 * use) is reported as parallel detect.
			 */
			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
976
977 static char *
978 bnx2_xceiver_str(struct bnx2 *bp)
979 {
980         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
981                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
982                  "Copper");
983 }
984
/* Update carrier state and log the new link state, then propagate it to
 * the bootcode via bnx2_report_fw_link().  The up-message is built with
 * pr_cont() continuations, so the initial netdev_info() deliberately has
 * no trailing newline.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		/* Append flow-control details on the same log line. */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1015
/* Resolve the effective RX/TX pause configuration into bp->flow_ctrl,
 * either from the forced settings or from the autonegotiated pause
 * advertisements.  Pause is only applied in full duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Unless both speed and flow control are autonegotiated, use the
	 * requested (forced) flow-control setting.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause state
	 * directly in its 1000X_STAT1 register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate 1000BASE-X pause bits into the copper bit positions
	 * so that one resolution table below handles both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				/* Symmetric pause on both sides. */
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				/* Partner only sends pause; we only receive. */
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			/* We only send pause frames, never honor them. */
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1091
1092 static int
1093 bnx2_5709s_linkup(struct bnx2 *bp)
1094 {
1095         u32 val, speed;
1096
1097         bp->link_up = 1;
1098
1099         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1100         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1101         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1102
1103         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1104                 bp->line_speed = bp->req_line_speed;
1105                 bp->duplex = bp->req_duplex;
1106                 return 0;
1107         }
1108         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1109         switch (speed) {
1110                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1111                         bp->line_speed = SPEED_10;
1112                         break;
1113                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1114                         bp->line_speed = SPEED_100;
1115                         break;
1116                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1117                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1118                         bp->line_speed = SPEED_1000;
1119                         break;
1120                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1121                         bp->line_speed = SPEED_2500;
1122                         break;
1123         }
1124         if (val & MII_BNX2_GP_TOP_AN_FD)
1125                 bp->duplex = DUPLEX_FULL;
1126         else
1127                 bp->duplex = DUPLEX_HALF;
1128         return 0;
1129 }
1130
1131 static int
1132 bnx2_5708s_linkup(struct bnx2 *bp)
1133 {
1134         u32 val;
1135
1136         bp->link_up = 1;
1137         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1138         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1139                 case BCM5708S_1000X_STAT1_SPEED_10:
1140                         bp->line_speed = SPEED_10;
1141                         break;
1142                 case BCM5708S_1000X_STAT1_SPEED_100:
1143                         bp->line_speed = SPEED_100;
1144                         break;
1145                 case BCM5708S_1000X_STAT1_SPEED_1G:
1146                         bp->line_speed = SPEED_1000;
1147                         break;
1148                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1149                         bp->line_speed = SPEED_2500;
1150                         break;
1151         }
1152         if (val & BCM5708S_1000X_STAT1_FD)
1153                 bp->duplex = DUPLEX_FULL;
1154         else
1155                 bp->duplex = DUPLEX_HALF;
1156
1157         return 0;
1158 }
1159
1160 static int
1161 bnx2_5706s_linkup(struct bnx2 *bp)
1162 {
1163         u32 bmcr, local_adv, remote_adv, common;
1164
1165         bp->link_up = 1;
1166         bp->line_speed = SPEED_1000;
1167
1168         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1169         if (bmcr & BMCR_FULLDPLX) {
1170                 bp->duplex = DUPLEX_FULL;
1171         }
1172         else {
1173                 bp->duplex = DUPLEX_HALF;
1174         }
1175
1176         if (!(bmcr & BMCR_ANENABLE)) {
1177                 return 0;
1178         }
1179
1180         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1181         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1182
1183         common = local_adv & remote_adv;
1184         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1185
1186                 if (common & ADVERTISE_1000XFULL) {
1187                         bp->duplex = DUPLEX_FULL;
1188                 }
1189                 else {
1190                         bp->duplex = DUPLEX_HALF;
1191                 }
1192         }
1193
1194         return 0;
1195 }
1196
1197 static int
1198 bnx2_copper_linkup(struct bnx2 *bp)
1199 {
1200         u32 bmcr;
1201
1202         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1203         if (bmcr & BMCR_ANENABLE) {
1204                 u32 local_adv, remote_adv, common;
1205
1206                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1207                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1208
1209                 common = local_adv & (remote_adv >> 2);
1210                 if (common & ADVERTISE_1000FULL) {
1211                         bp->line_speed = SPEED_1000;
1212                         bp->duplex = DUPLEX_FULL;
1213                 }
1214                 else if (common & ADVERTISE_1000HALF) {
1215                         bp->line_speed = SPEED_1000;
1216                         bp->duplex = DUPLEX_HALF;
1217                 }
1218                 else {
1219                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1220                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1221
1222                         common = local_adv & remote_adv;
1223                         if (common & ADVERTISE_100FULL) {
1224                                 bp->line_speed = SPEED_100;
1225                                 bp->duplex = DUPLEX_FULL;
1226                         }
1227                         else if (common & ADVERTISE_100HALF) {
1228                                 bp->line_speed = SPEED_100;
1229                                 bp->duplex = DUPLEX_HALF;
1230                         }
1231                         else if (common & ADVERTISE_10FULL) {
1232                                 bp->line_speed = SPEED_10;
1233                                 bp->duplex = DUPLEX_FULL;
1234                         }
1235                         else if (common & ADVERTISE_10HALF) {
1236                                 bp->line_speed = SPEED_10;
1237                                 bp->duplex = DUPLEX_HALF;
1238                         }
1239                         else {
1240                                 bp->line_speed = 0;
1241                                 bp->link_up = 0;
1242                         }
1243                 }
1244         }
1245         else {
1246                 if (bmcr & BMCR_SPEED100) {
1247                         bp->line_speed = SPEED_100;
1248                 }
1249                 else {
1250                         bp->line_speed = SPEED_10;
1251                 }
1252                 if (bmcr & BMCR_FULLDPLX) {
1253                         bp->duplex = DUPLEX_FULL;
1254                 }
1255                 else {
1256                         bp->duplex = DUPLEX_HALF;
1257                 }
1258         }
1259
1260         return 0;
1261 }
1262
1263 static void
1264 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1265 {
1266         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1267
1268         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1269         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1270         val |= 0x02 << 8;
1271
1272         if (bp->flow_ctrl & FLOW_CTRL_TX)
1273                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1274
1275         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1276 }
1277
1278 static void
1279 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1280 {
1281         int i;
1282         u32 cid;
1283
1284         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1285                 if (i == 1)
1286                         cid = RX_RSS_CID;
1287                 bnx2_init_rx_context(bp, cid);
1288         }
1289 }
1290
/* Program the EMAC for the currently resolved link parameters: TX
 * lengths, port mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.
 * Finally acknowledge the link-change interrupt and refresh the RX
 * contexts (which carry the TX flow-control enable bit).
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	/* 1000 Mbps half duplex uses different TX length parameters. */
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no separate 10M MII mode. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}
1357
1358 static void
1359 bnx2_enable_bmsr1(struct bnx2 *bp)
1360 {
1361         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1362             (CHIP_NUM(bp) == CHIP_NUM_5709))
1363                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1364                                MII_BNX2_BLK_ADDR_GP_STATUS);
1365 }
1366
1367 static void
1368 bnx2_disable_bmsr1(struct bnx2 *bp)
1369 {
1370         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1371             (CHIP_NUM(bp) == CHIP_NUM_5709))
1372                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1373                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1374 }
1375
1376 static int
1377 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1378 {
1379         u32 up1;
1380         int ret = 1;
1381
1382         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1383                 return 0;
1384
1385         if (bp->autoneg & AUTONEG_SPEED)
1386                 bp->advertising |= ADVERTISED_2500baseX_Full;
1387
1388         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1389                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1390
1391         bnx2_read_phy(bp, bp->mii_up1, &up1);
1392         if (!(up1 & BCM5708S_UP1_2G5)) {
1393                 up1 |= BCM5708S_UP1_2G5;
1394                 bnx2_write_phy(bp, bp->mii_up1, up1);
1395                 ret = 0;
1396         }
1397
1398         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1399                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1400                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1401
1402         return ret;
1403 }
1404
1405 static int
1406 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1407 {
1408         u32 up1;
1409         int ret = 0;
1410
1411         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1412                 return 0;
1413
1414         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1415                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1416
1417         bnx2_read_phy(bp, bp->mii_up1, &up1);
1418         if (up1 & BCM5708S_UP1_2G5) {
1419                 up1 &= ~BCM5708S_UP1_2G5;
1420                 bnx2_write_phy(bp, bp->mii_up1, up1);
1421                 ret = 1;
1422         }
1423
1424         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1425                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1426                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1427
1428         return ret;
1429 }
1430
/* Force the SerDes PHY to 2.5G operation (no autoneg).  The register
 * sequence is chip-specific: the 5709 uses the SERDES_DIG MISC1
 * register, the 5708 a vendor BMCR bit.  Other chips are unaffected.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is only used after a successful read (err == 0). */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Select the SERDES_DIG block and force 2.5G in MISC1. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Forcing a speed implies autoneg off; honor requested duplex. */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1474
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific forced-2.5G
 * setting and, if autonegotiating, re-enable and restart autoneg.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is only used after a successful read (err == 0). */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Clear the force bit in the SERDES_DIG MISC1 register. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Back to autoneg: restart negotiation at 1G. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1513
1514 static void
1515 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1516 {
1517         u32 val;
1518
1519         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1520         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1521         if (start)
1522                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1523         else
1524                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1525 }
1526
/* Poll the PHY, resolve the current link state into bp->link_up, speed,
 * duplex and flow control, report any state change, and reprogram the
 * MAC accordingly.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* The remote (management) firmware owns the PHY. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	/* Remember the previous state to detect transitions. */
	link_up = bp->link_up;

	/* Read BMSR twice: the first read can return a latched (stale)
	 * value per MII convention.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes: override the BMSR link bit with the MAC link
	 * status plus the PHY sync indication.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Shadow register read; done twice like BMSR above. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting so autoneg can
		 * run, and re-enable autoneg if parallel detect was used.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log and notify firmware on an actual state change. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1610
1611 static int
1612 bnx2_reset_phy(struct bnx2 *bp)
1613 {
1614         int i;
1615         u32 reg;
1616
1617         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1618
1619 #define PHY_RESET_MAX_WAIT 100
1620         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1621                 udelay(10);
1622
1623                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1624                 if (!(reg & BMCR_RESET)) {
1625                         udelay(20);
1626                         break;
1627                 }
1628         }
1629         if (i == PHY_RESET_MAX_WAIT) {
1630                 return -EBUSY;
1631         }
1632         return 0;
1633 }
1634
1635 static u32
1636 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1637 {
1638         u32 adv = 0;
1639
1640         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1641                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1642
1643                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644                         adv = ADVERTISE_1000XPAUSE;
1645                 }
1646                 else {
1647                         adv = ADVERTISE_PAUSE_CAP;
1648                 }
1649         }
1650         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1651                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652                         adv = ADVERTISE_1000XPSE_ASYM;
1653                 }
1654                 else {
1655                         adv = ADVERTISE_PAUSE_ASYM;
1656                 }
1657         }
1658         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1659                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1660                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1661                 }
1662                 else {
1663                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1664                 }
1665         }
1666         return adv;
1667 }
1668
1669 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1670
/* Ask the management firmware (which owns the PHY) to apply the current
 * link settings.  The requested speeds, pause bits and port type are
 * encoded into a single word written to shared memory, then a SET_LINK
 * command is issued.  phy_lock is dropped around the firmware sync,
 * matching the __releases/__acquires annotations.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed/duplex. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Map resolved pause advertisement onto the netlink flags. */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep/poll; drop the PHY lock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1729
/* Configure a directly-managed SerDes (fibre) PHY.
 *
 * Delegates to bnx2_setup_remote_phy() when the PHY is firmware-managed.
 * Otherwise programs the PHY for either a forced speed/duplex or
 * autonegotiation, forcing a visible link-down toward the partner when
 * the settings actually change so the partner renegotiates.
 *
 * @bp:   driver private state
 * @port: PORT_TP or PORT_FIBRE (only forwarded to the remote-PHY path)
 *
 * Called with bp->phy_lock held; drops and re-acquires it around the
 * msleep() in the autoneg restart path.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability on/off requires a link
		 * bounce even if BMCR does not change.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Forcing 2.5G is chip-specific. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear BMCR_SPEED100 (0x2000) so the forced
				 * speed resolves to 1000 Mbps.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly restart autoneg with nothing
				 * advertised so the partner drops the link,
				 * then apply the forced settings.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed at the PHY; refresh MAC state only. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1846
/* All fibre speeds to advertise via ethtool; includes 2.5G only when the
 * PHY is 2.5G-capable.  Evaluates "bp" at the expansion site.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds to advertise via ethtool (no 1000HALF). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register mask for all 10/100 modes plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register mask for both gigabit modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1861
/* Load the default link settings for a firmware-managed PHY from shared
 * memory (per-port copper or serdes word) into bp->autoneg,
 * bp->advertising, bp->req_line_speed and bp->req_duplex.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg default: translate each netlink speed bit into
		 * the matching ethtool advertisement bit.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced default.  The checks run lowest speed first so a
		 * higher speed bit, if also set, wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1908
/* Establish the default link configuration.
 *
 * Remote-PHY devices take their defaults from shared memory; otherwise
 * default to full autoneg, except that a SerDes port whose hardware
 * config specifies a 1G default link is forced to 1000/full.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* NVRAM hardware config may force a fixed 1G default. */
		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1934
1935 static void
1936 bnx2_send_heart_beat(struct bnx2 *bp)
1937 {
1938         u32 msg;
1939         u32 addr;
1940
1941         spin_lock(&bp->indirect_lock);
1942         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1943         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1944         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1945         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1946         spin_unlock(&bp->indirect_lock);
1947 }
1948
/* Handle a link event reported by the firmware-managed PHY.
 *
 * Reads BNX2_LINK_STATUS from shared memory, answers a heartbeat request
 * if one is flagged, then decodes link state, speed, duplex, flow control
 * and media type into bp->*.  Reports any link change and reprograms the
 * MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case sets half duplex and then falls through
		 * to the matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control not fully autonegotiated: honor the
			 * requested setting (full duplex only).
			 */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware may have switched media; reload defaults if
		 * the port type changed.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2025
2026 static int
2027 bnx2_set_remote_link(struct bnx2 *bp)
2028 {
2029         u32 evt_code;
2030
2031         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2032         switch (evt_code) {
2033                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2034                         bnx2_remote_phy_event(bp);
2035                         break;
2036                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2037                 default:
2038                         bnx2_send_heart_beat(bp);
2039                         break;
2040         }
2041         return 0;
2042 }
2043
/* Configure a directly-managed copper PHY.
 *
 * With autoneg enabled, rewrites the advertisement registers and restarts
 * autonegotiation only when the desired advertisement differs from what
 * the PHY currently holds.  With a forced speed/duplex, bounces the link
 * when BMCR must change so the partner sees the transition.
 *
 * Called with bp->phy_lock held; drops and re-acquires it around the
 * msleep() in the forced-speed link-down path.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv = 0;
		u32 new_adv1000 = 0;

		/* Keep only the bits we manage when comparing current vs.
		 * desired advertisement.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
		new_adv |= ADVERTISE_CSMA;
		new_adv |= bnx2_phy_get_pause_adv(bp);

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);

		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			/* Advertisement changed (or autoneg was off):
			 * program it and restart autonegotiation.
			 */
			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2133
2134 static int
2135 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2136 __releases(&bp->phy_lock)
2137 __acquires(&bp->phy_lock)
2138 {
2139         if (bp->loopback == MAC_LOOPBACK)
2140                 return 0;
2141
2142         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2143                 return bnx2_setup_serdes_phy(bp, port);
2144         }
2145         else {
2146                 return bnx2_setup_copper_phy(bp);
2147         }
2148 }
2149
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709S exposes the IEEE registers at an offset of 0x10 within its
 * register blocks, so the bp->mii_* cache is redirected first.  Then the
 * SerDes digital, over-1G, BAM next-page and CL73 blocks are programmed.
 *
 * @bp:        driver private state
 * @reset_phy: non-zero to reset the PHY before configuring it
 *
 * Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* IEEE registers live at base + 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Fibre mode, no media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2199
/* Initialize the 5708 SerDes PHY.
 *
 * Programs fibre mode with auto-detect, PLL early-lock detect, optional
 * 2.5G advertisement, a TX-amplitude workaround for early chip revs, and
 * a backplane TX control value taken from NVRAM when present.
 *
 * @bp:        driver private state
 * @reset_phy: non-zero to reset the PHY first
 *
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* If NVRAM supplies a TXCTL3 value and the board is a backplane
	 * design, apply it to the TX_ACTL3 register.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2257
/* Initialize the 5706 SerDes PHY.
 *
 * Clears the parallel-detect flag, applies a GP hardware-control value on
 * 5706 chips, and programs the PHY for jumbo or standard frames depending
 * on the current MTU.  Registers 0x18/0x1c are vendor shadow/expansion
 * registers; the magic values come from Broadcom — NOTE(review): exact
 * bit meanings are not documented here, confirm against the PHY datasheet
 * before changing them.
 *
 * @bp:        driver private state
 * @reset_phy: non-zero to reset the PHY first
 *
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2295
/* Initialize a copper PHY.
 *
 * Applies the CRC-fix and early-DAC workarounds when the corresponding
 * phy_flags are set, selects jumbo or standard packet length from the
 * MTU, and enables ethernet@wirespeed.  Registers 0x10/0x15/0x17/0x18
 * are vendor shadow/expansion registers with Broadcom-supplied values —
 * NOTE(review): bit meanings not documented here; confirm against the
 * PHY datasheet before changing.
 *
 * @bp:        driver private state
 * @reset_phy: non-zero to reset the PHY first
 *
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Workaround sequence for boards flagged with the CRC fix. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via the DSP expansion register when flagged. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2347
2348
/* Top-level PHY initialization.
 *
 * Resets the bp->mii_* register cache to the standard MII layout, enables
 * link attentions, reads the PHY ID, runs the chip-specific init routine
 * (skipped entirely for firmware-managed PHYs), then performs link setup.
 *
 * @bp:        driver private state
 * @reset_phy: non-zero to reset the PHY during chip-specific init
 *
 * Called with bp->phy_lock held; bnx2_setup_phy() may drop and
 * re-acquire it.  Returns 0 on success or the chip-init error code.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default MII register map; chip-specific init may override. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Firmware owns a remote PHY; no local register init needed. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2394
2395 static int
2396 bnx2_set_mac_loopback(struct bnx2 *bp)
2397 {
2398         u32 mac_mode;
2399
2400         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2401         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2402         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2403         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2404         bp->link_up = 1;
2405         return 0;
2406 }
2407
2408 static int bnx2_test_link(struct bnx2 *);
2409
/* Put the PHY into loopback at 1000/full and configure the MAC to match.
 *
 * Writes BMCR loopback under phy_lock, polls bnx2_test_link() for up to
 * one second (10 x 100 ms) for the loopback "link" to settle, then
 * clears MAC loopback/forced-link bits and selects GMII mode.
 *
 * Sleeps; must be called in process context without phy_lock held.
 * Returns 0 on success or the PHY write error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Wait up to 1 second for the loopback link to come up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2439
/* Dump management-CPU (MCP) and shared-memory state to the kernel log
 * for firmware-hang diagnosis.  Read-only; called from error paths such
 * as a firmware sync timeout.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* The MCP state registers moved on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is sampled twice on purpose — presumably to
	 * show whether the MCP is still advancing; confirm before changing.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2481
/* Post a message to the bootcode/firmware through the shared-memory
 * driver mailbox and optionally wait for the firmware to acknowledge it.
 *
 * @msg_data: message code/data; a fresh sequence number is OR'ed in.
 * @ack:      if zero, fire-and-forget (returns 0 right after posting).
 * @silent:   if non-zero, suppress the timeout error log and MCP dump.
 *
 * Returns 0 on success, -EBUSY on ack timeout, -EIO if the firmware
 * acked with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a new sequence number so the ack can be
	 * matched to this particular request. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages succeed without checking the ack status. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2527
/* Initialize 5709 context memory: start the hardware context-memory init,
 * then program each host context page into the chip's host page table and
 * wait for each write request to be accepted.
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete an
 * operation in time, -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): the meaning of bit 12 is not visible here —
	 * presumably a chip-specific command flag; confirm against the
	 * 5709 programming manual before changing. */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for MEM_INIT to self-clear (up to 10 * 2us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Publish the page's 64-bit DMA address (split into lo/hi
		 * halves) and then issue the page-table write request. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for WRITE_REQ to self-clear (up to 10 * 5us). */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2575
2576 static void
2577 bnx2_init_context(struct bnx2 *bp)
2578 {
2579         u32 vcid;
2580
2581         vcid = 96;
2582         while (vcid) {
2583                 u32 vcid_addr, pcid_addr, offset;
2584                 int i;
2585
2586                 vcid--;
2587
2588                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2589                         u32 new_vcid;
2590
2591                         vcid_addr = GET_PCID_ADDR(vcid);
2592                         if (vcid & 0x8) {
2593                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2594                         }
2595                         else {
2596                                 new_vcid = vcid;
2597                         }
2598                         pcid_addr = GET_PCID_ADDR(new_vcid);
2599                 }
2600                 else {
2601                         vcid_addr = GET_CID_ADDR(vcid);
2602                         pcid_addr = vcid_addr;
2603                 }
2604
2605                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2606                         vcid_addr += (i << PHY_CTX_SHIFT);
2607                         pcid_addr += (i << PHY_CTX_SHIFT);
2608
2609                         REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2610                         REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2611
2612                         /* Zero out the context. */
2613                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2614                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2615                 }
2616         }
2617 }
2618
/* Work around bad on-chip RX buffer memory: allocate every free mbuf from
 * the chip's RX buffer pool, record the ones whose address does not have
 * the bad-block bit set, then free only those back — permanently removing
 * the bad blocks from circulation.
 *
 * Returns 0 on success or -ENOMEM if the temporary array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* NOTE(review): 512 is presumably the maximum mbuf pool size —
	 * confirm against the chip documentation. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL)
		return -ENOMEM;

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* NOTE(review): the free register apparently expects the
		 * address encoded in both halves plus a low valid bit —
		 * confirm against the register layout. */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2667
2668 static void
2669 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2670 {
2671         u32 val;
2672
2673         val = (mac_addr[0] << 8) | mac_addr[1];
2674
2675         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2676
2677         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2678                 (mac_addr[4] << 8) | mac_addr[5];
2679
2680         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2681 }
2682
/* Allocate one page for RX page-ring slot @index, DMA-map it, and publish
 * its address in the corresponding rx_bd descriptor.
 *
 * Returns 0 on success, -ENOMEM if the page allocation fails, or -EIO if
 * the DMA mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	/* Descriptors carry the DMA address as hi/lo 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2707
2708 static void
2709 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2710 {
2711         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2712         struct page *page = rx_pg->page;
2713
2714         if (!page)
2715                 return;
2716
2717         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2718                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2719
2720         __free_page(page);
2721         rx_pg->page = NULL;
2722 }
2723
/* Allocate a data buffer for RX ring slot @index, DMA-map its receive
 * area, and publish the mapping in the rx_bd descriptor.  Advances
 * rxr->rx_prod_bseq by the buffer's usable size on success.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails, or -EIO if the
 * DMA mapping fails (the buffer is freed in that case).
 */
static inline int
bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	u8 *data;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return -ENOMEM;

	/* The mapping starts at the l2_fhdr within the buffer, not at
	 * data[0]; the leading bytes are skb/build_skb overhead. */
	mapping = dma_map_single(&bp->pdev->dev,
				 get_l2_fhdr(data),
				 bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		kfree(data);
		return -EIO;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2755
2756 static int
2757 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2758 {
2759         struct status_block *sblk = bnapi->status_blk.msi;
2760         u32 new_link_state, old_link_state;
2761         int is_set = 1;
2762
2763         new_link_state = sblk->status_attn_bits & event;
2764         old_link_state = sblk->status_attn_bits_ack & event;
2765         if (new_link_state != old_link_state) {
2766                 if (new_link_state)
2767                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2768                 else
2769                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2770         } else
2771                 is_set = 0;
2772
2773         return is_set;
2774 }
2775
/* Handle PHY-related attention events from NAPI context: link-state
 * changes and remote-PHY timer aborts.  phy_lock is held across the link
 * updates.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2789
/* Read the hardware TX consumer index from the status block.  The last
 * entry of each ring page is skipped (when the index lands on
 * MAX_TX_DESC_CNT it is bumped past it).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2803
/* Reclaim completed TX descriptors for this NAPI instance's TX ring:
 * unmap and free each completed skb, update byte-queue-limit accounting,
 * and wake the TX queue if it was stopped and enough room opened up.
 *
 * @budget: maximum number of packets to reclaim in this call.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* Each bnx2_napi maps 1:1 to a netdev TX queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			/* Account for the skipped next-page entry when the
			 * packet's BDs wrap past the end of a ring page. */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit distance: if the packet's last BD is
			 * still ahead of hw_cons, it isn't fully completed
			 * yet — stop reclaiming. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment's BD in turn. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Caught up with the last snapshot; re-read hw_cons in case
		 * more completions arrived meanwhile. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the queue lock to avoid racing with the
		 * xmit path stopping the queue. */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2897
/* Recycle @count page-ring entries back to the producer side without
 * freeing the pages, preserving their DMA mappings.  Used when a packet
 * that spans the page ring must be dropped.
 *
 * If @skb is non-NULL, the caller failed to replace the last page in the
 * skb's frags array: that page is detached from the skb and returned to
 * the consumer slot, and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page, its DMA address, and the descriptor address
		 * from the consumer slot to the producer slot. */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2953
/* Recycle an RX buffer from consumer slot @cons to producer slot @prod
 * (drop/copy path): give the synced header area back to the device, move
 * the buffer pointer and DMA mapping, and copy the descriptor address.
 * Advances rx_prod_bseq.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Only the header area was synced for the CPU in bnx2_rx_int();
	 * sync the same span back for the device. */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	/* Same slot: the buffer pointer is restored, nothing else moves. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2983
/* Build an sk_buff from a received buffer, replenishing the RX ring slot
 * first.  @ring_idx packs the producer index in the low 16 bits and the
 * consumer index in the high 16 bits.
 *
 * @len:     packet length excluding the 4-byte FCS.
 * @hdr_len: non-zero for split packets — the first hdr_len bytes are in
 *           @data and the remainder is in page-ring pages.
 *
 * Returns the skb, or NULL if resources could not be allocated (in which
 * case the buffer and any page-ring entries are recycled).
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Refill the producer slot before consuming this buffer. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* Split packet: also recycle the page-ring entries holding
		 * the remainder (raw_len includes the 4-byte FCS). */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Point skb->data at the packet payload past the l2_fhdr. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		/* Whole packet fits in the data buffer. */
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size counts the remainder including the 4-byte FCS;
		 * the FCS is trimmed from the final fragment below. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* The last page would hold only (part of) the FCS:
			 * skip it and trim the already-added tail bytes. */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3088
/* Read the hardware RX consumer index from the status block.  As on the
 * TX side, the last entry of each ring page is skipped (when the index
 * lands on MAX_RX_DESC_CNT it is bumped past it).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3102
/* NAPI RX poll worker: process up to @budget received packets from this
 * instance's RX ring, hand them to the stack via GRO, and publish the
 * updated producer indices to the hardware.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Only sync the header area for now; small packets are
		 * copied out of it, larger ones are unmapped fully later. */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		next_rx_buf =
			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* A non-zero hdr_len marks a packet split across the page
		 * ring: header bytes in @data, remainder in ring pages. */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			/* Errored frame: recycle the buffer (and any page
			 * ring entries) instead of delivering it. */
			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte FCS from the reported length. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Small packet: copy into a fresh skb and recycle
			 * the original ring buffer. */
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they carry a VLAN ethertype
		 * (0x8100), which accounts for the extra bytes. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware-validated TCP/UDP checksums. */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices and byte sequence to hardware. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3263
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.  Writes INT_ACK_CMD with
 * MASK_INT set, then hands processing off to NAPI.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further HC interrupts for this vector. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3286
/* One-shot MSI ISR - unlike bnx2_msi(), no explicit ack/mask register
 * write is issued here before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3303
/* INTx ISR - the line may be shared, so first determine whether this
 * device actually has new work before claiming the interrupt.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* no new work - not our interrupt */

	/* Ack the interrupt and mask further ones until NAPI completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index being serviced only when NAPI was not
	 * already scheduled, then hand off to the poll routine.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3342
3343 static inline int
3344 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3345 {
3346         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3347         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3348
3349         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3350             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3351                 return 1;
3352         return 0;
3353 }
3354
3355 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3356                                  STATUS_ATTN_BITS_TIMER_ABORT)
3357
3358 static inline int
3359 bnx2_has_work(struct bnx2_napi *bnapi)
3360 {
3361         struct status_block *sblk = bnapi->status_blk.msi;
3362
3363         if (bnx2_has_fast_work(bnapi))
3364                 return 1;
3365
3366 #ifdef BCM_CNIC
3367         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3368                 return 1;
3369 #endif
3370
3371         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3372             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3373                 return 1;
3374
3375         return 0;
3376 }
3377
/* Periodic check for an MSI the chip may have failed to deliver.
 * If work is pending but the status index has not moved since the
 * previous idle check, the MSI enable bit is pulsed off/on and the
 * ISR is invoked by hand to recover.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* Stuck: no status-block advance across two checks. */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3399
#ifdef BCM_CNIC
/* Let the registered CNIC (offload) driver service its share of the
 * status block.  bp->cnic_ops is RCU-protected because the CNIC
 * module may unregister at any time.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3416
/* Service link-state / timer-abort attention events from the NAPI
 * poll loop.  An event is pending when the raised bits and the acked
 * bits disagree for any bit in STATUS_ATTN_EVENTS.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the posted write */
	}
}
3436
3437 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3438                           int work_done, int budget)
3439 {
3440         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3441         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3442
3443         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3444                 bnx2_tx_int(bp, bnapi, 0);
3445
3446         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3447                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3448
3449         return work_done;
3450 }
3451
/* NAPI poll routine for non-zero MSI-X vectors: fast-path TX/RX work
 * only.  Link and CNIC events are handled on vector 0 by bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* All done: tell the chip how far we processed
			 * and re-enable this vector's interrupt.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3478
/* Main NAPI poll routine (INTx/MSI, or MSI-X vector 0): services link
 * attention, fast-path TX/RX work and CNIC events, then re-enables
 * interrupts once everything is drained.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: one ack write re-enables. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with the line still masked first,
			 * then unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3527
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / VLAN-tag keeping) and the
 * RPM sort registers (broadcast, multicast hash, unicast match
 * filters) from the net_device flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep the VLAN tag in the frame only when HW VLAN stripping is
	 * off and the CAN_KEEP_VLAN flag permits it.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		netdev_for_each_mc_addr(ha, dev) {
			/* Hash each address into one of 256 filter bits:
			 * CRC bits 7:5 pick the register, bits 4:0 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More unicast addresses than hardware match slots: fall back
	 * to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort register: clear, load, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3616
3617 static int
3618 check_fw_section(const struct firmware *fw,
3619                  const struct bnx2_fw_file_section *section,
3620                  u32 alignment, bool non_empty)
3621 {
3622         u32 offset = be32_to_cpu(section->offset);
3623         u32 len = be32_to_cpu(section->len);
3624
3625         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3626                 return -EINVAL;
3627         if ((non_empty && len == 0) || len > fw->size - offset ||
3628             len & (alignment - 1))
3629                 return -EINVAL;
3630         return 0;
3631 }
3632
3633 static int
3634 check_mips_fw_entry(const struct firmware *fw,
3635                     const struct bnx2_mips_fw_file_entry *entry)
3636 {
3637         if (check_fw_section(fw, &entry->text, 4, true) ||
3638             check_fw_section(fw, &entry->data, 4, false) ||
3639             check_fw_section(fw, &entry->rodata, 4, false))
3640                 return -EINVAL;
3641         return 0;
3642 }
3643
3644 static void bnx2_release_firmware(struct bnx2 *bp)
3645 {
3646         if (bp->rv2p_firmware) {
3647                 release_firmware(bp->mips_firmware);
3648                 release_firmware(bp->rv2p_firmware);
3649                 bp->rv2p_firmware = NULL;
3650         }
3651 }
3652
/* Request the MIPS and RV2P firmware images for this chip and validate
 * their section tables.  On success both images remain referenced in
 * bp->mips_firmware / bp->rv2p_firmware; on any failure everything
 * obtained so far is released again.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick file names by chip generation; 5709 A0/A1 steppings need
	 * a different RV2P image than later 5709 chips.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	/* Sanity-check every CPU entry before any of it is written to
	 * the chip.
	 */
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	/* RV2P code must be 8-byte aligned - it is downloaded as 64-bit
	 * instruction pairs (see load_rv2p_fw()).
	 */
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3712
3713 static int bnx2_request_firmware(struct bnx2 *bp)
3714 {
3715         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3716 }
3717
3718 static u32
3719 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3720 {
3721         switch (idx) {
3722         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3723                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3724                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3725                 break;
3726         }
3727         return rv2p_code;
3728 }
3729
/* Download one RV2P code image, apply its fixup table, and reset the
 * processor (it is un-stalled later).  Instructions are 64 bits wide,
 * written through the INSTR_HIGH/INSTR_LOW register pair and committed
 * one at a time via the per-processor ADDR_CMD register.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each RV2P processor has its own command/address registers. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the whole image, one 64-bit instruction per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;	/* instruction index + write cmd */
		REG_WR(bp, addr, val);
	}

	/* Re-write the instructions listed in the fixup table, patching
	 * each one through rv2p_fw_fixup() (e.g. the BD page size).
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3789
3790 static int
3791 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3792             const struct bnx2_mips_fw_file_entry *fw_entry)
3793 {
3794         u32 addr, len, file_offset;
3795         __be32 *data;
3796         u32 offset;
3797         u32 val;
3798
3799         /* Halt the CPU. */
3800         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3801         val |= cpu_reg->mode_value_halt;
3802         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3803         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3804
3805         /* Load the Text area. */
3806         addr = be32_to_cpu(fw_entry->text.addr);
3807         len = be32_to_cpu(fw_entry->text.len);
3808         file_offset = be32_to_cpu(fw_entry->text.offset);
3809         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3810
3811         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3812         if (len) {
3813                 int j;
3814
3815                 for (j = 0; j < (len / 4); j++, offset += 4)
3816                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3817         }
3818
3819         /* Load the Data area. */
3820         addr = be32_to_cpu(fw_entry->data.addr);
3821         len = be32_to_cpu(fw_entry->data.len);
3822         file_offset = be32_to_cpu(fw_entry->data.offset);
3823         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3824
3825         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3826         if (len) {
3827                 int j;
3828
3829                 for (j = 0; j < (len / 4); j++, offset += 4)
3830                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3831         }
3832
3833         /* Load the Read-Only area. */
3834         addr = be32_to_cpu(fw_entry->rodata.addr);
3835         len = be32_to_cpu(fw_entry->rodata.len);
3836         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3837         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3838
3839         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3840         if (len) {
3841                 int j;
3842
3843                 for (j = 0; j < (len / 4); j++, offset += 4)
3844                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3845         }
3846
3847         /* Clear the pre-fetch instruction. */
3848         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3849
3850         val = be32_to_cpu(fw_entry->start_addr);
3851         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3852
3853         /* Start the CPU. */
3854         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3855         val &= ~cpu_reg->mode_value_halt;
3856         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3857         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3858
3859         return 0;
3860 }
3861
3862 static int
3863 bnx2_init_cpus(struct bnx2 *bp)
3864 {
3865         const struct bnx2_mips_fw_file *mips_fw =
3866                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3867         const struct bnx2_rv2p_fw_file *rv2p_fw =
3868                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3869         int rc;
3870
3871         /* Initialize the RV2P processor. */
3872         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3873         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3874
3875         /* Initialize the RX Processor. */
3876         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3877         if (rc)
3878                 goto init_cpu_err;
3879
3880         /* Initialize the TX Processor. */
3881         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3882         if (rc)
3883                 goto init_cpu_err;
3884
3885         /* Initialize the TX Patch-up Processor. */
3886         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3887         if (rc)
3888                 goto init_cpu_err;
3889
3890         /* Initialize the Completion Processor. */
3891         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3892         if (rc)
3893                 goto init_cpu_err;
3894
3895         /* Initialize the Command Processor. */
3896         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3897
3898 init_cpu_err:
3899         return rc;
3900 }
3901
/* Move the chip between D0 (fully powered) and D3hot (suspended, with
 * optional Wake-on-LAN).  Only PCI_D0 and PCI_D3hot are handled; any
 * other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set power state to D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* NOTE(review): MPKT_RCVD/ACPI_RCVD look like
		 * write-to-clear wake-event indications - confirm against
		 * the register spec.  Magic-packet mode is turned off on
		 * the way back to D0.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* On copper, renegotiate the link down to 10/100
			 * for the WOL state, preserving and restoring the
			 * user's autoneg/advertising settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast + multicast while suspended. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode whether WOL is in effect. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 chips enter D3hot only when WOL is enabled;
		 * all other chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4039
/* Request the hardware arbiter's flash lock (SW_ARB request slot 2),
 * polling until the grant bit is set or the timeout expires.
 * Returns 0 when the lock is held, -EBUSY on timeout.
 */
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4061
/* Release the flash arbiter lock taken by bnx2_acquire_nvram_lock(),
 * polling until the grant bit clears.  Returns 0 on success, -EBUSY
 * if the arbiter never drops the grant within the timeout.
 */
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4084
4085
/* Enable NVRAM writes: set the PCI write-enable bit in MISC_CFG and,
 * for flash parts that need it (BNX2_NV_WREN), issue a write-enable
 * command and poll for completion.  Returns 0 on success, -EBUSY if
 * the WREN command never completes.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Clear DONE first, then issue the WREN command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
4114
4115 static void
4116 bnx2_disable_nvram_write(struct bnx2 *bp)
4117 {
4118         u32 val;
4119
4120         val = REG_RD(bp, BNX2_MISC_CFG);
4121         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4122 }
4123
4124
4125 static void
4126 bnx2_enable_nvram_access(struct bnx2 *bp)
4127 {
4128         u32 val;
4129
4130         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4131         /* Enable both bits, even on read. */
4132         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4133                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4134 }
4135
4136 static void
4137 bnx2_disable_nvram_access(struct bnx2 *bp)
4138 {
4139         u32 val;
4140
4141         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4142         /* Disable both bits, even after read. */
4143         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4144                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4145                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4146 }
4147
/* Erase the flash page containing @offset.
 *
 * Buffered flash parts (BNX2_NV_BUFFERED) handle erasure internally, so
 * this is a no-op for them.  For raw parts an ERASE command is issued to
 * the NVM engine and polled until DONE.  Returns 0 on success or -EBUSY
 * if the command does not complete within the timeout.
 *
 * Caller must already hold the NVRAM lock with access and write enabled.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion, polling every 5 usec. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4187
/* Read one 32-bit word of NVRAM at dword-aligned @offset into @ret_val.
 *
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits supplied by
 * the caller for multi-dword transactions.  The data is stored into
 * @ret_val in big-endian byte order (NVRAM image byte order).  Returns 0
 * on success or -EBUSY on command timeout.
 *
 * Caller must already hold the NVRAM lock with access enabled.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Linear offset -> (page number << page_bits) + in-page
		 * offset, as required by page-addressed parts. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion, polling every 5 usec. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Latch the data register and store it big-endian
			 * so the byte layout matches the flash image. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4231
4232
/* Write one 32-bit word (@val, 4 bytes in flash-image byte order) to
 * NVRAM at dword-aligned @offset.
 *
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-dword transactions.  Returns 0 on success or -EBUSY on command
 * timeout.
 *
 * Caller must already hold the NVRAM lock with access and write enabled.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Linear offset -> (page number << page_bits) + in-page
		 * offset, as required by page-addressed parts. */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Go through __be32 so the register write undoes the big-endian
	 * byte order used by the NVRAM image buffer. */
	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion, polling every 5 usec. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4276
/* Identify the attached flash/EEPROM part and record it in bp->flash_info,
 * then determine bp->flash_size.
 *
 * On 5709 the flash type is fixed (flash_5709).  On older chips the
 * strapping bits in NVM_CFG1 are matched against flash_table[]; if the
 * interface has not been reconfigured yet, the matching entry's config
 * registers are programmed under the NVRAM lock.  Returns 0 on success,
 * -ENODEV if no table entry matches, or the error from acquiring the
 * NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 always uses the one known flash interface. */
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set: firmware/boot code already reconfigured the flash
	 * interface, so match on the backup-strap bits of config1.
	 * NOTE(review): bit meanings inferred from the branch comments;
	 * confirm against the NetXtreme II register reference. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops leave j == entry_count when nothing matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared hardware config; fall back
	 * to the table entry's total size when it is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4359
4360 static int
4361 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4362                 int buf_size)
4363 {
4364         int rc = 0;
4365         u32 cmd_flags, offset32, len32, extra;
4366
4367         if (buf_size == 0)
4368                 return 0;
4369
4370         /* Request access to the flash interface. */
4371         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4372                 return rc;
4373
4374         /* Enable access to flash interface */
4375         bnx2_enable_nvram_access(bp);
4376
4377         len32 = buf_size;
4378         offset32 = offset;
4379         extra = 0;
4380
4381         cmd_flags = 0;
4382
4383         if (offset32 & 3) {
4384                 u8 buf[4];
4385                 u32 pre_len;
4386
4387                 offset32 &= ~3;
4388                 pre_len = 4 - (offset & 3);
4389
4390                 if (pre_len >= len32) {
4391                         pre_len = len32;
4392                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4393                                     BNX2_NVM_COMMAND_LAST;
4394                 }
4395                 else {
4396                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4397                 }
4398
4399                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4400
4401                 if (rc)
4402                         return rc;
4403
4404                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4405
4406                 offset32 += 4;
4407                 ret_buf += pre_len;
4408                 len32 -= pre_len;
4409         }
4410         if (len32 & 3) {
4411                 extra = 4 - (len32 & 3);
4412                 len32 = (len32 + 4) & ~3;
4413         }
4414
4415         if (len32 == 4) {
4416                 u8 buf[4];
4417
4418                 if (cmd_flags)
4419                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4420                 else
4421                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4422                                     BNX2_NVM_COMMAND_LAST;
4423
4424                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4425
4426                 memcpy(ret_buf, buf, 4 - extra);
4427         }
4428         else if (len32 > 0) {
4429                 u8 buf[4];
4430
4431                 /* Read the first word. */
4432                 if (cmd_flags)
4433                         cmd_flags = 0;
4434                 else
4435                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4436
4437                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4438
4439                 /* Advance to the next dword. */
4440                 offset32 += 4;
4441                 ret_buf += 4;
4442                 len32 -= 4;
4443
4444                 while (len32 > 4 && rc == 0) {
4445                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4446
4447                         /* Advance to the next dword. */
4448                         offset32 += 4;
4449                         ret_buf += 4;
4450                         len32 -= 4;
4451                 }
4452
4453                 if (rc)
4454                         return rc;
4455
4456                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4457                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4458
4459                 memcpy(ret_buf, buf, 4 - extra);
4460         }
4461
4462         /* Disable access to flash interface */
4463         bnx2_disable_nvram_access(bp);
4464
4465         bnx2_release_nvram_lock(bp);
4466
4467         return rc;
4468 }
4469
/* Write @buf_size bytes from @data_buf to NVRAM starting at byte @offset.
 *
 * Unaligned head/tail bytes are handled by reading the surrounding
 * dwords first and merging them into a dword-aligned bounce buffer.
 * For non-buffered flash, each affected page is read into flash_buffer,
 * erased, and rewritten (read-modify-write at page granularity); for
 * buffered flash the new data is written directly.  The NVRAM lock is
 * acquired and released once per page.  Returns 0 on success or a
 * negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: pull in the dword that precedes the write so
	 * its leading bytes can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: pull in the trailing dword likewise. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge preserved head/tail bytes and the caller's data into one
	 * dword-aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized scratch buffer for the
	 * read-erase-rewrite cycle.  264 bytes covers the largest page
	 * size in flash_table — TODO confirm against the table entries. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			/* Restore the preserved bytes before data_start. */
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4649
/* Negotiate optional capabilities with the bootcode firmware.
 *
 * Reads the firmware capability mailbox from shared memory and, for each
 * capability both sides support (VLAN keeping, remote PHY), updates the
 * driver flags and accumulates an acknowledgement signature that is
 * written back to BNX2_DRV_ACK_CAP_MB when the interface is running.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	/* Start from a clean slate; capabilities are re-derived below. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF management firmware the driver can always keep
	 * VLAN tagging enabled. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	/* A valid signature in the mailbox means the firmware publishes
	 * capability bits; otherwise there is nothing to negotiate. */
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	/* Remote PHY only applies to SerDes devices; pick the port type
	 * from the current firmware-reported link status. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	/* Acknowledge the accepted capabilities back to the firmware. */
	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4689
/* Point the PCI GRC windows at the MSI-X table and PBA so the host can
 * reach them through the register BAR (separate-window mode).
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4698
/* Perform a coordinated soft reset of the chip.
 *
 * Sequence: quiesce DMA, handshake WAIT0 with the firmware using
 * @reset_code, deposit the driver-reset signature, issue the reset
 * (chip-specific mechanism), verify endian configuration, handshake
 * WAIT1, then re-run capability negotiation and apply per-revision
 * workarounds.  Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		/* 5706/5708: stop the DMA engines and coalescing block,
		 * then read back to flush the posted write. */
		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* 5709: gate DMA in the core control register and poll
		 * device status until no transactions are pending
		 * (up to ~100 ms). */
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register; restore the
		 * register window / word-swap config afterwards. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset through the core-reset request bit in
		 * PCICFG misc config. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-negotiate firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reprogram the remote link defaults. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4823
4824 static int
4825 bnx2_init_chip(struct bnx2 *bp)
4826 {
4827         u32 val, mtu;
4828         int rc, i;
4829
4830         /* Make sure the interrupt is not active. */
4831         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4832
4833         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4834               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4835 #ifdef __BIG_ENDIAN
4836               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4837 #endif
4838               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4839               DMA_READ_CHANS << 12 |
4840               DMA_WRITE_CHANS << 16;
4841
4842         val |= (0x2 << 20) | (1 << 11);
4843
4844         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4845                 val |= (1 << 23);
4846
4847         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4848             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4849                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4850
4851         REG_WR(bp, BNX2_DMA_CONFIG, val);
4852
4853         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4854                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4855                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4856                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4857         }
4858
4859         if (bp->flags & BNX2_FLAG_PCIX) {
4860                 u16 val16;
4861
4862                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4863                                      &val16);
4864                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4865                                       val16 & ~PCI_X_CMD_ERO);
4866         }
4867
4868         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4869                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4870                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4871                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4872
4873         /* Initialize context mapping and zero out the quick contexts.  The
4874          * context block must have already been enabled. */
4875         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4876                 rc = bnx2_init_5709_context(bp);
4877                 if (rc)
4878                         return rc;
4879         } else
4880                 bnx2_init_context(bp);
4881
4882         if ((rc = bnx2_init_cpus(bp)) != 0)
4883                 return rc;
4884
4885         bnx2_init_nvram(bp);
4886
4887         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4888
4889         val = REG_RD(bp, BNX2_MQ_CONFIG);
4890         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4891         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4892         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4893                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4894                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4895                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4896         }
4897
4898         REG_WR(bp, BNX2_MQ_CONFIG, val);
4899
4900         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4901         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4902         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4903
4904         val = (BCM_PAGE_BITS - 8) << 24;
4905         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4906
4907         /* Configure page size. */
4908         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4909         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4910         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4911         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4912
4913         val = bp->mac_addr[0] +
4914               (bp->mac_addr[1] << 8) +
4915               (bp->mac_addr[2] << 16) +
4916               bp->mac_addr[3] +
4917               (bp->mac_addr[4] << 8) +
4918               (bp->mac_addr[5] << 16);
4919         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4920
4921         /* Program the MTU.  Also include 4 bytes for CRC32. */
4922         mtu = bp->dev->mtu;
4923         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4924         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4925                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4926         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4927
4928         if (mtu < 1500)
4929                 mtu = 1500;
4930
4931         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4932         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4933         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4934
4935         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4936         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4937                 bp->bnx2_napi[i].last_status_idx = 0;
4938
4939         bp->idle_chk_status_idx = 0xffff;
4940
4941         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4942
4943         /* Set up how to generate a link change interrupt. */
4944         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4945
4946         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4947                (u64) bp->status_blk_mapping & 0xffffffff);
4948         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4949
4950         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4951                (u64) bp->stats_blk_mapping & 0xffffffff);
4952         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4953                (u64) bp->stats_blk_mapping >> 32);
4954
4955         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4956                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4957
4958         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4959                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4960
4961         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4962                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4963
4964         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4965
4966         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4967
4968         REG_WR(bp, BNX2_HC_COM_TICKS,
4969                (bp->com_ticks_int << 16) | bp->com_ticks);
4970
4971         REG_WR(bp, BNX2_HC_CMD_TICKS,
4972                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4973
4974         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4975                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4976         else
4977                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4978         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4979
4980         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4981                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4982         else {
4983                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4984                       BNX2_HC_CONFIG_COLLECT_STATS;
4985         }
4986
4987         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4988                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4989                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4990
4991                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4992         }
4993
4994         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4995                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4996
4997         REG_WR(bp, BNX2_HC_CONFIG, val);
4998
4999         if (bp->rx_ticks < 25)
5000                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5001         else
5002                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5003
5004         for (i = 1; i < bp->irq_nvecs; i++) {
5005                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5006                            BNX2_HC_SB_CONFIG_1;
5007
5008                 REG_WR(bp, base,
5009                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5010                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5011                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5012
5013                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5014                         (bp->tx_quick_cons_trip_int << 16) |
5015                          bp->tx_quick_cons_trip);
5016
5017                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5018                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
5019
5020                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5021                        (bp->rx_quick_cons_trip_int << 16) |
5022                         bp->rx_quick_cons_trip);
5023
5024                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5025                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
5026         }
5027
5028         /* Clear internal stats counters. */
5029         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5030
5031         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5032
5033         /* Initialize the receive filter. */
5034         bnx2_set_rx_mode(bp->dev);
5035
5036         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5037                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5038                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5039                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5040         }
5041         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5042                           1, 0);
5043
5044         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5045         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5046
5047         udelay(20);
5048
5049         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5050
5051         return rc;
5052 }
5053
5054 static void
5055 bnx2_clear_ring_states(struct bnx2 *bp)
5056 {
5057         struct bnx2_napi *bnapi;
5058         struct bnx2_tx_ring_info *txr;
5059         struct bnx2_rx_ring_info *rxr;
5060         int i;
5061
5062         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5063                 bnapi = &bp->bnx2_napi[i];
5064                 txr = &bnapi->tx_ring;
5065                 rxr = &bnapi->rx_ring;
5066
5067                 txr->tx_cons = 0;
5068                 txr->hw_tx_cons = 0;
5069                 rxr->rx_prod_bseq = 0;
5070                 rxr->rx_prod = 0;
5071                 rxr->rx_cons = 0;
5072                 rxr->rx_pg_prod = 0;
5073                 rxr->rx_pg_cons = 0;
5074         }
5075 }
5076
5077 static void
5078 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5079 {
5080         u32 val, offset0, offset1, offset2, offset3;
5081         u32 cid_addr = GET_CID_ADDR(cid);
5082
5083         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5084                 offset0 = BNX2_L2CTX_TYPE_XI;
5085                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5086                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5087                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5088         } else {
5089                 offset0 = BNX2_L2CTX_TYPE;
5090                 offset1 = BNX2_L2CTX_CMD_TYPE;
5091                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5092                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5093         }
5094         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5095         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5096
5097         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5098         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5099
5100         val = (u64) txr->tx_desc_mapping >> 32;
5101         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5102
5103         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5104         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5105 }
5106
5107 static void
5108 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5109 {
5110         struct tx_bd *txbd;
5111         u32 cid = TX_CID;
5112         struct bnx2_napi *bnapi;
5113         struct bnx2_tx_ring_info *txr;
5114
5115         bnapi = &bp->bnx2_napi[ring_num];
5116         txr = &bnapi->tx_ring;
5117
5118         if (ring_num == 0)
5119                 cid = TX_CID;
5120         else
5121                 cid = TX_TSS_CID + ring_num - 1;
5122
5123         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5124
5125         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5126
5127         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5128         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5129
5130         txr->tx_prod = 0;
5131         txr->tx_prod_bseq = 0;
5132
5133         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5134         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5135
5136         bnx2_init_tx_context(bp, cid, txr);
5137 }
5138
5139 static void
5140 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5141                      int num_rings)
5142 {
5143         int i;
5144         struct rx_bd *rxbd;
5145
5146         for (i = 0; i < num_rings; i++) {
5147                 int j;
5148
5149                 rxbd = &rx_ring[i][0];
5150                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5151                         rxbd->rx_bd_len = buf_size;
5152                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5153                 }
5154                 if (i == (num_rings - 1))
5155                         j = 0;
5156                 else
5157                         j = i + 1;
5158                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5159                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5160         }
5161 }
5162
/* Initialize one RX ring: chain the BD pages, program the L2 context,
 * optionally set up the page (jumbo) ring, pre-fill both rings with
 * receive buffers, and publish the initial producer indices to the chip
 * mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional (RSS) rings get
	 * consecutive CIDs starting at RX_RSS_CID.
	 */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* A page-buffer size of 0 disables the page ring; it is
	 * re-enabled below when rx_pg_ring_size is configured.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* 64-bit host address of the first page-ring BD page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* 64-bit host address of the first normal-ring BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a
	 * warning.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal ring with receive buffers. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the producer indices so the chip sees the new BDs. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5248
/* Initialize every TX and RX ring and, when multiple RX rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)built. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Fill the RSS indirection table round-robin over the
		 * non-default RX rings.  Entries are 4 bits wide, packed
		 * 8 to a 32-bit word; each completed word is pushed to
		 * the chip via the RLUP_RSS_DATA/COMMAND register pair.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable hashing for the supported IPv4/IPv6 types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5295
5296 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5297 {
5298         u32 max, num_rings = 1;
5299
5300         while (ring_size > MAX_RX_DESC_CNT) {
5301                 ring_size -= MAX_RX_DESC_CNT;
5302                 num_rings++;
5303         }
5304         /* round to next power of 2 */
5305         max = max_size;
5306         while ((max & num_rings) == 0)
5307                 max >>= 1;
5308
5309         if (num_rings != max)
5310                 max <<= 1;
5311
5312         return max;
5313 }
5314
/* Compute all RX buffer and ring sizing parameters from the current MTU
 * and the requested ring size @size.  When a full receive buffer would
 * not fit in one page (jumbo MTU) and the chip supports it, reception
 * is split: a small first buffer plus a page ring for the remainder of
 * each frame.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total per-buffer footprint: data + hw alignment + the
	 * skb_shared_info that build_skb() places at the end.
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame beyond the first buffer.
		 * NOTE(review): the -40 presumably accounts for data
		 * carried in the first buffer — confirm against the RX
		 * split-header path.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In split mode the first buffer is small; frames larger
		 * than it spill into the page ring.
		 */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5354
/* Unmap and free every skb still queued on the TX rings.  Called after
 * the chip has been reset/quiesced, so the TX path is not running
 * concurrently.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring buffer never allocated (device not fully opened). */
		if (txr->tx_buf_ring == NULL)
			continue;

		/* j is advanced inside the body: once past the head BD
		 * and once per fragment BD.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Head BD maps the skb's linear data. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Fragment BDs follow the head BD; unmap each
			 * mapped page.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		/* Reset byte-queue-limit accounting for this queue. */
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5399
5400 static void
5401 bnx2_free_rx_skbs(struct bnx2 *bp)
5402 {
5403         int i;
5404
5405         for (i = 0; i < bp->num_rx_rings; i++) {
5406                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5407                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5408                 int j;
5409
5410                 if (rxr->rx_buf_ring == NULL)
5411                         return;
5412
5413                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5414                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5415                         u8 *data = rx_buf->data;
5416
5417                         if (data == NULL)
5418                                 continue;
5419
5420                         dma_unmap_single(&bp->pdev->dev,
5421                                          dma_unmap_addr(rx_buf, mapping),
5422                                          bp->rx_buf_use_size,
5423                                          PCI_DMA_FROMDEVICE);
5424
5425                         rx_buf->data = NULL;
5426
5427                         kfree(data);
5428                 }
5429                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5430                         bnx2_free_rx_page(bp, rxr, j);
5431         }
5432 }
5433
/* Release every TX and RX buffer still owned by the driver.  Only safe
 * after the chip has been reset/quiesced.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5440
5441 static int
5442 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5443 {
5444         int rc;
5445
5446         rc = bnx2_reset_chip(bp, reset_code);
5447         bnx2_free_skbs(bp);
5448         if (rc)
5449                 return rc;
5450
5451         if ((rc = bnx2_init_chip(bp)) != 0)
5452                 return rc;
5453
5454         bnx2_init_all_rings(bp);
5455         return 0;
5456 }
5457
5458 static int
5459 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5460 {
5461         int rc;
5462
5463         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5464                 return rc;
5465
5466         spin_lock_bh(&bp->phy_lock);
5467         bnx2_init_phy(bp, reset_phy);
5468         bnx2_set_link(bp);
5469         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5470                 bnx2_remote_phy_event(bp);
5471         spin_unlock_bh(&bp->phy_lock);
5472         return 0;
5473 }
5474
5475 static int
5476 bnx2_shutdown_chip(struct bnx2 *bp)
5477 {
5478         u32 reset_code;
5479
5480         if (bp->flags & BNX2_FLAG_NO_WOL)
5481                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5482         else if (bp->wol)
5483                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5484         else
5485                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5486
5487         return bnx2_reset_chip(bp, reset_code);
5488 }
5489
/* Ethtool register self-test.  For each register in reg_tbl, verify
 * that the read/write bits (rw_mask) read back as 0 after writing 0 and
 * as 1 after writing all-ones, and that the read-only bits (ro_mask)
 * are unaffected by either write.  The original value is restored in
 * all cases.  Returns 0 on success or -ENODEV on the first failure.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset:  register offset within BAR0.
	 * flags:   BNX2_FL_NOT_5709 marks registers absent on the 5709.
	 * rw_mask: bits that must be writable.
	 * ro_mask: bits that must be read-only.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip registers that do not exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must read back 0, ro bits unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write 1s: rw bits must read back 1, ro bits unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5660
5661 static int
5662 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5663 {
5664         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5665                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5666         int i;
5667
5668         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5669                 u32 offset;
5670
5671                 for (offset = 0; offset < size; offset += 4) {
5672
5673                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5674
5675                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5676                                 test_pattern[i]) {
5677                                 return -ENODEV;
5678                         }
5679                 }
5680         }
5681         return 0;
5682 }
5683
5684 static int
5685 bnx2_test_memory(struct bnx2 *bp)
5686 {
5687         int ret = 0;
5688         int i;
5689         static struct mem_entry {
5690                 u32   offset;
5691                 u32   len;
5692         } mem_tbl_5706[] = {
5693                 { 0x60000,  0x4000 },
5694                 { 0xa0000,  0x3000 },
5695                 { 0xe0000,  0x4000 },
5696                 { 0x120000, 0x4000 },
5697                 { 0x1a0000, 0x4000 },
5698                 { 0x160000, 0x4000 },
5699                 { 0xffffffff, 0    },
5700         },
5701         mem_tbl_5709[] = {
5702                 { 0x60000,  0x4000 },
5703                 { 0xa0000,  0x3000 },
5704                 { 0xe0000,  0x4000 },
5705                 { 0x120000, 0x4000 },
5706                 { 0x1a0000, 0x4000 },
5707                 { 0xffffffff, 0    },
5708         };
5709         struct mem_entry *mem_tbl;
5710
5711         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5712                 mem_tbl = mem_tbl_5709;
5713         else
5714                 mem_tbl = mem_tbl_5706;
5715
5716         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5717                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5718                         mem_tbl[i].len)) != 0) {
5719                         return ret;
5720                 }
5721         }
5722
5723         return ret;
5724 }
5725
5726 #define BNX2_MAC_LOOPBACK       0
5727 #define BNX2_PHY_LOOPBACK       1
5728
/* Self-test helper: transmit one packet with the chip in MAC or PHY
 * loopback and verify it is received back intact on ring 0.
 *
 * Returns 0 on success; -EINVAL for an unknown loopback_mode, -ENOMEM or
 * -EIO on skb alloc / DMA mapping failure, -ENODEV if the packet does not
 * come back or comes back corrupted.  PHY loopback is skipped (returns 0)
 * when the PHY is firmware-managed (REMOTE_PHY_CAP).
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: our MAC as destination, zeroed src/type area,
	 * then an incrementing byte pattern we can verify on receive. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status block update (without raising an interrupt) so the
	 * RX consumer index read below is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand-build a single TX buffer descriptor for the test frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell, then give the chip time to loop the packet
	 * back before forcing another status block update. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX descriptor must be fully consumed ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames must have arrived on RX. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	/* The hardware places an l2_fhdr before the packet data. */
	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flagged in the L2 frame header fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match; the hardware count includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5861
5862 #define BNX2_MAC_LOOPBACK_FAILED        1
5863 #define BNX2_PHY_LOOPBACK_FAILED        2
5864 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5865                                          BNX2_PHY_LOOPBACK_FAILED)
5866
5867 static int
5868 bnx2_test_loopback(struct bnx2 *bp)
5869 {
5870         int rc = 0;
5871
5872         if (!netif_running(bp->dev))
5873                 return BNX2_LOOPBACK_FAILED;
5874
5875         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5876         spin_lock_bh(&bp->phy_lock);
5877         bnx2_init_phy(bp, 1);
5878         spin_unlock_bh(&bp->phy_lock);
5879         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5880                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5881         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5882                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5883         return rc;
5884 }
5885
5886 #define NVRAM_SIZE 0x200
5887 #define CRC32_RESIDUAL 0xdebb20e3
5888
5889 static int
5890 bnx2_test_nvram(struct bnx2 *bp)
5891 {
5892         __be32 buf[NVRAM_SIZE / 4];
5893         u8 *data = (u8 *) buf;
5894         int rc = 0;
5895         u32 magic, csum;
5896
5897         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5898                 goto test_nvram_done;
5899
5900         magic = be32_to_cpu(buf[0]);
5901         if (magic != 0x669955aa) {
5902                 rc = -ENODEV;
5903                 goto test_nvram_done;
5904         }
5905
5906         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5907                 goto test_nvram_done;
5908
5909         csum = ether_crc_le(0x100, data);
5910         if (csum != CRC32_RESIDUAL) {
5911                 rc = -ENODEV;
5912                 goto test_nvram_done;
5913         }
5914
5915         csum = ether_crc_le(0x100, data + 0x100);
5916         if (csum != CRC32_RESIDUAL) {
5917                 rc = -ENODEV;
5918         }
5919
5920 test_nvram_done:
5921         return rc;
5922 }
5923
5924 static int
5925 bnx2_test_link(struct bnx2 *bp)
5926 {
5927         u32 bmsr;
5928
5929         if (!netif_running(bp->dev))
5930                 return -ENODEV;
5931
5932         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5933                 if (bp->link_up)
5934                         return 0;
5935                 return -ENODEV;
5936         }
5937         spin_lock_bh(&bp->phy_lock);
5938         bnx2_enable_bmsr1(bp);
5939         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5940         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5941         bnx2_disable_bmsr1(bp);
5942         spin_unlock_bh(&bp->phy_lock);
5943
5944         if (bmsr & BMSR_LSTATUS) {
5945                 return 0;
5946         }
5947         return -ENODEV;
5948 }
5949
5950 static int
5951 bnx2_test_intr(struct bnx2 *bp)
5952 {
5953         int i;
5954         u16 status_idx;
5955
5956         if (!netif_running(bp->dev))
5957                 return -ENODEV;
5958
5959         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5960
5961         /* This register is not touched during run-time. */
5962         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5963         REG_RD(bp, BNX2_HC_COMMAND);
5964
5965         for (i = 0; i < 10; i++) {
5966                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5967                         status_idx) {
5968
5969                         break;
5970                 }
5971
5972                 msleep_interruptible(10);
5973         }
5974         if (i < 10)
5975                 return 0;
5976
5977         return -ENODEV;
5978 }
5979
/* Determining link for parallel detection (5706 SerDes).  Returns 1 when
 * signal is present and the partner looks like a forced-speed (non-autoneg)
 * device, 0 otherwise.  Called under phy_lock from bnx2_5706_serdes_timer().
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Check signal detect via the MISC shadow MODE_CTL register. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* AN_DBG is read twice -- presumably the first read returns stale
	 * latched state (TODO: confirm against the PHY datasheet). */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* No sync or invalid RUDI means no usable link. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6011
/* Periodic link maintenance for the 5706 SerDes PHY: performs parallel
 * detection when autoneg fails, reverts to autoneg when the partner starts
 * advertising it, and recovers from loss of sync.  Called from bnx2_timer().
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still settling; skip the link check
		 * until the countdown expires. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not brought the link up.  If the
			 * partner looks like a forced-speed device, force
			 * 1G full duplex (parallel detection). */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up via parallel detection.  Reg 0x17/0x15 appears
		 * to select and read a shadow register (confirm against PHY
		 * docs); bit 5 indicates the partner now autonegs, so
		 * re-enable autoneg. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double-read AN_DBG (second read gives current state). */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Loss of sync while link is reported up: force the
			 * link down once, then let bnx2_set_link() re-evaluate
			 * on subsequent ticks. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6073
/* Periodic link maintenance for the 5708 SerDes PHY: while the link is
 * down, alternates between autoneg and forced 2.5G mode to find a link
 * partner that does not autonegotiate.  Called from bnx2_timer().
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Nothing to do when the PHY is firmware-managed. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give the last mode change more time to settle. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not bring the link up; try forcing
			 * 2.5G and re-check after the shorter timeout. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too; return to autoneg and
			 * give it two timer ticks to complete. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6106
/* Main periodic timer (re-armed every bp->current_interval jiffies while
 * the device is running): checks for missed MSIs, sends the firmware
 * heartbeat, refreshes the firmware RX drop counter, works around broken
 * statistics hardware, and drives the SerDes link state machines.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are blocked (e.g. during reset); just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Only plain MSI (not one-shot MSI) gets the missed-MSI check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Mirror the firmware-maintained RX drop count into the stats
	 * block so it is reported with the other counters. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6142
6143 static int
6144 bnx2_request_irq(struct bnx2 *bp)
6145 {
6146         unsigned long flags;
6147         struct bnx2_irq *irq;
6148         int rc = 0, i;
6149
6150         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6151                 flags = 0;
6152         else
6153                 flags = IRQF_SHARED;
6154
6155         for (i = 0; i < bp->irq_nvecs; i++) {
6156                 irq = &bp->irq_tbl[i];
6157                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6158                                  &bp->bnx2_napi[i]);
6159                 if (rc)
6160                         break;
6161                 irq->requested = 1;
6162         }
6163         return rc;
6164 }
6165
6166 static void
6167 __bnx2_free_irq(struct bnx2 *bp)
6168 {
6169         struct bnx2_irq *irq;
6170         int i;
6171
6172         for (i = 0; i < bp->irq_nvecs; i++) {
6173                 irq = &bp->irq_tbl[i];
6174                 if (irq->requested)
6175                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6176                 irq->requested = 0;
6177         }
6178 }
6179
6180 static void
6181 bnx2_free_irq(struct bnx2 *bp)
6182 {
6183
6184         __bnx2_free_irq(bp);
6185         if (bp->flags & BNX2_FLAG_USING_MSI)
6186                 pci_disable_msi(bp->pdev);
6187         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6188                 pci_disable_msix(bp->pdev);
6189
6190         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6191 }
6192
6193 static void
6194 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6195 {
6196         int i, total_vecs, rc;
6197         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6198         struct net_device *dev = bp->dev;
6199         const int len = sizeof(bp->irq_tbl[0].name);
6200
6201         bnx2_setup_msix_tbl(bp);
6202         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6203         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6204         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6205
6206         /*  Need to flush the previous three writes to ensure MSI-X
6207          *  is setup properly */
6208         REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6209
6210         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6211                 msix_ent[i].entry = i;
6212                 msix_ent[i].vector = 0;
6213         }
6214
6215         total_vecs = msix_vecs;
6216 #ifdef BCM_CNIC
6217         total_vecs++;
6218 #endif
6219         rc = -ENOSPC;
6220         while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6221                 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6222                 if (rc <= 0)
6223                         break;
6224                 if (rc > 0)
6225                         total_vecs = rc;
6226         }
6227
6228         if (rc != 0)
6229                 return;
6230
6231         msix_vecs = total_vecs;
6232 #ifdef BCM_CNIC
6233         msix_vecs--;
6234 #endif
6235         bp->irq_nvecs = msix_vecs;
6236         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6237         for (i = 0; i < total_vecs; i++) {
6238                 bp->irq_tbl[i].vector = msix_ent[i].vector;
6239                 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6240                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6241         }
6242 }
6243
/* Choose the interrupt mode (MSI-X, MSI, or legacy INTx) and size the
 * TX/RX ring counts to the number of vectors obtained.  Returns 0 or a
 * negative errno from netif_set_real_num_rx_queues().
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	int msix_vecs;

	/* Derive the desired vector count from the user-requested ring
	 * counts, falling back to the CPU count when a side is unset. */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* Default to a single INTx vector; upgraded below if possible. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI when MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* The 5709 uses the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Clamp the ring counts to the vectors actually available. */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6295
/* Called with rtnl_lock.  net_device open hook: loads firmware, chooses
 * the interrupt mode, sets up NAPI, memory and the chip, starts the
 * maintenance timer, and verifies MSI delivery (falling back to INTx if
 * the MSI test fails).  Returns 0 or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	/* Allow interrupt handling from here on. */
	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces plain INTx. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind partial initialization in reverse order. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6379
/* Deferred reset worker (scheduled from bnx2_tx_timeout()).  Reinitializes
 * the NIC under rtnl_lock; closes the device if the reset fails.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;

	rtnl_lock();
	/* The device may have been closed while this work was queued. */
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* NOTE(review): NAPI is re-enabled before dev_close() --
		 * presumably the close path expects it enabled; confirm
		 * against bnx2_netif_stop()/bnx2_close(). */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	/* Block interrupt handling until netif_start re-enables it. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6407
6408 static void
6409 bnx2_dump_state(struct bnx2 *bp)
6410 {
6411         struct net_device *dev = bp->dev;
6412         u32 val1, val2;
6413
6414         pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6415         netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6416                    atomic_read(&bp->intr_sem), val1);
6417         pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6418         pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6419         netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6420         netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6421                    REG_RD(bp, BNX2_EMAC_TX_STATUS),
6422                    REG_RD(bp, BNX2_EMAC_RX_STATUS));
6423         netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6424                    REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6425         netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6426                    REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6427         if (bp->flags & BNX2_FLAG_USING_MSIX)
6428                 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6429                            REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6430 }
6431
/* net_device tx_timeout hook: dump diagnostic state and schedule a full
 * chip reset to run from process context (bnx2_reset_task).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);
	bnx2_dump_mcp_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6443
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Maps the skb (linear head plus page fragments) for DMA, builds a chain
 * of TX buffer descriptors carrying checksum/VLAN/TSO flags, and rings
 * the TX doorbell.  On a fragment mapping failure everything mapped so
 * far is unwound and the skb is dropped (still NETDEV_TX_OK).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* The stack should have stopped the queue before the ring filled;
	 * running out of descriptors here indicates a flow-control bug. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* The VLAN tag rides in the upper 16 bits of the flags word. */
	if (vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}

	/* TSO: encode MSS and header-length information into the BD. */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO: the TCP header offset beyond the fixed
			 * IPv6 header is split across several flag fields
			 * and the MSS word. */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: encode extra IP + TCP option length
			 * (in 32-bit words) above the checksum flags. */
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear part of the skb; drop the packet on failure. */
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One descriptor per page fragment. */
	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last descriptor of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	/* Sync BD data before updating TX mailbox */
	wmb();

	netdev_tx_sent_queue(txq, skb->len);

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6621
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce in strict order: mask and flush interrupts, stop NAPI
	 * polling, then kill the periodic timer before resetting the chip
	 * and releasing IRQs, buffers and DMA memory.
	 */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Drop to D3hot to save power while the interface is down. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6641
6642 static void
6643 bnx2_save_stats(struct bnx2 *bp)
6644 {
6645         u32 *hw_stats = (u32 *) bp->stats_blk;
6646         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6647         int i;
6648
6649         /* The 1st 10 counters are 64-bit counters */
6650         for (i = 0; i < 20; i += 2) {
6651                 u32 hi;
6652                 u64 lo;
6653
6654                 hi = temp_stats[i] + hw_stats[i];
6655                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6656                 if (lo > 0xffffffff)
6657                         hi++;
6658                 temp_stats[i] = hi;
6659                 temp_stats[i + 1] = lo & 0xffffffff;
6660         }
6661
6662         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6663                 temp_stats[i] += hw_stats[i];
6664 }
6665
/* Combine the hi/lo u32 halves of a 64-bit hardware counter. */
#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* 64-bit counter: live chip value plus the snapshot saved across resets. */
#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* 32-bit counter: live chip value plus the saved snapshot. */
#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
6676
/* ndo_get_stats64 handler: fill *net_stats from the chip statistics
 * block combined with the snapshot saved before the last chip reset
 * (via the GET_*BIT_NET_STATS macros).  Returns net_stats.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Statistics DMA block may not be allocated yet. */
	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier-sense error counter is skipped on these chips because of
	 * errata (see the note above bnx2_5706_stats_len_arr).
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6749
6750 /* All ethtool functions called with rtnl_lock */
6751
6752 static int
6753 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6754 {
6755         struct bnx2 *bp = netdev_priv(dev);
6756         int support_serdes = 0, support_copper = 0;
6757
6758         cmd->supported = SUPPORTED_Autoneg;
6759         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6760                 support_serdes = 1;
6761                 support_copper = 1;
6762         } else if (bp->phy_port == PORT_FIBRE)
6763                 support_serdes = 1;
6764         else
6765                 support_copper = 1;
6766
6767         if (support_serdes) {
6768                 cmd->supported |= SUPPORTED_1000baseT_Full |
6769                         SUPPORTED_FIBRE;
6770                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6771                         cmd->supported |= SUPPORTED_2500baseX_Full;
6772
6773         }
6774         if (support_copper) {
6775                 cmd->supported |= SUPPORTED_10baseT_Half |
6776                         SUPPORTED_10baseT_Full |
6777                         SUPPORTED_100baseT_Half |
6778                         SUPPORTED_100baseT_Full |
6779                         SUPPORTED_1000baseT_Full |
6780                         SUPPORTED_TP;
6781
6782         }
6783
6784         spin_lock_bh(&bp->phy_lock);
6785         cmd->port = bp->phy_port;
6786         cmd->advertising = bp->advertising;
6787
6788         if (bp->autoneg & AUTONEG_SPEED) {
6789                 cmd->autoneg = AUTONEG_ENABLE;
6790         } else {
6791                 cmd->autoneg = AUTONEG_DISABLE;
6792         }
6793
6794         if (netif_carrier_ok(dev)) {
6795                 ethtool_cmd_speed_set(cmd, bp->line_speed);
6796                 cmd->duplex = bp->duplex;
6797         }
6798         else {
6799                 ethtool_cmd_speed_set(cmd, -1);
6800                 cmd->duplex = -1;
6801         }
6802         spin_unlock_bh(&bp->phy_lock);
6803
6804         cmd->transceiver = XCVR_INTERNAL;
6805         cmd->phy_address = bp->phy_addr;
6806
6807         return 0;
6808 }
6809
/* ethtool set_settings: validate and store the requested port, autoneg,
 * speed and duplex under phy_lock, then reprogram the PHY if the
 * interface is running.  Returns 0 or -EINVAL on invalid combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on copies so nothing is committed until validation passes. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible on remote-PHY capable NICs. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Restrict the advertisement mask to the chosen medium;
		 * an empty mask means advertise everything for it.
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = ethtool_cmd_speed(cmd);
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only supports 1G/2.5G full duplex. */
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			/* Copper cannot be forced to gigabit speeds. */
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6887
6888 static void
6889 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6890 {
6891         struct bnx2 *bp = netdev_priv(dev);
6892
6893         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6894         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6895         strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6896         strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6897 }
6898
6899 #define BNX2_REGDUMP_LEN                (32 * 1024)
6900
6901 static int
6902 bnx2_get_regs_len(struct net_device *dev)
6903 {
6904         return BNX2_REGDUMP_LEN;
6905 }
6906
/* ethtool get_regs: dump the first 32K of register space into *_p.
 * reg_boundaries lists [start, end) pairs of readable ranges; the gaps
 * between ranges are left zero-filled rather than read.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	/* Zero the whole buffer first so skipped ranges read as 0. */
	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers are only accessible while the chip is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of this readable range: jump to the next one. */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
6958
6959 static void
6960 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6961 {
6962         struct bnx2 *bp = netdev_priv(dev);
6963
6964         if (bp->flags & BNX2_FLAG_NO_WOL) {
6965                 wol->supported = 0;
6966                 wol->wolopts = 0;
6967         }
6968         else {
6969                 wol->supported = WAKE_MAGIC;
6970                 if (bp->wol)
6971                         wol->wolopts = WAKE_MAGIC;
6972                 else
6973                         wol->wolopts = 0;
6974         }
6975         memset(&wol->sopass, 0, sizeof(wol->sopass));
6976 }
6977
6978 static int
6979 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6980 {
6981         struct bnx2 *bp = netdev_priv(dev);
6982
6983         if (wol->wolopts & ~WAKE_MAGIC)
6984                 return -EINVAL;
6985
6986         if (wol->wolopts & WAKE_MAGIC) {
6987                 if (bp->flags & BNX2_FLAG_NO_WOL)
6988                         return -EINVAL;
6989
6990                 bp->wol = 1;
6991         }
6992         else {
6993                 bp->wol = 0;
6994         }
6995         return 0;
6996 }
6997
/* ethtool nway_reset: restart link autonegotiation.  Returns -EAGAIN if
 * the device is down and -EINVAL if autoneg is not enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting autoneg only makes sense when autoneg is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY devices: let the firmware-managed PHY renegotiate. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock around msleep(); it may not be held while
		 * sleeping.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handling in the timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7043
7044 static u32
7045 bnx2_get_link(struct net_device *dev)
7046 {
7047         struct bnx2 *bp = netdev_priv(dev);
7048
7049         return bp->link_up;
7050 }
7051
7052 static int
7053 bnx2_get_eeprom_len(struct net_device *dev)
7054 {
7055         struct bnx2 *bp = netdev_priv(dev);
7056
7057         if (bp->flash_info == NULL)
7058                 return 0;
7059
7060         return (int) bp->flash_size;
7061 }
7062
7063 static int
7064 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7065                 u8 *eebuf)
7066 {
7067         struct bnx2 *bp = netdev_priv(dev);
7068         int rc;
7069
7070         if (!netif_running(dev))
7071                 return -EAGAIN;
7072
7073         /* parameters already validated in ethtool_get_eeprom */
7074
7075         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7076
7077         return rc;
7078 }
7079
7080 static int
7081 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7082                 u8 *eebuf)
7083 {
7084         struct bnx2 *bp = netdev_priv(dev);
7085         int rc;
7086
7087         if (!netif_running(dev))
7088                 return -EAGAIN;
7089
7090         /* parameters already validated in ethtool_set_eeprom */
7091
7092         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7093
7094         return rc;
7095 }
7096
7097 static int
7098 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7099 {
7100         struct bnx2 *bp = netdev_priv(dev);
7101
7102         memset(coal, 0, sizeof(struct ethtool_coalesce));
7103
7104         coal->rx_coalesce_usecs = bp->rx_ticks;
7105         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7106         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7107         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7108
7109         coal->tx_coalesce_usecs = bp->tx_ticks;
7110         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7111         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7112         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7113
7114         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7115
7116         return 0;
7117 }
7118
7119 static int
7120 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7121 {
7122         struct bnx2 *bp = netdev_priv(dev);
7123
7124         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7125         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7126
7127         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7128         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7129
7130         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7131         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7132
7133         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7134         if (bp->rx_quick_cons_trip_int > 0xff)
7135                 bp->rx_quick_cons_trip_int = 0xff;
7136
7137         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7138         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7139
7140         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7141         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7142
7143         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7144         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7145
7146         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7147         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7148                 0xff;
7149
7150         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7151         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7152                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7153                         bp->stats_ticks = USEC_PER_SEC;
7154         }
7155         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7156                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7157         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7158
7159         if (netif_running(bp->dev)) {
7160                 bnx2_netif_stop(bp, true);
7161                 bnx2_init_nic(bp, 0);
7162                 bnx2_netif_start(bp, true);
7163         }
7164
7165         return 0;
7166 }
7167
7168 static void
7169 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7170 {
7171         struct bnx2 *bp = netdev_priv(dev);
7172
7173         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7174         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7175
7176         ering->rx_pending = bp->rx_ring_size;
7177         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7178
7179         ering->tx_max_pending = MAX_TX_DESC_CNT;
7180         ering->tx_pending = bp->tx_ring_size;
7181 }
7182
/* Resize the rx/tx rings.  If the interface is up this requires a full
 * chip reset: stats are snapshotted first (reset clears them), buffers
 * and DMA memory are freed, sizes are updated, and the device is
 * brought back up.  With reset_irq the IRQ/NAPI setup is torn down and
 * redone as well.  Returns 0 or a negative errno; on failure the
 * device is closed.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Re-enable NAPI so dev_close() can quiesce
			 * cleanly, then take the interface down.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7238
7239 static int
7240 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7241 {
7242         struct bnx2 *bp = netdev_priv(dev);
7243         int rc;
7244
7245         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7246                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7247                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7248
7249                 return -EINVAL;
7250         }
7251         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7252                                    false);
7253         return rc;
7254 }
7255
7256 static void
7257 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7258 {
7259         struct bnx2 *bp = netdev_priv(dev);
7260
7261         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7262         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7263         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7264 }
7265
7266 static int
7267 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7268 {
7269         struct bnx2 *bp = netdev_priv(dev);
7270
7271         bp->req_flow_ctrl = 0;
7272         if (epause->rx_pause)
7273                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7274         if (epause->tx_pause)
7275                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7276
7277         if (epause->autoneg) {
7278                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7279         }
7280         else {
7281                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7282         }
7283
7284         if (netif_running(dev)) {
7285                 spin_lock_bh(&bp->phy_lock);
7286                 bnx2_setup_phy(bp, bp->phy_port);
7287                 spin_unlock_bh(&bp->phy_lock);
7288         }
7289
7290         return 0;
7291 }
7292
/* ethtool -S statistic names; the order must match
 * bnx2_stats_offset_arr (and the *_stats_len_arr tables) entry for
 * entry.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset of a counter within the hardware stats block, in u32 units. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7348
/* u32 offsets into struct statistics_block for each ethtool stat;
 * 64-bit counters point at their _hi word.  Order matches
 * bnx2_stats_str_arr.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7398
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-stat counter width in bytes: 8 = 64-bit, 4 = 32-bit, 0 = skip
 * (reported as zero).  Indexed like bnx2_stats_str_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

/* 5708 variant: only stat_IfHCInBadOctets (index 1) is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7417
/* Number of self-test entries; matches buf[] in bnx2_self_test(). */
#define BNX2_NUM_TESTS 6

/* ethtool self-test names, in the order the results are reported. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7430
7431 static int
7432 bnx2_get_sset_count(struct net_device *dev, int sset)
7433 {
7434         switch (sset) {
7435         case ETH_SS_TEST:
7436                 return BNX2_NUM_TESTS;
7437         case ETH_SS_STATS:
7438                 return BNX2_NUM_STATS;
7439         default:
7440                 return -EOPNOTSUPP;
7441         }
7442 }
7443
/* ethtool self-test.  Online tests (nvram, interrupt, link) always run;
 * ETH_TEST_FL_OFFLINE additionally runs the disruptive register,
 * memory and loopback tests, which reset the chip.  buf[i] is nonzero
 * when test i failed, matching bnx2_tests_str_arr order.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the chip is powered up even if the device is down. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access to the chip. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or power down if closed. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7502
7503 static void
7504 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7505 {
7506         switch (stringset) {
7507         case ETH_SS_STATS:
7508                 memcpy(buf, bnx2_stats_str_arr,
7509                         sizeof(bnx2_stats_str_arr));
7510                 break;
7511         case ETH_SS_TEST:
7512                 memcpy(buf, bnx2_tests_str_arr,
7513                         sizeof(bnx2_tests_str_arr));
7514                 break;
7515         }
7516 }
7517
/* ethtool get_ethtool_stats handler.  Each counter is read from the
 * hardware statistics block and added to the saved copy in
 * temp_stats_blk (counters accumulated across chip resets).  A
 * per-counter length table selects between 4-byte and 8-byte hardware
 * counters; a length of 0 marks a counter not supported on this chip.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet - report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions use a different set of counter widths. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: high word at offset, low word at offset+1 */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7564
/* ethtool set_phys_id handler: blink the port LED so the user can
 * physically identify the NIC.  ACTIVE saves the LED mode and powers
 * the chip up; ON/OFF override the LED state each blink cycle;
 * INACTIVE restores the saved mode (and low power if the interface
 * is down).
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		bnx2_set_power_state(bp, PCI_D0);

		/* Save current LED configuration so it can be restored. */
		bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
		REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
		       BNX2_EMAC_LED_1000MB_OVERRIDE |
		       BNX2_EMAC_LED_100MB_OVERRIDE |
		       BNX2_EMAC_LED_10MB_OVERRIDE |
		       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
		       BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Clear overrides and restore the saved LED mode. */
		REG_WR(bp, BNX2_EMAC_LED, 0);
		REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);

		if (!netif_running(dev))
			bnx2_set_power_state(bp, PCI_D3hot);
		break;
	}

	return 0;
}
7602
7603 static netdev_features_t
7604 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7605 {
7606         struct bnx2 *bp = netdev_priv(dev);
7607
7608         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7609                 features |= NETIF_F_HW_VLAN_RX;
7610
7611         return features;
7612 }
7613
/* ndo_set_features handler.  Adjusts vlan_features for the firmware's
 * TSO-with-VLAN limitation, and when RX VLAN stripping is toggled on a
 * running interface, reprograms the RX mode and notifies firmware.
 * Returns 1 in that case to tell the core dev->features was already
 * updated here.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* Did the requested RX-strip setting change relative to the
	 * current KEEP_VLAN_TAG hardware state?
	 */
	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}
7638
7639 static void bnx2_get_channels(struct net_device *dev,
7640                               struct ethtool_channels *channels)
7641 {
7642         struct bnx2 *bp = netdev_priv(dev);
7643         u32 max_rx_rings = 1;
7644         u32 max_tx_rings = 1;
7645
7646         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7647                 max_rx_rings = RX_MAX_RINGS;
7648                 max_tx_rings = TX_MAX_RINGS;
7649         }
7650
7651         channels->max_rx = max_rx_rings;
7652         channels->max_tx = max_tx_rings;
7653         channels->max_other = 0;
7654         channels->max_combined = 0;
7655         channels->rx_count = bp->num_rx_rings;
7656         channels->tx_count = bp->num_tx_rings;
7657         channels->other_count = 0;
7658         channels->combined_count = 0;
7659 }
7660
7661 static int bnx2_set_channels(struct net_device *dev,
7662                               struct ethtool_channels *channels)
7663 {
7664         struct bnx2 *bp = netdev_priv(dev);
7665         u32 max_rx_rings = 1;
7666         u32 max_tx_rings = 1;
7667         int rc = 0;
7668
7669         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7670                 max_rx_rings = RX_MAX_RINGS;
7671                 max_tx_rings = TX_MAX_RINGS;
7672         }
7673         if (channels->rx_count > max_rx_rings ||
7674             channels->tx_count > max_tx_rings)
7675                 return -EINVAL;
7676
7677         bp->num_req_rx_rings = channels->rx_count;
7678         bp->num_req_tx_rings = channels->tx_count;
7679
7680         if (netif_running(dev))
7681                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7682                                            bp->tx_ring_size, true);
7683
7684         return rc;
7685 }
7686
/* ethtool operations supported by this driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
};
7714
/* Called with rtnl_lock */
/* ndo_do_ioctl handler for MII ioctls.  PHY register access is done
 * under phy_lock and only when the interface is up and the PHY is not
 * managed remotely by firmware.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		/* MDIO access requires the chip to be initialized. */
		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7765
7766 /* Called with rtnl_lock */
7767 static int
7768 bnx2_change_mac_addr(struct net_device *dev, void *p)
7769 {
7770         struct sockaddr *addr = p;
7771         struct bnx2 *bp = netdev_priv(dev);
7772
7773         if (!is_valid_ether_addr(addr->sa_data))
7774                 return -EADDRNOTAVAIL;
7775
7776         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7777         if (netif_running(dev))
7778                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7779
7780         return 0;
7781 }
7782
7783 /* Called with rtnl_lock */
7784 static int
7785 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7786 {
7787         struct bnx2 *bp = netdev_priv(dev);
7788
7789         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7790                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7791                 return -EINVAL;
7792
7793         dev->mtu = new_mtu;
7794         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7795                                      false);
7796 }
7797
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll handler: with interrupts unavailable, invoke each vector's
 * interrupt handler directly, bracketing the call with disable/enable
 * of the IRQ line to avoid racing a real interrupt.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7814
/* Determine the media type (copper vs. SerDes) on a 5709 from the
 * dual-media control register.  The bond ID identifies fixed-media
 * parts directly; otherwise the PHY strap bits are decoded, with the
 * strap-to-media mapping differing between PCI function 0 and 1.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		/* Copper part - nothing to flag. */
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Software strap override takes precedence over hardware strap. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7852
/* Detect the PCI/PCI-X bus type, speed, and width from the chip's
 * misc status and clock control registers, recording the results in
 * bp->flags and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X: derive bus speed from the detected clock. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: 66 MHz if M66EN is asserted, else 33. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7904
/* Read the VPD area from NVRAM and, if the board is a Dell OEM part
 * (manufacturer ID "1028"), append the vendor firmware version string
 * from the VPD read-only section to bp->fw_version.  Best effort:
 * any parse failure just leaves fw_version unchanged.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* First half holds the byte-swapped result, second half the raw
	 * NVRAM data.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores the VPD big-endian per 32-bit word; swap each
	 * word into the first half of the buffer.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	/* "1028" is Dell's PCI vendor ID in ASCII. */
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7972
7973 static int __devinit
7974 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7975 {
7976         struct bnx2 *bp;
7977         int rc, i, j;
7978         u32 reg;
7979         u64 dma_mask, persist_dma_mask;
7980         int err;
7981
7982         SET_NETDEV_DEV(dev, &pdev->dev);
7983         bp = netdev_priv(dev);
7984
7985         bp->flags = 0;
7986         bp->phy_flags = 0;
7987
7988         bp->temp_stats_blk =
7989                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7990
7991         if (bp->temp_stats_blk == NULL) {
7992                 rc = -ENOMEM;
7993                 goto err_out;
7994         }
7995
7996         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7997         rc = pci_enable_device(pdev);
7998         if (rc) {
7999                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8000                 goto err_out;
8001         }
8002
8003         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8004                 dev_err(&pdev->dev,
8005                         "Cannot find PCI device base address, aborting\n");
8006                 rc = -ENODEV;
8007                 goto err_out_disable;
8008         }
8009
8010         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8011         if (rc) {
8012                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8013                 goto err_out_disable;
8014         }
8015
8016         pci_set_master(pdev);
8017
8018         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8019         if (bp->pm_cap == 0) {
8020                 dev_err(&pdev->dev,
8021                         "Cannot find power management capability, aborting\n");
8022                 rc = -EIO;
8023                 goto err_out_release;
8024         }
8025
8026         bp->dev = dev;
8027         bp->pdev = pdev;
8028
8029         spin_lock_init(&bp->phy_lock);
8030         spin_lock_init(&bp->indirect_lock);
8031 #ifdef BCM_CNIC
8032         mutex_init(&bp->cnic_lock);
8033 #endif
8034         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8035
8036         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8037                                                          TX_MAX_TSS_RINGS + 1));
8038         if (!bp->regview) {
8039                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8040                 rc = -ENOMEM;
8041                 goto err_out_release;
8042         }
8043
8044         bnx2_set_power_state(bp, PCI_D0);
8045
8046         /* Configure byte swap and enable write to the reg_window registers.
8047          * Rely on CPU to do target byte swapping on big endian systems
8048          * The chip's target access swapping will not swap all accesses
8049          */
8050         REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8051                    BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8052                    BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8053
8054         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
8055
8056         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8057                 if (!pci_is_pcie(pdev)) {
8058                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8059                         rc = -EIO;
8060                         goto err_out_unmap;
8061                 }
8062                 bp->flags |= BNX2_FLAG_PCIE;
8063                 if (CHIP_REV(bp) == CHIP_REV_Ax)
8064                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8065
8066                 /* AER (Advanced Error Reporting) hooks */
8067                 err = pci_enable_pcie_error_reporting(pdev);
8068                 if (!err)
8069                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8070
8071         } else {
8072                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8073                 if (bp->pcix_cap == 0) {
8074                         dev_err(&pdev->dev,
8075                                 "Cannot find PCIX capability, aborting\n");
8076                         rc = -EIO;
8077                         goto err_out_unmap;
8078                 }
8079                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8080         }
8081
8082         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
8083                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8084                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8085         }
8086
8087         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8088                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8089                         bp->flags |= BNX2_FLAG_MSI_CAP;
8090         }
8091
8092         /* 5708 cannot support DMA addresses > 40-bit.  */
8093         if (CHIP_NUM(bp) == CHIP_NUM_5708)
8094                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8095         else
8096                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8097
8098         /* Configure DMA attributes. */
8099         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8100                 dev->features |= NETIF_F_HIGHDMA;
8101                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8102                 if (rc) {
8103                         dev_err(&pdev->dev,
8104                                 "pci_set_consistent_dma_mask failed, aborting\n");
8105                         goto err_out_unmap;
8106                 }
8107         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8108                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8109                 goto err_out_unmap;
8110         }
8111
8112         if (!(bp->flags & BNX2_FLAG_PCIE))
8113                 bnx2_get_pci_speed(bp);
8114
8115         /* 5706A0 may falsely detect SERR and PERR. */
8116         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8117                 reg = REG_RD(bp, PCI_COMMAND);
8118                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8119                 REG_WR(bp, PCI_COMMAND, reg);
8120         }
8121         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8122                 !(bp->flags & BNX2_FLAG_PCIX)) {
8123
8124                 dev_err(&pdev->dev,
8125                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8126                 goto err_out_unmap;
8127         }
8128
8129         bnx2_init_nvram(bp);
8130
8131         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8132
8133         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8134             BNX2_SHM_HDR_SIGNATURE_SIG) {
8135                 u32 off = PCI_FUNC(pdev->devfn) << 2;
8136
8137                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8138         } else
8139                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8140
8141         /* Get the permanent MAC address.  First we need to make sure the
8142          * firmware is actually running.
8143          */
8144         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8145
8146         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8147             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8148                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8149                 rc = -ENODEV;
8150                 goto err_out_unmap;
8151         }
8152
8153         bnx2_read_vpd_fw_ver(bp);
8154
8155         j = strlen(bp->fw_version);
8156         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8157         for (i = 0; i < 3 && j < 24; i++) {
8158                 u8 num, k, skip0;
8159
8160                 if (i == 0) {
8161                         bp->fw_version[j++] = 'b';
8162                         bp->fw_version[j++] = 'c';
8163                         bp->fw_version[j++] = ' ';
8164                 }
8165                 num = (u8) (reg >> (24 - (i * 8)));
8166                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8167                         if (num >= k || !skip0 || k == 1) {
8168                                 bp->fw_version[j++] = (num / k) + '0';
8169                                 skip0 = 0;
8170                         }
8171                 }
8172                 if (i != 2)
8173                         bp->fw_version[j++] = '.';
8174         }
8175         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8176         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8177                 bp->wol = 1;
8178
8179         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8180                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8181
8182                 for (i = 0; i < 30; i++) {
8183                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8184                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8185                                 break;
8186                         msleep(10);
8187                 }
8188         }
8189         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8190         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8191         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8192             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8193                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8194
8195                 if (j < 32)
8196                         bp->fw_version[j++] = ' ';
8197                 for (i = 0; i < 3 && j < 28; i++) {
8198                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8199                         reg = be32_to_cpu(reg);
8200                         memcpy(&bp->fw_version[j], &reg, 4);
8201                         j += 4;
8202                 }
8203         }
8204
8205         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8206         bp->mac_addr[0] = (u8) (reg >> 8);
8207         bp->mac_addr[1] = (u8) reg;
8208
8209         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8210         bp->mac_addr[2] = (u8) (reg >> 24);
8211         bp->mac_addr[3] = (u8) (reg >> 16);
8212         bp->mac_addr[4] = (u8) (reg >> 8);
8213         bp->mac_addr[5] = (u8) reg;
8214
8215         bp->tx_ring_size = MAX_TX_DESC_CNT;
8216         bnx2_set_rx_ring_size(bp, 255);
8217
8218         bp->tx_quick_cons_trip_int = 2;
8219         bp->tx_quick_cons_trip = 20;
8220         bp->tx_ticks_int = 18;
8221         bp->tx_ticks = 80;
8222
8223         bp->rx_quick_cons_trip_int = 2;
8224         bp->rx_quick_cons_trip = 12;
8225         bp->rx_ticks_int = 18;
8226         bp->rx_ticks = 18;
8227
8228         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8229
8230         bp->current_interval = BNX2_TIMER_INTERVAL;
8231
8232         bp->phy_addr = 1;
8233
8234         /* Disable WOL support if we are running on a SERDES chip. */
8235         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8236                 bnx2_get_5709_media(bp);
8237         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8238                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8239
8240         bp->phy_port = PORT_TP;
8241         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8242                 bp->phy_port = PORT_FIBRE;
8243                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8244                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8245                         bp->flags |= BNX2_FLAG_NO_WOL;
8246                         bp->wol = 0;
8247                 }
8248                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8249                         /* Don't do parallel detect on this board because of
8250                          * some board problems.  The link will not go down
8251                          * if we do parallel detect.
8252                          */
8253                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8254                             pdev->subsystem_device == 0x310c)
8255                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8256                 } else {
8257                         bp->phy_addr = 2;
8258                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8259                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8260                 }
8261         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8262                    CHIP_NUM(bp) == CHIP_NUM_5708)
8263                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8264         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8265                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8266                   CHIP_REV(bp) == CHIP_REV_Bx))
8267                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8268
8269         bnx2_init_fw_cap(bp);
8270
8271         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8272             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8273             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8274             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8275                 bp->flags |= BNX2_FLAG_NO_WOL;
8276                 bp->wol = 0;
8277         }
8278
8279         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8280                 bp->tx_quick_cons_trip_int =
8281                         bp->tx_quick_cons_trip;
8282                 bp->tx_ticks_int = bp->tx_ticks;
8283                 bp->rx_quick_cons_trip_int =
8284                         bp->rx_quick_cons_trip;
8285                 bp->rx_ticks_int = bp->rx_ticks;
8286                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8287                 bp->com_ticks_int = bp->com_ticks;
8288                 bp->cmd_ticks_int = bp->cmd_ticks;
8289         }
8290
8291         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8292          *
8293          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8294          * with byte enables disabled on the unused 32-bit word.  This is legal
8295          * but causes problems on the AMD 8132 which will eventually stop
8296          * responding after a while.
8297          *
8298          * AMD believes this incompatibility is unique to the 5706, and
8299          * prefers to locally disable MSI rather than globally disabling it.
8300          */
8301         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8302                 struct pci_dev *amd_8132 = NULL;
8303
8304                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8305                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8306                                                   amd_8132))) {
8307
8308                         if (amd_8132->revision >= 0x10 &&
8309                             amd_8132->revision <= 0x13) {
8310                                 disable_msi = 1;
8311                                 pci_dev_put(amd_8132);
8312                                 break;
8313                         }
8314                 }
8315         }
8316
8317         bnx2_set_default_link(bp);
8318         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8319
8320         init_timer(&bp->timer);
8321         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8322         bp->timer.data = (unsigned long) bp;
8323         bp->timer.function = bnx2_timer;
8324
8325 #ifdef BCM_CNIC
8326         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8327                 bp->cnic_eth_dev.max_iscsi_conn =
8328                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8329                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8330 #endif
8331         pci_save_state(pdev);
8332
8333         return 0;
8334
8335 err_out_unmap:
8336         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8337                 pci_disable_pcie_error_reporting(pdev);
8338                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8339         }
8340
8341         pci_iounmap(pdev, bp->regview);
8342         bp->regview = NULL;
8343
8344 err_out_release:
8345         pci_release_regions(pdev);
8346
8347 err_out_disable:
8348         pci_disable_device(pdev);
8349         pci_set_drvdata(pdev, NULL);
8350
8351 err_out:
8352         return rc;
8353 }
8354
8355 static char * __devinit
8356 bnx2_bus_string(struct bnx2 *bp, char *str)
8357 {
8358         char *s = str;
8359
8360         if (bp->flags & BNX2_FLAG_PCIE) {
8361                 s += sprintf(s, "PCI Express");
8362         } else {
8363                 s += sprintf(s, "PCI");
8364                 if (bp->flags & BNX2_FLAG_PCIX)
8365                         s += sprintf(s, "-X");
8366                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8367                         s += sprintf(s, " 32-bit");
8368                 else
8369                         s += sprintf(s, " 64-bit");
8370                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8371         }
8372         return str;
8373 }
8374
8375 static void
8376 bnx2_del_napi(struct bnx2 *bp)
8377 {
8378         int i;
8379
8380         for (i = 0; i < bp->irq_nvecs; i++)
8381                 netif_napi_del(&bp->bnx2_napi[i].napi);
8382 }
8383
8384 static void
8385 bnx2_init_napi(struct bnx2 *bp)
8386 {
8387         int i;
8388
8389         for (i = 0; i < bp->irq_nvecs; i++) {
8390                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8391                 int (*poll)(struct napi_struct *, int);
8392
8393                 if (i == 0)
8394                         poll = bnx2_poll;
8395                 else
8396                         poll = bnx2_poll_msix;
8397
8398                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8399                 bnapi->bp = bp;
8400         }
8401 }
8402
/* net_device callback table wired up in bnx2_init_one(). */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8420
8421 static int __devinit
8422 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8423 {
8424         static int version_printed = 0;
8425         struct net_device *dev;
8426         struct bnx2 *bp;
8427         int rc;
8428         char str[40];
8429
8430         if (version_printed++ == 0)
8431                 pr_info("%s", version);
8432
8433         /* dev zeroed in init_etherdev */
8434         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8435         if (!dev)
8436                 return -ENOMEM;
8437
8438         rc = bnx2_init_board(pdev, dev);
8439         if (rc < 0)
8440                 goto err_free;
8441
8442         dev->netdev_ops = &bnx2_netdev_ops;
8443         dev->watchdog_timeo = TX_TIMEOUT;
8444         dev->ethtool_ops = &bnx2_ethtool_ops;
8445
8446         bp = netdev_priv(dev);
8447
8448         pci_set_drvdata(pdev, dev);
8449
8450         memcpy(dev->dev_addr, bp->mac_addr, 6);
8451         memcpy(dev->perm_addr, bp->mac_addr, 6);
8452
8453         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8454                 NETIF_F_TSO | NETIF_F_TSO_ECN |
8455                 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8456
8457         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8458                 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8459
8460         dev->vlan_features = dev->hw_features;
8461         dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8462         dev->features |= dev->hw_features;
8463         dev->priv_flags |= IFF_UNICAST_FLT;
8464
8465         if ((rc = register_netdev(dev))) {
8466                 dev_err(&pdev->dev, "Cannot register net device\n");
8467                 goto error;
8468         }
8469
8470         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8471                     "node addr %pM\n", board_info[ent->driver_data].name,
8472                     ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8473                     ((CHIP_ID(bp) & 0x0ff0) >> 4),
8474                     bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8475                     pdev->irq, dev->dev_addr);
8476
8477         return 0;
8478
8479 error:
8480         iounmap(bp->regview);
8481         pci_release_regions(pdev);
8482         pci_disable_device(pdev);
8483         pci_set_drvdata(pdev, NULL);
8484 err_free:
8485         free_netdev(dev);
8486         return rc;
8487 }
8488
/* PCI remove entry point: tear down in (roughly) reverse probe order. */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Unregister first so no new opens/transmits can race teardown. */
	unregister_netdev(dev);

	/* Stop deferred work that could still touch the hardware. */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	kfree(bp->temp_stats_blk);

	/* Balance the AER enable done in bnx2_init_board(). */
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8517
/* Legacy PCI PM suspend hook: quiesce the NIC and enter the requested
 * low-power state.  Returns 0 (always succeeds).
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Order matters: cancel deferred resets and stop traffic before
	 * quiescing the chip and freeing the rings. */
	cancel_work_sync(&bp->reset_task);
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8541
/* Legacy PCI PM resume hook: restore config space and, if the interface
 * was up at suspend time, return to D0 and reinitialize the NIC.
 * Returns 0 (always succeeds).
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8558
8559 /**
8560  * bnx2_io_error_detected - called when PCI error is detected
8561  * @pdev: Pointer to PCI device
8562  * @state: The current pci connection state
8563  *
8564  * This function is called after a PCI bus error affecting
8565  * this device has been detected.
8566  */
8567 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8568                                                pci_channel_state_t state)
8569 {
8570         struct net_device *dev = pci_get_drvdata(pdev);
8571         struct bnx2 *bp = netdev_priv(dev);
8572
8573         rtnl_lock();
8574         netif_device_detach(dev);
8575
8576         if (state == pci_channel_io_perm_failure) {
8577                 rtnl_unlock();
8578                 return PCI_ERS_RESULT_DISCONNECT;
8579         }
8580
8581         if (netif_running(dev)) {
8582                 bnx2_netif_stop(bp, true);
8583                 del_timer_sync(&bp->timer);
8584                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8585         }
8586
8587         pci_disable_device(pdev);
8588         rtnl_unlock();
8589
8590         /* Request a slot slot reset. */
8591         return PCI_ERS_RESULT_NEED_RESET;
8592 }
8593
8594 /**
8595  * bnx2_io_slot_reset - called after the pci bus has been reset.
8596  * @pdev: Pointer to PCI device
8597  *
8598  * Restart the card from scratch, as if from a cold-boot.
8599  */
8600 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8601 {
8602         struct net_device *dev = pci_get_drvdata(pdev);
8603         struct bnx2 *bp = netdev_priv(dev);
8604         pci_ers_result_t result;
8605         int err;
8606
8607         rtnl_lock();
8608         if (pci_enable_device(pdev)) {
8609                 dev_err(&pdev->dev,
8610                         "Cannot re-enable PCI device after reset\n");
8611                 result = PCI_ERS_RESULT_DISCONNECT;
8612         } else {
8613                 pci_set_master(pdev);
8614                 pci_restore_state(pdev);
8615                 pci_save_state(pdev);
8616
8617                 if (netif_running(dev)) {
8618                         bnx2_set_power_state(bp, PCI_D0);
8619                         bnx2_init_nic(bp, 1);
8620                 }
8621                 result = PCI_ERS_RESULT_RECOVERED;
8622         }
8623         rtnl_unlock();
8624
8625         if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8626                 return result;
8627
8628         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8629         if (err) {
8630                 dev_err(&pdev->dev,
8631                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8632                          err); /* non-fatal, continue */
8633         }
8634
8635         return result;
8636 }
8637
8638 /**
8639  * bnx2_io_resume - called when traffic can start flowing again.
8640  * @pdev: Pointer to PCI device
8641  *
8642  * This callback is called when the error recovery driver tells us that
8643  * its OK to resume normal operation.
8644  */
8645 static void bnx2_io_resume(struct pci_dev *pdev)
8646 {
8647         struct net_device *dev = pci_get_drvdata(pdev);
8648         struct bnx2 *bp = netdev_priv(dev);
8649
8650         rtnl_lock();
8651         if (netif_running(dev))
8652                 bnx2_netif_start(bp, true);
8653
8654         netif_device_attach(dev);
8655         rtnl_unlock();
8656 }
8657
/* PCI error recovery callbacks (AER): detect -> slot reset -> resume. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8663
/* PCI driver descriptor registered in bnx2_init(). */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8673
/* Module load: register the PCI driver; probe runs per matching device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8678
/* Module unload: unregister the driver; remove runs per bound device. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8683
/* Wire the init/exit functions into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8686
8687
8688